Merge tag 'pull-tomoyo' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
[linux-2.6-block.git] / mm / percpu.c
CommitLineData
55716d26 1// SPDX-License-Identifier: GPL-2.0-only
fbf59bc9 2/*
88999a89 3 * mm/percpu.c - percpu memory allocator
fbf59bc9
TH
4 *
5 * Copyright (C) 2009 SUSE Linux Products GmbH
6 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
7 *
5e81ee3e 8 * Copyright (C) 2017 Facebook Inc.
bfacd38f 9 * Copyright (C) 2017 Dennis Zhou <dennis@kernel.org>
5e81ee3e 10 *
9c015162
DZF
11 * The percpu allocator handles both static and dynamic areas. Percpu
12 * areas are allocated in chunks which are divided into units. There is
13 * a 1-to-1 mapping for units to possible cpus. These units are grouped
14 * based on NUMA properties of the machine.
fbf59bc9
TH
15 *
16 * c0 c1 c2
17 * ------------------- ------------------- ------------
18 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
19 * ------------------- ...... ------------------- .... ------------
20 *
9c015162
DZF
21 * Allocation is done by offsets into a unit's address space. Ie., an
22 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
23 * c1:u1, c1:u2, etc. On NUMA machines, the mapping may be non-linear
24 * and even sparse. Access is handled by configuring percpu base
25 * registers according to the cpu to unit mappings and offsetting the
26 * base address using pcpu_unit_size.
27 *
28 * There is special consideration for the first chunk which must handle
29 * the static percpu variables in the kernel image as allocation services
5e81ee3e 30 * are not online yet. In short, the first chunk is structured like so:
9c015162
DZF
31 *
32 * <Static | [Reserved] | Dynamic>
33 *
34 * The static data is copied from the original section managed by the
35 * linker. The reserved section, if non-zero, primarily manages static
36 * percpu variables from kernel modules. Finally, the dynamic section
37 * takes care of normal allocations.
fbf59bc9 38 *
5e81ee3e 39 * The allocator organizes chunks into lists according to free size and
3c7be18a
RG
40 * memcg-awareness. To make a percpu allocation memcg-aware the __GFP_ACCOUNT
41 * flag should be passed. All memcg-aware allocations are sharing one set
42 * of chunks and all unaccounted allocations and allocations performed
43 * by processes belonging to the root memory cgroup are using the second set.
44 *
45 * The allocator tries to allocate from the fullest chunk first. Each chunk
46 * is managed by a bitmap with metadata blocks. The allocation map is updated
47 * on every allocation and free to reflect the current state while the boundary
5e81ee3e
DZF
48 * map is only updated on allocation. Each metadata block contains
49 * information to help mitigate the need to iterate over large portions
50 * of the bitmap. The reverse mapping from page to chunk is stored in
51 * the page's index. Lastly, units are lazily backed and grow in unison.
52 *
53 * There is a unique conversion that goes on here between bytes and bits.
54 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk
55 * tracks the number of pages it is responsible for in nr_pages. Helper
56 * functions are used to convert from between the bytes, bits, and blocks.
57 * All hints are managed in bits unless explicitly stated.
9c015162 58 *
4091fb95 59 * To use this allocator, arch code should do the following:
fbf59bc9 60 *
fbf59bc9 61 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
e0100983
TH
62 * regular address to percpu pointer and back if they need to be
63 * different from the default
fbf59bc9 64 *
8d408b4b
TH
65 * - use pcpu_setup_first_chunk() during percpu area initialization to
66 * setup the first chunk containing the kernel static percpu area
fbf59bc9
TH
67 */
68
870d4b12
JP
69#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
70
fbf59bc9 71#include <linux/bitmap.h>
d7d29ac7 72#include <linux/cpumask.h>
57c8a661 73#include <linux/memblock.h>
fd1e8a1f 74#include <linux/err.h>
ca460b3c 75#include <linux/lcm.h>
fbf59bc9 76#include <linux/list.h>
a530b795 77#include <linux/log2.h>
fbf59bc9
TH
78#include <linux/mm.h>
79#include <linux/module.h>
80#include <linux/mutex.h>
81#include <linux/percpu.h>
82#include <linux/pfn.h>
fbf59bc9 83#include <linux/slab.h>
ccea34b5 84#include <linux/spinlock.h>
fbf59bc9 85#include <linux/vmalloc.h>
a56dbddf 86#include <linux/workqueue.h>
f528f0b8 87#include <linux/kmemleak.h>
71546d10 88#include <linux/sched.h>
28307d93 89#include <linux/sched/mm.h>
3c7be18a 90#include <linux/memcontrol.h>
fbf59bc9
TH
91
92#include <asm/cacheflush.h>
e0100983 93#include <asm/sections.h>
fbf59bc9 94#include <asm/tlbflush.h>
3b034b0d 95#include <asm/io.h>
fbf59bc9 96
df95e795
DZ
97#define CREATE_TRACE_POINTS
98#include <trace/events/percpu.h>
99
8fa3ed80
DZ
100#include "percpu-internal.h"
101
ac9380f6
RG
102/*
103 * The slots are sorted by the size of the biggest continuous free area.
104 * 1-31 bytes share the same slot.
105 */
40064aec 106#define PCPU_SLOT_BASE_SHIFT 5
8744d859
DZ
107/* chunks in slots below this are subject to being sidelined on failed alloc */
108#define PCPU_SLOT_FAIL_THRESHOLD 3
40064aec 109
1a4d7607
TH
110#define PCPU_EMPTY_POP_PAGES_LOW 2
111#define PCPU_EMPTY_POP_PAGES_HIGH 4
fbf59bc9 112
bbddff05 113#ifdef CONFIG_SMP
e0100983
TH
114/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
115#ifndef __addr_to_pcpu_ptr
116#define __addr_to_pcpu_ptr(addr) \
43cf38eb
TH
117 (void __percpu *)((unsigned long)(addr) - \
118 (unsigned long)pcpu_base_addr + \
119 (unsigned long)__per_cpu_start)
e0100983
TH
120#endif
121#ifndef __pcpu_ptr_to_addr
122#define __pcpu_ptr_to_addr(ptr) \
43cf38eb
TH
123 (void __force *)((unsigned long)(ptr) + \
124 (unsigned long)pcpu_base_addr - \
125 (unsigned long)__per_cpu_start)
e0100983 126#endif
bbddff05
TH
127#else /* CONFIG_SMP */
128/* on UP, it's always identity mapped */
129#define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
130#define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
131#endif /* CONFIG_SMP */
e0100983 132
1328710b
DM
133static int pcpu_unit_pages __ro_after_init;
134static int pcpu_unit_size __ro_after_init;
135static int pcpu_nr_units __ro_after_init;
136static int pcpu_atom_size __ro_after_init;
8fa3ed80 137int pcpu_nr_slots __ro_after_init;
8d55ba5d 138static int pcpu_free_slot __ro_after_init;
f1833241
RG
139int pcpu_sidelined_slot __ro_after_init;
140int pcpu_to_depopulate_slot __ro_after_init;
1328710b 141static size_t pcpu_chunk_struct_size __ro_after_init;
fbf59bc9 142
a855b84c 143/* cpus with the lowest and highest unit addresses */
1328710b
DM
144static unsigned int pcpu_low_unit_cpu __ro_after_init;
145static unsigned int pcpu_high_unit_cpu __ro_after_init;
2f39e637 146
fbf59bc9 147/* the address of the first chunk which starts with the kernel static area */
1328710b 148void *pcpu_base_addr __ro_after_init;
fbf59bc9 149
1328710b
DM
150static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */
151const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */
2f39e637 152
6563297c 153/* group information, used for vm allocation */
1328710b
DM
154static int pcpu_nr_groups __ro_after_init;
155static const unsigned long *pcpu_group_offsets __ro_after_init;
156static const size_t *pcpu_group_sizes __ro_after_init;
6563297c 157
ae9e6bc9
TH
158/*
159 * The first chunk which always exists. Note that unlike other
160 * chunks, this one can be allocated and mapped in several different
161 * ways and thus often doesn't live in the vmalloc area.
162 */
8fa3ed80 163struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
ae9e6bc9
TH
164
165/*
166 * Optional reserved chunk. This chunk reserves part of the first
e2266705
DZF
167 * chunk and serves it for reserved allocations. When the reserved
168 * region doesn't exist, the following variable is NULL.
ae9e6bc9 169 */
8fa3ed80 170struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
edcb4639 171
8fa3ed80 172DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
6710e594 173static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
fbf59bc9 174
3c7be18a 175struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
fbf59bc9 176
4f996e23
TH
177/* chunks which need their map areas extended, protected by pcpu_lock */
178static LIST_HEAD(pcpu_map_extend_chunks);
179
b539b87f 180/*
faf65dde 181 * The number of empty populated pages, protected by pcpu_lock.
0760fa3d 182 * The reserved chunk doesn't contribute to the count.
b539b87f 183 */
faf65dde 184int pcpu_nr_empty_pop_pages;
b539b87f 185
7e8a6304
DZF
186/*
187 * The number of populated pages in use by the allocator, protected by
188 * pcpu_lock. This number is kept per a unit per chunk (i.e. when a page gets
189 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
190 * and increments/decrements this count by 1).
191 */
192static unsigned long pcpu_nr_populated;
193
1a4d7607
TH
194/*
195 * Balance work is used to populate or destroy chunks asynchronously. We
196 * try to keep the number of populated free pages between
197 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
198 * empty chunk.
199 */
fe6bd8c3
TH
200static void pcpu_balance_workfn(struct work_struct *work);
201static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
1a4d7607
TH
202static bool pcpu_async_enabled __read_mostly;
203static bool pcpu_atomic_alloc_failed;
204
205static void pcpu_schedule_balance_work(void)
206{
207 if (pcpu_async_enabled)
208 schedule_work(&pcpu_balance_work);
209}
a56dbddf 210
c0ebfdc3 211/**
560f2c23
DZF
212 * pcpu_addr_in_chunk - check if the address is served from this chunk
213 * @chunk: chunk of interest
214 * @addr: percpu address
c0ebfdc3
DZF
215 *
216 * RETURNS:
560f2c23 217 * True if the address is served from this chunk.
c0ebfdc3 218 */
560f2c23 219static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
020ec653 220{
c0ebfdc3
DZF
221 void *start_addr, *end_addr;
222
560f2c23 223 if (!chunk)
c0ebfdc3 224 return false;
020ec653 225
560f2c23
DZF
226 start_addr = chunk->base_addr + chunk->start_offset;
227 end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
228 chunk->end_offset;
c0ebfdc3
DZF
229
230 return addr >= start_addr && addr < end_addr;
020ec653
TH
231}
232
d9b55eeb 233static int __pcpu_size_to_slot(int size)
fbf59bc9 234{
cae3aeb8 235 int highbit = fls(size); /* size is in bytes */
fbf59bc9
TH
236 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
237}
238
d9b55eeb
TH
239static int pcpu_size_to_slot(int size)
240{
241 if (size == pcpu_unit_size)
1c29a3ce 242 return pcpu_free_slot;
d9b55eeb
TH
243 return __pcpu_size_to_slot(size);
244}
245
fbf59bc9
TH
246static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
247{
92c14cab
DZ
248 const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
249
250 if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
251 chunk_md->contig_hint == 0)
fbf59bc9
TH
252 return 0;
253
92c14cab 254 return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
fbf59bc9
TH
255}
256
88999a89
TH
257/* set the pointer to a chunk in a page struct */
258static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
259{
260 page->index = (unsigned long)pcpu;
261}
262
263/* obtain pointer to a chunk from a page struct */
264static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
265{
266 return (struct pcpu_chunk *)page->index;
267}
268
269static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
fbf59bc9 270{
2f39e637 271 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
fbf59bc9
TH
272}
273
c0ebfdc3
DZF
274static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
275{
276 return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
277}
278
9983b6f0
TH
279static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
280 unsigned int cpu, int page_idx)
fbf59bc9 281{
c0ebfdc3
DZF
282 return (unsigned long)chunk->base_addr +
283 pcpu_unit_page_offset(cpu, page_idx);
fbf59bc9
TH
284}
285
ca460b3c
DZF
286/*
287 * The following are helper functions to help access bitmaps and convert
288 * between bitmap offsets to address offsets.
289 */
290static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
291{
292 return chunk->alloc_map +
293 (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
294}
295
296static unsigned long pcpu_off_to_block_index(int off)
297{
298 return off / PCPU_BITMAP_BLOCK_BITS;
299}
300
301static unsigned long pcpu_off_to_block_off(int off)
302{
303 return off & (PCPU_BITMAP_BLOCK_BITS - 1);
304}
305
b185cd0d
DZF
306static unsigned long pcpu_block_off_to_off(int index, int off)
307{
308 return index * PCPU_BITMAP_BLOCK_BITS + off;
309}
310
8ea2e1e3
RG
311/**
312 * pcpu_check_block_hint - check against the contig hint
313 * @block: block of interest
314 * @bits: size of allocation
315 * @align: alignment of area (max PAGE_SIZE)
316 *
317 * Check to see if the allocation can fit in the block's contig hint.
318 * Note, a chunk uses the same hints as a block so this can also check against
319 * the chunk's contig hint.
320 */
321static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
322 size_t align)
323{
324 int bit_off = ALIGN(block->contig_hint_start, align) -
325 block->contig_hint_start;
326
327 return bit_off + bits <= block->contig_hint;
328}
329
382b88e9
DZ
330/*
331 * pcpu_next_hint - determine which hint to use
332 * @block: block of interest
333 * @alloc_bits: size of allocation
334 *
335 * This determines if we should scan based on the scan_hint or first_free.
336 * In general, we want to scan from first_free to fulfill allocations by
337 * first fit. However, if we know a scan_hint at position scan_hint_start
338 * cannot fulfill an allocation, we can begin scanning from there knowing
339 * the contig_hint will be our fallback.
340 */
341static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
342{
343 /*
344 * The three conditions below determine if we can skip past the
345 * scan_hint. First, does the scan hint exist. Second, is the
346 * contig_hint after the scan_hint (possibly not true iff
347 * contig_hint == scan_hint). Third, is the allocation request
348 * larger than the scan_hint.
349 */
350 if (block->scan_hint &&
351 block->contig_hint_start > block->scan_hint_start &&
352 alloc_bits > block->scan_hint)
353 return block->scan_hint_start + block->scan_hint;
354
355 return block->first_free;
356}
357
525ca84d
DZF
358/**
359 * pcpu_next_md_free_region - finds the next hint free area
360 * @chunk: chunk of interest
361 * @bit_off: chunk offset
362 * @bits: size of free area
363 *
364 * Helper function for pcpu_for_each_md_free_region. It checks
365 * block->contig_hint and performs aggregation across blocks to find the
366 * next hint. It modifies bit_off and bits in-place to be consumed in the
367 * loop.
368 */
369static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
370 int *bits)
371{
372 int i = pcpu_off_to_block_index(*bit_off);
373 int block_off = pcpu_off_to_block_off(*bit_off);
374 struct pcpu_block_md *block;
375
376 *bits = 0;
377 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
378 block++, i++) {
379 /* handles contig area across blocks */
380 if (*bits) {
381 *bits += block->left_free;
382 if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
383 continue;
384 return;
385 }
386
387 /*
388 * This checks three things. First is there a contig_hint to
389 * check. Second, have we checked this hint before by
390 * comparing the block_off. Third, is this the same as the
391 * right contig hint. In the last case, it spills over into
392 * the next block and should be handled by the contig area
393 * across blocks code.
394 */
395 *bits = block->contig_hint;
396 if (*bits && block->contig_hint_start >= block_off &&
397 *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
398 *bit_off = pcpu_block_off_to_off(i,
399 block->contig_hint_start);
400 return;
401 }
1fa4df3e
DZ
402 /* reset to satisfy the second predicate above */
403 block_off = 0;
525ca84d
DZF
404
405 *bits = block->right_free;
406 *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
407 }
408}
409
b4c2116c
DZF
410/**
411 * pcpu_next_fit_region - finds fit areas for a given allocation request
412 * @chunk: chunk of interest
413 * @alloc_bits: size of allocation
414 * @align: alignment of area (max PAGE_SIZE)
415 * @bit_off: chunk offset
416 * @bits: size of free area
417 *
418 * Finds the next free region that is viable for use with a given size and
419 * alignment. This only returns if there is a valid area to be used for this
420 * allocation. block->first_free is returned if the allocation request fits
421 * within the block to see if the request can be fulfilled prior to the contig
422 * hint.
423 */
424static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
425 int align, int *bit_off, int *bits)
426{
427 int i = pcpu_off_to_block_index(*bit_off);
428 int block_off = pcpu_off_to_block_off(*bit_off);
429 struct pcpu_block_md *block;
430
431 *bits = 0;
432 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
433 block++, i++) {
434 /* handles contig area across blocks */
435 if (*bits) {
436 *bits += block->left_free;
437 if (*bits >= alloc_bits)
438 return;
439 if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
440 continue;
441 }
442
443 /* check block->contig_hint */
444 *bits = ALIGN(block->contig_hint_start, align) -
445 block->contig_hint_start;
446 /*
447 * This uses the block offset to determine if this has been
448 * checked in the prior iteration.
449 */
450 if (block->contig_hint &&
451 block->contig_hint_start >= block_off &&
452 block->contig_hint >= *bits + alloc_bits) {
382b88e9
DZ
453 int start = pcpu_next_hint(block, alloc_bits);
454
b4c2116c 455 *bits += alloc_bits + block->contig_hint_start -
382b88e9
DZ
456 start;
457 *bit_off = pcpu_block_off_to_off(i, start);
b4c2116c
DZF
458 return;
459 }
1fa4df3e
DZ
460 /* reset to satisfy the second predicate above */
461 block_off = 0;
b4c2116c
DZF
462
463 *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
464 align);
465 *bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
466 *bit_off = pcpu_block_off_to_off(i, *bit_off);
467 if (*bits >= alloc_bits)
468 return;
469 }
470
471 /* no valid offsets were found - fail condition */
472 *bit_off = pcpu_chunk_map_bits(chunk);
473}
474
525ca84d
DZF
475/*
476 * Metadata free area iterators. These perform aggregation of free areas
477 * based on the metadata blocks and return the offset @bit_off and size in
b4c2116c
DZF
478 * bits of the free area @bits. pcpu_for_each_fit_region only returns when
479 * a fit is found for the allocation request.
525ca84d
DZF
480 */
481#define pcpu_for_each_md_free_region(chunk, bit_off, bits) \
482 for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \
483 (bit_off) < pcpu_chunk_map_bits((chunk)); \
484 (bit_off) += (bits) + 1, \
485 pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
486
b4c2116c
DZF
487#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \
488 for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
489 &(bits)); \
490 (bit_off) < pcpu_chunk_map_bits((chunk)); \
491 (bit_off) += (bits), \
492 pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
493 &(bits)))
494
fbf59bc9 495/**
90459ce0 496 * pcpu_mem_zalloc - allocate memory
1880d93b 497 * @size: bytes to allocate
47504ee0 498 * @gfp: allocation flags
fbf59bc9 499 *
1880d93b 500 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
47504ee0
DZ
501 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
502 * This is to facilitate passing through whitelisted flags. The
503 * returned memory is always zeroed.
fbf59bc9
TH
504 *
505 * RETURNS:
1880d93b 506 * Pointer to the allocated area on success, NULL on failure.
fbf59bc9 507 */
47504ee0 508static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
fbf59bc9 509{
099a19d9
TH
510 if (WARN_ON_ONCE(!slab_is_available()))
511 return NULL;
512
1880d93b 513 if (size <= PAGE_SIZE)
554fef1c 514 return kzalloc(size, gfp);
7af4c093 515 else
88dca4ca 516 return __vmalloc(size, gfp | __GFP_ZERO);
1880d93b 517}
fbf59bc9 518
1880d93b
TH
519/**
520 * pcpu_mem_free - free memory
521 * @ptr: memory to free
1880d93b 522 *
90459ce0 523 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
1880d93b 524 */
1d5cfdb0 525static void pcpu_mem_free(void *ptr)
1880d93b 526{
1d5cfdb0 527 kvfree(ptr);
fbf59bc9
TH
528}
529
8744d859
DZ
530static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
531 bool move_front)
532{
533 if (chunk != pcpu_reserved_chunk) {
534 if (move_front)
faf65dde 535 list_move(&chunk->list, &pcpu_chunk_lists[slot]);
8744d859 536 else
faf65dde 537 list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
8744d859
DZ
538 }
539}
540
541static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
542{
543 __pcpu_chunk_move(chunk, slot, true);
544}
545
fbf59bc9
TH
546/**
547 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
548 * @chunk: chunk of interest
549 * @oslot: the previous slot it was on
550 *
551 * This function is called after an allocation or free changed @chunk.
552 * New slot according to the changed state is determined and @chunk is
edcb4639
TH
553 * moved to the slot. Note that the reserved chunk is never put on
554 * chunk slots.
ccea34b5
TH
555 *
556 * CONTEXT:
557 * pcpu_lock.
fbf59bc9
TH
558 */
559static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
560{
561 int nslot = pcpu_chunk_slot(chunk);
562
f1833241
RG
563 /* leave isolated chunks in-place */
564 if (chunk->isolated)
565 return;
566
8744d859
DZ
567 if (oslot != nslot)
568 __pcpu_chunk_move(chunk, nslot, oslot < nslot);
833af842
TH
569}
570
f1833241
RG
571static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
572{
f1833241
RG
573 lockdep_assert_held(&pcpu_lock);
574
575 if (!chunk->isolated) {
576 chunk->isolated = true;
faf65dde 577 pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
f1833241 578 }
faf65dde 579 list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
f1833241
RG
580}
581
582static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
583{
f1833241
RG
584 lockdep_assert_held(&pcpu_lock);
585
586 if (chunk->isolated) {
587 chunk->isolated = false;
faf65dde 588 pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
f1833241
RG
589 pcpu_chunk_relocate(chunk, -1);
590 }
591}
592
b239f7da
DZ
593/*
594 * pcpu_update_empty_pages - update empty page counters
833af842 595 * @chunk: chunk of interest
b239f7da 596 * @nr: nr of empty pages
833af842 597 *
b239f7da
DZ
598 * This is used to keep track of the empty pages now based on the premise
599 * a md_block covers a page. The hint update functions recognize if a block
600 * is made full or broken to calculate deltas for keeping track of free pages.
40064aec 601 */
b239f7da 602static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
40064aec 603{
b239f7da 604 chunk->nr_empty_pop_pages += nr;
f1833241 605 if (chunk != pcpu_reserved_chunk && !chunk->isolated)
faf65dde 606 pcpu_nr_empty_pop_pages += nr;
40064aec
DZF
607}
608
d9f3a01e
DZ
609/*
610 * pcpu_region_overlap - determines if two regions overlap
611 * @a: start of first region, inclusive
612 * @b: end of first region, exclusive
613 * @x: start of second region, inclusive
614 * @y: end of second region, exclusive
833af842 615 *
d9f3a01e
DZ
616 * This is used to determine if the hint region [a, b) overlaps with the
617 * allocated region [x, y).
833af842 618 */
d9f3a01e 619static inline bool pcpu_region_overlap(int a, int b, int x, int y)
833af842 620{
d9f3a01e 621 return (a < y) && (x < b);
40064aec 622}
9f7dcf22 623
ca460b3c
DZF
624/**
625 * pcpu_block_update - updates a block given a free area
626 * @block: block of interest
627 * @start: start offset in block
628 * @end: end offset in block
629 *
630 * Updates a block given a known free area. The region [start, end) is
268625a6
DZF
631 * expected to be the entirety of the free area within a block. Chooses
632 * the best starting offset if the contig hints are equal.
ca460b3c
DZF
633 */
634static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
635{
636 int contig = end - start;
637
638 block->first_free = min(block->first_free, start);
639 if (start == 0)
640 block->left_free = contig;
641
047924c9 642 if (end == block->nr_bits)
ca460b3c
DZF
643 block->right_free = contig;
644
645 if (contig > block->contig_hint) {
382b88e9
DZ
646 /* promote the old contig_hint to be the new scan_hint */
647 if (start > block->contig_hint_start) {
648 if (block->contig_hint > block->scan_hint) {
649 block->scan_hint_start =
650 block->contig_hint_start;
651 block->scan_hint = block->contig_hint;
652 } else if (start < block->scan_hint_start) {
653 /*
654 * The old contig_hint == scan_hint. But, the
655 * new contig is larger so hold the invariant
656 * scan_hint_start < contig_hint_start.
657 */
658 block->scan_hint = 0;
659 }
660 } else {
661 block->scan_hint = 0;
662 }
ca460b3c
DZF
663 block->contig_hint_start = start;
664 block->contig_hint = contig;
382b88e9
DZ
665 } else if (contig == block->contig_hint) {
666 if (block->contig_hint_start &&
667 (!start ||
668 __ffs(start) > __ffs(block->contig_hint_start))) {
669 /* start has a better alignment so use it */
670 block->contig_hint_start = start;
671 if (start < block->scan_hint_start &&
672 block->contig_hint > block->scan_hint)
673 block->scan_hint = 0;
674 } else if (start > block->scan_hint_start ||
675 block->contig_hint > block->scan_hint) {
676 /*
677 * Knowing contig == contig_hint, update the scan_hint
678 * if it is farther than or larger than the current
679 * scan_hint.
680 */
681 block->scan_hint_start = start;
682 block->scan_hint = contig;
683 }
684 } else {
685 /*
686 * The region is smaller than the contig_hint. So only update
687 * the scan_hint if it is larger than or equal and farther than
688 * the current scan_hint.
689 */
690 if ((start < block->contig_hint_start &&
691 (contig > block->scan_hint ||
692 (contig == block->scan_hint &&
693 start > block->scan_hint_start)))) {
694 block->scan_hint_start = start;
695 block->scan_hint = contig;
696 }
ca460b3c
DZF
697 }
698}
699
b89462a9
DZ
700/*
701 * pcpu_block_update_scan - update a block given a free area from a scan
702 * @chunk: chunk of interest
703 * @bit_off: chunk offset
704 * @bits: size of free area
705 *
706 * Finding the final allocation spot first goes through pcpu_find_block_fit()
707 * to find a block that can hold the allocation and then pcpu_alloc_area()
708 * where a scan is used. When allocations require specific alignments,
709 * we can inadvertently create holes which will not be seen in the alloc
710 * or free paths.
711 *
712 * This takes a given free area hole and updates a block as it may change the
713 * scan_hint. We need to scan backwards to ensure we don't miss free bits
714 * from alignment.
715 */
716static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
717 int bits)
718{
719 int s_off = pcpu_off_to_block_off(bit_off);
720 int e_off = s_off + bits;
721 int s_index, l_bit;
722 struct pcpu_block_md *block;
723
724 if (e_off > PCPU_BITMAP_BLOCK_BITS)
725 return;
726
727 s_index = pcpu_off_to_block_index(bit_off);
728 block = chunk->md_blocks + s_index;
729
730 /* scan backwards in case of alignment skipping free bits */
731 l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
732 s_off = (s_off == l_bit) ? 0 : l_bit + 1;
733
734 pcpu_block_update(block, s_off, e_off);
735}
736
92c14cab
DZ
737/**
738 * pcpu_chunk_refresh_hint - updates metadata about a chunk
739 * @chunk: chunk of interest
d33d9f3d 740 * @full_scan: if we should scan from the beginning
92c14cab
DZ
741 *
742 * Iterates over the metadata blocks to find the largest contig area.
d33d9f3d
DZ
743 * A full scan can be avoided on the allocation path as this is triggered
744 * if we broke the contig_hint. In doing so, the scan_hint will be before
745 * the contig_hint or after if the scan_hint == contig_hint. This cannot
746 * be prevented on freeing as we want to find the largest area possibly
747 * spanning blocks.
92c14cab 748 */
d33d9f3d 749static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
92c14cab
DZ
750{
751 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
752 int bit_off, bits;
753
d33d9f3d
DZ
754 /* promote scan_hint to contig_hint */
755 if (!full_scan && chunk_md->scan_hint) {
756 bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
757 chunk_md->contig_hint_start = chunk_md->scan_hint_start;
758 chunk_md->contig_hint = chunk_md->scan_hint;
759 chunk_md->scan_hint = 0;
760 } else {
761 bit_off = chunk_md->first_free;
762 chunk_md->contig_hint = 0;
763 }
92c14cab 764
92c14cab 765 bits = 0;
e837dfde 766 pcpu_for_each_md_free_region(chunk, bit_off, bits)
92c14cab 767 pcpu_block_update(chunk_md, bit_off, bit_off + bits);
ca460b3c
DZF
768}
769
770/**
771 * pcpu_block_refresh_hint
772 * @chunk: chunk of interest
773 * @index: index of the metadata block
774 *
775 * Scans over the block beginning at first_free and updates the block
776 * metadata accordingly.
777 */
778static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
779{
780 struct pcpu_block_md *block = chunk->md_blocks + index;
781 unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
ec288a2c 782 unsigned int start, end; /* region start, region end */
da3afdd5
DZ
783
784 /* promote scan_hint to contig_hint */
785 if (block->scan_hint) {
786 start = block->scan_hint_start + block->scan_hint;
787 block->contig_hint_start = block->scan_hint_start;
788 block->contig_hint = block->scan_hint;
789 block->scan_hint = 0;
790 } else {
791 start = block->first_free;
792 block->contig_hint = 0;
793 }
ca460b3c 794
da3afdd5 795 block->right_free = 0;
ca460b3c
DZF
796
797 /* iterate over free areas and update the contig hints */
ec288a2c
YN
798 for_each_clear_bitrange_from(start, end, alloc_map, PCPU_BITMAP_BLOCK_BITS)
799 pcpu_block_update(block, start, end);
ca460b3c
DZF
800}
801
802/**
803 * pcpu_block_update_hint_alloc - update hint on allocation path
804 * @chunk: chunk of interest
805 * @bit_off: chunk offset
806 * @bits: size of request
fc304334
DZF
807 *
808 * Updates metadata for the allocation path. The metadata only has to be
809 * refreshed by a full scan iff the chunk's contig hint is broken. Block level
810 * scans are required if the block's contig hint is broken.
ca460b3c
DZF
811 */
812static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
813 int bits)
814{
92c14cab 815 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
b239f7da 816 int nr_empty_pages = 0;
ca460b3c
DZF
817 struct pcpu_block_md *s_block, *e_block, *block;
818 int s_index, e_index; /* block indexes of the freed allocation */
819 int s_off, e_off; /* block offsets of the freed allocation */
820
821 /*
822 * Calculate per block offsets.
823 * The calculation uses an inclusive range, but the resulting offsets
824 * are [start, end). e_index always points to the last block in the
825 * range.
826 */
827 s_index = pcpu_off_to_block_index(bit_off);
828 e_index = pcpu_off_to_block_index(bit_off + bits - 1);
829 s_off = pcpu_off_to_block_off(bit_off);
830 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
831
832 s_block = chunk->md_blocks + s_index;
833 e_block = chunk->md_blocks + e_index;
834
835 /*
836 * Update s_block.
fc304334
DZF
837 * block->first_free must be updated if the allocation takes its place.
838 * If the allocation breaks the contig_hint, a scan is required to
839 * restore this hint.
ca460b3c 840 */
b239f7da
DZ
841 if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
842 nr_empty_pages++;
843
fc304334
DZF
844 if (s_off == s_block->first_free)
845 s_block->first_free = find_next_zero_bit(
846 pcpu_index_alloc_map(chunk, s_index),
847 PCPU_BITMAP_BLOCK_BITS,
848 s_off + bits);
849
382b88e9
DZ
850 if (pcpu_region_overlap(s_block->scan_hint_start,
851 s_block->scan_hint_start + s_block->scan_hint,
852 s_off,
853 s_off + bits))
854 s_block->scan_hint = 0;
855
d9f3a01e
DZ
856 if (pcpu_region_overlap(s_block->contig_hint_start,
857 s_block->contig_hint_start +
858 s_block->contig_hint,
859 s_off,
860 s_off + bits)) {
fc304334 861 /* block contig hint is broken - scan to fix it */
da3afdd5
DZ
862 if (!s_off)
863 s_block->left_free = 0;
fc304334
DZF
864 pcpu_block_refresh_hint(chunk, s_index);
865 } else {
866 /* update left and right contig manually */
867 s_block->left_free = min(s_block->left_free, s_off);
868 if (s_index == e_index)
869 s_block->right_free = min_t(int, s_block->right_free,
870 PCPU_BITMAP_BLOCK_BITS - e_off);
871 else
872 s_block->right_free = 0;
873 }
ca460b3c
DZF
874
875 /*
876 * Update e_block.
877 */
878 if (s_index != e_index) {
b239f7da
DZ
879 if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
880 nr_empty_pages++;
881
fc304334
DZF
882 /*
883 * When the allocation is across blocks, the end is along
884 * the left part of the e_block.
885 */
886 e_block->first_free = find_next_zero_bit(
887 pcpu_index_alloc_map(chunk, e_index),
888 PCPU_BITMAP_BLOCK_BITS, e_off);
889
890 if (e_off == PCPU_BITMAP_BLOCK_BITS) {
891 /* reset the block */
892 e_block++;
893 } else {
382b88e9
DZ
894 if (e_off > e_block->scan_hint_start)
895 e_block->scan_hint = 0;
896
da3afdd5 897 e_block->left_free = 0;
fc304334
DZF
898 if (e_off > e_block->contig_hint_start) {
899 /* contig hint is broken - scan to fix it */
900 pcpu_block_refresh_hint(chunk, e_index);
901 } else {
fc304334
DZF
902 e_block->right_free =
903 min_t(int, e_block->right_free,
904 PCPU_BITMAP_BLOCK_BITS - e_off);
905 }
906 }
ca460b3c
DZF
907
908 /* update in-between md_blocks */
b239f7da 909 nr_empty_pages += (e_index - s_index - 1);
ca460b3c 910 for (block = s_block + 1; block < e_block; block++) {
382b88e9 911 block->scan_hint = 0;
ca460b3c
DZF
912 block->contig_hint = 0;
913 block->left_free = 0;
914 block->right_free = 0;
915 }
916 }
917
b239f7da
DZ
918 if (nr_empty_pages)
919 pcpu_update_empty_pages(chunk, -nr_empty_pages);
920
d33d9f3d
DZ
921 if (pcpu_region_overlap(chunk_md->scan_hint_start,
922 chunk_md->scan_hint_start +
923 chunk_md->scan_hint,
924 bit_off,
925 bit_off + bits))
926 chunk_md->scan_hint = 0;
927
fc304334
DZF
928 /*
929 * The only time a full chunk scan is required is if the chunk
930 * contig hint is broken. Otherwise, it means a smaller space
931 * was used and therefore the chunk contig hint is still correct.
932 */
92c14cab
DZ
933 if (pcpu_region_overlap(chunk_md->contig_hint_start,
934 chunk_md->contig_hint_start +
935 chunk_md->contig_hint,
d9f3a01e
DZ
936 bit_off,
937 bit_off + bits))
d33d9f3d 938 pcpu_chunk_refresh_hint(chunk, false);
ca460b3c
DZF
939}
940
941/**
942 * pcpu_block_update_hint_free - updates the block hints on the free path
943 * @chunk: chunk of interest
944 * @bit_off: chunk offset
945 * @bits: size of request
b185cd0d
DZF
946 *
947 * Updates metadata for the allocation path. This avoids a blind block
948 * refresh by making use of the block contig hints. If this fails, it scans
949 * forward and backward to determine the extent of the free area. This is
950 * capped at the boundary of blocks.
951 *
952 * A chunk update is triggered if a page becomes free, a block becomes free,
953 * or the free spans across blocks. This tradeoff is to minimize iterating
92c14cab
DZ
954 * over the block metadata to update chunk_md->contig_hint.
955 * chunk_md->contig_hint may be off by up to a page, but it will never be more
956 * than the available space. If the contig hint is contained in one block, it
957 * will be accurate.
ca460b3c
DZF
958 */
959static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
960 int bits)
961{
b239f7da 962 int nr_empty_pages = 0;
ca460b3c
DZF
963 struct pcpu_block_md *s_block, *e_block, *block;
964 int s_index, e_index; /* block indexes of the freed allocation */
965 int s_off, e_off; /* block offsets of the freed allocation */
b185cd0d 966 int start, end; /* start and end of the whole free area */
ca460b3c
DZF
967
968 /*
969 * Calculate per block offsets.
970 * The calculation uses an inclusive range, but the resulting offsets
971 * are [start, end). e_index always points to the last block in the
972 * range.
973 */
974 s_index = pcpu_off_to_block_index(bit_off);
975 e_index = pcpu_off_to_block_index(bit_off + bits - 1);
976 s_off = pcpu_off_to_block_off(bit_off);
977 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
978
979 s_block = chunk->md_blocks + s_index;
980 e_block = chunk->md_blocks + e_index;
981
b185cd0d
DZF
982 /*
983 * Check if the freed area aligns with the block->contig_hint.
984 * If it does, then the scan to find the beginning/end of the
985 * larger free area can be avoided.
986 *
987 * start and end refer to beginning and end of the free area
988 * within each their respective blocks. This is not necessarily
989 * the entire free area as it may span blocks past the beginning
990 * or end of the block.
991 */
992 start = s_off;
993 if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
994 start = s_block->contig_hint_start;
995 } else {
996 /*
997 * Scan backwards to find the extent of the free area.
998 * find_last_bit returns the starting bit, so if the start bit
999 * is returned, that means there was no last bit and the
1000 * remainder of the chunk is free.
1001 */
1002 int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
1003 start);
1004 start = (start == l_bit) ? 0 : l_bit + 1;
1005 }
1006
1007 end = e_off;
1008 if (e_off == e_block->contig_hint_start)
1009 end = e_block->contig_hint_start + e_block->contig_hint;
1010 else
1011 end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
1012 PCPU_BITMAP_BLOCK_BITS, end);
1013
ca460b3c 1014 /* update s_block */
b185cd0d 1015 e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
b239f7da
DZ
1016 if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
1017 nr_empty_pages++;
b185cd0d 1018 pcpu_block_update(s_block, start, e_off);
ca460b3c
DZF
1019
1020 /* freeing in the same block */
1021 if (s_index != e_index) {
1022 /* update e_block */
b239f7da
DZ
1023 if (end == PCPU_BITMAP_BLOCK_BITS)
1024 nr_empty_pages++;
b185cd0d 1025 pcpu_block_update(e_block, 0, end);
ca460b3c
DZF
1026
1027 /* reset md_blocks in the middle */
b239f7da 1028 nr_empty_pages += (e_index - s_index - 1);
ca460b3c
DZF
1029 for (block = s_block + 1; block < e_block; block++) {
1030 block->first_free = 0;
382b88e9 1031 block->scan_hint = 0;
ca460b3c
DZF
1032 block->contig_hint_start = 0;
1033 block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1034 block->left_free = PCPU_BITMAP_BLOCK_BITS;
1035 block->right_free = PCPU_BITMAP_BLOCK_BITS;
1036 }
1037 }
1038
b239f7da
DZ
1039 if (nr_empty_pages)
1040 pcpu_update_empty_pages(chunk, nr_empty_pages);
1041
b185cd0d 1042 /*
b239f7da
DZ
1043 * Refresh chunk metadata when the free makes a block free or spans
1044 * across blocks. The contig_hint may be off by up to a page, but if
1045 * the contig_hint is contained in a block, it will be accurate with
1046 * the else condition below.
b185cd0d 1047 */
b239f7da 1048 if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
d33d9f3d 1049 pcpu_chunk_refresh_hint(chunk, true);
b185cd0d 1050 else
92c14cab
DZ
1051 pcpu_block_update(&chunk->chunk_md,
1052 pcpu_block_off_to_off(s_index, start),
1053 end);
ca460b3c
DZF
1054}
1055
40064aec
DZF
1056/**
1057 * pcpu_is_populated - determines if the region is populated
1058 * @chunk: chunk of interest
1059 * @bit_off: chunk offset
1060 * @bits: size of area
1061 * @next_off: return value for the next offset to start searching
1062 *
1063 * For atomic allocations, check if the backing pages are populated.
1064 *
1065 * RETURNS:
1066 * Bool if the backing pages are populated.
1067 * next_index is to skip over unpopulated blocks in pcpu_find_block_fit.
1068 */
1069static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
1070 int *next_off)
1071{
801a5736 1072 unsigned int start, end;
833af842 1073
801a5736
YN
1074 start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
1075 end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
833af842 1076
801a5736
YN
1077 start = find_next_zero_bit(chunk->populated, end, start);
1078 if (start >= end)
40064aec 1079 return true;
833af842 1080
801a5736
YN
1081 end = find_next_bit(chunk->populated, end, start + 1);
1082
1083 *next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
40064aec 1084 return false;
9f7dcf22
TH
1085}
1086
a16037c8 1087/**
40064aec
DZF
1088 * pcpu_find_block_fit - finds the block index to start searching
1089 * @chunk: chunk of interest
1090 * @alloc_bits: size of request in allocation units
1091 * @align: alignment of area (max PAGE_SIZE bytes)
1092 * @pop_only: use populated regions only
1093 *
b4c2116c
DZF
1094 * Given a chunk and an allocation spec, find the offset to begin searching
1095 * for a free region. This iterates over the bitmap metadata blocks to
1096 * find an offset that will be guaranteed to fit the requirements. It is
1097 * not quite first fit as if the allocation does not fit in the contig hint
1098 * of a block or chunk, it is skipped. This errs on the side of caution
1099 * to prevent excess iteration. Poor alignment can cause the allocator to
1100 * skip over blocks and chunks that have valid free areas.
1101 *
40064aec
DZF
1102 * RETURNS:
1103 * The offset in the bitmap to begin searching.
1104 * -1 if no offset is found.
a16037c8 1105 */
40064aec
DZF
1106static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
1107 size_t align, bool pop_only)
a16037c8 1108{
92c14cab 1109 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
b4c2116c 1110 int bit_off, bits, next_off;
a16037c8 1111
13f96637 1112 /*
8ea2e1e3
RG
1113 * This is an optimization to prevent scanning by assuming if the
1114 * allocation cannot fit in the global hint, there is memory pressure
1115 * and creating a new chunk would happen soon.
13f96637 1116 */
8ea2e1e3 1117 if (!pcpu_check_block_hint(chunk_md, alloc_bits, align))
13f96637
DZF
1118 return -1;
1119
d33d9f3d 1120 bit_off = pcpu_next_hint(chunk_md, alloc_bits);
b4c2116c
DZF
1121 bits = 0;
1122 pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
40064aec 1123 if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
b4c2116c 1124 &next_off))
40064aec 1125 break;
a16037c8 1126
b4c2116c 1127 bit_off = next_off;
40064aec 1128 bits = 0;
a16037c8 1129 }
40064aec
DZF
1130
1131 if (bit_off == pcpu_chunk_map_bits(chunk))
1132 return -1;
1133
1134 return bit_off;
a16037c8
TH
1135}
1136
b89462a9
DZ
1137/*
1138 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
1139 * @map: the address to base the search on
1140 * @size: the bitmap size in bits
1141 * @start: the bitnumber to start searching at
1142 * @nr: the number of zeroed bits we're looking for
1143 * @align_mask: alignment mask for zero area
1144 * @largest_off: offset of the largest area skipped
1145 * @largest_bits: size of the largest area skipped
1146 *
1147 * The @align_mask should be one less than a power of 2.
1148 *
1149 * This is a modified version of bitmap_find_next_zero_area_off() to remember
1150 * the largest area that was skipped. This is imperfect, but in general is
1151 * good enough. The largest remembered region is the largest failed region
1152 * seen. This does not include anything we possibly skipped due to alignment.
1153 * pcpu_block_update_scan() does scan backwards to try and recover what was
1154 * lost to alignment. While this can cause scanning to miss earlier possible
1155 * free areas, smaller allocations will eventually fill those holes.
1156 */
1157static unsigned long pcpu_find_zero_area(unsigned long *map,
1158 unsigned long size,
1159 unsigned long start,
1160 unsigned long nr,
1161 unsigned long align_mask,
1162 unsigned long *largest_off,
1163 unsigned long *largest_bits)
1164{
1165 unsigned long index, end, i, area_off, area_bits;
1166again:
1167 index = find_next_zero_bit(map, size, start);
1168
1169 /* Align allocation */
1170 index = __ALIGN_MASK(index, align_mask);
1171 area_off = index;
1172
1173 end = index + nr;
1174 if (end > size)
1175 return end;
1176 i = find_next_bit(map, end, index);
1177 if (i < end) {
1178 area_bits = i - area_off;
1179 /* remember largest unused area with best alignment */
1180 if (area_bits > *largest_bits ||
1181 (area_bits == *largest_bits && *largest_off &&
1182 (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
1183 *largest_off = area_off;
1184 *largest_bits = area_bits;
1185 }
1186
1187 start = i + 1;
1188 goto again;
1189 }
1190 return index;
1191}
1192
fbf59bc9 1193/**
40064aec 1194 * pcpu_alloc_area - allocates an area from a pcpu_chunk
fbf59bc9 1195 * @chunk: chunk of interest
40064aec
DZF
1196 * @alloc_bits: size of request in allocation units
1197 * @align: alignment of area (max PAGE_SIZE)
1198 * @start: bit_off to start searching
9f7dcf22 1199 *
40064aec 1200 * This function takes in a @start offset to begin searching to fit an
b4c2116c
DZF
1201 * allocation of @alloc_bits with alignment @align. It needs to scan
1202 * the allocation map because if it fits within the block's contig hint,
1203 * @start will be block->first_free. This is an attempt to fill the
1204 * allocation prior to breaking the contig hint. The allocation and
1205 * boundary maps are updated accordingly if it confirms a valid
1206 * free area.
ccea34b5 1207 *
fbf59bc9 1208 * RETURNS:
40064aec
DZF
1209 * Allocated addr offset in @chunk on success.
1210 * -1 if no matching area is found.
fbf59bc9 1211 */
40064aec
DZF
1212static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
1213 size_t align, int start)
fbf59bc9 1214{
92c14cab 1215 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
40064aec 1216 size_t align_mask = (align) ? (align - 1) : 0;
b89462a9 1217 unsigned long area_off = 0, area_bits = 0;
40064aec 1218 int bit_off, end, oslot;
a16037c8 1219
40064aec 1220 lockdep_assert_held(&pcpu_lock);
fbf59bc9 1221
40064aec 1222 oslot = pcpu_chunk_slot(chunk);
fbf59bc9 1223
40064aec
DZF
1224 /*
1225 * Search to find a fit.
1226 */
8c43004a
DZ
1227 end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
1228 pcpu_chunk_map_bits(chunk));
b89462a9
DZ
1229 bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1230 align_mask, &area_off, &area_bits);
40064aec
DZF
1231 if (bit_off >= end)
1232 return -1;
fbf59bc9 1233
b89462a9
DZ
1234 if (area_bits)
1235 pcpu_block_update_scan(chunk, area_off, area_bits);
1236
40064aec
DZF
1237 /* update alloc map */
1238 bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
3d331ad7 1239
40064aec
DZF
1240 /* update boundary map */
1241 set_bit(bit_off, chunk->bound_map);
1242 bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
1243 set_bit(bit_off + alloc_bits, chunk->bound_map);
fbf59bc9 1244
40064aec 1245 chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
fbf59bc9 1246
86b442fb 1247 /* update first free bit */
92c14cab
DZ
1248 if (bit_off == chunk_md->first_free)
1249 chunk_md->first_free = find_next_zero_bit(
86b442fb
DZF
1250 chunk->alloc_map,
1251 pcpu_chunk_map_bits(chunk),
1252 bit_off + alloc_bits);
1253
ca460b3c 1254 pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
fbf59bc9 1255
fbf59bc9
TH
1256 pcpu_chunk_relocate(chunk, oslot);
1257
40064aec 1258 return bit_off * PCPU_MIN_ALLOC_SIZE;
fbf59bc9
TH
1259}
1260
1261/**
40064aec 1262 * pcpu_free_area - frees the corresponding offset
fbf59bc9 1263 * @chunk: chunk of interest
40064aec 1264 * @off: addr offset into chunk
ccea34b5 1265 *
40064aec
DZF
1266 * This function determines the size of an allocation to free using
1267 * the boundary bitmap and clears the allocation map.
5b32af91
RG
1268 *
1269 * RETURNS:
1270 * Number of freed bytes.
fbf59bc9 1271 */
5b32af91 1272static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
fbf59bc9 1273{
92c14cab 1274 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
5b32af91 1275 int bit_off, bits, end, oslot, freed;
723ad1d9 1276
5ccd30e4 1277 lockdep_assert_held(&pcpu_lock);
30a5b536 1278 pcpu_stats_area_dealloc(chunk);
5ccd30e4 1279
40064aec 1280 oslot = pcpu_chunk_slot(chunk);
fbf59bc9 1281
40064aec 1282 bit_off = off / PCPU_MIN_ALLOC_SIZE;
3d331ad7 1283
40064aec
DZF
1284 /* find end index */
1285 end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1286 bit_off + 1);
1287 bits = end - bit_off;
1288 bitmap_clear(chunk->alloc_map, bit_off, bits);
fbf59bc9 1289
5b32af91
RG
1290 freed = bits * PCPU_MIN_ALLOC_SIZE;
1291
40064aec 1292 /* update metadata */
5b32af91 1293 chunk->free_bytes += freed;
b539b87f 1294
86b442fb 1295 /* update first free bit */
92c14cab 1296 chunk_md->first_free = min(chunk_md->first_free, bit_off);
86b442fb 1297
ca460b3c 1298 pcpu_block_update_hint_free(chunk, bit_off, bits);
fbf59bc9 1299
fbf59bc9 1300 pcpu_chunk_relocate(chunk, oslot);
5b32af91
RG
1301
1302 return freed;
fbf59bc9
TH
1303}
1304
047924c9
DZ
1305static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1306{
1307 block->scan_hint = 0;
1308 block->contig_hint = nr_bits;
1309 block->left_free = nr_bits;
1310 block->right_free = nr_bits;
1311 block->first_free = 0;
1312 block->nr_bits = nr_bits;
1313}
1314
ca460b3c
DZF
1315static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1316{
1317 struct pcpu_block_md *md_block;
1318
92c14cab
DZ
1319 /* init the chunk's block */
1320 pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
1321
ca460b3c
DZF
1322 for (md_block = chunk->md_blocks;
1323 md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
047924c9
DZ
1324 md_block++)
1325 pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
ca460b3c
DZF
1326}
1327
40064aec
DZF
1328/**
1329 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1330 * @tmp_addr: the start of the region served
1331 * @map_size: size of the region served
1332 *
1333 * This is responsible for creating the chunks that serve the first chunk. The
1334 * base_addr is page aligned down of @tmp_addr while the region end is page
1335 * aligned up. Offsets are kept track of to determine the region served. All
1336 * this is done to appease the bitmap allocator in avoiding partial blocks.
1337 *
1338 * RETURNS:
1339 * Chunk serving the region at @tmp_addr of @map_size.
1340 */
c0ebfdc3 1341static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
40064aec 1342 int map_size)
10edf5b0
DZF
1343{
1344 struct pcpu_chunk *chunk;
ca460b3c 1345 unsigned long aligned_addr, lcm_align;
40064aec 1346 int start_offset, offset_bits, region_size, region_bits;
f655f405 1347 size_t alloc_size;
c0ebfdc3
DZF
1348
1349 /* region calculations */
1350 aligned_addr = tmp_addr & PAGE_MASK;
1351
1352 start_offset = tmp_addr - aligned_addr;
6b9d7c8e 1353
ca460b3c
DZF
1354 /*
1355 * Align the end of the region with the LCM of PAGE_SIZE and
1356 * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of
1357 * the other.
1358 */
1359 lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1360 region_size = ALIGN(start_offset + map_size, lcm_align);
10edf5b0 1361
c0ebfdc3 1362 /* allocate chunk */
61cf93d3
DZ
1363 alloc_size = struct_size(chunk, populated,
1364 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
f655f405
MR
1365 chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1366 if (!chunk)
1367 panic("%s: Failed to allocate %zu bytes\n", __func__,
1368 alloc_size);
c0ebfdc3 1369
10edf5b0 1370 INIT_LIST_HEAD(&chunk->list);
c0ebfdc3
DZF
1371
1372 chunk->base_addr = (void *)aligned_addr;
10edf5b0 1373 chunk->start_offset = start_offset;
6b9d7c8e 1374 chunk->end_offset = region_size - chunk->start_offset - map_size;
c0ebfdc3 1375
8ab16c43 1376 chunk->nr_pages = region_size >> PAGE_SHIFT;
40064aec 1377 region_bits = pcpu_chunk_map_bits(chunk);
c0ebfdc3 1378
f655f405
MR
1379 alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1380 chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1381 if (!chunk->alloc_map)
1382 panic("%s: Failed to allocate %zu bytes\n", __func__,
1383 alloc_size);
1384
1385 alloc_size =
1386 BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1387 chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1388 if (!chunk->bound_map)
1389 panic("%s: Failed to allocate %zu bytes\n", __func__,
1390 alloc_size);
1391
1392 alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1393 chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1394 if (!chunk->md_blocks)
1395 panic("%s: Failed to allocate %zu bytes\n", __func__,
1396 alloc_size);
1397
3c7be18a 1398#ifdef CONFIG_MEMCG_KMEM
faf65dde 1399 /* first chunk is free to use */
3c7be18a
RG
1400 chunk->obj_cgroups = NULL;
1401#endif
ca460b3c 1402 pcpu_init_md_blocks(chunk);
10edf5b0
DZF
1403
1404 /* manage populated page bitmap */
1405 chunk->immutable = true;
8ab16c43
DZF
1406 bitmap_fill(chunk->populated, chunk->nr_pages);
1407 chunk->nr_populated = chunk->nr_pages;
b239f7da 1408 chunk->nr_empty_pop_pages = chunk->nr_pages;
10edf5b0 1409
40064aec 1410 chunk->free_bytes = map_size;
c0ebfdc3
DZF
1411
1412 if (chunk->start_offset) {
1413 /* hide the beginning of the bitmap */
40064aec
DZF
1414 offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1415 bitmap_set(chunk->alloc_map, 0, offset_bits);
1416 set_bit(0, chunk->bound_map);
1417 set_bit(offset_bits, chunk->bound_map);
ca460b3c 1418
92c14cab 1419 chunk->chunk_md.first_free = offset_bits;
86b442fb 1420
ca460b3c 1421 pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
c0ebfdc3
DZF
1422 }
1423
6b9d7c8e
DZF
1424 if (chunk->end_offset) {
1425 /* hide the end of the bitmap */
40064aec
DZF
1426 offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1427 bitmap_set(chunk->alloc_map,
1428 pcpu_chunk_map_bits(chunk) - offset_bits,
1429 offset_bits);
1430 set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
1431 chunk->bound_map);
1432 set_bit(region_bits, chunk->bound_map);
6b9d7c8e 1433
ca460b3c
DZF
1434 pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1435 - offset_bits, offset_bits);
1436 }
40064aec 1437
10edf5b0
DZF
1438 return chunk;
1439}
1440
faf65dde 1441static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
6081089f
TH
1442{
1443 struct pcpu_chunk *chunk;
40064aec 1444 int region_bits;
6081089f 1445
47504ee0 1446 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
6081089f
TH
1447 if (!chunk)
1448 return NULL;
1449
40064aec
DZF
1450 INIT_LIST_HEAD(&chunk->list);
1451 chunk->nr_pages = pcpu_unit_pages;
1452 region_bits = pcpu_chunk_map_bits(chunk);
6081089f 1453
40064aec 1454 chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
47504ee0 1455 sizeof(chunk->alloc_map[0]), gfp);
40064aec
DZF
1456 if (!chunk->alloc_map)
1457 goto alloc_map_fail;
6081089f 1458
40064aec 1459 chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
47504ee0 1460 sizeof(chunk->bound_map[0]), gfp);
40064aec
DZF
1461 if (!chunk->bound_map)
1462 goto bound_map_fail;
6081089f 1463
ca460b3c 1464 chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
47504ee0 1465 sizeof(chunk->md_blocks[0]), gfp);
ca460b3c
DZF
1466 if (!chunk->md_blocks)
1467 goto md_blocks_fail;
1468
3c7be18a 1469#ifdef CONFIG_MEMCG_KMEM
faf65dde 1470 if (!mem_cgroup_kmem_disabled()) {
3c7be18a
RG
1471 chunk->obj_cgroups =
1472 pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
1473 sizeof(struct obj_cgroup *), gfp);
1474 if (!chunk->obj_cgroups)
1475 goto objcg_fail;
1476 }
1477#endif
1478
ca460b3c
DZF
1479 pcpu_init_md_blocks(chunk);
1480
40064aec 1481 /* init metadata */
40064aec 1482 chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
c0ebfdc3 1483
6081089f 1484 return chunk;
40064aec 1485
3c7be18a
RG
1486#ifdef CONFIG_MEMCG_KMEM
1487objcg_fail:
1488 pcpu_mem_free(chunk->md_blocks);
1489#endif
ca460b3c
DZF
1490md_blocks_fail:
1491 pcpu_mem_free(chunk->bound_map);
40064aec
DZF
1492bound_map_fail:
1493 pcpu_mem_free(chunk->alloc_map);
1494alloc_map_fail:
1495 pcpu_mem_free(chunk);
1496
1497 return NULL;
6081089f
TH
1498}
1499
1500static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1501{
1502 if (!chunk)
1503 return;
3c7be18a
RG
1504#ifdef CONFIG_MEMCG_KMEM
1505 pcpu_mem_free(chunk->obj_cgroups);
1506#endif
6685b357 1507 pcpu_mem_free(chunk->md_blocks);
40064aec
DZF
1508 pcpu_mem_free(chunk->bound_map);
1509 pcpu_mem_free(chunk->alloc_map);
1d5cfdb0 1510 pcpu_mem_free(chunk);
6081089f
TH
1511}
1512
b539b87f
TH
1513/**
1514 * pcpu_chunk_populated - post-population bookkeeping
1515 * @chunk: pcpu_chunk which got populated
1516 * @page_start: the start page
1517 * @page_end: the end page
1518 *
1519 * Pages in [@page_start,@page_end) have been populated to @chunk. Update
1520 * the bookkeeping information accordingly. Must be called after each
1521 * successful population.
1522 */
40064aec 1523static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
b239f7da 1524 int page_end)
b539b87f
TH
1525{
1526 int nr = page_end - page_start;
1527
1528 lockdep_assert_held(&pcpu_lock);
1529
1530 bitmap_set(chunk->populated, page_start, nr);
1531 chunk->nr_populated += nr;
7e8a6304 1532 pcpu_nr_populated += nr;
40064aec 1533
b239f7da 1534 pcpu_update_empty_pages(chunk, nr);
b539b87f
TH
1535}
1536
1537/**
1538 * pcpu_chunk_depopulated - post-depopulation bookkeeping
1539 * @chunk: pcpu_chunk which got depopulated
1540 * @page_start: the start page
1541 * @page_end: the end page
1542 *
1543 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1544 * Update the bookkeeping information accordingly. Must be called after
1545 * each successful depopulation.
1546 */
1547static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1548 int page_start, int page_end)
1549{
1550 int nr = page_end - page_start;
1551
1552 lockdep_assert_held(&pcpu_lock);
1553
1554 bitmap_clear(chunk->populated, page_start, nr);
1555 chunk->nr_populated -= nr;
7e8a6304 1556 pcpu_nr_populated -= nr;
b239f7da
DZ
1557
1558 pcpu_update_empty_pages(chunk, -nr);
b539b87f
TH
1559}
1560
9f645532
TH
1561/*
1562 * Chunk management implementation.
1563 *
1564 * To allow different implementations, chunk alloc/free and
1565 * [de]population are implemented in a separate file which is pulled
1566 * into this file and compiled together. The following functions
1567 * should be implemented.
1568 *
1569 * pcpu_populate_chunk - populate the specified range of a chunk
1570 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
93274f1d 1571 * pcpu_post_unmap_tlb_flush - flush tlb for the specified range of a chunk
9f645532
TH
1572 * pcpu_create_chunk - create a new chunk
1573 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
1574 * pcpu_addr_to_page - translate address to the corresponding struct page
1575 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
fbf59bc9 1576 */
15d9f3d1 1577static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
47504ee0 1578 int page_start, int page_end, gfp_t gfp);
15d9f3d1
DZ
1579static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1580 int page_start, int page_end);
93274f1d
DZ
1581static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
1582 int page_start, int page_end);
faf65dde 1583static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
9f645532
TH
1584static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1585static struct page *pcpu_addr_to_page(void *addr);
1586static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
fbf59bc9 1587
b0c9778b
TH
1588#ifdef CONFIG_NEED_PER_CPU_KM
1589#include "percpu-km.c"
1590#else
9f645532 1591#include "percpu-vm.c"
b0c9778b 1592#endif
fbf59bc9 1593
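/*
 * Illustrative sketch (not part of the tree): the shape a backing
 * implementation pulled in above must take.  Only two of the hooks are
 * shown and the bodies are placeholders, not the real km/vm code.
 */
#if 0
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	/* back pages [page_start, page_end) of each unit and map them */
	return 0;
}

static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
	/* common metadata setup plus any backing-specific resources */
	return pcpu_alloc_chunk(gfp);
}
#endif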
88999a89
TH
1594/**
1595 * pcpu_chunk_addr_search - determine chunk containing specified address
1596 * @addr: address for which the chunk needs to be determined.
1597 *
c0ebfdc3
DZF
1598 * This is an internal function that handles all but static allocations.
1599 * Static percpu address values should never be passed into the allocator.
1600 *
88999a89
TH
1601 * RETURNS:
1602 * The address of the found chunk.
1603 */
1604static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1605{
c0ebfdc3 1606 /* is it in the dynamic region (first chunk)? */
560f2c23 1607 if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
88999a89 1608 return pcpu_first_chunk;
c0ebfdc3
DZF
1609
1610 /* is it in the reserved region? */
560f2c23 1611 if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
c0ebfdc3 1612 return pcpu_reserved_chunk;
88999a89
TH
1613
1614 /*
1615 * The address is relative to unit0 which might be unused and
1616 * thus unmapped. Offset the address to the unit space of the
1617 * current processor before looking it up in the vmalloc
1618 * space. Note that any possible cpu id can be used here, so
1619 * there's no need to worry about preemption or cpu hotplug.
1620 */
1621 addr += pcpu_unit_offsets[raw_smp_processor_id()];
9f645532 1622 return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
88999a89
TH
1623}
1624
3c7be18a 1625#ifdef CONFIG_MEMCG_KMEM
faf65dde
RG
1626static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
1627 struct obj_cgroup **objcgp)
3c7be18a
RG
1628{
1629 struct obj_cgroup *objcg;
1630
279c3393 1631 if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
faf65dde 1632 return true;
3c7be18a
RG
1633
1634 objcg = get_obj_cgroup_from_current();
1635 if (!objcg)
faf65dde 1636 return true;
3c7be18a 1637
8c57c077 1638 if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size))) {
3c7be18a 1639 obj_cgroup_put(objcg);
faf65dde 1640 return false;
3c7be18a
RG
1641 }
1642
1643 *objcgp = objcg;
faf65dde 1644 return true;
3c7be18a
RG
1645}
1646
1647static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1648 struct pcpu_chunk *chunk, int off,
1649 size_t size)
1650{
1651 if (!objcg)
1652 return;
1653
faf65dde 1654 if (likely(chunk && chunk->obj_cgroups)) {
3c7be18a 1655 chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
772616b0
RG
1656
1657 rcu_read_lock();
1658 mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
8c57c077 1659 pcpu_obj_full_size(size));
772616b0 1660 rcu_read_unlock();
3c7be18a 1661 } else {
8c57c077 1662 obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
3c7be18a
RG
1663 obj_cgroup_put(objcg);
1664 }
1665}
1666
1667static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1668{
1669 struct obj_cgroup *objcg;
1670
faf65dde 1671 if (unlikely(!chunk->obj_cgroups))
3c7be18a
RG
1672 return;
1673
1674 objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
faf65dde
RG
1675 if (!objcg)
1676 return;
3c7be18a
RG
1677 chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
1678
8c57c077 1679 obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
3c7be18a 1680
772616b0
RG
1681 rcu_read_lock();
1682 mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
8c57c077 1683 -pcpu_obj_full_size(size));
772616b0
RG
1684 rcu_read_unlock();
1685
3c7be18a
RG
1686 obj_cgroup_put(objcg);
1687}
1688
1689#else /* CONFIG_MEMCG_KMEM */
faf65dde 1690static bool
3c7be18a
RG
1691pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
1692{
faf65dde 1693 return true;
3c7be18a
RG
1694}
1695
1696static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1697 struct pcpu_chunk *chunk, int off,
1698 size_t size)
1699{
1700}
1701
1702static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1703{
1704}
1705#endif /* CONFIG_MEMCG_KMEM */
1706
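/*
 * Example (illustrative, not part of this file): an allocation becomes
 * memcg-aware, and thus subject to the hooks above, simply by passing
 * __GFP_ACCOUNT.  'struct foo_stats' and 'foo_stats_init' are made-up
 * names.
 */
#if 0
struct foo_stats { u64 events; };
static struct foo_stats __percpu *foo_stats;

static int foo_stats_init(void)
{
	foo_stats = alloc_percpu_gfp(struct foo_stats,
				     GFP_KERNEL | __GFP_ACCOUNT);
	return foo_stats ? 0 : -ENOMEM;
}
#endif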
fbf59bc9 1707/**
edcb4639 1708 * pcpu_alloc - the percpu allocator
cae3aeb8 1709 * @size: size of area to allocate in bytes
fbf59bc9 1710 * @align: alignment of area (max PAGE_SIZE)
edcb4639 1711 * @reserved: allocate from the reserved chunk if available
5835d96e 1712 * @gfp: allocation flags
fbf59bc9 1713 *
5835d96e 1714 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
0ea7eeec
DB
1715 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1716 * then no warning will be triggered on invalid or failed allocation
1717 * requests.
fbf59bc9
TH
1718 *
1719 * RETURNS:
1720 * Percpu pointer to the allocated area on success, NULL on failure.
1721 */
5835d96e
TH
1722static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1723 gfp_t gfp)
fbf59bc9 1724{
28307d93
FM
1725 gfp_t pcpu_gfp;
1726 bool is_atomic;
1727 bool do_warn;
3c7be18a 1728 struct obj_cgroup *objcg = NULL;
f2badb0c 1729 static int warn_limit = 10;
8744d859 1730 struct pcpu_chunk *chunk, *next;
f2badb0c 1731 const char *err;
40064aec 1732 int slot, off, cpu, ret;
403a91b1 1733 unsigned long flags;
f528f0b8 1734 void __percpu *ptr;
40064aec 1735 size_t bits, bit_align;
fbf59bc9 1736
28307d93
FM
1737 gfp = current_gfp_context(gfp);
1738 /* whitelisted flags that can be passed to the backing allocators */
1739 pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1740 is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1741 do_warn = !(gfp & __GFP_NOWARN);
1742
723ad1d9 1743 /*
40064aec
DZF
1744 * There is a minimum allocation size of PCPU_MIN_ALLOC_SIZE, so
1745 * alignment must be at least that many bytes. Rounding up can cost
1746 * up to PCPU_MIN_ALLOC_SIZE - 1 bytes of internal fragmentation
1747 * (e.g. with PCPU_MIN_ALLOC_SIZE == 4, a 3-byte request becomes 4).
723ad1d9 1748 */
d2f3c384
DZF
1749 if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1750 align = PCPU_MIN_ALLOC_SIZE;
723ad1d9 1751
d2f3c384 1752 size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
40064aec
DZF
1753 bits = size >> PCPU_MIN_ALLOC_SHIFT;
1754 bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
2f69fa82 1755
3ca45a46 1756 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1757 !is_power_of_2(align))) {
0ea7eeec 1758 WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
756a025f 1759 size, align);
fbf59bc9
TH
1760 return NULL;
1761 }
1762
faf65dde 1763 if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg)))
3c7be18a 1764 return NULL;
3c7be18a 1765
f52ba1fe
KT
1766 if (!is_atomic) {
1767 /*
1768 * pcpu_balance_workfn() allocates memory under this mutex,
1769 * and it may wait for memory reclaim. Allow current task
1770 * to become OOM victim, in case of memory pressure.
1771 */
3c7be18a 1772 if (gfp & __GFP_NOFAIL) {
f52ba1fe 1773 mutex_lock(&pcpu_alloc_mutex);
3c7be18a
RG
1774 } else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
1775 pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
f52ba1fe 1776 return NULL;
3c7be18a 1777 }
f52ba1fe 1778 }
6710e594 1779
403a91b1 1780 spin_lock_irqsave(&pcpu_lock, flags);
fbf59bc9 1781
edcb4639
TH
1782 /* serve reserved allocations from the reserved chunk if available */
1783 if (reserved && pcpu_reserved_chunk) {
1784 chunk = pcpu_reserved_chunk;
833af842 1785
40064aec
DZF
1786 off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1787 if (off < 0) {
833af842 1788 err = "alloc from reserved chunk failed";
ccea34b5 1789 goto fail_unlock;
f2badb0c 1790 }
833af842 1791
40064aec 1792 off = pcpu_alloc_area(chunk, bits, bit_align, off);
edcb4639
TH
1793 if (off >= 0)
1794 goto area_found;
833af842 1795
f2badb0c 1796 err = "alloc from reserved chunk failed";
ccea34b5 1797 goto fail_unlock;
edcb4639
TH
1798 }
1799
ccea34b5 1800restart:
edcb4639 1801 /* search through normal chunks */
f1833241 1802 for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) {
faf65dde
RG
1803 list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot],
1804 list) {
40064aec
DZF
1805 off = pcpu_find_block_fit(chunk, bits, bit_align,
1806 is_atomic);
8744d859
DZ
1807 if (off < 0) {
1808 if (slot < PCPU_SLOT_FAIL_THRESHOLD)
1809 pcpu_chunk_move(chunk, 0);
fbf59bc9 1810 continue;
8744d859 1811 }
ccea34b5 1812
40064aec 1813 off = pcpu_alloc_area(chunk, bits, bit_align, off);
f1833241
RG
1814 if (off >= 0) {
1815 pcpu_reintegrate_chunk(chunk);
fbf59bc9 1816 goto area_found;
f1833241 1817 }
fbf59bc9
TH
1818 }
1819 }
1820
403a91b1 1821 spin_unlock_irqrestore(&pcpu_lock, flags);
ccea34b5 1822
b38d08f3
TH
1823 /*
1824 * No space left. Create a new chunk. We don't want multiple
1825 * tasks to create chunks simultaneously. Serialize and create iff
1826 * there's still no empty chunk after grabbing the mutex.
1827 */
11df02bf
DZ
1828 if (is_atomic) {
1829 err = "atomic alloc failed, no space left";
5835d96e 1830 goto fail;
11df02bf 1831 }
5835d96e 1832
faf65dde
RG
1833 if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) {
1834 chunk = pcpu_create_chunk(pcpu_gfp);
b38d08f3
TH
1835 if (!chunk) {
1836 err = "failed to allocate new chunk";
1837 goto fail;
1838 }
1839
1840 spin_lock_irqsave(&pcpu_lock, flags);
1841 pcpu_chunk_relocate(chunk, -1);
1842 } else {
1843 spin_lock_irqsave(&pcpu_lock, flags);
f2badb0c 1844 }
ccea34b5 1845
ccea34b5 1846 goto restart;
fbf59bc9
TH
1847
1848area_found:
30a5b536 1849 pcpu_stats_area_alloc(chunk, size);
403a91b1 1850 spin_unlock_irqrestore(&pcpu_lock, flags);
ccea34b5 1851
dca49645 1852 /* populate if not all pages are already there */
5835d96e 1853 if (!is_atomic) {
ec288a2c 1854 unsigned int page_end, rs, re;
dca49645 1855
ec288a2c 1856 rs = PFN_DOWN(off);
e04d3208 1857 page_end = PFN_UP(off + size);
b38d08f3 1858
ec288a2c 1859 for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) {
e04d3208
TH
1860 WARN_ON(chunk->immutable);
1861
554fef1c 1862 ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
e04d3208
TH
1863
1864 spin_lock_irqsave(&pcpu_lock, flags);
1865 if (ret) {
40064aec 1866 pcpu_free_area(chunk, off);
e04d3208
TH
1867 err = "failed to populate";
1868 goto fail_unlock;
1869 }
b239f7da 1870 pcpu_chunk_populated(chunk, rs, re);
e04d3208 1871 spin_unlock_irqrestore(&pcpu_lock, flags);
dca49645 1872 }
fbf59bc9 1873
e04d3208
TH
1874 mutex_unlock(&pcpu_alloc_mutex);
1875 }
ccea34b5 1876
faf65dde 1877 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1a4d7607
TH
1878 pcpu_schedule_balance_work();
1879
dca49645
TH
1880 /* clear the areas and return address relative to base address */
1881 for_each_possible_cpu(cpu)
1882 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1883
f528f0b8 1884 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
8a8c35fa 1885 kmemleak_alloc_percpu(ptr, size, gfp);
df95e795 1886
f67bed13
VA
1887 trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align,
1888 chunk->base_addr, off, ptr,
1889 pcpu_obj_full_size(size), gfp);
df95e795 1890
3c7be18a
RG
1891 pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
1892
f528f0b8 1893 return ptr;
ccea34b5
TH
1894
1895fail_unlock:
403a91b1 1896 spin_unlock_irqrestore(&pcpu_lock, flags);
b38d08f3 1897fail:
df95e795
DZ
1898 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1899
0ea7eeec 1900 if (!is_atomic && do_warn && warn_limit) {
870d4b12 1901 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
598d8091 1902 size, align, is_atomic, err);
f2badb0c
TH
1903 dump_stack();
1904 if (!--warn_limit)
870d4b12 1905 pr_info("limit reached, disable warning\n");
f2badb0c 1906 }
1a4d7607 1907 if (is_atomic) {
f0953a1b 1908 /* see the flag handling in pcpu_balance_workfn() */
1a4d7607
TH
1909 pcpu_atomic_alloc_failed = true;
1910 pcpu_schedule_balance_work();
6710e594
TH
1911 } else {
1912 mutex_unlock(&pcpu_alloc_mutex);
1a4d7607 1913 }
3c7be18a
RG
1914
1915 pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1916
ccea34b5 1917 return NULL;
fbf59bc9 1918}
edcb4639
TH
1919
1920/**
5835d96e 1921 * __alloc_percpu_gfp - allocate dynamic percpu area
edcb4639
TH
1922 * @size: size of area to allocate in bytes
1923 * @align: alignment of area (max PAGE_SIZE)
5835d96e 1924 * @gfp: allocation flags
edcb4639 1925 *
5835d96e
TH
1926 * Allocate zero-filled percpu area of @size bytes aligned at @align. If
1927 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
0ea7eeec
DB
1928 * be called from any context but is a lot more likely to fail. If @gfp
1929 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1930 * allocation requests.
ccea34b5 1931 *
edcb4639
TH
1932 * RETURNS:
1933 * Percpu pointer to the allocated area on success, NULL on failure.
1934 */
5835d96e
TH
1935void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1936{
1937 return pcpu_alloc(size, align, false, gfp);
1938}
1939EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
1940
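/*
 * Example (illustrative): an atomic-context allocation.  Without
 * GFP_KERNEL the allocator will not sleep and is served only from
 * already populated pages, so failure must be handled.  'foo_ptr' and
 * 'foo_fastpath_init' are made-up names.
 */
#if 0
static u64 __percpu *foo_ptr;

static int foo_fastpath_init(void)
{
	foo_ptr = __alloc_percpu_gfp(sizeof(u64), __alignof__(u64),
				     GFP_NOWAIT);
	if (!foo_ptr)
		return -ENOMEM;	/* retry later from process context */
	return 0;
}
#endif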
1941/**
1942 * __alloc_percpu - allocate dynamic percpu area
1943 * @size: size of area to allocate in bytes
1944 * @align: alignment of area (max PAGE_SIZE)
1945 *
1946 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1947 */
43cf38eb 1948void __percpu *__alloc_percpu(size_t size, size_t align)
edcb4639 1949{
5835d96e 1950 return pcpu_alloc(size, align, false, GFP_KERNEL);
edcb4639 1951}
fbf59bc9
TH
1952EXPORT_SYMBOL_GPL(__alloc_percpu);
1953
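/*
 * Usage sketch (illustrative): a dynamic percpu hit counter.  The
 * 'hits' identifiers are hypothetical.
 */
#if 0
static u64 __percpu *hits;

static int hits_init(void)
{
	hits = __alloc_percpu(sizeof(u64), __alignof__(u64));
	return hits ? 0 : -ENOMEM;
}

static void hits_record(void)
{
	this_cpu_inc(*hits);	/* cheap, preemption-safe increment */
}
#endif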
edcb4639
TH
1954/**
1955 * __alloc_reserved_percpu - allocate reserved percpu area
1956 * @size: size of area to allocate in bytes
1957 * @align: alignment of area (max PAGE_SIZE)
1958 *
9329ba97
TH
1959 * Allocate zero-filled percpu area of @size bytes aligned at @align
1960 * from the reserved percpu area if the arch has set it up; otherwise,
1961 * allocation is served from the same dynamic area. Might sleep.
1962 * Might trigger writeouts.
edcb4639 1963 *
ccea34b5
TH
1964 * CONTEXT:
1965 * Does GFP_KERNEL allocation.
1966 *
edcb4639
TH
1967 * RETURNS:
1968 * Percpu pointer to the allocated area on success, NULL on failure.
1969 */
43cf38eb 1970void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
edcb4639 1971{
5835d96e 1972 return pcpu_alloc(size, align, true, GFP_KERNEL);
edcb4639
TH
1973}
1974
a56dbddf 1975/**
67c2669d 1976 * pcpu_balance_free - manage the amount of free chunks
f1833241 1977 * @empty_only: free chunks only if there are no populated pages
a56dbddf 1978 *
f1833241
RG
1979 * If empty_only is %false, reclaim all fully free chunks regardless of the
1980 * number of populated pages. Otherwise, only reclaim chunks that have no
1981 * populated pages.
e4d77700
RG
1982 *
1983 * CONTEXT:
1984 * pcpu_lock (can be dropped temporarily)
a56dbddf 1985 */
faf65dde 1986static void pcpu_balance_free(bool empty_only)
fbf59bc9 1987{
fe6bd8c3 1988 LIST_HEAD(to_free);
faf65dde 1989 struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
a56dbddf
TH
1990 struct pcpu_chunk *chunk, *next;
1991
e4d77700 1992 lockdep_assert_held(&pcpu_lock);
a56dbddf 1993
1a4d7607
TH
1994 /*
1995 * There's no reason to keep around multiple unused chunks and VM
1996 * areas can be scarce. Destroy all free chunks except for one.
1997 */
fe6bd8c3 1998 list_for_each_entry_safe(chunk, next, free_head, list) {
a56dbddf
TH
1999 WARN_ON(chunk->immutable);
2000
2001 /* spare the first one */
fe6bd8c3 2002 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
a56dbddf
TH
2003 continue;
2004
f1833241
RG
2005 if (!empty_only || chunk->nr_empty_pop_pages == 0)
2006 list_move(&chunk->list, &to_free);
a56dbddf
TH
2007 }
2008
e4d77700
RG
2009 if (list_empty(&to_free))
2010 return;
a56dbddf 2011
e4d77700 2012 spin_unlock_irq(&pcpu_lock);
fe6bd8c3 2013 list_for_each_entry_safe(chunk, next, &to_free, list) {
e837dfde 2014 unsigned int rs, re;
dca49645 2015
ec288a2c 2016 for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
a93ace48 2017 pcpu_depopulate_chunk(chunk, rs, re);
b539b87f
TH
2018 spin_lock_irq(&pcpu_lock);
2019 pcpu_chunk_depopulated(chunk, rs, re);
2020 spin_unlock_irq(&pcpu_lock);
a93ace48 2021 }
6081089f 2022 pcpu_destroy_chunk(chunk);
accd4f36 2023 cond_resched();
a56dbddf 2024 }
e4d77700 2025 spin_lock_irq(&pcpu_lock);
67c2669d
RG
2026}
2027
2028/**
2029 * pcpu_balance_populated - manage the amount of populated pages
67c2669d
RG
2030 *
2031 * Maintain a certain amount of populated pages to satisfy atomic allocations.
2032 * It is possible that this is called when physical memory is scarce, causing
2033 * the OOM killer to be triggered. We should avoid doing so until an actual
2034 * allocation causes the failure, as it is possible that requests can be
2035 * serviced from already backed regions.
e4d77700
RG
2036 *
2037 * CONTEXT:
2038 * pcpu_lock (can be dropped temporarily)
67c2669d 2039 */
faf65dde 2040static void pcpu_balance_populated(void)
67c2669d
RG
2041{
2042 /* gfp flags passed to underlying allocators */
2043 const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
67c2669d
RG
2044 struct pcpu_chunk *chunk;
2045 int slot, nr_to_pop, ret;
971f3918 2046
e4d77700 2047 lockdep_assert_held(&pcpu_lock);
971f3918 2048
1a4d7607
TH
2049 /*
2050 * Ensure there are a certain number of free populated pages for
2051 * atomic allocs. Fill up from the most packed so that atomic
2052 * allocs don't increase fragmentation. If atomic allocation
2053 * failed previously, always populate the maximum amount. This
2054 * should prevent atomic allocs larger than PAGE_SIZE from
2055 * failing indefinitely; however, large atomic allocs are not
2056 * something we support properly and can be highly unreliable and
2057 * inefficient.
2058 */
2059retry_pop:
2060 if (pcpu_atomic_alloc_failed) {
2061 nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
2062 /* best effort anyway, don't worry about synchronization */
2063 pcpu_atomic_alloc_failed = false;
2064 } else {
2065 nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
faf65dde 2066 pcpu_nr_empty_pop_pages,
1a4d7607
TH
2067 0, PCPU_EMPTY_POP_PAGES_HIGH);
2068 }
2069
1c29a3ce 2070 for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
e837dfde 2071 unsigned int nr_unpop = 0, rs, re;
1a4d7607
TH
2072
2073 if (!nr_to_pop)
2074 break;
2075
faf65dde 2076 list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
8ab16c43 2077 nr_unpop = chunk->nr_pages - chunk->nr_populated;
1a4d7607
TH
2078 if (nr_unpop)
2079 break;
2080 }
1a4d7607
TH
2081
2082 if (!nr_unpop)
2083 continue;
2084
2085 /* @chunk can't go away while pcpu_alloc_mutex is held */
ec288a2c 2086 for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
e837dfde 2087 int nr = min_t(int, re - rs, nr_to_pop);
1a4d7607 2088
e4d77700 2089 spin_unlock_irq(&pcpu_lock);
47504ee0 2090 ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
e4d77700
RG
2091 cond_resched();
2092 spin_lock_irq(&pcpu_lock);
1a4d7607
TH
2093 if (!ret) {
2094 nr_to_pop -= nr;
b239f7da 2095 pcpu_chunk_populated(chunk, rs, rs + nr);
1a4d7607
TH
2096 } else {
2097 nr_to_pop = 0;
2098 }
2099
2100 if (!nr_to_pop)
2101 break;
2102 }
2103 }
2104
2105 if (nr_to_pop) {
2106 /* ran out of chunks to populate, create a new one and retry */
e4d77700 2107 spin_unlock_irq(&pcpu_lock);
faf65dde 2108 chunk = pcpu_create_chunk(gfp);
e4d77700
RG
2109 cond_resched();
2110 spin_lock_irq(&pcpu_lock);
1a4d7607 2111 if (chunk) {
1a4d7607 2112 pcpu_chunk_relocate(chunk, -1);
1a4d7607
TH
2113 goto retry_pop;
2114 }
2115 }
fbf59bc9 2116}
1a4d7607 2117
f1833241
RG
2118/**
2119 * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages
f1833241
RG
2120 *
2121 * Scan over chunks in the depopulate list and try to release unused populated
2122 * pages back to the system. Depopulated chunks are sidelined to prevent
2123 * repopulating these pages unless required. Fully free chunks are reintegrated
2124 * and freed accordingly (one is kept around). If we drop below the empty
2125 * populated pages threshold, reintegrate the chunk if it has empty free pages.
2126 * Each chunk is scanned in reverse order to keep populated pages close to
2127 * the beginning of the chunk.
e4d77700
RG
2128 *
2129 * CONTEXT:
2130 * pcpu_lock (can be dropped temporarily)
2131 *
f1833241 2132 */
faf65dde 2133static void pcpu_reclaim_populated(void)
f1833241 2134{
f1833241
RG
2135 struct pcpu_chunk *chunk;
2136 struct pcpu_block_md *block;
93274f1d 2137 int freed_page_start, freed_page_end;
f1833241 2138 int i, end;
93274f1d 2139 bool reintegrate;
f1833241 2140
e4d77700 2141 lockdep_assert_held(&pcpu_lock);
f1833241 2142
f1833241
RG
2143 /*
2144 * Once a chunk is isolated to the to_depopulate list, the chunk is no
2145 * longer discoverable to allocations which may populate pages. The only
2146 * other accessor is the free path, which only returns the area back to
2147 * the allocator without touching the populated bitmap.
2148 */
faf65dde
RG
2149 while (!list_empty(&pcpu_chunk_lists[pcpu_to_depopulate_slot])) {
2150 chunk = list_first_entry(&pcpu_chunk_lists[pcpu_to_depopulate_slot],
f1833241
RG
2151 struct pcpu_chunk, list);
2152 WARN_ON(chunk->immutable);
2153
2154 /*
2155 * Scan chunk's pages in the reverse order to keep populated
2156 * pages close to the beginning of the chunk.
2157 */
93274f1d
DZ
2158 freed_page_start = chunk->nr_pages;
2159 freed_page_end = 0;
2160 reintegrate = false;
f1833241
RG
2161 for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
2162 /* no more work to do */
2163 if (chunk->nr_empty_pop_pages == 0)
2164 break;
2165
2166 /* reintegrate chunk to prevent atomic alloc failures */
faf65dde 2167 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
93274f1d
DZ
2168 reintegrate = true;
2169 goto end_chunk;
f1833241
RG
2170 }
2171
2172 /*
2173 * If the page is empty and populated, start or
2174 * extend the (i, end) range. If i == 0, decrease
2175 * i and perform the depopulation to cover the last
2176 * (first) page in the chunk.
2177 */
2178 block = chunk->md_blocks + i;
2179 if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS &&
2180 test_bit(i, chunk->populated)) {
2181 if (end == -1)
2182 end = i;
2183 if (i > 0)
2184 continue;
2185 i--;
2186 }
2187
2188 /* depopulate if there is an active range */
2189 if (end == -1)
2190 continue;
2191
2192 spin_unlock_irq(&pcpu_lock);
2193 pcpu_depopulate_chunk(chunk, i + 1, end + 1);
2194 cond_resched();
2195 spin_lock_irq(&pcpu_lock);
2196
2197 pcpu_chunk_depopulated(chunk, i + 1, end + 1);
93274f1d
DZ
2198 freed_page_start = min(freed_page_start, i + 1);
2199 freed_page_end = max(freed_page_end, end + 1);
f1833241
RG
2200
2201 /* reset the range and continue */
2202 end = -1;
2203 }
2204
93274f1d
DZ
2205end_chunk:
2206 /* batch tlb flush per chunk to amortize cost */
2207 if (freed_page_start < freed_page_end) {
2208 spin_unlock_irq(&pcpu_lock);
2209 pcpu_post_unmap_tlb_flush(chunk,
2210 freed_page_start,
2211 freed_page_end);
2212 cond_resched();
2213 spin_lock_irq(&pcpu_lock);
2214 }
2215
2216 if (reintegrate || chunk->free_bytes == pcpu_unit_size)
f1833241
RG
2217 pcpu_reintegrate_chunk(chunk);
2218 else
93274f1d
DZ
2219 list_move_tail(&chunk->list,
2220 &pcpu_chunk_lists[pcpu_sidelined_slot]);
f1833241 2221 }
fbf59bc9
TH
2222}
2223
3c7be18a
RG
2224/**
2225 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
2226 * @work: unused
2227 *
f1833241
RG
2228 * Manage the number of fully free chunks and the number of populated
2229 * pages. An important consideration is when pages are freed and how
2230 * they contribute to the global counts.
3c7be18a
RG
2231 */
2232static void pcpu_balance_workfn(struct work_struct *work)
2233{
f1833241
RG
2234 /*
2235 * pcpu_balance_free() is called twice because the first time we may
2236 * trim pages in the active pcpu_nr_empty_pop_pages which may cause us
2237 * to grow other chunks. This then gives pcpu_reclaim_populated() time
2238 * to move fully free chunks to the active list to be freed if
2239 * appropriate.
2240 */
faf65dde 2241 mutex_lock(&pcpu_alloc_mutex);
e4d77700
RG
2242 spin_lock_irq(&pcpu_lock);
2243
faf65dde
RG
2244 pcpu_balance_free(false);
2245 pcpu_reclaim_populated();
2246 pcpu_balance_populated();
2247 pcpu_balance_free(true);
3c7be18a 2248
e4d77700 2249 spin_unlock_irq(&pcpu_lock);
faf65dde 2250 mutex_unlock(&pcpu_alloc_mutex);
3c7be18a
RG
2251}
2252
fbf59bc9
TH
2253/**
2254 * free_percpu - free percpu area
2255 * @ptr: pointer to area to free
2256 *
ccea34b5
TH
2257 * Free percpu area @ptr.
2258 *
2259 * CONTEXT:
2260 * Can be called from atomic context.
fbf59bc9 2261 */
43cf38eb 2262void free_percpu(void __percpu *ptr)
fbf59bc9 2263{
129182e5 2264 void *addr;
fbf59bc9 2265 struct pcpu_chunk *chunk;
ccea34b5 2266 unsigned long flags;
3c7be18a 2267 int size, off;
198790d9 2268 bool need_balance = false;
fbf59bc9
TH
2269
2270 if (!ptr)
2271 return;
2272
f528f0b8
CM
2273 kmemleak_free_percpu(ptr);
2274
129182e5
AM
2275 addr = __pcpu_ptr_to_addr(ptr);
2276
ccea34b5 2277 spin_lock_irqsave(&pcpu_lock, flags);
fbf59bc9
TH
2278
2279 chunk = pcpu_chunk_addr_search(addr);
bba174f5 2280 off = addr - chunk->base_addr;
fbf59bc9 2281
3c7be18a
RG
2282 size = pcpu_free_area(chunk, off);
2283
3c7be18a 2284 pcpu_memcg_free_hook(chunk, off, size);
fbf59bc9 2285
f1833241
RG
2286 /*
2287 * If there is more than one fully free chunk, wake up the grim reaper.
2288 * If the chunk is isolated, it may be in the process of being
2289 * reclaimed. Let reclaim manage the cleanup of that chunk.
2290 */
2291 if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) {
fbf59bc9
TH
2292 struct pcpu_chunk *pos;
2293
faf65dde 2294 list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list)
fbf59bc9 2295 if (pos != chunk) {
198790d9 2296 need_balance = true;
fbf59bc9
TH
2297 break;
2298 }
f1833241
RG
2299 } else if (pcpu_should_reclaim_chunk(chunk)) {
2300 pcpu_isolate_chunk(chunk);
2301 need_balance = true;
fbf59bc9
TH
2302 }
2303
df95e795
DZ
2304 trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2305
ccea34b5 2306 spin_unlock_irqrestore(&pcpu_lock, flags);
198790d9
JS
2307
2308 if (need_balance)
2309 pcpu_schedule_balance_work();
fbf59bc9
TH
2310}
2311EXPORT_SYMBOL_GPL(free_percpu);
2312
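/*
 * Teardown sketch (illustrative), continuing the hypothetical 'hits'
 * counter above: fold the per-cpu values into a total, then return the
 * area to the allocator.
 */
#if 0
static u64 hits_drain(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(hits, cpu);

	free_percpu(hits);	/* may be called from atomic context */
	hits = NULL;
	return sum;
}
#endif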
383776fa 2313bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
10fad5e4 2314{
bbddff05 2315#ifdef CONFIG_SMP
10fad5e4
TH
2316 const size_t static_size = __per_cpu_end - __per_cpu_start;
2317 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2318 unsigned int cpu;
2319
2320 for_each_possible_cpu(cpu) {
2321 void *start = per_cpu_ptr(base, cpu);
383776fa 2322 void *va = (void *)addr;
10fad5e4 2323
383776fa 2324 if (va >= start && va < start + static_size) {
8ce371f9 2325 if (can_addr) {
383776fa 2326 *can_addr = (unsigned long) (va - start);
8ce371f9
PZ
2327 *can_addr += (unsigned long)
2328 per_cpu_ptr(base, get_boot_cpu_id());
2329 }
10fad5e4 2330 return true;
383776fa
TG
2331 }
2332 }
bbddff05
TH
2333#endif
2334 /* on UP, can't distinguish from other static vars, always false */
10fad5e4
TH
2335 return false;
2336}
2337
383776fa
TG
2338/**
2339 * is_kernel_percpu_address - test whether address is from static percpu area
2340 * @addr: address to test
2341 *
2342 * Test whether @addr belongs to in-kernel static percpu area. Module
2343 * static percpu areas are not considered. For those, use
2344 * is_module_percpu_address().
2345 *
2346 * RETURNS:
2347 * %true if @addr is from in-kernel static percpu area, %false otherwise.
2348 */
2349bool is_kernel_percpu_address(unsigned long addr)
2350{
2351 return __is_kernel_percpu_address(addr, NULL);
2352}
2353
3b034b0d
VG
2354/**
2355 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
2356 * @addr: the address to be converted to physical address
2357 *
2358 * Given @addr which is dereferenceable address obtained via one of
2359 * percpu access macros, this function translates it into its physical
2360 * address. The caller is responsible for ensuring @addr stays valid
2361 * until this function finishes.
2362 *
67589c71
DY
2363 * The percpu allocator has a special setup for the first chunk, which
2364 * currently supports either embedding in the linear address space or a
2365 * vmalloc mapping; from the second chunk on, the backing allocator
2366 * (currently either vm or km) provides the translation.
2367 *
bffc4375 2368 * The addr can be translated simply without checking if it falls into the
67589c71
DY
2369 * first chunk. But the current code better reflects how the percpu
2370 * allocator actually works, and the verification can discover bugs both
2371 * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
2372 * So we keep the current code.
2373 *
3b034b0d
VG
2374 * RETURNS:
2375 * The physical address for @addr.
2376 */
2377phys_addr_t per_cpu_ptr_to_phys(void *addr)
2378{
9983b6f0
TH
2379 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2380 bool in_first_chunk = false;
a855b84c 2381 unsigned long first_low, first_high;
9983b6f0
TH
2382 unsigned int cpu;
2383
2384 /*
a855b84c 2385 * The following test on unit_low/high isn't strictly
9983b6f0
TH
2386 * necessary but will speed up lookups of addresses which
2387 * aren't in the first chunk.
c0ebfdc3
DZF
2388 *
2389 * The address check is against full chunk sizes. pcpu_base_addr
2390 * points to the beginning of the first chunk including the
2391 * static region. Assumes good intent as the first chunk may
2392 * not be full (ie. < pcpu_unit_pages in size).
9983b6f0 2393 */
c0ebfdc3
DZF
2394 first_low = (unsigned long)pcpu_base_addr +
2395 pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2396 first_high = (unsigned long)pcpu_base_addr +
2397 pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
a855b84c
TH
2398 if ((unsigned long)addr >= first_low &&
2399 (unsigned long)addr < first_high) {
9983b6f0
TH
2400 for_each_possible_cpu(cpu) {
2401 void *start = per_cpu_ptr(base, cpu);
2402
2403 if (addr >= start && addr < start + pcpu_unit_size) {
2404 in_first_chunk = true;
2405 break;
2406 }
2407 }
2408 }
2409
2410 if (in_first_chunk) {
eac522ef 2411 if (!is_vmalloc_addr(addr))
020ec653
TH
2412 return __pa(addr);
2413 else
9f57bd4d
ES
2414 return page_to_phys(vmalloc_to_page(addr)) +
2415 offset_in_page(addr);
020ec653 2416 } else
9f57bd4d
ES
2417 return page_to_phys(pcpu_addr_to_page(addr)) +
2418 offset_in_page(addr);
3b034b0d
VG
2419}
2420
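/*
 * Example (illustrative): translating one cpu's slot of the
 * hypothetical 'hits' area above into a physical address.
 */
#if 0
static phys_addr_t hits_phys(unsigned int cpu)
{
	return per_cpu_ptr_to_phys(per_cpu_ptr(hits, cpu));
}
#endif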
fbf59bc9 2421/**
fd1e8a1f
TH
2422 * pcpu_alloc_alloc_info - allocate percpu allocation info
2423 * @nr_groups: the number of groups
2424 * @nr_units: the number of units
2425 *
2426 * Allocate ai which is large enough for @nr_groups groups containing
2427 * @nr_units units. The returned ai's groups[0].cpu_map points to the
2428 * cpu_map array which is long enough for @nr_units and filled with
2429 * NR_CPUS. It's the caller's responsibility to initialize cpu_map
2430 * pointer of other groups.
2431 *
2432 * RETURNS:
2433 * Pointer to the allocated pcpu_alloc_info on success, NULL on
2434 * failure.
2435 */
2436struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2437 int nr_units)
2438{
2439 struct pcpu_alloc_info *ai;
2440 size_t base_size, ai_size;
2441 void *ptr;
2442 int unit;
2443
14d37612 2444 base_size = ALIGN(struct_size(ai, groups, nr_groups),
fd1e8a1f
TH
2445 __alignof__(ai->groups[0].cpu_map[0]));
2446 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2447
26fb3dae 2448 ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
fd1e8a1f
TH
2449 if (!ptr)
2450 return NULL;
2451 ai = ptr;
2452 ptr += base_size;
2453
2454 ai->groups[0].cpu_map = ptr;
2455
2456 for (unit = 0; unit < nr_units; unit++)
2457 ai->groups[0].cpu_map[unit] = NR_CPUS;
2458
2459 ai->nr_groups = nr_groups;
2460 ai->__ai_size = PFN_ALIGN(ai_size);
2461
2462 return ai;
2463}
2464
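/*
 * Sketch (illustrative): a flat, single-group layout as an arch setup
 * path might build it.  Sizes and further initialization are elided.
 */
#if 0
struct pcpu_alloc_info *ai;
unsigned int cpu, unit = 0;

ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
if (!ai)
	return -ENOMEM;

ai->groups[0].nr_units = num_possible_cpus();
for_each_possible_cpu(cpu)
	ai->groups[0].cpu_map[unit++] = cpu;
#endif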
2465/**
2466 * pcpu_free_alloc_info - free percpu allocation info
2467 * @ai: pcpu_alloc_info to free
2468 *
2469 * Free @ai which was allocated by pcpu_alloc_alloc_info().
2470 */
2471void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2472{
4421cca0 2473 memblock_free(ai, ai->__ai_size);
fd1e8a1f
TH
2474}
2475
fd1e8a1f
TH
2476/**
2477 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2478 * @lvl: loglevel
2479 * @ai: allocation info to dump
2480 *
2481 * Print out information about @ai using loglevel @lvl.
2482 */
2483static void pcpu_dump_alloc_info(const char *lvl,
2484 const struct pcpu_alloc_info *ai)
033e48fb 2485{
fd1e8a1f 2486 int group_width = 1, cpu_width = 1, width;
033e48fb 2487 char empty_str[] = "--------";
fd1e8a1f
TH
2488 int alloc = 0, alloc_end = 0;
2489 int group, v;
2490 int upa, apl; /* units per alloc, allocs per line */
2491
2492 v = ai->nr_groups;
2493 while (v /= 10)
2494 group_width++;
033e48fb 2495
fd1e8a1f 2496 v = num_possible_cpus();
033e48fb 2497 while (v /= 10)
fd1e8a1f
TH
2498 cpu_width++;
2499 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
033e48fb 2500
fd1e8a1f
TH
2501 upa = ai->alloc_size / ai->unit_size;
2502 width = upa * (cpu_width + 1) + group_width + 3;
2503 apl = rounddown_pow_of_two(max(60 / width, 1));
033e48fb 2504
fd1e8a1f
TH
2505 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2506 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2507 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
033e48fb 2508
fd1e8a1f
TH
2509 for (group = 0; group < ai->nr_groups; group++) {
2510 const struct pcpu_group_info *gi = &ai->groups[group];
2511 int unit = 0, unit_end = 0;
2512
2513 BUG_ON(gi->nr_units % upa);
2514 for (alloc_end += gi->nr_units / upa;
2515 alloc < alloc_end; alloc++) {
2516 if (!(alloc % apl)) {
1170532b 2517 pr_cont("\n");
fd1e8a1f
TH
2518 printk("%spcpu-alloc: ", lvl);
2519 }
1170532b 2520 pr_cont("[%0*d] ", group_width, group);
fd1e8a1f
TH
2521
2522 for (unit_end += upa; unit < unit_end; unit++)
2523 if (gi->cpu_map[unit] != NR_CPUS)
1170532b
JP
2524 pr_cont("%0*d ",
2525 cpu_width, gi->cpu_map[unit]);
fd1e8a1f 2526 else
1170532b 2527 pr_cont("%s ", empty_str);
033e48fb 2528 }
033e48fb 2529 }
1170532b 2530 pr_cont("\n");
033e48fb 2531}
033e48fb 2532
fbf59bc9 2533/**
8d408b4b 2534 * pcpu_setup_first_chunk - initialize the first percpu chunk
fd1e8a1f 2535 * @ai: pcpu_alloc_info describing how the percpu area is shaped
38a6be52 2536 * @base_addr: mapped address
8d408b4b
TH
2537 *
2538 * Initialize the first percpu chunk which contains the kernel static
69ab285b 2539 * percpu area. This function is to be called from arch percpu area
38a6be52 2540 * setup path.
8d408b4b 2541 *
fd1e8a1f
TH
2542 * @ai contains all information necessary to initialize the first
2543 * chunk and prime the dynamic percpu allocator.
2544 *
2545 * @ai->static_size is the size of static percpu area.
2546 *
2547 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
edcb4639
TH
2548 * reserve after the static area in the first chunk. This reserves
2549 * the first chunk such that it's available only through reserved
2550 * percpu allocation. This is primarily used to serve module percpu
2551 * static areas on architectures where the addressing model has
2552 * limited offset range for symbol relocations to guarantee module
2553 * percpu symbols fall inside the relocatable range.
2554 *
fd1e8a1f
TH
2555 * @ai->dyn_size determines the number of bytes available for dynamic
2556 * allocation in the first chunk. The area between @ai->static_size +
2557 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
6074d5b0 2558 *
fd1e8a1f
TH
2559 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2560 * and equal to or larger than @ai->static_size + @ai->reserved_size +
2561 * @ai->dyn_size.
8d408b4b 2562 *
fd1e8a1f
TH
2563 * @ai->atom_size is the allocation atom size and used as alignment
2564 * for vm areas.
8d408b4b 2565 *
fd1e8a1f
TH
2566 * @ai->alloc_size is the allocation size and always multiple of
2567 * @ai->atom_size. This is larger than @ai->atom_size if
2568 * @ai->unit_size is larger than @ai->atom_size.
2569 *
2570 * @ai->nr_groups and @ai->groups describe virtual memory layout of
2571 * percpu areas. Units which should be colocated are put into the
2572 * same group. Dynamic VM areas will be allocated according to these
2573 * groupings. If @ai->nr_groups is zero, a single group containing
2574 * all units is assumed.
8d408b4b 2575 *
38a6be52
TH
2576 * The caller should have mapped the first chunk at @base_addr and
2577 * copied static data to each unit.
fbf59bc9 2578 *
c0ebfdc3
DZF
2579 * The first chunk will always contain a static and a dynamic region.
2580 * However, the static region is not managed by any chunk. If the first
2581 * chunk also contains a reserved region, it is served by two chunks -
2582 * one for the reserved region and one for the dynamic region. They
2583 * share the same vm, but use offset regions in the area allocation map.
2584 * The chunk serving the dynamic region is circulated in the chunk slots
2585 * and available for dynamic allocation like any other chunk.
fbf59bc9 2586 */
163fa234
KW
2587void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2588 void *base_addr)
fbf59bc9 2589{
b9c39442 2590 size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
d2f3c384 2591 size_t static_size, dyn_size;
0c4169c3 2592 struct pcpu_chunk *chunk;
6563297c
TH
2593 unsigned long *group_offsets;
2594 size_t *group_sizes;
fb435d52 2595 unsigned long *unit_off;
fbf59bc9 2596 unsigned int cpu;
fd1e8a1f
TH
2597 int *unit_map;
2598 int group, unit, i;
c0ebfdc3
DZF
2599 int map_size;
2600 unsigned long tmp_addr;
f655f405 2601 size_t alloc_size;
fbf59bc9 2602
635b75fc
TH
2603#define PCPU_SETUP_BUG_ON(cond) do { \
2604 if (unlikely(cond)) { \
870d4b12
JP
2605 pr_emerg("failed to initialize, %s\n", #cond); \
2606 pr_emerg("cpu_possible_mask=%*pb\n", \
807de073 2607 cpumask_pr_args(cpu_possible_mask)); \
635b75fc
TH
2608 pcpu_dump_alloc_info(KERN_EMERG, ai); \
2609 BUG(); \
2610 } \
2611} while (0)
2612
2f39e637 2613 /* sanity checks */
635b75fc 2614 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
bbddff05 2615#ifdef CONFIG_SMP
635b75fc 2616 PCPU_SETUP_BUG_ON(!ai->static_size);
f09f1243 2617 PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
bbddff05 2618#endif
635b75fc 2619 PCPU_SETUP_BUG_ON(!base_addr);
f09f1243 2620 PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
635b75fc 2621 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
f09f1243 2622 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
635b75fc 2623 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
ca460b3c 2624 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
099a19d9 2625 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
fb29a2cc 2626 PCPU_SETUP_BUG_ON(!ai->dyn_size);
d2f3c384 2627 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
ca460b3c
DZF
2628 PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2629 IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
9f645532 2630 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
8d408b4b 2631
6563297c 2632 /* process group information and build config tables accordingly */
f655f405
MR
2633 alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2634 group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2635 if (!group_offsets)
2636 panic("%s: Failed to allocate %zu bytes\n", __func__,
2637 alloc_size);
2638
2639 alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2640 group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2641 if (!group_sizes)
2642 panic("%s: Failed to allocate %zu bytes\n", __func__,
2643 alloc_size);
2644
2645 alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2646 unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2647 if (!unit_map)
2648 panic("%s: Failed to allocate %zu bytes\n", __func__,
2649 alloc_size);
2650
2651 alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2652 unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2653 if (!unit_off)
2654 panic("%s: Failed to allocate %zu bytes\n", __func__,
2655 alloc_size);
2f39e637 2656
fd1e8a1f 2657 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
ffe0d5a5 2658 unit_map[cpu] = UINT_MAX;
a855b84c
TH
2659
2660 pcpu_low_unit_cpu = NR_CPUS;
2661 pcpu_high_unit_cpu = NR_CPUS;
2f39e637 2662
fd1e8a1f
TH
2663 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2664 const struct pcpu_group_info *gi = &ai->groups[group];
2f39e637 2665
6563297c
TH
2666 group_offsets[group] = gi->base_offset;
2667 group_sizes[group] = gi->nr_units * ai->unit_size;
2668
fd1e8a1f
TH
2669 for (i = 0; i < gi->nr_units; i++) {
2670 cpu = gi->cpu_map[i];
2671 if (cpu == NR_CPUS)
2672 continue;
8d408b4b 2673
9f295664 2674 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
635b75fc
TH
2675 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2676 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
fbf59bc9 2677
fd1e8a1f 2678 unit_map[cpu] = unit + i;
fb435d52
TH
2679 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2680
a855b84c
TH
2681 /* determine low/high unit_cpu */
2682 if (pcpu_low_unit_cpu == NR_CPUS ||
2683 unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2684 pcpu_low_unit_cpu = cpu;
2685 if (pcpu_high_unit_cpu == NR_CPUS ||
2686 unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2687 pcpu_high_unit_cpu = cpu;
fd1e8a1f 2688 }
2f39e637 2689 }
fd1e8a1f
TH
2690 pcpu_nr_units = unit;
2691
2692 for_each_possible_cpu(cpu)
635b75fc
TH
2693 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2694
2695 /* we're done parsing the input, undefine BUG macro and dump config */
2696#undef PCPU_SETUP_BUG_ON
bcbea798 2697 pcpu_dump_alloc_info(KERN_DEBUG, ai);
fd1e8a1f 2698
6563297c
TH
2699 pcpu_nr_groups = ai->nr_groups;
2700 pcpu_group_offsets = group_offsets;
2701 pcpu_group_sizes = group_sizes;
fd1e8a1f 2702 pcpu_unit_map = unit_map;
fb435d52 2703 pcpu_unit_offsets = unit_off;
2f39e637
TH
2704
2705 /* determine basic parameters */
fd1e8a1f 2706 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
d9b55eeb 2707 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
6563297c 2708 pcpu_atom_size = ai->atom_size;
61cf93d3
DZ
2709 pcpu_chunk_struct_size = struct_size(chunk, populated,
2710 BITS_TO_LONGS(pcpu_unit_pages));
cafe8816 2711
30a5b536
DZ
2712 pcpu_stats_save_ai(ai);
2713
d9b55eeb 2714 /*
f1833241
RG
2715 * Allocate chunk slots. The slots after the active slots are:
2716 * sidelined_slot - isolated, depopulated chunks
2717 * free_slot - fully free chunks
2718 * to_depopulate_slot - isolated, chunks to depopulate
d9b55eeb 2719 */
f1833241
RG
2720 pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
2721 pcpu_free_slot = pcpu_sidelined_slot + 1;
2722 pcpu_to_depopulate_slot = pcpu_free_slot + 1;
2723 pcpu_nr_slots = pcpu_to_depopulate_slot + 1;
3c7be18a 2724 pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
faf65dde 2725 sizeof(pcpu_chunk_lists[0]),
3c7be18a
RG
2726 SMP_CACHE_BYTES);
2727 if (!pcpu_chunk_lists)
f655f405 2728 panic("%s: Failed to allocate %zu bytes\n", __func__,
faf65dde 2729 pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]));
3c7be18a 2730
faf65dde
RG
2731 for (i = 0; i < pcpu_nr_slots; i++)
2732 INIT_LIST_HEAD(&pcpu_chunk_lists[i]);
fbf59bc9 2733
d2f3c384
DZF
2734 /*
2735 * The end of the static region needs to be aligned with the
2736 * minimum allocation size as this offsets the reserved and
2737 * dynamic region. The first chunk ends page aligned by
2738 * expanding the dynamic region, therefore the dynamic region
2739 * can be shrunk to compensate while still staying above the
2740 * configured sizes.
2741 */
2742 static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2743 dyn_size = ai->dyn_size - (static_size - ai->static_size);
2744
edcb4639 2745 /*
c0ebfdc3
DZF
2746 * Initialize first chunk.
2747 * If the reserved_size is non-zero, this initializes the reserved
2748 * chunk. If the reserved_size is zero, the reserved chunk is NULL
2749 * and the dynamic region is initialized here. The first chunk,
2750 * pcpu_first_chunk, will always point to the chunk that serves
2751 * the dynamic region.
edcb4639 2752 */
d2f3c384
DZF
2753 tmp_addr = (unsigned long)base_addr + static_size;
2754 map_size = ai->reserved_size ?: dyn_size;
40064aec 2755 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
61ace7fa 2756
edcb4639 2757 /* init dynamic chunk if necessary */
b9c39442 2758 if (ai->reserved_size) {
0c4169c3 2759 pcpu_reserved_chunk = chunk;
b9c39442 2760
d2f3c384 2761 tmp_addr = (unsigned long)base_addr + static_size +
c0ebfdc3 2762 ai->reserved_size;
d2f3c384 2763 map_size = dyn_size;
40064aec 2764 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
edcb4639
TH
2765 }
2766
2441d15c 2767 /* link the first chunk in */
0c4169c3 2768 pcpu_first_chunk = chunk;
faf65dde 2769 pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
ae9e6bc9 2770 pcpu_chunk_relocate(pcpu_first_chunk, -1);
fbf59bc9 2771
7e8a6304
DZF
2772 /* include all regions of the first chunk */
2773 pcpu_nr_populated += PFN_DOWN(size_sum);
2774
30a5b536 2775 pcpu_stats_chunk_alloc();
df95e795 2776 trace_percpu_create_chunk(base_addr);
30a5b536 2777
fbf59bc9 2778 /* we're done */
bba174f5 2779 pcpu_base_addr = base_addr;
fbf59bc9 2780}
66c3a757 2781
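/*
 * Sketch (illustrative): how an arch setup path typically reaches this
 * function.  The embed helper below builds the alloc_info, allocates
 * the units and itself ends by calling pcpu_setup_first_chunk(); the
 * caller then derives the per-cpu offsets.  Mirrors the generic
 * setup_per_cpu_areas() flow.
 */
#if 0
unsigned long delta;
unsigned int cpu;
int rc;

rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE,
			    PAGE_SIZE, NULL, NULL);
if (rc < 0)
	panic("cannot initialize percpu area (err=%d)", rc);

delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu)
	__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
#endif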
bbddff05
TH
2782#ifdef CONFIG_SMP
2783
17f3609c 2784const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
f58dc01b
TH
2785 [PCPU_FC_AUTO] = "auto",
2786 [PCPU_FC_EMBED] = "embed",
2787 [PCPU_FC_PAGE] = "page",
f58dc01b 2788};
66c3a757 2789
f58dc01b 2790enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
66c3a757 2791
f58dc01b
TH
2792static int __init percpu_alloc_setup(char *str)
2793{
5479c78a
CG
2794 if (!str)
2795 return -EINVAL;
2796
f58dc01b
TH
2797 if (0)
2798 /* nada */;
2799#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2800 else if (!strcmp(str, "embed"))
2801 pcpu_chosen_fc = PCPU_FC_EMBED;
2802#endif
2803#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2804 else if (!strcmp(str, "page"))
2805 pcpu_chosen_fc = PCPU_FC_PAGE;
f58dc01b
TH
2806#endif
2807 else
870d4b12 2808 pr_warn("unknown allocator %s specified\n", str);
66c3a757 2809
f58dc01b 2810 return 0;
66c3a757 2811}
f58dc01b 2812early_param("percpu_alloc", percpu_alloc_setup);
66c3a757 2813
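/*
 * Example (illustrative): selecting the first-chunk allocator on the
 * kernel command line, e.g.
 *
 *	percpu_alloc=page
 *
 * An unknown name only triggers the warning above and leaves the
 * default (auto) selection in place.
 */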
3c9a024f
TH
2814/*
2815 * pcpu_embed_first_chunk() is used by the generic percpu setup.
2816 * Build it if needed by the arch config or the generic setup is going
2817 * to be used.
2818 */
08fc4580
TH
2819#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2820 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
3c9a024f
TH
2821#define BUILD_EMBED_FIRST_CHUNK
2822#endif
2823
2824/* build pcpu_page_first_chunk() iff needed by the arch config */
2825#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2826#define BUILD_PAGE_FIRST_CHUNK
2827#endif
2828
2829/* pcpu_build_alloc_info() is used by both embed and page first chunk */
2830#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2831/**
2832 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2833 * @reserved_size: the size of reserved percpu area in bytes
2834 * @dyn_size: minimum free size for dynamic allocation in bytes
2835 * @atom_size: allocation atom size
2836 * @cpu_distance_fn: callback to determine distance between cpus, optional
2837 *
2838 * This function determines grouping of units, their mappings to cpus
2839 * and other parameters considering needed percpu size, allocation
2840 * atom size and distances between CPUs.
2841 *
bffc4375 2842 * Groups are always multiples of atom size, and CPUs which are
3c9a024f
TH
2843 * LOCAL_DISTANCE apart both ways are grouped together and share space for
2844 * units in the same group. The returned configuration is guaranteed
2845 * to have CPUs on different nodes on different groups and >=75% usage
2846 * of allocated virtual address space.
2847 *
2848 * RETURNS:
2849 * On success, pointer to the new allocation_info is returned. On
2850 * failure, ERR_PTR value is returned.
2851 */
258e0815 2852static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
3c9a024f
TH
2853 size_t reserved_size, size_t dyn_size,
2854 size_t atom_size,
2855 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2856{
2857 static int group_map[NR_CPUS] __initdata;
2858 static int group_cnt[NR_CPUS] __initdata;
d7d29ac7 2859 static struct cpumask mask __initdata;
3c9a024f
TH
2860 const size_t static_size = __per_cpu_end - __per_cpu_start;
2861 int nr_groups = 1, nr_units = 0;
2862 size_t size_sum, min_unit_size, alloc_size;
3f649ab7 2863 int upa, max_upa, best_upa; /* units_per_alloc */
3c9a024f
TH
2864 int last_allocs, group, unit;
2865 unsigned int cpu, tcpu;
2866 struct pcpu_alloc_info *ai;
2867 unsigned int *cpu_map;
2868
2869 /* this function may be called multiple times */
2870 memset(group_map, 0, sizeof(group_map));
2871 memset(group_cnt, 0, sizeof(group_cnt));
d7d29ac7 2872 cpumask_clear(&mask);
3c9a024f
TH
2873
2874 /* calculate size_sum and ensure dyn_size is enough for early alloc */
2875 size_sum = PFN_ALIGN(static_size + reserved_size +
2876 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2877 dyn_size = size_sum - static_size - reserved_size;
2878
2879 /*
2880 * Determine min_unit_size, alloc_size and max_upa such that
2881 * alloc_size is multiple of atom_size and is the smallest
25985edc 2882 * which can accommodate 4k aligned segments which are equal to
3c9a024f
TH
2883 * or larger than min_unit_size.
2884 */
2885 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2886
9c015162 2887 /* determine the maximum # of units that can fit in an allocation */
3c9a024f
TH
2888 alloc_size = roundup(min_unit_size, atom_size);
2889 upa = alloc_size / min_unit_size;
f09f1243 2890 while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
3c9a024f
TH
2891 upa--;
2892 max_upa = upa;
2893
d7d29ac7
WY
2894 cpumask_copy(&mask, cpu_possible_mask);
2895
3c9a024f 2896 /* group cpus according to their proximity */
d7d29ac7
WY
2897 for (group = 0; !cpumask_empty(&mask); group++) {
2898 /* pop the group's first cpu */
2899 cpu = cpumask_first(&mask);
3c9a024f
TH
2900 group_map[cpu] = group;
2901 group_cnt[group]++;
d7d29ac7
WY
2902 cpumask_clear_cpu(cpu, &mask);
2903
2904 for_each_cpu(tcpu, &mask) {
2905 if (!cpu_distance_fn ||
2906 (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
2907 cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
2908 group_map[tcpu] = group;
2909 group_cnt[group]++;
2910 cpumask_clear_cpu(tcpu, &mask);
2911 }
2912 }
3c9a024f 2913 }
d7d29ac7 2914 nr_groups = group;
3c9a024f
TH
2915
2916 /*
9c015162
DZF
2917 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2918 * Expand the unit_size until we use >= 75% of the units allocated.
2919 * This is related to atom_size, which could be much larger than the unit_size.
3c9a024f
TH
2920 */
2921 last_allocs = INT_MAX;
4829c791 2922 best_upa = 0;
3c9a024f
TH
2923 for (upa = max_upa; upa; upa--) {
2924 int allocs = 0, wasted = 0;
2925
f09f1243 2926 if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
3c9a024f
TH
2927 continue;
2928
2929 for (group = 0; group < nr_groups; group++) {
2930 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2931 allocs += this_allocs;
2932 wasted += this_allocs * upa - group_cnt[group];
2933 }
2934
2935 /*
2936 * Don't accept if wastage is over 1/3. The
2937 * greater-than comparison ensures upa==1 always
2938 * passes the following check.
2939 */
2940 if (wasted > num_possible_cpus() / 3)
2941 continue;
2942
2943 /* and then don't consume more memory */
2944 if (allocs > last_allocs)
2945 break;
2946 last_allocs = allocs;
2947 best_upa = upa;
2948 }
4829c791 2949 BUG_ON(!best_upa);
3c9a024f
TH
2950 upa = best_upa;
2951
2952 /* allocate and fill alloc_info */
2953 for (group = 0; group < nr_groups; group++)
2954 nr_units += roundup(group_cnt[group], upa);
2955
2956 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2957 if (!ai)
2958 return ERR_PTR(-ENOMEM);
2959 cpu_map = ai->groups[0].cpu_map;
2960
2961 for (group = 0; group < nr_groups; group++) {
2962 ai->groups[group].cpu_map = cpu_map;
2963 cpu_map += roundup(group_cnt[group], upa);
2964 }
2965
2966 ai->static_size = static_size;
2967 ai->reserved_size = reserved_size;
2968 ai->dyn_size = dyn_size;
2969 ai->unit_size = alloc_size / upa;
2970 ai->atom_size = atom_size;
2971 ai->alloc_size = alloc_size;
2972
2de7852f 2973 for (group = 0, unit = 0; group < nr_groups; group++) {
3c9a024f
TH
2974 struct pcpu_group_info *gi = &ai->groups[group];
2975
2976 /*
2977 * Initialize base_offset as if all groups are located
2978 * back-to-back. The caller should update this to
2979 * reflect actual allocation.
2980 */
2981 gi->base_offset = unit * ai->unit_size;
2982
2983 for_each_possible_cpu(cpu)
2984 if (group_map[cpu] == group)
2985 gi->cpu_map[gi->nr_units++] = cpu;
2986 gi->nr_units = roundup(gi->nr_units, upa);
2987 unit += gi->nr_units;
2988 }
2989 BUG_ON(unit != nr_units);
2990
2991 return ai;
2992}
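/*
 * Sketch (illustrative): a distance callback as an arch might supply
 * one, grouping cpus that share a NUMA node.  'my_cpu_distance' is a
 * made-up name.
 */
#if 0
static int __init my_cpu_distance(unsigned int from, unsigned int to)
{
	return cpu_to_node(from) == cpu_to_node(to) ?
		LOCAL_DISTANCE : REMOTE_DISTANCE;
}
#endif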
23f91716
KW
2993
2994static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
2995 pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
2996{
2997 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
2998#ifdef CONFIG_NUMA
2999 int node = NUMA_NO_NODE;
3000 void *ptr;
3001
3002 if (cpu_to_nd_fn)
3003 node = cpu_to_nd_fn(cpu);
3004
3005 if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) {
3006 ptr = memblock_alloc_from(size, align, goal);
3007 pr_info("cpu %d has no node %d or node-local memory\n",
3008 cpu, node);
3009 pr_debug("per cpu data for cpu%d %zu bytes at 0x%llx\n",
3010 cpu, size, (u64)__pa(ptr));
3011 } else {
3012 ptr = memblock_alloc_try_nid(size, align, goal,
3013 MEMBLOCK_ALLOC_ACCESSIBLE,
3014 node);
3015
3016 pr_debug("per cpu data for cpu%d %zu bytes on node%d at 0x%llx\n",
3017 cpu, size, node, (u64)__pa(ptr));
3018 }
3019 return ptr;
3020#else
3021 return memblock_alloc_from(size, align, goal);
3022#endif
3023}
3024
3025static void __init pcpu_fc_free(void *ptr, size_t size)
3026{
3027 memblock_free(ptr, size);
3028}
3c9a024f
TH
3029#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
3030
3031#if defined(BUILD_EMBED_FIRST_CHUNK)
66c3a757
TH
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @cpu_to_nd_fn: callback to convert cpu to its node, optional
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to set up the first chunk, it is allocated
 * by calling pcpu_fc_alloc and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (i.e. 32-bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using pcpu_fc_free.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
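/*
 * Illustrative call sketch (hedged: the PMD_SIZE atom and the
 * pcpu_cpu_distance()/pcpu_cpu_to_node() callbacks are placeholder
 * names an arch would supply, not definitions made here):
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE,
 *				    PMD_SIZE,
 *				    pcpu_cpu_distance,
 *				    pcpu_cpu_to_node);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 */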
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size;
	unsigned long max_distance;
	int group, i, highest_group, rc = 0;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address & max_distance */
	highest_group = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_ignore_phys(__pa(ptr));
		areas[group] = ptr;

		base = min(ptr, base);
		if (ptr > areas[highest_group])
			highest_group = group;
	}
	max_distance = areas[highest_group] - base;
	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
			max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free_areas;
#endif
	}
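
	/*
	 * Worked example (invented numbers): two groups whose
	 * allocations land 96MiB apart, with the higher group covering
	 * 4MiB of units, give max_distance = 100MiB.  On a 32-bit
	 * machine with a 128MiB vmalloc area that exceeds the 75%
	 * threshold (96MiB) and triggers the warning above.
	 */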

	/*
	 * Copy data and free unused parts.  This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				pcpu_fc_free(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum);
		}
	}
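
	/*
	 * Sizing note with made-up numbers: if unit_size is 2MiB and
	 * size_sum (static + reserved + dynamic) is 1.5MiB, the
	 * trailing 512KiB of every populated unit has just been handed
	 * back to the early allocator by pcpu_fc_free() above.
	 */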

	/* base address is now known, determine group base offsets */
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
	}

	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		if (areas[group])
			pcpu_fc_free(areas[group],
				     ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		memblock_free(areas, areas_size);
	return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
#include <asm/pgalloc.h>

#ifndef P4D_TABLE_SIZE
#define P4D_TABLE_SIZE PAGE_SIZE
#endif

#ifndef PUD_TABLE_SIZE
#define PUD_TABLE_SIZE PAGE_SIZE
#endif

#ifndef PMD_TABLE_SIZE
#define PMD_TABLE_SIZE PAGE_SIZE
#endif

#ifndef PTE_TABLE_SIZE
#define PTE_TABLE_SIZE PAGE_SIZE
#endif

void __init __weak pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd)) {
		p4d_t *new;

		new = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
		if (!new)
			goto err_alloc;
		pgd_populate(&init_mm, pgd, new);
	}

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		pud_t *new;

		new = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
		if (!new)
			goto err_alloc;
		p4d_populate(&init_mm, p4d, new);
	}

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
		if (!new)
			goto err_alloc;
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
		if (!new)
			goto err_alloc;
		pmd_populate_kernel(&init_mm, pmd, new);
	}

	return;

err_alloc:
	panic("%s: Failed to allocate memory\n", __func__);
}

/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @cpu_to_nd_fn: callback to convert cpu to its node, optional
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc = 0;
	int upa;
	int nr_g0_units;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	upa = ai->alloc_size / ai->unit_size;
	nr_g0_units = roundup(num_possible_cpus(), upa);
	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
		pcpu_free_alloc_info(ai);
		return -EINVAL;
	}

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
	if (!pages)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      pages_size);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned int cpu = ai->groups[0].cpu_map[unit];
		for (i = 0; i < unit_pages; i++) {
			void *ptr;

			ptr = pcpu_fc_alloc(cpu, PAGE_SIZE, PAGE_SIZE, cpu_to_nd_fn);
			if (!ptr) {
				pr_warn("failed to allocate %s page for cpu%u\n",
					psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_ignore_phys(__pa(ptr));
			pages[j++] = virt_to_page(ptr);
		}
	}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			pcpu_populate_pte(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
		unit_pages, psize_str, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		pcpu_fc_free(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	memblock_free(pages, pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
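/*
 * Sketch of how the offsets computed below are consumed (simplified
 * and hedged: the real accessors live in the per_cpu headers and go
 * through more macro layers than shown):
 *
 *	ptr = (void *)((unsigned long)&per_cpu_var +
 *		       __per_cpu_offset[cpu]);
 */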
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE,
				    PAGE_SIZE, NULL, NULL);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else /* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_ignore_phys(__pa(fc));

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	pcpu_setup_first_chunk(ai, fc);
	pcpu_free_alloc_info(ai);
}

#endif /* CONFIG_SMP */

/*
 * pcpu_nr_pages - calculate total number of populated backing pages
 *
 * This reflects the number of pages populated to back chunks.  Metadata is
 * excluded in the number exposed in meminfo as the number of backing pages
 * scales with the number of cpus and can quickly outweigh the memory used for
 * metadata.  It also keeps this calculation nice and simple.
 *
 * RETURNS:
 * Total number of populated backing pages in use by the allocator.
 */
unsigned long pcpu_nr_pages(void)
{
	return pcpu_nr_populated * pcpu_nr_units;
}
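
/*
 * Back-of-the-envelope example (numbers invented for illustration):
 * with 64 units and 48 populated pages per unit, the allocator is
 * backed by 64 * 48 = 3072 pages, i.e. 12MiB with 4KiB pages, which
 * is the figure exposed through meminfo as described above.
 */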

/*
 * Percpu allocator is initialized early during boot when neither slab nor
 * workqueue is available.  Plug async management until everything is up
 * and running.
 */
static int __init percpu_enable_async(void)
{
	pcpu_async_enabled = true;
	return 0;
}
subsys_initcall(percpu_enable_async);