/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks.  Each chunk
 * consists of a boot-time determined number of units and the first
 * chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  Each entry holds the byte offset of an area; the
 * low bit of the offset is set while the area is in use and clear
 * while it is free.  Allocation inside a chunk is done by scanning
 * this map sequentially and serving the first matching entry.  This
 * is mostly copied from the percpu_modalloc() allocator.  Chunks can
 * be determined from the address using the index field in the page
 * struct.  The index field contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   a regular address to a percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
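
/*
 * Illustrative sketch of how allocator users drive this file (the
 * calls below are the real kernel APIs; the variable is hypothetical):
 *
 *	int __percpu *cnt = alloc_percpu(int);	// dynamic percpu alloc
 *
 *	this_cpu_inc(*cnt);			// cpu-local access
 *	...
 *	free_percpu(cnt);			// returns area to its chunk
 *
 * alloc_percpu() ends up in __alloc_percpu() below and free_percpu()
 * finds the owning chunk via pcpu_chunk_addr_search().
 */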

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used before the sentry */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	void			*data;		/* chunk data */
	int			first_free;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};
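
/*
 * Illustrative sketch of the chunk->map encoding (values hypothetical):
 * with a 64 byte area in use at offset 0 and the rest of the unit free,
 * the map would hold
 *
 *	map[0] = 0 | 1;			// offset 0, low bit set: in use
 *	map[1] = 64;			// offset 64, low bit clear: free
 *	map[2] = pcpu_unit_size | 1;	// sentry terminating the map
 *	map_used = 2;			// entries before the sentry
 *
 * An area's size is the next entry's offset minus its own, both taken
 * with the low bit masked off; see pcpu_alloc_area() below.
 */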

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __read_mostly;
static unsigned int pcpu_high_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
		addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
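
/*
 * Worked example of the slot mapping above, assuming the current
 * PCPU_SLOT_BASE_SHIFT of 5: __pcpu_size_to_slot() maps each
 * power-of-two band of free sizes to its own slot,
 *
 *	size   1..15   -> slot 1	(clamped by the max())
 *	size  16..31   -> slot 2
 *	size  32..63   -> slot 3
 *	size  64..127  -> slot 4	and so on,
 *
 * while a fully free chunk (free_size == pcpu_unit_size) is parked in
 * the last slot, pcpu_nr_slots - 1.
 */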

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
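
/*
 * Illustrative use of the iterators above (hypothetical caller):
 *
 *	int rs, re;
 *
 *	pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
 *		// pages [rs, re) of chunk are populated here
 *	}
 *
 * pcpu_reclaim() below walks populated regions exactly this way when
 * depopulating fully free chunks.
 */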

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	/* +3: a worst-case split adds two entries plus the closing sentry */
	if (chunk->map_alloc >= chunk->map_used + 3)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 3)
		new_alloc *= 2;

	return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}
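
/*
 * Illustrative numbers for the extension logic above (hypothetical
 * chunk state): with map_used == 30 and map_alloc == 32, the
 * map_alloc >= map_used + 3 test fails, so pcpu_need_to_extend()
 * doubles PCPU_DFL_MAP_ALLOC until it covers 33 entries and returns
 * 64; the caller then drops pcpu_lock and calls
 * pcpu_extend_area_map(chunk, 64).
 */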

/**
 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
 * @chunk: chunk the candidate area belongs to
 * @off: the offset to the start of the candidate area
 * @this_size: the size of the candidate area
 * @size: the size of the target allocation
 * @align: the alignment of the target allocation
 * @pop_only: only allocate from already populated region
 *
 * We're trying to allocate @size bytes aligned at @align.  @chunk's area
 * at @off sized @this_size is a candidate.  This function determines
 * whether the target allocation fits in the candidate area and returns the
 * number of bytes to pad after @off.  If the target area doesn't fit, -1
 * is returned.
 *
 * If @pop_only is %true, this function only considers the already
 * populated part of the candidate area.
 */
static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
			    int size, int align, bool pop_only)
{
	int cand_off = off;

	while (true) {
		int head = ALIGN(cand_off, align) - off;
		int page_start, page_end, rs, re;

		if (this_size < head + size)
			return -1;

		if (!pop_only)
			return head;

		/*
		 * If the first unpopulated page is beyond the end of the
		 * allocation, the whole allocation is populated;
		 * otherwise, retry from the end of the unpopulated area.
		 */
		page_start = PFN_DOWN(head + off);
		page_end = PFN_UP(head + off + size);

		rs = page_start;
		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
		if (rs >= page_end)
			return head;
		cand_off = re * PAGE_SIZE;
	}
}

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 * @pop_only: allocate only from the populated area
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
			   bool pop_only)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;
	bool seen_free = false;
	int *p;

	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
		int head, tail;
		int this_size;

		off = *p;
		if (off & 1)
			continue;

		this_size = (p[1] & ~1) - off;

		head = pcpu_fit_in_area(chunk, off, this_size, size, align,
					pop_only);
		if (head < 0) {
			if (!seen_free) {
				chunk->first_free = i;
				seen_free = true;
			}
			max_contig = max(this_size, max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
			*p = off += head;
			if (p[-1] & 1)
				chunk->free_size -= head;
			else
				max_contig = max(*p - p[-1], max_contig);
			this_size -= head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = this_size - head - size;
		if (tail < sizeof(int)) {
			tail = 0;
			size = this_size - head;
		}

		/* split if warranted */
		if (head || tail) {
			int nr_extra = !!head + !!tail;

			/* insert new subblocks */
			memmove(p + nr_extra + 1, p + 1,
				sizeof(chunk->map[0]) * (chunk->map_used - i));
			chunk->map_used += nr_extra;

			if (head) {
				if (!seen_free) {
					chunk->first_free = i;
					seen_free = true;
				}
				*++p = off += head;
				++i;
				max_contig = max(head, max_contig);
			}
			if (tail) {
				p[1] = off + size;
				max_contig = max(tail, max_contig);
			}
		}

		if (!seen_free)
			chunk->first_free = i + 1;

		/* update hint and mark allocated */
		if (i + 1 == chunk->map_used)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= size;
		*p |= 1;

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}
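
/*
 * Worked example for pcpu_alloc_area() (hypothetical map contents):
 * allocating size 12, align 4 from a free area at offset 64 whose
 * next entry sits at offset 256 gives head 0 and tail 180, so only a
 * tail entry is inserted:
 *
 *	before:	..., 64, 256|1, ...		// 192 bytes free at 64
 *	after:	..., 64|1, 76, 256|1, ...	// 12 in use, 180 free
 *
 * and 64 is returned as the allocated offset.
 */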

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int off = 0;
	unsigned i, j;
	int to_free = 0;
	int *p;

	freeme |= 1;	/* we are searching for <given offset, in use> pair */

	/* binary search the sorted map for the matching entry */
	i = 0;
	j = chunk->map_used;
	while (i != j) {
		unsigned k = (i + j) / 2;
		off = chunk->map[k];
		if (off < freeme)
			i = k + 1;
		else if (off > freeme)
			j = k;
		else
			i = j = k;
	}
	BUG_ON(off != freeme);

	if (i < chunk->first_free)
		chunk->first_free = i;

	p = chunk->map + i;
	*p = off &= ~1;
	chunk->free_size += (p[1] & ~1) - off;

	/* merge with next? */
	if (!(p[1] & 1))
		to_free++;
	/* merge with previous? */
	if (i > 0 && !(p[-1] & 1)) {
		to_free++;
		i--;
		p--;
	}
	if (to_free) {
		chunk->map_used -= to_free;
		memmove(p + 1, p + 1 + to_free,
			(chunk->map_used - i) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
	if (!chunk->map) {
		pcpu_mem_free(chunk, pcpu_chunk_struct_size);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[0] = 0;
	chunk->map[1] = pcpu_unit_size | 1;
	chunk->map_used = 1;

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	pcpu_mem_free(chunk, pcpu_chunk_struct_size);
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to the corresponding page struct
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
				 gfp_t gfp)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	bool is_atomic = !(gfp & GFP_KERNEL);
	int slot, off, new_alloc, cpu, ret;
	unsigned long flags;
	void __percpu *ptr;

	/*
	 * We want the lowest bit of offset available for in-use/free
	 * indicator, so force >= 16bit alignment and make size even.
	 */
	if (unlikely(align < 2))
		align = 2;

	size = ALIGN(size, 2);

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (is_atomic ||
			    pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align, is_atomic);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				if (is_atomic)
					continue;
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align, is_atomic);
			if (off >= 0)
				goto area_found;
		}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * No space left.  Create a new chunk.  We don't want multiple
	 * tasks to create chunks simultaneously.  Serialize and create iff
	 * there's still no empty chunk after grabbing the mutex.
	 */
	if (is_atomic)
		goto fail;

	mutex_lock(&pcpu_alloc_mutex);

	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
		chunk = pcpu_create_chunk();
		if (!chunk) {
			err = "failed to allocate new chunk";
			goto fail;
		}

		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_chunk_relocate(chunk, -1);
	} else {
		spin_lock_irqsave(&pcpu_lock, flags);
	}

	mutex_unlock(&pcpu_alloc_mutex);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate if not all pages are already there */
	if (!is_atomic) {
		int page_start, page_end, rs, re;

		mutex_lock(&pcpu_alloc_mutex);

		page_start = PFN_DOWN(off);
		page_end = PFN_UP(off + size);

		pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
			WARN_ON(chunk->immutable);

			ret = pcpu_populate_chunk(chunk, rs, re);

			spin_lock_irqsave(&pcpu_lock, flags);
			if (ret) {
				mutex_unlock(&pcpu_alloc_mutex);
				pcpu_free_area(chunk, off);
				err = "failed to populate";
				goto fail_unlock;
			}
			bitmap_set(chunk->populated, rs, re - rs);
			spin_unlock_irqrestore(&pcpu_lock, flags);
		}

		mutex_unlock(&pcpu_alloc_mutex);
	}

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size);
	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	if (!is_atomic && warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu atomic=%d, %s\n",
			   size, align, is_atomic, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}

/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true, GFP_KERNEL);
}
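
/*
 * Illustrative sketch of an atomic allocation (hypothetical caller
 * and struct):
 *
 *	struct foo __percpu *p;
 *
 *	p = __alloc_percpu_gfp(sizeof(*p), __alignof__(*p), GFP_NOWAIT);
 *	if (!p)
 *		// fall back; atomic allocs fail much more easily
 *
 * Because GFP_NOWAIT doesn't contain %GFP_KERNEL, pcpu_alloc() treats
 * the request as atomic: it only carves from already populated areas
 * and never extends maps or creates chunks.
 */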

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		int rs, re;

		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
			pcpu_depopulate_chunk(chunk, rs, re);
			bitmap_clear(chunk->populated, rs, re - rs);
		}
		pcpu_destroy_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	kmemleak_free_percpu(ptr);

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if more than one chunk is fully free, wake up the grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		if ((void *)addr >= start && (void *)addr < start + static_size)
			return true;
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * The percpu allocator has special setup for the first chunk, which
 * currently supports either embedding in linear address space or vmalloc
 * mapping, and, from the second one, the backing allocator (currently
 * either vm or km) provides translation.
 *
 * The addr can be translated simply without checking if it falls into the
 * first chunk.  But the current code reflects better how percpu allocator
 * actually works, and the verification can discover both bugs in percpu
 * allocator itself and per_cpu_ptr_to_phys() callers.  So we keep current
 * code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 */
	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
				     pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	memblock_free_early(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				printk(KERN_CONT "\n");
				printk("%spcpu-alloc: ", lvl);
			}
			printk(KERN_CONT "[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					printk(KERN_CONT "%0*d ", cpu_width,
					       gi->cpu_map[unit]);
				else
					printk(KERN_CONT "%s ", empty_str);
		}
	}
	printk(KERN_CONT "\n");
}
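
/*
 * For reference, the dump produced above looks like the following in
 * the boot log (values hypothetical):
 *
 *	pcpu-alloc: s7872 r8192 d28672 u65536 alloc=1*65536
 *	pcpu-alloc: [0] 0 [0] 1 [0] 2 [0] 3
 *
 * i.e. static/reserved/dynamic/unit sizes followed by [group] cpu
 * assignments for each allocation unit.
 */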

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
	static char cpus_buf[4096] __initdata;
	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	size_t dyn_size = ai->dyn_size;
	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;

	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
#endif
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

	/* process group information and build config tables accordingly */
	group_offsets = memblock_virt_alloc(ai->nr_groups *
					     sizeof(group_offsets[0]), 0);
	group_sizes = memblock_virt_alloc(ai->nr_groups *
					   sizeof(group_sizes[0]), 0);
	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;

	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
		}
	}
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_DEBUG, ai);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = memblock_virt_alloc(
			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
	INIT_LIST_HEAD(&schunk->list);
	schunk->base_addr = base_addr;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->immutable = true;
	bitmap_fill(schunk->populated, pcpu_unit_pages);

	if (ai->reserved_size) {
		schunk->free_size = ai->reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[0] = 1;
	schunk->map[1] = ai->static_size;
	schunk->map_used = 1;
	if (schunk->free_size)
		schunk->map[++schunk->map_used] = 1 | (ai->static_size + schunk->free_size);
	else
		schunk->map[1] |= 1;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
edcb4639 | 1448 | INIT_LIST_HEAD(&dchunk->list); |
bba174f5 | 1449 | dchunk->base_addr = base_addr; |
edcb4639 TH |
1450 | dchunk->map = dmap; |
1451 | dchunk->map_alloc = ARRAY_SIZE(dmap); | |
38a6be52 | 1452 | dchunk->immutable = true; |
ce3141a2 | 1453 | bitmap_fill(dchunk->populated, pcpu_unit_pages); |
edcb4639 TH |
1454 | |
1455 | dchunk->contig_hint = dchunk->free_size = dyn_size; | |
723ad1d9 AV |
1456 | dchunk->map[0] = 1; |
1457 | dchunk->map[1] = pcpu_reserved_chunk_limit; | |
1458 | dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1; | |
1459 | dchunk->map_used = 2; | |
edcb4639 TH |
1460 | } |
1461 | ||
2441d15c | 1462 | /* link the first chunk in */ |
ae9e6bc9 TH |
1463 | pcpu_first_chunk = dchunk ?: schunk; |
1464 | pcpu_chunk_relocate(pcpu_first_chunk, -1); | |
fbf59bc9 TH |
1465 | |
1466 | /* we're done */ | |
bba174f5 | 1467 | pcpu_base_addr = base_addr; |
fb435d52 | 1468 | return 0; |
fbf59bc9 | 1469 | } |
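The `map[]` initialization above is terse, so a reading aid: after commit 723ad1d9 (visible in the blame column), each `map[]` entry stores the byte offset at which an area begins, borrowing the low bit to mark the area allocated (a clear low bit means free), and `map[map_used]` is an end sentinel with the bit set. Below is a sketch of the resulting first-chunk layout under assumed, purely illustrative sizes (static 64KB, reserved 8KB, dynamic 20KB):

```c
/*
 * Illustrative layout, not part of the file.  Assumed sizes:
 * ai->static_size = 0x10000, ai->reserved_size = 0x2000, dyn = 0x5000.
 *
 * schunk (static + reserved):
 *   map[0] = 0x00001   static area begins at 0, low bit set: in use
 *   map[1] = 0x10000   reserved area begins here, low bit clear: free
 *   map[2] = 0x12001   end sentinel at 0x10000 + 0x2000, bit set
 *   map_used = 2, free_size = contig_hint = 0x2000
 *
 * dchunk (dynamic):
 *   map[0] = 0x00001   [0, 0x12000) shadows static + reserved, in use
 *   map[1] = 0x12000   dynamic area begins here, low bit clear: free
 *   map[2] = 0x17001   end sentinel at 0x12000 + 0x5000, bit set
 *   map_used = 2, free_size = contig_hint = 0x5000
 */
```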
66c3a757 | 1470 | |
bbddff05 TH |
1471 | #ifdef CONFIG_SMP |
1472 | ||
17f3609c | 1473 | const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { |
f58dc01b TH |
1474 | [PCPU_FC_AUTO] = "auto", |
1475 | [PCPU_FC_EMBED] = "embed", | |
1476 | [PCPU_FC_PAGE] = "page", | |
f58dc01b | 1477 | }; |
66c3a757 | 1478 | |
f58dc01b | 1479 | enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; |
66c3a757 | 1480 | |
f58dc01b TH |
1481 | static int __init percpu_alloc_setup(char *str) |
1482 | { | |
5479c78a CG |
1483 | if (!str) |
1484 | return -EINVAL; | |
1485 | ||
f58dc01b TH |
1486 | if (0) |
1487 | /* nada */; | |
1488 | #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK | |
1489 | else if (!strcmp(str, "embed")) | |
1490 | pcpu_chosen_fc = PCPU_FC_EMBED; | |
1491 | #endif | |
1492 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK | |
1493 | else if (!strcmp(str, "page")) | |
1494 | pcpu_chosen_fc = PCPU_FC_PAGE; | |
f58dc01b TH |
1495 | #endif |
1496 | else | |
1497 | pr_warning("PERCPU: unknown allocator %s specified\n", str); | |
66c3a757 | 1498 | |
f58dc01b | 1499 | return 0; |
66c3a757 | 1500 | } |
f58dc01b | 1501 | early_param("percpu_alloc", percpu_alloc_setup); |
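For reference, `percpu_alloc=` is parsed as an early kernel parameter: booting with `percpu_alloc=embed` or `percpu_alloc=page` overrides the `PCPU_FC_AUTO` default on architectures that build the corresponding first-chunk helper. Per the kernel-parameters documentation, this knob is primarily for debugging and performance comparison.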
66c3a757 | 1502 | |
3c9a024f TH |
1503 | /* |
1504 | * pcpu_embed_first_chunk() is used by the generic percpu setup. | |
1505 | * Build it if needed by the arch config or the generic setup is going | |
1506 | * to be used. | |
1507 | */ | |
08fc4580 TH |
1508 | #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ |
1509 | !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) | |
3c9a024f TH |
1510 | #define BUILD_EMBED_FIRST_CHUNK |
1511 | #endif | |
1512 | ||
1513 | /* build pcpu_page_first_chunk() iff needed by the arch config */ | |
1514 | #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) | |
1515 | #define BUILD_PAGE_FIRST_CHUNK | |
1516 | #endif | |
1517 | ||
1518 | /* pcpu_build_alloc_info() is used by both embed and page first chunk */ | |
1519 | #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) | |
1520 | /** | |
1521 | * pcpu_build_alloc_info - build alloc_info considering distances between CPUs | |
1522 | * @reserved_size: the size of reserved percpu area in bytes | |
1523 | * @dyn_size: minimum free size for dynamic allocation in bytes | |
1524 | * @atom_size: allocation atom size | |
1525 | * @cpu_distance_fn: callback to determine distance between cpus, optional | |
1526 | * | |
1527 | * This function determines grouping of units, their mappings to cpus | |
1528 | * and other parameters considering needed percpu size, allocation | |
1529 | * atom size and distances between CPUs. | |
1530 | * | |
1531 | * Groups are always multiples of atom size, and CPUs which are within | 
1532 | * LOCAL_DISTANCE of each other both ways are grouped together and | 
1533 | * share space for units in the same group. The returned configuration | 
1534 | * is guaranteed to place CPUs on different nodes in different groups | 
1535 | * and to use >=75% of the allocated virtual address space. | 
1536 | * | |
1537 | * RETURNS: | |
1538 | * On success, pointer to the new allocation_info is returned. On | |
1539 | * failure, ERR_PTR value is returned. | |
1540 | */ | |
1541 | static struct pcpu_alloc_info * __init pcpu_build_alloc_info( | |
1542 | size_t reserved_size, size_t dyn_size, | |
1543 | size_t atom_size, | |
1544 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn) | |
1545 | { | |
1546 | static int group_map[NR_CPUS] __initdata; | |
1547 | static int group_cnt[NR_CPUS] __initdata; | |
1548 | const size_t static_size = __per_cpu_end - __per_cpu_start; | |
1549 | int nr_groups = 1, nr_units = 0; | |
1550 | size_t size_sum, min_unit_size, alloc_size; | |
1551 | int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ | |
1552 | int last_allocs, group, unit; | |
1553 | unsigned int cpu, tcpu; | |
1554 | struct pcpu_alloc_info *ai; | |
1555 | unsigned int *cpu_map; | |
1556 | ||
1557 | /* this function may be called multiple times */ | |
1558 | memset(group_map, 0, sizeof(group_map)); | |
1559 | memset(group_cnt, 0, sizeof(group_cnt)); | |
1560 | ||
1561 | /* calculate size_sum and ensure dyn_size is enough for early alloc */ | |
1562 | size_sum = PFN_ALIGN(static_size + reserved_size + | |
1563 | max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); | |
1564 | dyn_size = size_sum - static_size - reserved_size; | |
1565 | ||
1566 | /* | |
1567 | * Determine min_unit_size, alloc_size and max_upa such that | |
1568 | * alloc_size is a multiple of atom_size and is the smallest size | 
25985edc | 1569 | * which can accommodate 4k aligned segments equal to | 
3c9a024f TH |
1570 | * or larger than min_unit_size. |
1571 | */ | |
1572 | min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); | |
1573 | ||
1574 | alloc_size = roundup(min_unit_size, atom_size); | |
1575 | upa = alloc_size / min_unit_size; | |
1576 | while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) | |
1577 | upa--; | |
1578 | max_upa = upa; | |
1579 | ||
1580 | /* group cpus according to their proximity */ | |
1581 | for_each_possible_cpu(cpu) { | |
1582 | group = 0; | |
1583 | next_group: | |
1584 | for_each_possible_cpu(tcpu) { | |
1585 | if (cpu == tcpu) | |
1586 | break; | |
1587 | if (group_map[tcpu] == group && cpu_distance_fn && | |
1588 | (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || | |
1589 | cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { | |
1590 | group++; | |
1591 | nr_groups = max(nr_groups, group + 1); | |
1592 | goto next_group; | |
1593 | } | |
1594 | } | |
1595 | group_map[cpu] = group; | |
1596 | group_cnt[group]++; | |
1597 | } | |
1598 | ||
1599 | /* | |
1600 | * Expand unit size until address space usage goes over 75% | |
1601 | * and then as much as possible without using more address | |
1602 | * space. | |
1603 | */ | |
1604 | last_allocs = INT_MAX; | |
1605 | for (upa = max_upa; upa; upa--) { | |
1606 | int allocs = 0, wasted = 0; | |
1607 | ||
1608 | if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) | |
1609 | continue; | |
1610 | ||
1611 | for (group = 0; group < nr_groups; group++) { | |
1612 | int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); | |
1613 | allocs += this_allocs; | |
1614 | wasted += this_allocs * upa - group_cnt[group]; | |
1615 | } | |
1616 | ||
1617 | /* | |
1618 | * Don't accept if wastage is over 1/3. The | |
1619 | * greater-than comparison ensures upa==1 always | |
1620 | * passes the following check. | |
1621 | */ | |
1622 | if (wasted > num_possible_cpus() / 3) | |
1623 | continue; | |
1624 | ||
1625 | /* and then don't consume more memory */ | |
1626 | if (allocs > last_allocs) | |
1627 | break; | |
1628 | last_allocs = allocs; | |
1629 | best_upa = upa; | |
1630 | } | |
1631 | upa = best_upa; | |
1632 | ||
1633 | /* allocate and fill alloc_info */ | |
1634 | for (group = 0; group < nr_groups; group++) | |
1635 | nr_units += roundup(group_cnt[group], upa); | |
1636 | ||
1637 | ai = pcpu_alloc_alloc_info(nr_groups, nr_units); | |
1638 | if (!ai) | |
1639 | return ERR_PTR(-ENOMEM); | |
1640 | cpu_map = ai->groups[0].cpu_map; | |
1641 | ||
1642 | for (group = 0; group < nr_groups; group++) { | |
1643 | ai->groups[group].cpu_map = cpu_map; | |
1644 | cpu_map += roundup(group_cnt[group], upa); | |
1645 | } | |
1646 | ||
1647 | ai->static_size = static_size; | |
1648 | ai->reserved_size = reserved_size; | |
1649 | ai->dyn_size = dyn_size; | |
1650 | ai->unit_size = alloc_size / upa; | |
1651 | ai->atom_size = atom_size; | |
1652 | ai->alloc_size = alloc_size; | |
1653 | ||
1654 | for (group = 0, unit = 0; group_cnt[group]; group++) { | |
1655 | struct pcpu_group_info *gi = &ai->groups[group]; | |
1656 | ||
1657 | /* | |
1658 | * Initialize base_offset as if all groups are located | |
1659 | * back-to-back. The caller should update this to | |
1660 | * reflect actual allocation. | |
1661 | */ | |
1662 | gi->base_offset = unit * ai->unit_size; | |
1663 | ||
1664 | for_each_possible_cpu(cpu) | |
1665 | if (group_map[cpu] == group) | |
1666 | gi->cpu_map[gi->nr_units++] = cpu; | |
1667 | gi->nr_units = roundup(gi->nr_units, upa); | |
1668 | unit += gi->nr_units; | |
1669 | } | |
1670 | BUG_ON(unit != nr_units); | |
1671 | ||
1672 | return ai; | |
1673 | } | |
1674 | #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ | |
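To make the sizing logic in pcpu_build_alloc_info() concrete, here is a self-contained userspace sketch of the same upa search under assumed inputs: a 44KB `size_sum`, a 2MB allocation atom (the x86-64 PMD size), and four possible CPUs split 2+2 across two NUMA nodes. Everything here is illustrative; only the arithmetic mirrors the function above.

```c
#include <stdio.h>

/* Hedged sketch of pcpu_build_alloc_info()'s unit sizing.  Assumed
 * inputs: min_unit_size = 44KB, atom_size = 2MB, 4 CPUs on 2 nodes. */
int main(void)
{
	const long page_size = 4096;
	const long min_unit_size = 44 * 1024;
	const long atom_size = 2 * 1024 * 1024;
	const long alloc_size =
		(min_unit_size + atom_size - 1) / atom_size * atom_size;
	const int nr_groups = 2, num_possible_cpus = 4;
	int group_cnt[2] = { 2, 2 };	/* CPUs per NUMA node */
	int upa, max_upa, best_upa = 1, last_allocs = 1 << 30;

	/* largest upa keeping alloc_size/upa exact and page aligned */
	upa = alloc_size / min_unit_size;		/* starts at 46 */
	while (alloc_size % upa || (alloc_size / upa) % page_size)
		upa--;
	max_upa = upa;					/* ends at 32 */

	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0, g;

		if (alloc_size % upa || (alloc_size / upa) % page_size)
			continue;
		for (g = 0; g < nr_groups; g++) {
			int n = (group_cnt[g] + upa - 1) / upa;

			allocs += n;
			wasted += n * upa - group_cnt[g];
		}
		if (wasted > num_possible_cpus / 3)	/* >1/3 wasted */
			continue;
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}

	/* prints: max_upa=32 best_upa=2 unit_size=1048576 */
	printf("max_upa=%d best_upa=%d unit_size=%ld\n",
	       max_upa, best_upa, alloc_size / best_upa);
	return 0;
}
```

With these inputs each 2MB atom ends up holding the two 1MB units of one node's group: upa values above 2 are rejected for wasting more than a third of the units, and upa == 1 would double the number of allocations.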
1675 | ||
1676 | #if defined(BUILD_EMBED_FIRST_CHUNK) | |
66c3a757 TH |
1677 | /** |
1678 | * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem | |
66c3a757 | 1679 | * @reserved_size: the size of reserved percpu area in bytes |
4ba6ce25 | 1680 | * @dyn_size: minimum free size for dynamic allocation in bytes |
c8826dd5 TH |
1681 | * @atom_size: allocation atom size |
1682 | * @cpu_distance_fn: callback to determine distance between cpus, optional | |
1683 | * @alloc_fn: function to allocate percpu page | |
25985edc | 1684 | * @free_fn: function to free percpu page |
66c3a757 TH |
1685 | * |
1686 | * This is a helper to ease setting up embedded first percpu chunk and | |
1687 | * can be called where pcpu_setup_first_chunk() is expected. | |
1688 | * | |
1689 | * If this function is used to set up the first chunk, it is allocated | 
c8826dd5 TH |
1690 | * by calling @alloc_fn and used as-is without being mapped into |
1691 | * vmalloc area. Allocations are always whole multiples of @atom_size | |
1692 | * aligned to @atom_size. | |
1693 | * | |
1694 | * This enables the first chunk to piggy back on the linear physical | |
1695 | * mapping which often uses larger page size. Please note that this | |
1696 | * can result in very sparse cpu->unit mapping on NUMA machines thus | |
1697 | * requiring large vmalloc address space. Don't use this allocator if | |
1698 | * vmalloc space is not orders of magnitude larger than distances | |
1699 | * between node memory addresses (ie. 32bit NUMA machines). | |
66c3a757 | 1700 | * |
4ba6ce25 | 1701 | * @dyn_size specifies the minimum dynamic area size. |
66c3a757 TH |
1702 | * |
1703 | * If the needed size is smaller than the minimum or specified unit | |
c8826dd5 | 1704 | * size, the leftover is returned using @free_fn. |
66c3a757 TH |
1705 | * |
1706 | * RETURNS: | |
fb435d52 | 1707 | * 0 on success, -errno on failure. |
66c3a757 | 1708 | */ |
4ba6ce25 | 1709 | int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, |
c8826dd5 TH |
1710 | size_t atom_size, |
1711 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn, | |
1712 | pcpu_fc_alloc_fn_t alloc_fn, | |
1713 | pcpu_fc_free_fn_t free_fn) | |
66c3a757 | 1714 | { |
c8826dd5 TH |
1715 | void *base = (void *)ULONG_MAX; |
1716 | void **areas = NULL; | |
fd1e8a1f | 1717 | struct pcpu_alloc_info *ai; |
6ea529a2 | 1718 | size_t size_sum, areas_size, max_distance; |
c8826dd5 | 1719 | int group, i, rc; |
66c3a757 | 1720 | |
c8826dd5 TH |
1721 | ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, |
1722 | cpu_distance_fn); | |
fd1e8a1f TH |
1723 | if (IS_ERR(ai)) |
1724 | return PTR_ERR(ai); | |
66c3a757 | 1725 | |
fd1e8a1f | 1726 | size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; |
c8826dd5 | 1727 | areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); |
fa8a7094 | 1728 | |
999c17e3 | 1729 | areas = memblock_virt_alloc_nopanic(areas_size, 0); |
c8826dd5 | 1730 | if (!areas) { |
fb435d52 | 1731 | rc = -ENOMEM; |
c8826dd5 | 1732 | goto out_free; |
fa8a7094 | 1733 | } |
66c3a757 | 1734 | |
c8826dd5 TH |
1735 | /* allocate, copy and determine base address */ |
1736 | for (group = 0; group < ai->nr_groups; group++) { | |
1737 | struct pcpu_group_info *gi = &ai->groups[group]; | |
1738 | unsigned int cpu = NR_CPUS; | |
1739 | void *ptr; | |
1740 | ||
1741 | for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) | |
1742 | cpu = gi->cpu_map[i]; | |
1743 | BUG_ON(cpu == NR_CPUS); | |
1744 | ||
1745 | /* allocate space for the whole group */ | |
1746 | ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); | |
1747 | if (!ptr) { | |
1748 | rc = -ENOMEM; | |
1749 | goto out_free_areas; | |
1750 | } | |
f528f0b8 CM |
1751 | /* kmemleak tracks the percpu allocations separately */ |
1752 | kmemleak_free(ptr); | |
c8826dd5 | 1753 | areas[group] = ptr; |
fd1e8a1f | 1754 | |
c8826dd5 | 1755 | base = min(ptr, base); |
42b64281 TH |
1756 | } |
1757 | ||
1758 | /* | |
1759 | * Copy data and free unused parts. This should happen after all | |
1760 | * allocations are complete; otherwise, we may end up with | |
1761 | * overlapping groups. | |
1762 | */ | |
1763 | for (group = 0; group < ai->nr_groups; group++) { | |
1764 | struct pcpu_group_info *gi = &ai->groups[group]; | |
1765 | void *ptr = areas[group]; | |
c8826dd5 TH |
1766 | |
1767 | for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { | |
1768 | if (gi->cpu_map[i] == NR_CPUS) { | |
1769 | /* unused unit, free whole */ | |
1770 | free_fn(ptr, ai->unit_size); | |
1771 | continue; | |
1772 | } | |
1773 | /* copy and return the unused part */ | |
1774 | memcpy(ptr, __per_cpu_load, ai->static_size); | |
1775 | free_fn(ptr + size_sum, ai->unit_size - size_sum); | |
1776 | } | |
fa8a7094 | 1777 | } |
66c3a757 | 1778 | |
c8826dd5 | 1779 | /* base address is now known, determine group base offsets */ |
6ea529a2 TH |
1780 | max_distance = 0; |
1781 | for (group = 0; group < ai->nr_groups; group++) { | |
c8826dd5 | 1782 | ai->groups[group].base_offset = areas[group] - base; |
1a0c3298 TH |
1783 | max_distance = max_t(size_t, max_distance, |
1784 | ai->groups[group].base_offset); | |
6ea529a2 TH |
1785 | } |
1786 | max_distance += ai->unit_size; | |
1787 | ||
1788 | /* warn if maximum distance is further than 75% of vmalloc space */ | |
8a092171 | 1789 | if (max_distance > VMALLOC_TOTAL * 3 / 4) { |
1a0c3298 | 1790 | pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc " |
787e5b06 | 1791 | "space 0x%lx\n", max_distance, |
8a092171 | 1792 | VMALLOC_TOTAL); |
6ea529a2 TH |
1793 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK |
1794 | /* and fail if we have fallback */ | |
1795 | rc = -EINVAL; | |
1796 | goto out_free; | |
1797 | #endif | |
1798 | } | |
c8826dd5 | 1799 | |
004018e2 | 1800 | pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", |
fd1e8a1f TH |
1801 | PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, |
1802 | ai->dyn_size, ai->unit_size); | |
d4b95f80 | 1803 | |
fb435d52 | 1804 | rc = pcpu_setup_first_chunk(ai, base); |
c8826dd5 TH |
1805 | goto out_free; |
1806 | ||
1807 | out_free_areas: | |
1808 | for (group = 0; group < ai->nr_groups; group++) | |
f851c8d8 MH |
1809 | if (areas[group]) |
1810 | free_fn(areas[group], | |
1811 | ai->groups[group].nr_units * ai->unit_size); | |
c8826dd5 | 1812 | out_free: |
fd1e8a1f | 1813 | pcpu_free_alloc_info(ai); |
c8826dd5 | 1814 | if (areas) |
999c17e3 | 1815 | memblock_free_early(__pa(areas), areas_size); |
fb435d52 | 1816 | return rc; |
d4b95f80 | 1817 | } |
3c9a024f | 1818 | #endif /* BUILD_EMBED_FIRST_CHUNK */ |
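For NUMA-aware grouping an arch passes @cpu_distance_fn. Below is a minimal sketch, modeled on the x86 callback; `early_cpu_to_node()`, `LOCAL_DISTANCE` and `REMOTE_DISTANCE` are assumed to be provided by the arch:

```c
/* Sketch of a distance callback for pcpu_embed_first_chunk().  CPUs
 * reporting LOCAL_DISTANCE both ways end up sharing a unit group. */
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}
```

Passing NULL instead, as the generic setup_per_cpu_areas() further down does, makes pcpu_build_alloc_info() place every CPU in a single group.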
d4b95f80 | 1819 | |
3c9a024f | 1820 | #ifdef BUILD_PAGE_FIRST_CHUNK |
d4b95f80 | 1821 | /** |
00ae4064 | 1822 | * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages |
d4b95f80 TH |
1823 | * @reserved_size: the size of reserved percpu area in bytes |
1824 | * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE | |
25985edc | 1825 | * @free_fn: function to free percpu page, always called with PAGE_SIZE |
d4b95f80 TH |
1826 | * @populate_pte_fn: function to populate pte |
1827 | * | |
00ae4064 TH |
1828 | * This is a helper to ease setting up page-remapped first percpu |
1829 | * chunk and can be called where pcpu_setup_first_chunk() is expected. | |
d4b95f80 TH |
1830 | * |
1831 | * This is the basic allocator. Static percpu area is allocated | |
1832 | * page-by-page into vmalloc area. | |
1833 | * | |
1834 | * RETURNS: | |
fb435d52 | 1835 | * 0 on success, -errno on failure. |
d4b95f80 | 1836 | */ |
fb435d52 TH |
1837 | int __init pcpu_page_first_chunk(size_t reserved_size, |
1838 | pcpu_fc_alloc_fn_t alloc_fn, | |
1839 | pcpu_fc_free_fn_t free_fn, | |
1840 | pcpu_fc_populate_pte_fn_t populate_pte_fn) | |
d4b95f80 | 1841 | { |
8f05a6a6 | 1842 | static struct vm_struct vm; |
fd1e8a1f | 1843 | struct pcpu_alloc_info *ai; |
00ae4064 | 1844 | char psize_str[16]; |
ce3141a2 | 1845 | int unit_pages; |
d4b95f80 | 1846 | size_t pages_size; |
ce3141a2 | 1847 | struct page **pages; |
fb435d52 | 1848 | int unit, i, j, rc; |
d4b95f80 | 1849 | |
00ae4064 TH |
1850 | snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); |
1851 | ||
4ba6ce25 | 1852 | ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); |
fd1e8a1f TH |
1853 | if (IS_ERR(ai)) |
1854 | return PTR_ERR(ai); | |
1855 | BUG_ON(ai->nr_groups != 1); | |
1856 | BUG_ON(ai->groups[0].nr_units != num_possible_cpus()); | |
1857 | ||
1858 | unit_pages = ai->unit_size >> PAGE_SHIFT; | |
d4b95f80 TH |
1859 | |
1860 | /* unaligned allocations can't be freed, round up to page size */ | |
fd1e8a1f TH |
1861 | pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * |
1862 | sizeof(pages[0])); | |
999c17e3 | 1863 | pages = memblock_virt_alloc(pages_size, 0); |
d4b95f80 | 1864 | |
8f05a6a6 | 1865 | /* allocate pages */ |
d4b95f80 | 1866 | j = 0; |
fd1e8a1f | 1867 | for (unit = 0; unit < num_possible_cpus(); unit++) |
ce3141a2 | 1868 | for (i = 0; i < unit_pages; i++) { |
fd1e8a1f | 1869 | unsigned int cpu = ai->groups[0].cpu_map[unit]; |
d4b95f80 TH |
1870 | void *ptr; |
1871 | ||
3cbc8565 | 1872 | ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); |
d4b95f80 | 1873 | if (!ptr) { |
00ae4064 TH |
1874 | pr_warning("PERCPU: failed to allocate %s page " |
1875 | "for cpu%u\n", psize_str, cpu); | |
d4b95f80 TH |
1876 | goto enomem; |
1877 | } | |
f528f0b8 CM |
1878 | /* kmemleak tracks the percpu allocations separately */ |
1879 | kmemleak_free(ptr); | |
ce3141a2 | 1880 | pages[j++] = virt_to_page(ptr); |
d4b95f80 TH |
1881 | } |
1882 | ||
8f05a6a6 TH |
1883 | /* allocate vm area, map the pages and copy static data */ |
1884 | vm.flags = VM_ALLOC; | |
fd1e8a1f | 1885 | vm.size = num_possible_cpus() * ai->unit_size; |
8f05a6a6 TH |
1886 | vm_area_register_early(&vm, PAGE_SIZE); |
1887 | ||
fd1e8a1f | 1888 | for (unit = 0; unit < num_possible_cpus(); unit++) { |
1d9d3257 | 1889 | unsigned long unit_addr = |
fd1e8a1f | 1890 | (unsigned long)vm.addr + unit * ai->unit_size; |
8f05a6a6 | 1891 | |
ce3141a2 | 1892 | for (i = 0; i < unit_pages; i++) |
8f05a6a6 TH |
1893 | populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); |
1894 | ||
1895 | /* pte already populated, the following shouldn't fail */ | |
fb435d52 TH |
1896 | rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], |
1897 | unit_pages); | |
1898 | if (rc < 0) | |
1899 | panic("failed to map percpu area, err=%d\n", rc); | |
66c3a757 | 1900 | |
8f05a6a6 TH |
1901 | /* |
1902 | * FIXME: Archs with virtual cache should flush local | |
1903 | * cache for the linear mapping here - something | |
1904 | * equivalent to flush_cache_vmap() on the local cpu. | |
1905 | * flush_cache_vmap() can't be used as most supporting | |
1906 | * data structures are not set up yet. | |
1907 | */ | |
1908 | ||
1909 | /* copy static data */ | |
fd1e8a1f | 1910 | memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); |
66c3a757 TH |
1911 | } |
1912 | ||
1913 | /* we're ready, commit */ | |
1d9d3257 | 1914 | pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n", |
fd1e8a1f TH |
1915 | unit_pages, psize_str, vm.addr, ai->static_size, |
1916 | ai->reserved_size, ai->dyn_size); | |
d4b95f80 | 1917 | |
fb435d52 | 1918 | rc = pcpu_setup_first_chunk(ai, vm.addr); |
d4b95f80 TH |
1919 | goto out_free_ar; |
1920 | ||
1921 | enomem: | |
1922 | while (--j >= 0) | |
ce3141a2 | 1923 | free_fn(page_address(pages[j]), PAGE_SIZE); |
fb435d52 | 1924 | rc = -ENOMEM; |
d4b95f80 | 1925 | out_free_ar: |
999c17e3 | 1926 | memblock_free_early(__pa(pages), pages_size); |
fd1e8a1f | 1927 | pcpu_free_alloc_info(ai); |
fb435d52 | 1928 | return rc; |
d4b95f80 | 1929 | } |
3c9a024f | 1930 | #endif /* BUILD_PAGE_FIRST_CHUNK */ |
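A hedged sketch of the callbacks an arch might wire up for pcpu_page_first_chunk(), reusing the memblock helpers that appear later in this file. `populate_arch_pte()` is a hypothetical stand-in for the arch's early page-table hook, and a NUMA-aware arch would allocate on `cpu_to_node(cpu)` rather than anywhere:

```c
/* Both allocation hooks are called with PAGE_SIZE, per the kernel-doc
 * above; cpu is provided so an arch can allocate node-locally. */
static void * __init pcpu_page_alloc(unsigned int cpu, size_t size,
				     size_t align)
{
	/* simplification: node-agnostic allocation */
	return memblock_virt_alloc_from_nopanic(size, align,
						__pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_page_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

static void __init pcpu_populate_pte(unsigned long addr)
{
	populate_arch_pte(addr);	/* hypothetical arch hook */
}

/* from the arch's setup_per_cpu_areas():
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_page_alloc,
 *				   pcpu_page_free, pcpu_populate_pte);
 */
```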
d4b95f80 | 1931 | |
bbddff05 | 1932 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA |
e74e3962 | 1933 | /* |
bbddff05 | 1934 | * Generic SMP percpu area setup. |
e74e3962 TH |
1935 | * |
1936 | * The embedding helper is used because its behavior closely resembles | |
1937 | * the original non-dynamic generic percpu area setup. This is | |
1938 | * important because many archs have addressing restrictions and might | |
1939 | * fail if the percpu area is located far away from the previous | |
1940 | * location. As an added bonus, in non-NUMA cases, embedding is | |
1941 | * generally a good idea TLB-wise because percpu area can piggy back | |
1942 | * on the physical linear memory mapping which uses large page | |
1943 | * mappings on applicable archs. | |
1944 | */ | |
e74e3962 TH |
1945 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; |
1946 | EXPORT_SYMBOL(__per_cpu_offset); | |
1947 | ||
c8826dd5 TH |
1948 | static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, |
1949 | size_t align) | |
1950 | { | |
999c17e3 SS |
1951 | return memblock_virt_alloc_from_nopanic( |
1952 | size, align, __pa(MAX_DMA_ADDRESS)); | |
c8826dd5 | 1953 | } |
66c3a757 | 1954 | |
c8826dd5 TH |
1955 | static void __init pcpu_dfl_fc_free(void *ptr, size_t size) |
1956 | { | |
999c17e3 | 1957 | memblock_free_early(__pa(ptr), size); |
c8826dd5 TH |
1958 | } |
1959 | ||
e74e3962 TH |
1960 | void __init setup_per_cpu_areas(void) |
1961 | { | |
e74e3962 TH |
1962 | unsigned long delta; |
1963 | unsigned int cpu; | |
fb435d52 | 1964 | int rc; |
e74e3962 TH |
1965 | |
1966 | /* | |
1967 | * Always reserve area for module percpu variables. That's | |
1968 | * what the legacy allocator did. | |
1969 | */ | |
fb435d52 | 1970 | rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, |
c8826dd5 TH |
1971 | PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, |
1972 | pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); | |
fb435d52 | 1973 | if (rc < 0) |
bbddff05 | 1974 | panic("Failed to initialize percpu areas."); |
e74e3962 TH |
1975 | |
1976 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; | |
1977 | for_each_possible_cpu(cpu) | |
fb435d52 | 1978 | __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; |
66c3a757 | 1979 | } |
bbddff05 TH |
1980 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ |
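As a reading aid for the final loop above: `__per_cpu_offset[cpu]` is exactly what the generic per-cpu accessors add to a static percpu address. Conceptually (the real per_cpu_ptr() in asm-generic/percpu.h hides this arithmetic behind RELOC_HIDE()):

```c
/* Conceptual sketch only; not how the kernel spells it. */
static inline void *sketch_per_cpu_ptr(void *static_addr, unsigned int cpu)
{
	return (void *)((unsigned long)static_addr + __per_cpu_offset[cpu]);
}
```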
1981 | ||
1982 | #else /* CONFIG_SMP */ | |
1983 | ||
1984 | /* | |
1985 | * UP percpu area setup. | |
1986 | * | |
1987 | * UP always uses km-based percpu allocator with identity mapping. | |
1988 | * Static percpu variables are indistinguishable from the usual static | |
1989 | * variables and don't require any special preparation. | |
1990 | */ | |
1991 | void __init setup_per_cpu_areas(void) | |
1992 | { | |
1993 | const size_t unit_size = | |
1994 | roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, | |
1995 | PERCPU_DYNAMIC_RESERVE)); | |
1996 | struct pcpu_alloc_info *ai; | |
1997 | void *fc; | |
1998 | ||
1999 | ai = pcpu_alloc_alloc_info(1, 1); | |
999c17e3 SS |
2000 | fc = memblock_virt_alloc_from_nopanic(unit_size, |
2001 | PAGE_SIZE, | |
2002 | __pa(MAX_DMA_ADDRESS)); | |
bbddff05 TH |
2003 | if (!ai || !fc) |
2004 | panic("Failed to allocate memory for percpu areas."); | |
100d13c3 CM |
2005 | /* kmemleak tracks the percpu allocations separately */ |
2006 | kmemleak_free(fc); | |
bbddff05 TH |
2007 | |
2008 | ai->dyn_size = unit_size; | |
2009 | ai->unit_size = unit_size; | |
2010 | ai->atom_size = unit_size; | |
2011 | ai->alloc_size = unit_size; | |
2012 | ai->groups[0].nr_units = 1; | |
2013 | ai->groups[0].cpu_map[0] = 0; | |
2014 | ||
2015 | if (pcpu_setup_first_chunk(ai, fc) < 0) | |
2016 | panic("Failed to initialize percpu areas."); | |
3189eddb HL |
2017 | |
2018 | pcpu_free_alloc_info(ai); | |
bbddff05 TH |
2019 | } |
2020 | ||
2021 | #endif /* CONFIG_SMP */ | |
099a19d9 TH |
2022 | |
2023 | /* | |
2024 | * First and reserved chunks are initialized with temporary allocation | |
2025 | * map in initdata so that they can be used before slab is online. | |
2026 | * This function is called after slab is brought up and replaces those | |
2027 | * with properly allocated maps. | |
2028 | */ | |
2029 | void __init percpu_init_late(void) | |
2030 | { | |
2031 | struct pcpu_chunk *target_chunks[] = | |
2032 | { pcpu_first_chunk, pcpu_reserved_chunk, NULL }; | |
2033 | struct pcpu_chunk *chunk; | |
2034 | unsigned long flags; | |
2035 | int i; | |
2036 | ||
2037 | for (i = 0; (chunk = target_chunks[i]); i++) { | |
2038 | int *map; | |
2039 | const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]); | |
2040 | ||
2041 | BUILD_BUG_ON(size > PAGE_SIZE); | |
2042 | ||
90459ce0 | 2043 | map = pcpu_mem_zalloc(size); |
099a19d9 TH |
2044 | BUG_ON(!map); |
2045 | ||
2046 | spin_lock_irqsave(&pcpu_lock, flags); | |
2047 | memcpy(map, chunk->map, size); | |
2048 | chunk->map = map; | |
2049 | spin_unlock_irqrestore(&pcpu_lock, flags); | |
2050 | } | |
2051 | } |