/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_PERCPU_INTERNAL_H
#define _MM_PERCPU_INTERNAL_H

#include <linux/types.h>
#include <linux/percpu.h>
7
/*
 * pcpu_block_md is the metadata block struct.
 * Each chunk's bitmap is split into a number of full blocks.
 * All units are in terms of bits.
 *
 * The scan hint is the largest known contiguous area before the contig hint.
 * It is not necessarily the actual largest contig hint though. There is an
 * invariant that the scan_hint_start > contig_hint_start iff
 * scan_hint == contig_hint. This is necessary because when scanning forward,
 * we don't know if a new contig hint would be better than the current one.
 */
struct pcpu_block_md {
	int	scan_hint;		/* scan hint for block */
	int	scan_hint_start;	/* block relative starting
					   position of the scan hint */
	int	contig_hint;		/* contig hint for block */
	int	contig_hint_start;	/* block relative starting
					   position of the contig hint */
	int	left_free;		/* size of free space along
					   the left side of the block */
	int	right_free;		/* size of free space along
					   the right side of the block */
	int	first_free;		/* block position of first free */
};
32
struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int			nr_alloc;	/* # of allocations */
	size_t			max_alloc_size;	/* largest allocation size */
#endif

	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_bytes;	/* free bytes in the chunk */
	int			contig_bits;	/* max contiguous size hint */
	int			contig_bits_start; /* contig_bits starting
						      offset */
	void			*base_addr;	/* base address of this chunk */

	unsigned long		*alloc_map;	/* allocation map */
	unsigned long		*bound_map;	/* boundary map */
	struct pcpu_block_md	*md_blocks;	/* metadata blocks */

	void			*data;		/* chunk data */
	int			first_bit;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	int			start_offset;	/* the overlap with the previous
						   region to have a page aligned
						   base_addr */
	int			end_offset;	/* additional area required to
						   have the region end page
						   aligned */

	int			nr_pages;	/* # of pages served by this chunk */
	int			nr_populated;	/* # of populated pages */
	int			nr_empty_pop_pages; /* # of empty populated pages */
	unsigned long		populated[];	/* populated bitmap */
};
65
extern spinlock_t pcpu_lock;

extern struct list_head *pcpu_slot;
extern int pcpu_nr_slots;
extern int pcpu_nr_empty_pop_pages;

extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;
ca460b3c
DZF
75/**
76 * pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
77 * @chunk: chunk of interest
78 *
79 * This conversion is from the number of physical pages that the chunk
80 * serves to the number of bitmap blocks used.
81 */
82static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
83{
84 return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
85}
86
40064aec
DZF
87/**
88 * pcpu_nr_pages_to_map_bits - converts the pages to size of bitmap
89 * @pages: number of physical pages
90 *
91 * This conversion is from physical pages to the number of bits
92 * required in the bitmap.
93 */
94static inline int pcpu_nr_pages_to_map_bits(int pages)
95{
96 return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
97}
98
99/**
100 * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
101 * @chunk: chunk of interest
102 *
103 * This conversion is from the number of physical pages that the chunk
104 * serves to the number of bits in the bitmap.
105 */
106static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
107{
108 return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
109}
110
#ifdef CONFIG_PERCPU_STATS

#include <linux/spinlock.h>

/* Global allocator-wide statistics, maintained under pcpu_lock. */
struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};
125
126extern struct percpu_stats pcpu_stats;
127extern struct pcpu_alloc_info pcpu_stats_ai;
128
129/*
130 * For debug purposes. We don't care about the flexible array.
131 */
132static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
133{
134 memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));
135
136 /* initialize min_alloc_size to unit_size */
137 pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
138}
139
140/*
141 * pcpu_stats_area_alloc - increment area allocation stats
142 * @chunk: the location of the area being allocated
143 * @size: size of area to allocate in bytes
144 *
145 * CONTEXT:
146 * pcpu_lock.
147 */
148static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
149{
150 lockdep_assert_held(&pcpu_lock);
151
152 pcpu_stats.nr_alloc++;
153 pcpu_stats.nr_cur_alloc++;
154 pcpu_stats.nr_max_alloc =
155 max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
156 pcpu_stats.min_alloc_size =
157 min(pcpu_stats.min_alloc_size, size);
158 pcpu_stats.max_alloc_size =
159 max(pcpu_stats.max_alloc_size, size);
160
161 chunk->nr_alloc++;
162 chunk->max_alloc_size = max(chunk->max_alloc_size, size);
163}
164
165/*
166 * pcpu_stats_area_dealloc - decrement allocation stats
167 * @chunk: the location of the area being deallocated
168 *
169 * CONTEXT:
170 * pcpu_lock.
171 */
172static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
173{
174 lockdep_assert_held(&pcpu_lock);
175
176 pcpu_stats.nr_dealloc++;
177 pcpu_stats.nr_cur_alloc--;
178
179 chunk->nr_alloc--;
180}
181
182/*
183 * pcpu_stats_chunk_alloc - increment chunk stats
184 */
185static inline void pcpu_stats_chunk_alloc(void)
186{
303abfdf
DZ
187 unsigned long flags;
188 spin_lock_irqsave(&pcpu_lock, flags);
30a5b536
DZ
189
190 pcpu_stats.nr_chunks++;
191 pcpu_stats.nr_max_chunks =
192 max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);
193
303abfdf 194 spin_unlock_irqrestore(&pcpu_lock, flags);
30a5b536
DZ
195}
196
197/*
198 * pcpu_stats_chunk_dealloc - decrement chunk stats
199 */
200static inline void pcpu_stats_chunk_dealloc(void)
201{
303abfdf
DZ
202 unsigned long flags;
203 spin_lock_irqsave(&pcpu_lock, flags);
30a5b536
DZ
204
205 pcpu_stats.nr_chunks--;
206
303abfdf 207 spin_unlock_irqrestore(&pcpu_lock, flags);
30a5b536
DZ
208}
209
210#else
211
/*
 * No-op stubs used when CONFIG_PERCPU_STATS is disabled, so callers in
 * mm/percpu.c can invoke the stats hooks unconditionally.
 */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}
231
232#endif /* !CONFIG_PERCPU_STATS */
233
8fa3ed80 234#endif