Commit | Line | Data |
---|---|---|
8fa3ed80 DZ |
1 | #ifndef _MM_PERCPU_INTERNAL_H |
2 | #define _MM_PERCPU_INTERNAL_H | |
3 | ||
4 | #include <linux/types.h> | |
5 | #include <linux/percpu.h> | |
6 | ||
/*
 * struct pcpu_chunk - bookkeeping for one chunk of percpu memory.
 *
 * NOTE(review): field access appears to be serialized by pcpu_lock
 * (see the stats helpers below, which assert/take it) — confirm the
 * full locking rules against mm/percpu.c.
 */
struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int			nr_alloc;	/* # of allocations */
	size_t			max_alloc_size;	/* largest allocation size */
#endif

	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */

	int			map_used;	/* # of map entries used before the sentry */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */

	void			*data;		/* chunk data */
	int			first_free;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	int			start_offset;	/* the overlap with the previous
						   region to have a page aligned
						   base_addr */
	int			end_offset;	/* additional area required to
						   have the region end page
						   aligned */
	int			nr_populated;	/* # of populated pages */
	unsigned long		populated[];	/* populated bitmap */
};
35 | ||
/* Shared percpu-allocator state; presumably defined in mm/percpu.c — verify. */
extern spinlock_t pcpu_lock;		/* taken by the stats helpers below */

extern struct list_head *pcpu_slot;	/* chunk slot lists (pcpu_chunk.list) */
extern int pcpu_nr_slots;		/* number of entries in pcpu_slot */
extern int pcpu_nr_empty_pop_pages;	/* # of empty populated pages */

extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;
44 | ||
30a5b536 DZ |
45 | #ifdef CONFIG_PERCPU_STATS |
46 | ||
47 | #include <linux/spinlock.h> | |
48 | ||
/*
 * struct percpu_stats - global allocator statistics, updated under pcpu_lock
 * (area stats) or with pcpu_lock taken locally (chunk stats).
 */
struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};

extern struct percpu_stats pcpu_stats;
extern struct pcpu_alloc_info pcpu_stats_ai;	/* snapshot saved by pcpu_stats_save_ai() */
62 | ||
63 | /* | |
64 | * For debug purposes. We don't care about the flexible array. | |
65 | */ | |
66 | static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai) | |
67 | { | |
68 | memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info)); | |
69 | ||
70 | /* initialize min_alloc_size to unit_size */ | |
71 | pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size; | |
72 | } | |
73 | ||
74 | /* | |
75 | * pcpu_stats_area_alloc - increment area allocation stats | |
76 | * @chunk: the location of the area being allocated | |
77 | * @size: size of area to allocate in bytes | |
78 | * | |
79 | * CONTEXT: | |
80 | * pcpu_lock. | |
81 | */ | |
82 | static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size) | |
83 | { | |
84 | lockdep_assert_held(&pcpu_lock); | |
85 | ||
86 | pcpu_stats.nr_alloc++; | |
87 | pcpu_stats.nr_cur_alloc++; | |
88 | pcpu_stats.nr_max_alloc = | |
89 | max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc); | |
90 | pcpu_stats.min_alloc_size = | |
91 | min(pcpu_stats.min_alloc_size, size); | |
92 | pcpu_stats.max_alloc_size = | |
93 | max(pcpu_stats.max_alloc_size, size); | |
94 | ||
95 | chunk->nr_alloc++; | |
96 | chunk->max_alloc_size = max(chunk->max_alloc_size, size); | |
97 | } | |
98 | ||
99 | /* | |
100 | * pcpu_stats_area_dealloc - decrement allocation stats | |
101 | * @chunk: the location of the area being deallocated | |
102 | * | |
103 | * CONTEXT: | |
104 | * pcpu_lock. | |
105 | */ | |
106 | static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk) | |
107 | { | |
108 | lockdep_assert_held(&pcpu_lock); | |
109 | ||
110 | pcpu_stats.nr_dealloc++; | |
111 | pcpu_stats.nr_cur_alloc--; | |
112 | ||
113 | chunk->nr_alloc--; | |
114 | } | |
115 | ||
116 | /* | |
117 | * pcpu_stats_chunk_alloc - increment chunk stats | |
118 | */ | |
119 | static inline void pcpu_stats_chunk_alloc(void) | |
120 | { | |
303abfdf DZ |
121 | unsigned long flags; |
122 | spin_lock_irqsave(&pcpu_lock, flags); | |
30a5b536 DZ |
123 | |
124 | pcpu_stats.nr_chunks++; | |
125 | pcpu_stats.nr_max_chunks = | |
126 | max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks); | |
127 | ||
303abfdf | 128 | spin_unlock_irqrestore(&pcpu_lock, flags); |
30a5b536 DZ |
129 | } |
130 | ||
131 | /* | |
132 | * pcpu_stats_chunk_dealloc - decrement chunk stats | |
133 | */ | |
134 | static inline void pcpu_stats_chunk_dealloc(void) | |
135 | { | |
303abfdf DZ |
136 | unsigned long flags; |
137 | spin_lock_irqsave(&pcpu_lock, flags); | |
30a5b536 DZ |
138 | |
139 | pcpu_stats.nr_chunks--; | |
140 | ||
303abfdf | 141 | spin_unlock_irqrestore(&pcpu_lock, flags); |
30a5b536 DZ |
142 | } |
143 | ||
144 | #else | |
145 | ||
/* No-op stubs so callers need no #ifdefs when CONFIG_PERCPU_STATS is off. */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}
165 | ||
166 | #endif /* !CONFIG_PERCPU_STATS */ | |
167 | ||
8fa3ed80 | 168 | #endif |