/*
 * Internal definitions shared by the percpu allocator implementation
 * and its optional statistics code (CONFIG_PERCPU_STATS).
 */
1 | #ifndef _MM_PERCPU_INTERNAL_H |
2 | #define _MM_PERCPU_INTERNAL_H | |
3 | ||
4 | #include <linux/types.h> | |
5 | #include <linux/percpu.h> | |
6 | ||
/* pcpu_chunk - bookkeeping for one chunk of percpu memory */
struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int			nr_alloc;	/* # of allocations */
	size_t			max_alloc_size;	/* largest allocation size */
#endif

	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */

	int			map_used;	/* # of map entries used before the sentry */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */

	void			*data;		/* chunk data */
	int			first_free;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	bool			has_reserved;	/* Indicates if chunk has reserved space
						   at the beginning. Reserved chunk will
						   contain reservation for static chunk.
						   Dynamic chunk will contain reservation
						   for static and reserved chunks. */
	int			nr_populated;	/* # of populated pages */
	unsigned long		populated[];	/* populated bitmap */
};
34 | ||
/*
 * Protects allocator state; the stats helpers below assert or take it.
 * NOTE(review): spinlock_t is used here but <linux/spinlock.h> is only
 * included under CONFIG_PERCPU_STATS further down -- presumably pulled
 * in via <linux/percpu.h>; confirm.
 */
extern spinlock_t pcpu_lock;

extern struct list_head *pcpu_slot;	/* chunk list slots */
extern int pcpu_nr_slots;		/* number of chunk list slots */
extern int pcpu_nr_empty_pop_pages;	/* # of empty populated pages */

extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;
43 | ||
30a5b536 DZ |
44 | #ifdef CONFIG_PERCPU_STATS |
45 | ||
46 | #include <linux/spinlock.h> | |
47 | ||
/* percpu_stats - global allocator counters exposed for debugging */
struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};
58 | ||
/* single global stats instance and a saved copy of the allocation info */
extern struct percpu_stats pcpu_stats;
extern struct pcpu_alloc_info pcpu_stats_ai;
61 | ||
62 | /* | |
63 | * For debug purposes. We don't care about the flexible array. | |
64 | */ | |
65 | static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai) | |
66 | { | |
67 | memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info)); | |
68 | ||
69 | /* initialize min_alloc_size to unit_size */ | |
70 | pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size; | |
71 | } | |
72 | ||
73 | /* | |
74 | * pcpu_stats_area_alloc - increment area allocation stats | |
75 | * @chunk: the location of the area being allocated | |
76 | * @size: size of area to allocate in bytes | |
77 | * | |
78 | * CONTEXT: | |
79 | * pcpu_lock. | |
80 | */ | |
81 | static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size) | |
82 | { | |
83 | lockdep_assert_held(&pcpu_lock); | |
84 | ||
85 | pcpu_stats.nr_alloc++; | |
86 | pcpu_stats.nr_cur_alloc++; | |
87 | pcpu_stats.nr_max_alloc = | |
88 | max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc); | |
89 | pcpu_stats.min_alloc_size = | |
90 | min(pcpu_stats.min_alloc_size, size); | |
91 | pcpu_stats.max_alloc_size = | |
92 | max(pcpu_stats.max_alloc_size, size); | |
93 | ||
94 | chunk->nr_alloc++; | |
95 | chunk->max_alloc_size = max(chunk->max_alloc_size, size); | |
96 | } | |
97 | ||
98 | /* | |
99 | * pcpu_stats_area_dealloc - decrement allocation stats | |
100 | * @chunk: the location of the area being deallocated | |
101 | * | |
102 | * CONTEXT: | |
103 | * pcpu_lock. | |
104 | */ | |
105 | static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk) | |
106 | { | |
107 | lockdep_assert_held(&pcpu_lock); | |
108 | ||
109 | pcpu_stats.nr_dealloc++; | |
110 | pcpu_stats.nr_cur_alloc--; | |
111 | ||
112 | chunk->nr_alloc--; | |
113 | } | |
114 | ||
115 | /* | |
116 | * pcpu_stats_chunk_alloc - increment chunk stats | |
117 | */ | |
118 | static inline void pcpu_stats_chunk_alloc(void) | |
119 | { | |
303abfdf DZ |
120 | unsigned long flags; |
121 | spin_lock_irqsave(&pcpu_lock, flags); | |
30a5b536 DZ |
122 | |
123 | pcpu_stats.nr_chunks++; | |
124 | pcpu_stats.nr_max_chunks = | |
125 | max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks); | |
126 | ||
303abfdf | 127 | spin_unlock_irqrestore(&pcpu_lock, flags); |
30a5b536 DZ |
128 | } |
129 | ||
130 | /* | |
131 | * pcpu_stats_chunk_dealloc - decrement chunk stats | |
132 | */ | |
133 | static inline void pcpu_stats_chunk_dealloc(void) | |
134 | { | |
303abfdf DZ |
135 | unsigned long flags; |
136 | spin_lock_irqsave(&pcpu_lock, flags); | |
30a5b536 DZ |
137 | |
138 | pcpu_stats.nr_chunks--; | |
139 | ||
303abfdf | 140 | spin_unlock_irqrestore(&pcpu_lock, flags); |
30a5b536 DZ |
141 | } |
142 | ||
143 | #else | |
144 | ||
/* no-op: stats disabled (!CONFIG_PERCPU_STATS) */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}
148 | ||
/* no-op: stats disabled (!CONFIG_PERCPU_STATS) */
static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}
152 | ||
/* no-op: stats disabled (!CONFIG_PERCPU_STATS) */
static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}
156 | ||
/* no-op: stats disabled (!CONFIG_PERCPU_STATS) */
static inline void pcpu_stats_chunk_alloc(void)
{
}
160 | ||
/* no-op: stats disabled (!CONFIG_PERCPU_STATS) */
static inline void pcpu_stats_chunk_dealloc(void)
{
}
164 | ||
165 | #endif /* !CONFIG_PERCPU_STATS */ | |
166 | ||
8fa3ed80 | 167 | #endif |