Commit | Line | Data |
---|---|---|
8fa3ed80 DZ |
1 | #ifndef _MM_PERCPU_INTERNAL_H |
2 | #define _MM_PERCPU_INTERNAL_H | |
3 | ||
4 | #include <linux/types.h> | |
5 | #include <linux/percpu.h> | |
6 | ||
/*
 * pcpu_chunk - bookkeeping for one chunk of percpu memory.
 *
 * Chunks are linked on the pcpu_slot lists and track their free space,
 * the offset/size allocation map and the page-population bitmap.  The
 * flexible populated[] array must remain the last member.
 */
struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int nr_alloc;		/* # of live allocations in this chunk
				   (incremented on alloc, decremented
				   on dealloc) */
	size_t max_alloc_size;	/* largest allocation size seen */
#endif

	struct list_head list;		/* linked to pcpu_slot lists */
	int free_size;			/* free bytes in the chunk */
	int contig_hint;		/* max contiguous size hint */
	void *base_addr;		/* base address of this chunk */

	int map_used;			/* # of map entries used before the
					   sentry (terminating entry) */
	int map_alloc;			/* # of map entries allocated */
	int *map;			/* allocation map */
	struct list_head map_extend_list;/* on pcpu_map_extend_chunks */

	void *data;			/* chunk data */
	int first_free;			/* no free below this */
	bool immutable;			/* no [de]population allowed */
	bool has_reserved;		/* Indicates if chunk has reserved space
					   at the beginning. Reserved chunk will
					   contain reservation for static chunk.
					   Dynamic chunk will contain reservation
					   for static and reserved chunks. */
	int nr_populated;		/* # of populated pages */
	unsigned long populated[];	/* populated bitmap */
};
34 | ||
/*
 * Protects chunk state and the statistics updates below (see the
 * lockdep_assert_held()/spin_lock_irqsave() usage in this file).
 */
extern spinlock_t pcpu_lock;

/* chunk slot lists; pcpu_nr_slots is presumably their count -- see percpu.c */
extern struct list_head *pcpu_slot;
extern int pcpu_nr_slots;

/* special chunks; exact roles are defined where they are set up in percpu.c */
extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;
42 | ||
30a5b536 DZ |
43 | #ifdef CONFIG_PERCPU_STATS |
44 | ||
45 | #include <linux/spinlock.h> | |
46 | ||
/*
 * percpu_stats - allocator-wide statistics, updated under pcpu_lock.
 */
struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};
57 | ||
/* global statistics and the saved copy of the allocation info
 * (filled in by pcpu_stats_save_ai()) */
extern struct percpu_stats pcpu_stats;
extern struct pcpu_alloc_info pcpu_stats_ai;
60 | ||
61 | /* | |
62 | * For debug purposes. We don't care about the flexible array. | |
63 | */ | |
64 | static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai) | |
65 | { | |
66 | memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info)); | |
67 | ||
68 | /* initialize min_alloc_size to unit_size */ | |
69 | pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size; | |
70 | } | |
71 | ||
72 | /* | |
73 | * pcpu_stats_area_alloc - increment area allocation stats | |
74 | * @chunk: the location of the area being allocated | |
75 | * @size: size of area to allocate in bytes | |
76 | * | |
77 | * CONTEXT: | |
78 | * pcpu_lock. | |
79 | */ | |
80 | static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size) | |
81 | { | |
82 | lockdep_assert_held(&pcpu_lock); | |
83 | ||
84 | pcpu_stats.nr_alloc++; | |
85 | pcpu_stats.nr_cur_alloc++; | |
86 | pcpu_stats.nr_max_alloc = | |
87 | max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc); | |
88 | pcpu_stats.min_alloc_size = | |
89 | min(pcpu_stats.min_alloc_size, size); | |
90 | pcpu_stats.max_alloc_size = | |
91 | max(pcpu_stats.max_alloc_size, size); | |
92 | ||
93 | chunk->nr_alloc++; | |
94 | chunk->max_alloc_size = max(chunk->max_alloc_size, size); | |
95 | } | |
96 | ||
97 | /* | |
98 | * pcpu_stats_area_dealloc - decrement allocation stats | |
99 | * @chunk: the location of the area being deallocated | |
100 | * | |
101 | * CONTEXT: | |
102 | * pcpu_lock. | |
103 | */ | |
104 | static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk) | |
105 | { | |
106 | lockdep_assert_held(&pcpu_lock); | |
107 | ||
108 | pcpu_stats.nr_dealloc++; | |
109 | pcpu_stats.nr_cur_alloc--; | |
110 | ||
111 | chunk->nr_alloc--; | |
112 | } | |
113 | ||
114 | /* | |
115 | * pcpu_stats_chunk_alloc - increment chunk stats | |
116 | */ | |
117 | static inline void pcpu_stats_chunk_alloc(void) | |
118 | { | |
303abfdf DZ |
119 | unsigned long flags; |
120 | spin_lock_irqsave(&pcpu_lock, flags); | |
30a5b536 DZ |
121 | |
122 | pcpu_stats.nr_chunks++; | |
123 | pcpu_stats.nr_max_chunks = | |
124 | max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks); | |
125 | ||
303abfdf | 126 | spin_unlock_irqrestore(&pcpu_lock, flags); |
30a5b536 DZ |
127 | } |
128 | ||
129 | /* | |
130 | * pcpu_stats_chunk_dealloc - decrement chunk stats | |
131 | */ | |
132 | static inline void pcpu_stats_chunk_dealloc(void) | |
133 | { | |
303abfdf DZ |
134 | unsigned long flags; |
135 | spin_lock_irqsave(&pcpu_lock, flags); | |
30a5b536 DZ |
136 | |
137 | pcpu_stats.nr_chunks--; | |
138 | ||
303abfdf | 139 | spin_unlock_irqrestore(&pcpu_lock, flags); |
30a5b536 DZ |
140 | } |
141 | ||
#else	/* !CONFIG_PERCPU_STATS */

/*
 * Statistics support is compiled out: provide no-op stubs so callers
 * don't need #ifdefs of their own.
 */

static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}

#endif /* !CONFIG_PERCPU_STATS */
165 | ||
8fa3ed80 | 166 | #endif |