// SPDX-License-Identifier: GPL-2.0
/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate swap slots from the global pool and put
 * them into local per-cpu caches. This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also the opportunity to simply return a slot
 * to the local cache without needing to acquire the swap_info
 * lock. We do not reuse the returned slots directly but
 * move them back to the global pool in a batch. This
 * allows the slots to coalesce and reduces fragmentation.
 *
 * An allocated swap entry is marked with the SWAP_HAS_CACHE
 * flag in map_count, which prevents it from being allocated
 * again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock because we can possibly sleep when searching
 * for slots with scan_swap_map.
 */
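
/*
 * For reference, a sketch of the per-cpu cache this file manages (the
 * real definition lives in <linux/swap_slots.h>; the field names below
 * match their uses in this file):
 *
 *	struct swap_slots_cache {
 *		bool		lock_initialized;
 *		struct mutex	alloc_lock;	-- protects slots, cur, nr
 *		swp_entry_t	*slots;		-- batch of slots to allocate from
 *		int		nr;		-- number of unused slots left
 *		int		cur;		-- index of next slot to hand out
 *		spinlock_t	free_lock;	-- protects slots_ret, n_ret
 *		swp_entry_t	*slots_ret;	-- batch of returned (freed) slots
 *		int		n_ret;		-- number of returned slots held
 *	};
 */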

#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>

#ifdef CONFIG_SWAP

static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool swap_slot_cache_active;
bool swap_slot_cache_enabled;
static bool swap_slot_cache_initialized;
DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);
static void deactivate_swap_slots_cache(void);
static void reactivate_swap_slots_cache(void);

#define use_swap_slot_cache (swap_slot_cache_active && \
	swap_slot_cache_enabled && swap_slot_cache_initialized)
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

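/*
 * Stop handing out cached slots and flush both the allocation and the
 * return caches of every online cpu back to the global pool.
 */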
static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}

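/* Resume use of the per-cpu caches; nothing needs to be drained here. */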
static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	mutex_unlock(&swap_slots_cache_mutex);
}

/* Must not be called with cpu hot plug lock */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		get_online_cpus();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		put_online_cpus();
	}
}

static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

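/*
 * Decide whether the caches should be active. Activation and
 * deactivation compare the number of free swap pages against two
 * different per-cpu thresholds (defined in <linux/swap_slots.h>),
 * which gives hysteresis so the cache does not flap on and off when
 * free swap hovers near a single cutoff.
 */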
static bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled || !swap_slot_cache_initialized)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if global pool of slot caches too low, deactivate cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}

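/*
 * CPU hotplug "online" callback (registered in enable_swap_slots_cache()
 * below): allocate the two slot arrays for this cpu and publish them.
 */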
static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;

	/*
	 * Do allocation outside swap_slots_cache_mutex
	 * as kvzalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	slots = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret)
		/* cache already allocated */
		goto out;
	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	/*
	 * We initialized alloc_lock and free_lock earlier. We use
	 * !cache->slots or !cache->slots_ret to know if it is safe to acquire
	 * the corresponding lock and use the cache. Memory barrier below
	 * ensures the assumption.
	 */
	mb();
	cache->slots = slots;
	slots = NULL;
	cache->slots_ret = slots_ret;
	slots_ret = NULL;
out:
	mutex_unlock(&swap_slots_cache_mutex);
	if (slots)
		kvfree(slots);
	if (slots_ret)
		kvfree(slots_ret);
	return 0;
}

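/*
 * Flush one cpu's caches back to the global pool. @type selects the
 * allocation cache (SLOTS_CACHE), the return cache (SLOTS_CACHE_RET),
 * or both; @free_slots additionally frees the backing arrays.
 */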
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;

	cache = &per_cpu(swp_slots, cpu);
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		if (slots)
			kvfree(slots);
	}
}

static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 *	1) swapoff, when we have to make sure no
	 *	   left over slots are in cache when we remove
	 *	   a swap device;
	 *	2) disabling of swap slot cache, when we run low
	 *	   on swap slots when allocating memory and need
	 *	   to return swap slots to global pool.
	 *
	 * We cannot acquire cpu hot plug lock here as
	 * this function can be invoked in the cpu
	 * hot plug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *	  -> memory allocation -> direct reclaim -> get_swap_page
	 *	  -> drain_swap_slots_cache
	 *
	 * Hence the loop over currently online cpus below could miss a cpu
	 * that is being brought online but not yet marked as online.
	 * That is okay as we do not schedule and run anything on a
	 * cpu before it has been marked online. Hence, we will not
	 * fill any swap slots in the slots cache of such a cpu.
	 * There are no slots on such a cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}

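/*
 * CPU hotplug teardown callback: drain and free this cpu's caches when
 * the cpu goes offline.
 */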
static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

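/*
 * Enable the caches once usable swap is available. The first call
 * registers the cpu hotplug callbacks that allocate and free the
 * per-cpu caches; later calls only re-evaluate swap_slot_cache_enabled.
 */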
int enable_swap_slots_cache(void)
{
	int ret = 0;

	mutex_lock(&swap_slots_cache_enable_mutex);
	if (swap_slot_cache_initialized) {
		__reenable_swap_slots_cache();
		goto out_unlock;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
				alloc_swap_slot_cache, free_slot_cache);
	if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
			       "without swap slots cache.\n", __func__))
		goto out_unlock;

	swap_slot_cache_initialized = true;
	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
	return 0;
}

/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, false,
					   cache->slots);

	return cache->nr;
}

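/*
 * Return a single swap entry. The entry is batched in this cpu's return
 * cache when possible and handed back to the global pool in one go once
 * the cache fills up, or freed directly if the cache is unusable.
 */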
int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	cache = raw_cpu_ptr(&swp_slots);
	if (likely(use_swap_slot_cache && cache->slots_ret)) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache || !cache->slots_ret) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in global pool.
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}

	return 0;
}

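/*
 * Allocate one swap entry for @page. Transparent huge pages bypass the
 * per-cpu cache and go straight to the global pool, as does any request
 * made while the cache is inactive or not yet allocated.
 */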
swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry, *pentry;
	struct swap_slots_cache *cache;

	entry.val = 0;

	if (PageTransHuge(page)) {
		if (IS_ENABLED(CONFIG_THP_SWAP))
			get_swap_pages(1, true, &entry);
		return entry;
	}

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache(). But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	if (likely(check_cache_active() && cache->slots)) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				pentry = &cache->slots[cache->cur++];
				entry = *pentry;
				pentry->val = 0;
				cache->nr--;
			} else {
				if (refill_swap_slots_cache(cache))
					goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			return entry;
	}

	get_swap_pages(1, false, &entry);

	return entry;
}

#endif /* CONFIG_SWAP */