// SPDX-License-Identifier: GPL-2.0
/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate the swap slots from the global pool and put
 * them into local per-cpu caches. This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also the opportunity to simply return a slot
 * to the local cache without needing to acquire the swap_info
 * lock. We do not reuse the returned slots directly but
 * move them back to the global pool in a batch. This
 * allows the slots to coalesce and reduces fragmentation.
 *
 * The swap entry allocated is marked with the SWAP_HAS_CACHE
 * flag in map_count, which prevents it from being allocated
 * again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock as when we search for slots with scan_swap_map,
 * we can possibly sleep.
 */
#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
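/*
 * For reference, the per-cpu cache used throughout this file is a
 * struct swap_slots_cache, defined in <linux/swap_slots.h>. A rough
 * sketch of its layout (see that header for the authoritative version):
 *
 *	struct swap_slots_cache {
 *		bool		lock_initialized;
 *		struct mutex	alloc_lock;	// protects slots, nr, cur
 *		swp_entry_t	*slots;		// slots for allocation
 *		int		nr;		// nr of slots left in cache
 *		int		cur;		// next slot to hand out
 *		spinlock_t	free_lock;	// protects slots_ret, n_ret
 *		swp_entry_t	*slots_ret;	// slots returned by free
 *		int		n_ret;		// nr of returned slots held
 *	};
 */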
static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool	swap_slot_cache_active;
bool	swap_slot_cache_enabled;
static bool	swap_slot_cache_initialized;
static DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);
static void deactivate_swap_slots_cache(void);
static void reactivate_swap_slots_cache(void);

#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

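/*
 * Stop handing out cached slots and drain every cpu's cache back to the
 * global pool; called from check_cache_active() when free swap space
 * falls below the deactivation threshold.
 */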
static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}

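/*
 * Mark the cache usable again; the per-cpu caches are refilled lazily,
 * on the next allocation, by refill_swap_slots_cache().
 */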
static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	mutex_unlock(&swap_slots_cache_mutex);
}

/* Must not be called with cpu hot plug lock */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		get_online_cpus();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		put_online_cpus();
	}
}

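/* Re-enable the cache only if there is still usable swap to draw from. */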
static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

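/*
 * Pairs with disable_swap_slots_cache_lock() above, which leaves
 * swap_slots_cache_enable_mutex held; re-enable the cache and unlock.
 */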
void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

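/*
 * Decide whether the cache may be used for this allocation. The activate
 * and deactivate thresholds differ on purpose: the hysteresis keeps the
 * cache from being toggled by every small change in free swap space.
 */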
static bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if the global pool of free swap slots is too low, deactivate cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}

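/*
 * CPU hotplug "online" callback, registered in enable_swap_slots_cache():
 * allocate both slot arrays for a cpu and publish them in its cache.
 */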
static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;

	/*
	 * Do allocation outside swap_slots_cache_mutex
	 * as kvcalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret) {
		/* cache already allocated */
		mutex_unlock(&swap_slots_cache_mutex);

		kvfree(slots);
		kvfree(slots_ret);

		return 0;
	}

	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	/*
	 * We initialized alloc_lock and free_lock earlier. We use
	 * !cache->slots or !cache->slots_ret to know if it is safe to acquire
	 * the corresponding lock and use the cache. Memory barrier below
	 * ensures the assumption.
	 */
	mb();
	cache->slots = slots;
	cache->slots_ret = slots_ret;
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

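/*
 * Flush one cpu's cached slots back to the global pool: the allocation
 * side (SLOTS_CACHE), the return side (SLOTS_CACHE_RET), or both. With
 * free_slots set, the backing arrays are freed as well, as on cpu offline.
 */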
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;

	cache = &per_cpu(swp_slots, cpu);
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		kvfree(slots);
	}
}

static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 *	1) swapoff, when we have to make sure no
	 *	   left over slots are in cache when we remove
	 *	   a swap device;
	 *	2) disabling of swap slot cache, when we run low
	 *	   on swap slots when allocating memory and need
	 *	   to return swap slots to global pool.
	 *
	 * We cannot acquire cpu hot plug lock here as
	 * this function can be invoked in the cpu
	 * hot plug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *	  -> memory allocation -> direct reclaim -> get_swap_page
	 *	  -> drain_swap_slots_cache
	 *
	 * Hence the loop over currently online cpus below could miss a cpu
	 * that is being brought online but is not yet marked as online.
	 * That is okay, as we do not schedule and run anything on a
	 * cpu before it has been marked online. Hence, we will not
	 * fill any swap slots in the slots cache of such a cpu.
	 * There are no slots on such a cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}

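/* CPU hotplug "offline" callback: drain the cpu's cache and free its arrays. */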
static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

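/*
 * Called when swap is turned on (from the swapon path, outside this file):
 * register the cpu hotplug callbacks on first use, then enable the cache.
 */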
void enable_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	if (!swap_slot_cache_initialized) {
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
					alloc_swap_slot_cache, free_slot_cache);
		if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
				       "without swap slots cache.\n", __func__))
			goto out_unlock;

		swap_slot_cache_initialized = true;
	}

	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
					   cache->slots, 1);

	return cache->nr;
}

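/*
 * Return a swap entry to the per-cpu return cache; once the cache holds
 * SWAP_SLOTS_CACHE_SIZE entries they are flushed to the global pool in
 * one batch. Falls back to freeing directly when the cache is unusable.
 */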
int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	cache = raw_cpu_ptr(&swp_slots);
	if (likely(use_swap_slot_cache && cache->slots_ret)) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache || !cache->slots_ret) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in global pool
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}

	return 0;
}

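/*
 * Allocate a swap entry for @page, preferably from the current cpu's slot
 * cache, falling back to the global pool; the entry is charged to the
 * page's memcg, and backed out if the charge fails. THP pages bypass the
 * cache and allocate an HPAGE_PMD_NR-sized cluster directly.
 */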
swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	struct swap_slots_cache *cache;

	entry.val = 0;

	if (PageTransHuge(page)) {
		if (IS_ENABLED(CONFIG_THP_SWAP))
			get_swap_pages(1, &entry, HPAGE_PMD_NR);
		goto out;
	}

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache(). But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	if (likely(check_cache_active() && cache->slots)) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				entry = cache->slots[cache->cur];
				cache->slots[cache->cur++].val = 0;
				cache->nr--;
			} else if (refill_swap_slots_cache(cache)) {
				goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			goto out;
	}

	get_swap_pages(1, &entry, 1);
out:
	if (mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_page(page, entry);
		entry.val = 0;
	}
	return entry;
}