// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +--------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active    | <--+
 *              +--------------+                +--------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 * Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *     +-memory available to cache-+
 *     |                           |
 *     +-inactive------+-active----+
 * a b | c d e f g h i | J K L M N |
 *     +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
 *
 * Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 * Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 * Implementation
 *
 * For each node's file LRU lists, a counter for inactive evictions
 * and activations is maintained (lruvec->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */
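
/*
 * A worked example with made-up numbers may help: suppose
 * NR_inactive = 1000 and NR_active = 4000. A page whose shadow
 * entry recorded E = 50000 and that refaults when the counter
 * reads R = 52000 has a refault distance of 2000. Since
 * 2000 <= NR_active, the page could have stayed resident had the
 * active pages given up (some of) their slots, so it is activated.
 * Had the counter read R = 60000 instead, the distance of 10000
 * would exceed NR_active and the refault would be left inactive.
 */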

#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << 1) | workingset;

	return xa_mk_value(eviction);
}
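
/*
 * Sketch of the value handed to xa_mk_value() above, reading from
 * the least significant bit (field widths depend on the kernel
 * configuration):
 *
 *	bit 0:				workingset flag
 *	next NODES_SHIFT bits:		node ID
 *	next MEM_CGROUP_ID_SHIFT bits:	memcg ID
 *	remaining bits:			eviction timestamp, in
 *					bucket_order granularity
 *
 * xa_mk_value() then tags the word as a value entry so the page
 * cache can tell it apart from a page pointer.
 */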

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & 1;
	entry >>= 1;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}

static void advance_inactive_age(struct mem_cgroup *memcg, pg_data_t *pgdat)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		atomic_long_inc(&lruvec->inactive_age);
	} while (memcg && (memcg = parent_mem_cgroup(memcg)));
}
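
/*
 * Hypothetical illustration: for a page in a cgroup hierarchy
 * root -> A -> B, the loop above bumps inactive_age in B's, A's
 * and the root's lruvec for this node, so eviction and refault
 * readings stay comparable at whichever level reclaim operates.
 */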

/**
 * workingset_eviction - note the eviction of a page from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @page->mapping->i_pages in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = page_pgdat(page);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Page is fully exclusive and pins page->mem_cgroup */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	advance_inactive_age(page_memcg(page), pgdat);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->inactive_age);
	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @page: the freshly allocated replacement page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct page *page, void *shadow)
{
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	struct pglist_data *pgdat;
	unsigned long active_file;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !eviction_memcg)
		goto out;
	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->inactive_age);
	active_file = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across inactive_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * inactive_age to lap a shadow entry in the field, which can
	 * then result in a false small refault distance, leading to a
	 * false activation should this old entry actually refault
	 * again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;
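
	/*
	 * Hypothetical numbers: if the timestamps have lapped, say
	 * eviction == EVICTION_MASK - 5 while the counter has since
	 * wrapped around so that refault == 10, the unsigned
	 * subtraction and masking above still yield the true
	 * distance of 16.
	 */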

	/*
	 * The activation decision for this page is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during page reclaim is being determined.
	 *
	 * However, the cgroup that will own the page is the one that
	 * is actually experiencing the refault event.
	 */
	memcg = page_memcg(page);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	inc_lruvec_state(lruvec, WORKINGSET_REFAULT);

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't act on pages that couldn't stay resident even if all
	 * the memory was available to the page cache.
	 */
	if (refault_distance > active_file)
		goto out;

	SetPageActive(page);
	advance_inactive_age(memcg, pgdat);
	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);

	/* Page was active prior to eviction */
	if (workingset) {
		SetPageWorkingset(page);
		inc_lruvec_state(lruvec, WORKINGSET_RESTORE);
	}
out:
	rcu_read_unlock();
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	advance_inactive_age(memcg, page_pgdat(page));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_page_state */

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
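	/*
	 * Worked out with assumed numbers (4k pages, XA_CHUNK_SHIFT
	 * of 6): max_nodes below comes to pages / 8, and each node
	 * occupies PAGE_SIZE / 7 ~= 585 bytes, so the cap amounts
	 * to 585 / (8 * 4096) ~= 1.8% of memory, matching the
	 * estimate above.
	 */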
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(lruvec, NR_SLAB_RECLAIMABLE);
		pages += lruvec_page_state_local(lruvec, NR_SLAB_UNRECLAIMABLE);
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (!nodes)
		return SHRINK_EMPTY;

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	XA_STATE(xas, node->array, 0);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_slab_state(node, WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	mapping->nrexceptional -= node->nr_values;
	xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
	xas.xa_offset = node->offset;
	xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
	xas_set_update(&xas, workingset_update_node);
	/*
	 * We could store a shadow entry here which was the minimum of the
	 * shadow entries we were tracking ...
	 */
	xas_store(&xas, NULL);
	__inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		timestamp_bits, max_order, bucket_order);
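
	/*
	 * Hypothetical example: with, say, 36 usable timestamp bits
	 * and 16GB of RAM (2^22 4k pages, so max_order == 22),
	 * bucket_order stays 0; only when max_order exceeded the
	 * timestamp bits would evictions be grouped into coarser
	 * buckets.
	 */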

	ret = prealloc_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);