Merge tag 'pci-v6.16-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci
[linux-2.6-block.git] / mm / ksm.c
CommitLineData
7a338472 1// SPDX-License-Identifier: GPL-2.0-only
f8af4da3 2/*
31dbd01f
IE
3 * Memory merging support.
4 *
5 * This code enables dynamic sharing of identical pages found in different
6 * memory areas, even if they are not shared by fork()
7 *
36b2528d 8 * Copyright (C) 2008-2009 Red Hat, Inc.
31dbd01f
IE
9 * Authors:
10 * Izik Eidus
11 * Andrea Arcangeli
12 * Chris Wright
36b2528d 13 * Hugh Dickins
f8af4da3
HD
14 */
15
16#include <linux/errno.h>
31dbd01f 17#include <linux/mm.h>
36090def 18#include <linux/mm_inline.h>
31dbd01f 19#include <linux/fs.h>
f8af4da3 20#include <linux/mman.h>
31dbd01f 21#include <linux/sched.h>
6e84f315 22#include <linux/sched/mm.h>
4e5fa4f5 23#include <linux/sched/cputime.h>
31dbd01f
IE
24#include <linux/rwsem.h>
25#include <linux/pagemap.h>
26#include <linux/rmap.h>
27#include <linux/spinlock.h>
59e1a2f4 28#include <linux/xxhash.h>
31dbd01f
IE
29#include <linux/delay.h>
30#include <linux/kthread.h>
31#include <linux/wait.h>
32#include <linux/slab.h>
33#include <linux/rbtree.h>
62b61f61 34#include <linux/memory.h>
31dbd01f 35#include <linux/mmu_notifier.h>
2c6854fd 36#include <linux/swap.h>
f8af4da3 37#include <linux/ksm.h>
4ca3a69b 38#include <linux/hashtable.h>
878aee7d 39#include <linux/freezer.h>
72788c38 40#include <linux/oom.h>
90bd6fd3 41#include <linux/numa.h>
d7c0e68d 42#include <linux/pagewalk.h>
f8af4da3 43
31dbd01f 44#include <asm/tlbflush.h>
73848b46 45#include "internal.h"
58730ab6 46#include "mm_slot.h"
31dbd01f 47
739100c8
SR
48#define CREATE_TRACE_POINTS
49#include <trace/events/ksm.h>
50
e850dcf5
HD
51#ifdef CONFIG_NUMA
52#define NUMA(x) (x)
53#define DO_NUMA(x) do { (x); } while (0)
54#else
55#define NUMA(x) (0)
56#define DO_NUMA(x) do { } while (0)
57#endif
58
5e924ff5
SR
59typedef u8 rmap_age_t;
60
5a2ca3ef
MR
61/**
62 * DOC: Overview
63 *
31dbd01f
IE
64 * A few notes about the KSM scanning process,
65 * to make it easier to understand the data structures below:
66 *
67 * In order to reduce excessive scanning, KSM sorts the memory pages by their
68 * contents into a data structure that holds pointers to the pages' locations.
69 *
70 * Since the contents of the pages may change at any moment, KSM cannot just
71 * insert the pages into a normal sorted tree and expect it to find anything.
72 * Therefore KSM uses two data structures - the stable and the unstable tree.
73 *
74 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
75 * by their contents. Because each such page is write-protected, searching on
76 * this tree is fully assured to be working (except when pages are unmapped),
77 * and therefore this tree is called the stable tree.
78 *
5a2ca3ef
MR
79 * The stable tree node includes information required for reverse
80 * mapping from a KSM page to virtual addresses that map this page.
81 *
82 * In order to avoid large latencies of the rmap walks on KSM pages,
83 * KSM maintains two types of nodes in the stable tree:
84 *
85 * * the regular nodes that keep the reverse mapping structures in a
86 * linked list
87 * * the "chains" that link nodes ("dups") that represent the same
88 * write protected memory content, but each "dup" corresponds to a
89 * different KSM page copy of that content
90 *
91 * Internally, the regular nodes, "dups" and "chains" are represented
21fbd591 92 * using the same struct ksm_stable_node structure.
5a2ca3ef 93 *
31dbd01f
IE
94 * In addition to the stable tree, KSM uses a second data structure called the
95 * unstable tree: this tree holds pointers to pages which have been found to
96 * be "unchanged for a period of time". The unstable tree sorts these pages
97 * by their contents, but since they are not write-protected, KSM cannot rely
98 * upon the unstable tree to work correctly - the unstable tree is liable to
99 * be corrupted as its contents are modified, and so it is called unstable.
100 *
101 * KSM solves this problem by several techniques:
102 *
103 * 1) The unstable tree is flushed every time KSM completes scanning all
104 * memory areas, and then the tree is rebuilt again from the beginning.
105 * 2) KSM will only insert into the unstable tree, pages whose hash value
106 * has not changed since the previous scan of all memory areas.
107 * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the
108 * colors of the nodes and not on their contents, assuring that even when
109 * the tree gets "corrupted" it won't get out of balance, so scanning time
110 * remains the same (also, searching and inserting nodes in an rbtree uses
111 * the same algorithm, so we have no overhead when we flush and rebuild).
112 * 4) KSM never flushes the stable tree, which means that even if it were to
113 * take 10 attempts to find a page in the unstable tree, once it is found,
114 * it is secured in the stable tree. (When we scan a new page, we first
115 * compare it against the stable tree, and then against the unstable tree.)
8fdb3dbf
HD
116 *
117 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
118 * stable trees and multiple unstable trees: one of each for each NUMA node.
31dbd01f
IE
119 */
120
121/**
21fbd591 122 * struct ksm_mm_slot - ksm information per mm that is being scanned
58730ab6 123 * @slot: hash lookup from mm to mm_slot
6514d511 124 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
31dbd01f 125 */
21fbd591 126struct ksm_mm_slot {
58730ab6 127 struct mm_slot slot;
21fbd591 128 struct ksm_rmap_item *rmap_list;
31dbd01f
IE
129};
130
131/**
132 * struct ksm_scan - cursor for scanning
133 * @mm_slot: the current mm_slot we are scanning
134 * @address: the next address inside that to be scanned
6514d511 135 * @rmap_list: link to the next rmap to be scanned in the rmap_list
31dbd01f
IE
136 * @seqnr: count of completed full scans (needed when removing unstable node)
137 *
138 * There is only the one ksm_scan instance of this cursor structure.
139 */
140struct ksm_scan {
21fbd591 141 struct ksm_mm_slot *mm_slot;
31dbd01f 142 unsigned long address;
21fbd591 143 struct ksm_rmap_item **rmap_list;
31dbd01f
IE
144 unsigned long seqnr;
145};
146
7b6ba2c7 147/**
21fbd591 148 * struct ksm_stable_node - node of the stable rbtree
7b6ba2c7 149 * @node: rb node of this ksm page in the stable tree
4146d2d6 150 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
2c653d0e 151 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
4146d2d6 152 * @list: linked into migrate_nodes, pending placement in the proper node tree
7b6ba2c7 153 * @hlist: hlist head of rmap_items using this ksm page
4146d2d6 154 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
2c653d0e
AA
155 * @chain_prune_time: time of the last full garbage collection
156 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
4146d2d6 157 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
7b6ba2c7 158 */
21fbd591 159struct ksm_stable_node {
4146d2d6
HD
160 union {
161 struct rb_node node; /* when node of stable tree */
162 struct { /* when listed for migration */
163 struct list_head *head;
2c653d0e
AA
164 struct {
165 struct hlist_node hlist_dup;
166 struct list_head list;
167 };
4146d2d6
HD
168 };
169 };
7b6ba2c7 170 struct hlist_head hlist;
2c653d0e
AA
171 union {
172 unsigned long kpfn;
173 unsigned long chain_prune_time;
174 };
175 /*
176 * STABLE_NODE_CHAIN can be any negative number in
177 * rmap_hlist_len negative range, but better not -1 to be able
178 * to reliably detect underflows.
179 */
180#define STABLE_NODE_CHAIN -1024
181 int rmap_hlist_len;
4146d2d6
HD
182#ifdef CONFIG_NUMA
183 int nid;
184#endif
7b6ba2c7
HD
185};
186
31dbd01f 187/**
21fbd591 188 * struct ksm_rmap_item - reverse mapping item for virtual addresses
6514d511 189 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
db114b83 190 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
bc56620b 191 * @nid: NUMA node id of unstable tree in which linked (may not match page)
31dbd01f
IE
192 * @mm: the memory structure this rmap_item is pointing into
193 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
194 * @oldchecksum: previous checksum of the page at that virtual address
7b6ba2c7
HD
195 * @node: rb node of this rmap_item in the unstable tree
196 * @head: pointer to stable_node heading this list in the stable tree
197 * @hlist: link into hlist of rmap_items hanging off that stable_node
5e924ff5
SR
198 * @age: number of scan iterations since creation
199 * @remaining_skips: how many scans to skip
31dbd01f 200 */
21fbd591
QZ
201struct ksm_rmap_item {
202 struct ksm_rmap_item *rmap_list;
bc56620b
HD
203 union {
204 struct anon_vma *anon_vma; /* when stable */
205#ifdef CONFIG_NUMA
206 int nid; /* when node of unstable tree */
207#endif
208 };
31dbd01f
IE
209 struct mm_struct *mm;
210 unsigned long address; /* + low bits used for flags below */
7b6ba2c7 211 unsigned int oldchecksum; /* when unstable */
5e924ff5
SR
212 rmap_age_t age;
213 rmap_age_t remaining_skips;
31dbd01f 214 union {
7b6ba2c7
HD
215 struct rb_node node; /* when node of unstable tree */
216 struct { /* when listed from stable tree */
21fbd591 217 struct ksm_stable_node *head;
7b6ba2c7
HD
218 struct hlist_node hlist;
219 };
31dbd01f
IE
220 };
221};
222
223#define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
7b6ba2c7
HD
224#define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
225#define STABLE_FLAG 0x200 /* is listed from the stable tree */
31dbd01f
IE
226
227/* The stable and unstable tree heads */
ef53d16c
HD
228static struct rb_root one_stable_tree[1] = { RB_ROOT };
229static struct rb_root one_unstable_tree[1] = { RB_ROOT };
230static struct rb_root *root_stable_tree = one_stable_tree;
231static struct rb_root *root_unstable_tree = one_unstable_tree;
31dbd01f 232
4146d2d6
HD
233/* Recently migrated nodes of stable tree, pending proper placement */
234static LIST_HEAD(migrate_nodes);
2c653d0e 235#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
4146d2d6 236
4ca3a69b
SL
237#define MM_SLOTS_HASH_BITS 10
238static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
31dbd01f 239
21fbd591 240static struct ksm_mm_slot ksm_mm_head = {
58730ab6 241 .slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
31dbd01f
IE
242};
243static struct ksm_scan ksm_scan = {
244 .mm_slot = &ksm_mm_head,
245};
246
247static struct kmem_cache *rmap_item_cache;
7b6ba2c7 248static struct kmem_cache *stable_node_cache;
31dbd01f
IE
249static struct kmem_cache *mm_slot_cache;
250
4e5fa4f5
SR
251/* Default number of pages to scan per batch */
252#define DEFAULT_PAGES_TO_SCAN 100
253
b348b5fe
SR
254/* The number of pages scanned */
255static unsigned long ksm_pages_scanned;
256
31dbd01f 257/* The number of nodes in the stable tree */
b4028260 258static unsigned long ksm_pages_shared;
31dbd01f 259
e178dfde 260/* The number of page slots additionally sharing those nodes */
b4028260 261static unsigned long ksm_pages_sharing;
31dbd01f 262
473b0ce4
HD
263/* The number of nodes in the unstable tree */
264static unsigned long ksm_pages_unshared;
265
266/* The number of rmap_items in use: to calculate pages_volatile */
267static unsigned long ksm_rmap_items;
268
2c653d0e
AA
269/* The number of stable_node chains */
270static unsigned long ksm_stable_node_chains;
271
272/* The number of stable_node dups linked to the stable_node chains */
273static unsigned long ksm_stable_node_dups;
274
275/* Delay in pruning stale stable_node_dups in the stable_node_chains */
584ff0df 276static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;
2c653d0e
AA
277
278/* Maximum number of page slots sharing a stable node */
279static int ksm_max_page_sharing = 256;
280
31dbd01f 281/* Number of pages ksmd should scan in one batch */
4e5fa4f5 282static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
31dbd01f
IE
283
284/* Milliseconds ksmd should sleep between batches */
2ffd8679 285static unsigned int ksm_thread_sleep_millisecs = 20;
31dbd01f 286
e86c59b1
CI
287/* Checksum of an empty (zeroed) page */
288static unsigned int zero_checksum __read_mostly;
289
290/* Whether to merge empty (zeroed) pages with actual zero pages */
291static bool ksm_use_zero_pages __read_mostly;
292
5e924ff5
SR
293/* Skip pages that couldn't be de-duplicated previously */
294/* Default to true at least temporarily, for testing */
295static bool ksm_smart_scan = true;
296
e2942062 297/* The number of zero pages which is placed by KSM */
c2dc78b8 298atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
e2942062 299
e5a68991
SR
300/* The number of pages that have been skipped due to "smart scanning" */
301static unsigned long ksm_pages_skipped;
302
4e5fa4f5
SR
303/* Don't scan more than max pages per batch. */
304static unsigned long ksm_advisor_max_pages_to_scan = 30000;
305
306/* Min CPU for scanning pages per scan */
307#define KSM_ADVISOR_MIN_CPU 10
308
309/* Max CPU for scanning pages per scan */
310static unsigned int ksm_advisor_max_cpu = 70;
311
312/* Target scan time in seconds to analyze all KSM candidate pages. */
313static unsigned long ksm_advisor_target_scan_time = 200;
314
315/* Exponentially weighted moving average. */
316#define EWMA_WEIGHT 30
317
318/**
319 * struct advisor_ctx - metadata for KSM advisor
320 * @start_scan: start time of the current scan
321 * @scan_time: scan time of previous scan
322 * @change: change in percent to pages_to_scan parameter
323 * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
324 */
325struct advisor_ctx {
326 ktime_t start_scan;
327 unsigned long scan_time;
328 unsigned long change;
329 unsigned long long cpu_time;
330};
331static struct advisor_ctx advisor_ctx;
332
333/* Define different advisor's */
334enum ksm_advisor_type {
335 KSM_ADVISOR_NONE,
336 KSM_ADVISOR_SCAN_TIME,
337};
338static enum ksm_advisor_type ksm_advisor;
339
66790e9a
SR
340#ifdef CONFIG_SYSFS
341/*
342 * Only called through the sysfs control interface:
343 */
344
345/* At least scan this many pages per batch. */
346static unsigned long ksm_advisor_min_pages_to_scan = 500;
347
348static void set_advisor_defaults(void)
349{
350 if (ksm_advisor == KSM_ADVISOR_NONE) {
351 ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
352 } else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
353 advisor_ctx = (const struct advisor_ctx){ 0 };
354 ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
355 }
356}
357#endif /* CONFIG_SYSFS */
358
4e5fa4f5
SR
359static inline void advisor_start_scan(void)
360{
361 if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
362 advisor_ctx.start_scan = ktime_get();
363}
364
365/*
366 * Use previous scan time if available, otherwise use current scan time as an
367 * approximation for the previous scan time.
368 */
369static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
370 unsigned long scan_time)
371{
372 return ctx->scan_time ? ctx->scan_time : scan_time;
373}
374
375/* Calculate exponential weighted moving average */
376static unsigned long ewma(unsigned long prev, unsigned long curr)
377{
378 return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
379}
380
381/*
382 * The scan time advisor is based on the current scan rate and the target
383 * scan rate.
384 *
385 * new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
386 *
387 * To avoid perturbations it calculates a change factor of previous changes.
388 * A new change factor is calculated for each iteration and it uses an
389 * exponentially weighted moving average. The new pages_to_scan value is
390 * multiplied with that change factor:
391 *
392 * new_pages_to_scan *= change facor
393 *
394 * The new_pages_to_scan value is limited by the cpu min and max values. It
395 * calculates the cpu percent for the last scan and calculates the new
396 * estimated cpu percent cost for the next scan. That value is capped by the
397 * cpu min and max setting.
398 *
399 * In addition the new pages_to_scan value is capped by the max and min
400 * limits.
401 */
402static void scan_time_advisor(void)
403{
404 unsigned int cpu_percent;
405 unsigned long cpu_time;
406 unsigned long cpu_time_diff;
407 unsigned long cpu_time_diff_ms;
408 unsigned long pages;
409 unsigned long per_page_cost;
410 unsigned long factor;
411 unsigned long change;
412 unsigned long last_scan_time;
413 unsigned long scan_time;
414
415 /* Convert scan time to seconds */
416 scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
417 MSEC_PER_SEC);
418 scan_time = scan_time ? scan_time : 1;
419
420 /* Calculate CPU consumption of ksmd background thread */
421 cpu_time = task_sched_runtime(current);
422 cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
423 cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;
424
425 cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
426 cpu_percent = cpu_percent ? cpu_percent : 1;
427 last_scan_time = prev_scan_time(&advisor_ctx, scan_time);
428
429 /* Calculate scan time as percentage of target scan time */
430 factor = ksm_advisor_target_scan_time * 100 / scan_time;
431 factor = factor ? factor : 1;
432
433 /*
434 * Calculate scan time as percentage of last scan time and use
435 * exponentially weighted average to smooth it
436 */
437 change = scan_time * 100 / last_scan_time;
438 change = change ? change : 1;
439 change = ewma(advisor_ctx.change, change);
440
441 /* Calculate new scan rate based on target scan rate. */
442 pages = ksm_thread_pages_to_scan * 100 / factor;
443 /* Update pages_to_scan by weighted change percentage. */
444 pages = pages * change / 100;
445
446 /* Cap new pages_to_scan value */
447 per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
448 per_page_cost = per_page_cost ? per_page_cost : 1;
449
450 pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
451 pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
452 pages = min(pages, ksm_advisor_max_pages_to_scan);
453
454 /* Update advisor context */
455 advisor_ctx.change = change;
456 advisor_ctx.scan_time = scan_time;
457 advisor_ctx.cpu_time = cpu_time;
458
459 ksm_thread_pages_to_scan = pages;
5088b497 460 trace_ksm_advisor(scan_time, pages, cpu_percent);
4e5fa4f5
SR
461}
462
463static void advisor_stop_scan(void)
464{
465 if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
466 scan_time_advisor();
467}
468
e850dcf5 469#ifdef CONFIG_NUMA
90bd6fd3
PH
470/* Zeroed when merging across nodes is not allowed */
471static unsigned int ksm_merge_across_nodes = 1;
ef53d16c 472static int ksm_nr_node_ids = 1;
e850dcf5
HD
473#else
474#define ksm_merge_across_nodes 1U
ef53d16c 475#define ksm_nr_node_ids 1
e850dcf5 476#endif
90bd6fd3 477
31dbd01f
IE
478#define KSM_RUN_STOP 0
479#define KSM_RUN_MERGE 1
480#define KSM_RUN_UNMERGE 2
ef4d43a8
HD
481#define KSM_RUN_OFFLINE 4
482static unsigned long ksm_run = KSM_RUN_STOP;
483static void wait_while_offlining(void);
31dbd01f
IE
484
485static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
fcf9a0ef 486static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
31dbd01f
IE
487static DEFINE_MUTEX(ksm_thread_mutex);
488static DEFINE_SPINLOCK(ksm_mmlist_lock);
489
31dbd01f
IE
490static int __init ksm_slab_init(void)
491{
aa1b9489 492 rmap_item_cache = KMEM_CACHE(ksm_rmap_item, 0);
31dbd01f
IE
493 if (!rmap_item_cache)
494 goto out;
495
aa1b9489 496 stable_node_cache = KMEM_CACHE(ksm_stable_node, 0);
7b6ba2c7
HD
497 if (!stable_node_cache)
498 goto out_free1;
499
aa1b9489 500 mm_slot_cache = KMEM_CACHE(ksm_mm_slot, 0);
31dbd01f 501 if (!mm_slot_cache)
7b6ba2c7 502 goto out_free2;
31dbd01f
IE
503
504 return 0;
505
7b6ba2c7
HD
506out_free2:
507 kmem_cache_destroy(stable_node_cache);
508out_free1:
31dbd01f
IE
509 kmem_cache_destroy(rmap_item_cache);
510out:
511 return -ENOMEM;
512}
513
514static void __init ksm_slab_free(void)
515{
516 kmem_cache_destroy(mm_slot_cache);
7b6ba2c7 517 kmem_cache_destroy(stable_node_cache);
31dbd01f
IE
518 kmem_cache_destroy(rmap_item_cache);
519 mm_slot_cache = NULL;
520}
521
21fbd591 522static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
2c653d0e
AA
523{
524 return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
525}
526
21fbd591 527static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup)
2c653d0e
AA
528{
529 return dup->head == STABLE_NODE_DUP_HEAD;
530}
531
21fbd591
QZ
532static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup,
533 struct ksm_stable_node *chain)
2c653d0e
AA
534{
535 VM_BUG_ON(is_stable_node_dup(dup));
536 dup->head = STABLE_NODE_DUP_HEAD;
537 VM_BUG_ON(!is_stable_node_chain(chain));
538 hlist_add_head(&dup->hlist_dup, &chain->hlist);
539 ksm_stable_node_dups++;
540}
541
21fbd591 542static inline void __stable_node_dup_del(struct ksm_stable_node *dup)
2c653d0e 543{
b4fecc67 544 VM_BUG_ON(!is_stable_node_dup(dup));
2c653d0e
AA
545 hlist_del(&dup->hlist_dup);
546 ksm_stable_node_dups--;
547}
548
21fbd591 549static inline void stable_node_dup_del(struct ksm_stable_node *dup)
2c653d0e
AA
550{
551 VM_BUG_ON(is_stable_node_chain(dup));
552 if (is_stable_node_dup(dup))
553 __stable_node_dup_del(dup);
554 else
555 rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
556#ifdef CONFIG_DEBUG_VM
557 dup->head = NULL;
558#endif
559}
560
21fbd591 561static inline struct ksm_rmap_item *alloc_rmap_item(void)
31dbd01f 562{
21fbd591 563 struct ksm_rmap_item *rmap_item;
473b0ce4 564
5b398e41 565 rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
566 __GFP_NORETRY | __GFP_NOWARN);
473b0ce4
HD
567 if (rmap_item)
568 ksm_rmap_items++;
569 return rmap_item;
31dbd01f
IE
570}
571
21fbd591 572static inline void free_rmap_item(struct ksm_rmap_item *rmap_item)
31dbd01f 573{
473b0ce4 574 ksm_rmap_items--;
cb4df4ca 575 rmap_item->mm->ksm_rmap_items--;
31dbd01f
IE
576 rmap_item->mm = NULL; /* debug safety */
577 kmem_cache_free(rmap_item_cache, rmap_item);
578}
579
21fbd591 580static inline struct ksm_stable_node *alloc_stable_node(void)
7b6ba2c7 581{
6213055f 582 /*
583 * The allocation can take too long with GFP_KERNEL when memory is under
584 * pressure, which may lead to hung task warnings. Adding __GFP_HIGH
585 * grants access to memory reserves, helping to avoid this problem.
586 */
587 return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
7b6ba2c7
HD
588}
589
21fbd591 590static inline void free_stable_node(struct ksm_stable_node *stable_node)
7b6ba2c7 591{
2c653d0e
AA
592 VM_BUG_ON(stable_node->rmap_hlist_len &&
593 !is_stable_node_chain(stable_node));
7b6ba2c7
HD
594 kmem_cache_free(stable_node_cache, stable_node);
595}
596
a913e182
HD
597/*
598 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
599 * page tables after it has passed through ksm_exit() - which, if necessary,
c1e8d7c6 600 * takes mmap_lock briefly to serialize against them. ksm_exit() does not set
a913e182
HD
601 * a special flag: they can just back out as soon as mm_users goes to zero.
602 * ksm_test_exit() is used throughout to make this test for exit: in some
603 * places for correctness, in some places just to avoid unnecessary work.
604 */
605static inline bool ksm_test_exit(struct mm_struct *mm)
606{
607 return atomic_read(&mm->mm_users) == 0;
608}
609
31dbd01f 610/*
6cce3314
DH
611 * We use break_ksm to break COW on a ksm page by triggering unsharing,
612 * such that the ksm page will get replaced by an exclusive anonymous page.
31dbd01f 613 *
6cce3314 614 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
31dbd01f
IE
615 * in case the application has unmapped and remapped mm,addr meanwhile.
616 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
bbcd53c9 617 * mmap of /dev/mem, where we would not want to touch it.
1b2ee126 618 *
6cce3314 619 * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context
1b2ee126
DH
620 * of the process that owns 'vma'. We also do not want to enforce
621 * protection keys here anyway.
31dbd01f 622 */
49b06385 623static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
31dbd01f 624{
50a7ca3c 625 vm_fault_t ret = 0;
e317a8d8
DH
626
627 if (lock_vma)
628 vma_start_write(vma);
31dbd01f
IE
629
630 do {
e317a8d8
DH
631 bool ksm_page = false;
632 struct folio_walk fw;
633 struct folio *folio;
58f595c6 634
31dbd01f 635 cond_resched();
e317a8d8
DH
636 folio = folio_walk_start(&fw, vma, addr,
637 FW_MIGRATION | FW_ZEROPAGE);
638 if (folio) {
639 /* Small folio implies FW_LEVEL_PTE. */
640 if (!folio_test_large(folio) &&
641 (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte)))
642 ksm_page = true;
643 folio_walk_end(&fw, vma);
644 }
645
58f595c6
DH
646 if (!ksm_page)
647 return 0;
648 ret = handle_mm_fault(vma, addr,
6cce3314 649 FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
58f595c6
DH
650 NULL);
651 } while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
d952b791 652 /*
58f595c6
DH
653 * We must loop until we no longer find a KSM page because
654 * handle_mm_fault() may back out if there's any difficulty e.g. if
655 * pte accessed bit gets updated concurrently.
d952b791
HD
656 *
657 * VM_FAULT_SIGBUS could occur if we race with truncation of the
658 * backing file, which also invalidates anonymous pages: that's
b9a25635 659 * okay, that truncation will have unmapped the KSM page for us.
d952b791
HD
660 *
661 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
662 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
663 * current task has TIF_MEMDIE set, and will be OOM killed on return
664 * to user; and ksmd, having no mm, would never be chosen for that.
665 *
666 * But if the mm is in a limited mem_cgroup, then the fault may fail
667 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
668 * even ksmd can fail in this way - though it's usually breaking ksm
669 * just to undo a merge it made a moment before, so unlikely to oom.
670 *
671 * That's a pity: we might therefore have more kernel pages allocated
672 * than we're counting as nodes in the stable tree; but ksm_do_scan
673 * will retry to break_cow on each pass, so should recover the page
674 * in due course. The important thing is to not let VM_MERGEABLE
675 * be cleared while any such pages might remain in the area.
676 */
677 return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
31dbd01f
IE
678}
679
d7597f59
SR
680static bool vma_ksm_compatible(struct vm_area_struct *vma)
681{
682 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP |
683 VM_IO | VM_DONTEXPAND | VM_HUGETLB |
9651fced 684 VM_MIXEDMAP| VM_DROPPABLE))
d7597f59
SR
685 return false; /* just ignore the advice */
686
687 if (vma_is_dax(vma))
688 return false;
689
690#ifdef VM_SAO
691 if (vma->vm_flags & VM_SAO)
692 return false;
693#endif
694#ifdef VM_SPARC_ADI
695 if (vma->vm_flags & VM_SPARC_ADI)
696 return false;
697#endif
698
699 return true;
700}
701
ef694222
BL
702static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
703 unsigned long addr)
704{
705 struct vm_area_struct *vma;
706 if (ksm_test_exit(mm))
707 return NULL;
ff69fb81
LH
708 vma = vma_lookup(mm, addr);
709 if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
ef694222
BL
710 return NULL;
711 return vma;
712}
713
21fbd591 714static void break_cow(struct ksm_rmap_item *rmap_item)
31dbd01f 715{
8dd3557a
HD
716 struct mm_struct *mm = rmap_item->mm;
717 unsigned long addr = rmap_item->address;
31dbd01f
IE
718 struct vm_area_struct *vma;
719
4035c07a
HD
720 /*
721 * It is not an accident that whenever we want to break COW
722 * to undo, we also need to drop a reference to the anon_vma.
723 */
9e60109f 724 put_anon_vma(rmap_item->anon_vma);
4035c07a 725
d8ed45c5 726 mmap_read_lock(mm);
ef694222
BL
727 vma = find_mergeable_vma(mm, addr);
728 if (vma)
49b06385 729 break_ksm(vma, addr, false);
d8ed45c5 730 mmap_read_unlock(mm);
31dbd01f
IE
731}
732
21fbd591 733static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
31dbd01f
IE
734{
735 struct mm_struct *mm = rmap_item->mm;
736 unsigned long addr = rmap_item->address;
737 struct vm_area_struct *vma;
184e916c
DH
738 struct page *page = NULL;
739 struct folio_walk fw;
740 struct folio *folio;
31dbd01f 741
d8ed45c5 742 mmap_read_lock(mm);
ef694222
BL
743 vma = find_mergeable_vma(mm, addr);
744 if (!vma)
31dbd01f
IE
745 goto out;
746
184e916c
DH
747 folio = folio_walk_start(&fw, vma, addr, 0);
748 if (folio) {
749 if (!folio_is_zone_device(folio) &&
750 folio_test_anon(folio)) {
751 folio_get(folio);
752 page = fw.page;
753 }
754 folio_walk_end(&fw, vma);
755 }
756out:
757 if (page) {
31dbd01f
IE
758 flush_anon_page(vma, page, addr);
759 flush_dcache_page(page);
31dbd01f 760 }
d8ed45c5 761 mmap_read_unlock(mm);
31dbd01f
IE
762 return page;
763}
764
90bd6fd3
PH
765/*
766 * This helper is used for getting right index into array of tree roots.
767 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
768 * stable and unstable pages from all nodes with roots in index 0. Otherwise,
769 * every node has its own stable and unstable tree.
770 */
771static inline int get_kpfn_nid(unsigned long kpfn)
772{
d8fc16a8 773 return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
90bd6fd3
PH
774}
775
21fbd591 776static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup,
2c653d0e
AA
777 struct rb_root *root)
778{
21fbd591 779 struct ksm_stable_node *chain = alloc_stable_node();
2c653d0e
AA
780 VM_BUG_ON(is_stable_node_chain(dup));
781 if (likely(chain)) {
782 INIT_HLIST_HEAD(&chain->hlist);
783 chain->chain_prune_time = jiffies;
784 chain->rmap_hlist_len = STABLE_NODE_CHAIN;
785#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
98fa15f3 786 chain->nid = NUMA_NO_NODE; /* debug */
2c653d0e
AA
787#endif
788 ksm_stable_node_chains++;
789
790 /*
791 * Put the stable node chain in the first dimension of
792 * the stable tree and at the same time remove the old
793 * stable node.
794 */
795 rb_replace_node(&dup->node, &chain->node, root);
796
797 /*
798 * Move the old stable node to the second dimension
799 * queued in the hlist_dup. The invariant is that all
800 * dup stable_nodes in the chain->hlist point to pages
457aef94 801 * that are write protected and have the exact same
2c653d0e
AA
802 * content.
803 */
804 stable_node_chain_add_dup(dup, chain);
805 }
806 return chain;
807}
808
21fbd591 809static inline void free_stable_node_chain(struct ksm_stable_node *chain,
2c653d0e
AA
810 struct rb_root *root)
811{
812 rb_erase(&chain->node, root);
813 free_stable_node(chain);
814 ksm_stable_node_chains--;
815}
816
21fbd591 817static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
4035c07a 818{
21fbd591 819 struct ksm_rmap_item *rmap_item;
4035c07a 820
2c653d0e
AA
821 /* check it's not STABLE_NODE_CHAIN or negative */
822 BUG_ON(stable_node->rmap_hlist_len < 0);
823
b67bfe0d 824 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
739100c8 825 if (rmap_item->hlist.next) {
4035c07a 826 ksm_pages_sharing--;
739100c8
SR
827 trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);
828 } else {
4035c07a 829 ksm_pages_shared--;
739100c8 830 }
76093853 831
832 rmap_item->mm->ksm_merging_pages--;
833
2c653d0e
AA
834 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
835 stable_node->rmap_hlist_len--;
9e60109f 836 put_anon_vma(rmap_item->anon_vma);
4035c07a
HD
837 rmap_item->address &= PAGE_MASK;
838 cond_resched();
839 }
840
2c653d0e
AA
841 /*
842 * We need the second aligned pointer of the migrate_nodes
843 * list_head to stay clear from the rb_parent_color union
844 * (aligned and different than any node) and also different
845 * from &migrate_nodes. This will verify that future list.h changes
815f0ddb 846 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
2c653d0e 847 */
2c653d0e
AA
848 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
849 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
2c653d0e 850
739100c8 851 trace_ksm_remove_ksm_page(stable_node->kpfn);
4146d2d6
HD
852 if (stable_node->head == &migrate_nodes)
853 list_del(&stable_node->list);
854 else
2c653d0e 855 stable_node_dup_del(stable_node);
4035c07a
HD
856 free_stable_node(stable_node);
857}
858
85b67b01
DH
859enum ksm_get_folio_flags {
860 KSM_GET_FOLIO_NOLOCK,
861 KSM_GET_FOLIO_LOCK,
862 KSM_GET_FOLIO_TRYLOCK
2cee57d1
YS
863};
864
4035c07a 865/*
b91f9472 866 * ksm_get_folio: checks if the page indicated by the stable node
4035c07a
HD
867 * is still its ksm page, despite having held no reference to it.
868 * In which case we can trust the content of the page, and it
869 * returns the gotten page; but if the page has now been zapped,
870 * remove the stale node from the stable tree and return NULL.
c8d6553b 871 * But beware, the stable node's page might be being migrated.
4035c07a
HD
872 *
873 * You would expect the stable_node to hold a reference to the ksm page.
874 * But if it increments the page's count, swapping out has to wait for
875 * ksmd to come around again before it can free the page, which may take
876 * seconds or even minutes: much too unresponsive. So instead we use a
877 * "keyhole reference": access to the ksm page from the stable node peeps
878 * out through its keyhole to see if that page still holds the right key,
879 * pointing back to this stable node. This relies on freeing a PageAnon
880 * page to reset its page->mapping to NULL, and relies on no other use of
881 * a page to put something that might look like our key in page->mapping.
4035c07a
HD
882 * is on its way to being freed; but it is an anomaly to bear in mind.
883 */
b91f9472 884static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
85b67b01 885 enum ksm_get_folio_flags flags)
4035c07a 886{
b91f9472 887 struct folio *folio;
4035c07a 888 void *expected_mapping;
c8d6553b 889 unsigned long kpfn;
4035c07a 890
bda807d4
MK
891 expected_mapping = (void *)((unsigned long)stable_node |
892 PAGE_MAPPING_KSM);
c8d6553b 893again:
08df4774 894 kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
b91f9472
AS
895 folio = pfn_folio(kpfn);
896 if (READ_ONCE(folio->mapping) != expected_mapping)
4035c07a 897 goto stale;
c8d6553b
HD
898
899 /*
900 * We cannot do anything with the page while its refcount is 0.
901 * Usually 0 means free, or tail of a higher-order page: in which
902 * case this node is no longer referenced, and should be freed;
1c4c3b99 903 * however, it might mean that the page is under page_ref_freeze().
c8d6553b 904 * The __remove_mapping() case is easy, again the node is now stale;
52d1e606 905 * the same is in reuse_ksm_page() case; but if page is swapcache
9800562f 906 * in folio_migrate_mapping(), it might still be our page,
52d1e606 907 * in which case it's essential to keep the node.
c8d6553b 908 */
b91f9472 909 while (!folio_try_get(folio)) {
c8d6553b 910 /*
32f51ead
MWO
911 * Another check for folio->mapping != expected_mapping
912 * would work here too. We have chosen to test the
913 * swapcache flag to optimize the common case, when the
914 * folio is or is about to be freed: the swapcache flag
915 * is cleared (under spin_lock_irq) in the ref_freeze
916 * section of __remove_mapping(); but anon folio->mapping
917 * is reset to NULL later, in free_pages_prepare().
c8d6553b 918 */
b91f9472 919 if (!folio_test_swapcache(folio))
c8d6553b
HD
920 goto stale;
921 cpu_relax();
922 }
923
b91f9472
AS
924 if (READ_ONCE(folio->mapping) != expected_mapping) {
925 folio_put(folio);
4035c07a
HD
926 goto stale;
927 }
c8d6553b 928
85b67b01 929 if (flags == KSM_GET_FOLIO_TRYLOCK) {
b91f9472
AS
930 if (!folio_trylock(folio)) {
931 folio_put(folio);
2cee57d1
YS
932 return ERR_PTR(-EBUSY);
933 }
85b67b01 934 } else if (flags == KSM_GET_FOLIO_LOCK)
b91f9472 935 folio_lock(folio);
2cee57d1 936
85b67b01 937 if (flags != KSM_GET_FOLIO_NOLOCK) {
b91f9472
AS
938 if (READ_ONCE(folio->mapping) != expected_mapping) {
939 folio_unlock(folio);
940 folio_put(folio);
8aafa6a4
HD
941 goto stale;
942 }
943 }
b91f9472 944 return folio;
c8d6553b 945
4035c07a 946stale:
c8d6553b 947 /*
32f51ead 948 * We come here from above when folio->mapping or the swapcache flag
c8d6553b 949 * suggests that the node is stale; but it might be under migration.
19138349 950 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
c8d6553b
HD
951 * before checking whether node->kpfn has been changed.
952 */
953 smp_rmb();
4db0c3c2 954 if (READ_ONCE(stable_node->kpfn) != kpfn)
c8d6553b 955 goto again;
4035c07a
HD
956 remove_node_from_stable_tree(stable_node);
957 return NULL;
958}
959
31dbd01f
IE
960/*
961 * Removing rmap_item from stable or unstable tree.
962 * This function will clean the information from the stable/unstable tree.
963 */
21fbd591 964static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
31dbd01f 965{
7b6ba2c7 966 if (rmap_item->address & STABLE_FLAG) {
21fbd591 967 struct ksm_stable_node *stable_node;
f39b6e2d 968 struct folio *folio;
31dbd01f 969
7b6ba2c7 970 stable_node = rmap_item->head;
85b67b01 971 folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
f39b6e2d 972 if (!folio)
4035c07a 973 goto out;
5ad64688 974
7b6ba2c7 975 hlist_del(&rmap_item->hlist);
f39b6e2d
AS
976 folio_unlock(folio);
977 folio_put(folio);
08beca44 978
98666f8a 979 if (!hlist_empty(&stable_node->hlist))
4035c07a
HD
980 ksm_pages_sharing--;
981 else
7b6ba2c7 982 ksm_pages_shared--;
76093853 983
984 rmap_item->mm->ksm_merging_pages--;
985
2c653d0e
AA
986 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
987 stable_node->rmap_hlist_len--;
31dbd01f 988
9e60109f 989 put_anon_vma(rmap_item->anon_vma);
c89a384e 990 rmap_item->head = NULL;
93d17715 991 rmap_item->address &= PAGE_MASK;
31dbd01f 992
7b6ba2c7 993 } else if (rmap_item->address & UNSTABLE_FLAG) {
31dbd01f
IE
994 unsigned char age;
995 /*
9ba69294 996 * Usually ksmd can and must skip the rb_erase, because
31dbd01f 997 * root_unstable_tree was already reset to RB_ROOT.
9ba69294
HD
998 * But be careful when an mm is exiting: do the rb_erase
999 * if this rmap_item was inserted by this scan, rather
1000 * than left over from before.
31dbd01f
IE
1001 */
1002 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
cd551f97 1003 BUG_ON(age > 1);
31dbd01f 1004 if (!age)
90bd6fd3 1005 rb_erase(&rmap_item->node,
ef53d16c 1006 root_unstable_tree + NUMA(rmap_item->nid));
473b0ce4 1007 ksm_pages_unshared--;
93d17715 1008 rmap_item->address &= PAGE_MASK;
31dbd01f 1009 }
4035c07a 1010out:
31dbd01f
IE
1011 cond_resched(); /* we're called from many long loops */
1012}
1013
21fbd591 1014static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
31dbd01f 1015{
6514d511 1016 while (*rmap_list) {
21fbd591 1017 struct ksm_rmap_item *rmap_item = *rmap_list;
6514d511 1018 *rmap_list = rmap_item->rmap_list;
31dbd01f 1019 remove_rmap_item_from_tree(rmap_item);
31dbd01f
IE
1020 free_rmap_item(rmap_item);
1021 }
1022}
1023
1024/*
e850dcf5 1025 * Though it's very tempting to unmerge rmap_items from stable tree rather
31dbd01f
IE
1026 * than check every pte of a given vma, the locking doesn't quite work for
1027 * that - an rmap_item is assigned to the stable tree after inserting ksm
c1e8d7c6 1028 * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
31dbd01f
IE
1029 * rmap_items from parent to child at fork time (so as not to waste time
1030 * if exit comes before the next scan reaches it).
81464e30
HD
1031 *
1032 * Similarly, although we'd like to remove rmap_items (so updating counts
1033 * and freeing memory) when unmerging an area, it's easier to leave that
1034 * to the next pass of ksmd - consider, for example, how ksmd might be
1035 * in cmp_and_merge_page on one of the rmap_items we would be removing.
31dbd01f 1036 */
d952b791 1037static int unmerge_ksm_pages(struct vm_area_struct *vma,
49b06385 1038 unsigned long start, unsigned long end, bool lock_vma)
31dbd01f
IE
1039{
1040 unsigned long addr;
d952b791 1041 int err = 0;
31dbd01f 1042
d952b791 1043 for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
9ba69294
HD
1044 if (ksm_test_exit(vma->vm_mm))
1045 break;
d952b791
HD
1046 if (signal_pending(current))
1047 err = -ERESTARTSYS;
1048 else
49b06385 1049 err = break_ksm(vma, addr, lock_vma);
d952b791
HD
1050 }
1051 return err;
31dbd01f
IE
1052}
1053
68158bfa
MWO
1054static inline
1055struct ksm_stable_node *folio_stable_node(const struct folio *folio)
19138349
MWO
1056{
1057 return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
1058}
1059
21fbd591 1060static inline struct ksm_stable_node *page_stable_node(struct page *page)
88484826 1061{
19138349 1062 return folio_stable_node(page_folio(page));
88484826
MR
1063}
1064
b8b0ff24
AS
1065static inline void folio_set_stable_node(struct folio *folio,
1066 struct ksm_stable_node *stable_node)
88484826 1067{
452e862f
AS
1068 VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio);
1069 folio->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
88484826
MR
1070}
1071
2ffd8679
HD
1072#ifdef CONFIG_SYSFS
1073/*
1074 * Only called through the sysfs control interface:
1075 */
21fbd591 1076static int remove_stable_node(struct ksm_stable_node *stable_node)
cbf86cfe 1077{
9d5cc140 1078 struct folio *folio;
cbf86cfe
HD
1079 int err;
1080
85b67b01 1081 folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
9d5cc140 1082 if (!folio) {
cbf86cfe 1083 /*
9d5cc140 1084 * ksm_get_folio did remove_node_from_stable_tree itself.
cbf86cfe
HD
1085 */
1086 return 0;
1087 }
1088
9a63236f
AR
1089 /*
1090 * Page could be still mapped if this races with __mmput() running in
1091 * between ksm_exit() and exit_mmap(). Just refuse to let
1092 * merge_across_nodes/max_page_sharing be switched.
1093 */
1094 err = -EBUSY;
9d5cc140 1095 if (!folio_mapped(folio)) {
cbf86cfe 1096 /*
9d5cc140
AS
1097 * The stable node did not yet appear stale to ksm_get_folio(),
1098 * since that allows for an unmapped ksm folio to be recognized
8fdb3dbf 1099 * right up until it is freed; but the node is safe to remove.
9d5cc140
AS
1100 * This folio might be in an LRU cache waiting to be freed,
1101 * or it might be in the swapcache (perhaps under writeback),
cbf86cfe
HD
1102 * or it might have been removed from swapcache a moment ago.
1103 */
9d5cc140 1104 folio_set_stable_node(folio, NULL);
cbf86cfe
HD
1105 remove_node_from_stable_tree(stable_node);
1106 err = 0;
1107 }
1108
9d5cc140
AS
1109 folio_unlock(folio);
1110 folio_put(folio);
cbf86cfe
HD
1111 return err;
1112}
1113
21fbd591 1114static int remove_stable_node_chain(struct ksm_stable_node *stable_node,
2c653d0e
AA
1115 struct rb_root *root)
1116{
21fbd591 1117 struct ksm_stable_node *dup;
2c653d0e
AA
1118 struct hlist_node *hlist_safe;
1119
1120 if (!is_stable_node_chain(stable_node)) {
1121 VM_BUG_ON(is_stable_node_dup(stable_node));
1122 if (remove_stable_node(stable_node))
1123 return true;
1124 else
1125 return false;
1126 }
1127
1128 hlist_for_each_entry_safe(dup, hlist_safe,
1129 &stable_node->hlist, hlist_dup) {
1130 VM_BUG_ON(!is_stable_node_dup(dup));
1131 if (remove_stable_node(dup))
1132 return true;
1133 }
1134 BUG_ON(!hlist_empty(&stable_node->hlist));
1135 free_stable_node_chain(stable_node, root);
1136 return false;
1137}
1138
cbf86cfe
HD
1139static int remove_all_stable_nodes(void)
1140{
21fbd591 1141 struct ksm_stable_node *stable_node, *next;
cbf86cfe
HD
1142 int nid;
1143 int err = 0;
1144
ef53d16c 1145 for (nid = 0; nid < ksm_nr_node_ids; nid++) {
cbf86cfe
HD
1146 while (root_stable_tree[nid].rb_node) {
1147 stable_node = rb_entry(root_stable_tree[nid].rb_node,
21fbd591 1148 struct ksm_stable_node, node);
2c653d0e
AA
1149 if (remove_stable_node_chain(stable_node,
1150 root_stable_tree + nid)) {
cbf86cfe
HD
1151 err = -EBUSY;
1152 break; /* proceed to next nid */
1153 }
1154 cond_resched();
1155 }
1156 }
03640418 1157 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
4146d2d6
HD
1158 if (remove_stable_node(stable_node))
1159 err = -EBUSY;
1160 cond_resched();
1161 }
cbf86cfe
HD
1162 return err;
1163}
1164
d952b791 1165static int unmerge_and_remove_all_rmap_items(void)
31dbd01f 1166{
21fbd591 1167 struct ksm_mm_slot *mm_slot;
58730ab6 1168 struct mm_slot *slot;
31dbd01f
IE
1169 struct mm_struct *mm;
1170 struct vm_area_struct *vma;
d952b791
HD
1171 int err = 0;
1172
1173 spin_lock(&ksm_mmlist_lock);
58730ab6
QZ
1174 slot = list_entry(ksm_mm_head.slot.mm_node.next,
1175 struct mm_slot, mm_node);
1176 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
d952b791 1177 spin_unlock(&ksm_mmlist_lock);
31dbd01f 1178
a5f18ba0
MWO
1179 for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
1180 mm_slot = ksm_scan.mm_slot) {
58730ab6 1181 VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);
a5f18ba0 1182
58730ab6 1183 mm = mm_slot->slot.mm;
d8ed45c5 1184 mmap_read_lock(mm);
6db504ce
LH
1185
1186 /*
1187 * Exit right away if mm is exiting to avoid lockdep issue in
1188 * the maple tree
1189 */
1190 if (ksm_test_exit(mm))
1191 goto mm_exiting;
1192
a5f18ba0 1193 for_each_vma(vmi, vma) {
31dbd01f
IE
1194 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
1195 continue;
d952b791 1196 err = unmerge_ksm_pages(vma,
49b06385 1197 vma->vm_start, vma->vm_end, false);
9ba69294
HD
1198 if (err)
1199 goto error;
31dbd01f 1200 }
9ba69294 1201
6db504ce 1202mm_exiting:
420be4ed 1203 remove_trailing_rmap_items(&mm_slot->rmap_list);
d8ed45c5 1204 mmap_read_unlock(mm);
d952b791
HD
1205
1206 spin_lock(&ksm_mmlist_lock);
58730ab6
QZ
1207 slot = list_entry(mm_slot->slot.mm_node.next,
1208 struct mm_slot, mm_node);
1209 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
9ba69294 1210 if (ksm_test_exit(mm)) {
58730ab6
QZ
1211 hash_del(&mm_slot->slot.hash);
1212 list_del(&mm_slot->slot.mm_node);
9ba69294
HD
1213 spin_unlock(&ksm_mmlist_lock);
1214
58730ab6 1215 mm_slot_free(mm_slot_cache, mm_slot);
9ba69294 1216 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
d7597f59 1217 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
9ba69294 1218 mmdrop(mm);
7496fea9 1219 } else
9ba69294 1220 spin_unlock(&ksm_mmlist_lock);
31dbd01f
IE
1221 }
1222
cbf86cfe
HD
1223 /* Clean up stable nodes, but don't worry if some are still busy */
1224 remove_all_stable_nodes();
d952b791 1225 ksm_scan.seqnr = 0;
9ba69294
HD
1226 return 0;
1227
1228error:
d8ed45c5 1229 mmap_read_unlock(mm);
31dbd01f 1230 spin_lock(&ksm_mmlist_lock);
d952b791 1231 ksm_scan.mm_slot = &ksm_mm_head;
31dbd01f 1232 spin_unlock(&ksm_mmlist_lock);
d952b791 1233 return err;
31dbd01f 1234}
2ffd8679 1235#endif /* CONFIG_SYSFS */
31dbd01f 1236
31dbd01f
IE
1237static u32 calc_checksum(struct page *page)
1238{
1239 u32 checksum;
b3351989 1240 void *addr = kmap_local_page(page);
59e1a2f4 1241 checksum = xxhash(addr, PAGE_SIZE, 0);
b3351989 1242 kunmap_local(addr);
31dbd01f
IE
1243 return checksum;
1244}
1245
40d707f3 1246static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
31dbd01f
IE
1247 pte_t *orig_pte)
1248{
1249 struct mm_struct *mm = vma->vm_mm;
40d707f3 1250 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);
31dbd01f
IE
1251 int swapped;
1252 int err = -EFAULT;
ac46d4f3 1253 struct mmu_notifier_range range;
6c287605 1254 bool anon_exclusive;
c33c7948 1255 pte_t entry;
31dbd01f 1256
40d707f3
AS
1257 if (WARN_ON_ONCE(folio_test_large(folio)))
1258 return err;
1259
713da0b3 1260 pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma);
36eaff33 1261 if (pvmw.address == -EFAULT)
31dbd01f
IE
1262 goto out;
1263
7d4a8be0 1264 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
ac46d4f3
JG
1265 pvmw.address + PAGE_SIZE);
1266 mmu_notifier_invalidate_range_start(&range);
6bdb913f 1267
36eaff33 1268 if (!page_vma_mapped_walk(&pvmw))
6bdb913f 1269 goto out_mn;
36eaff33
KS
1270 if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
1271 goto out_unlock;
31dbd01f 1272
c33c7948 1273 entry = ptep_get(pvmw.pte);
789cfc66
DH
1274 /*
1275 * Handle PFN swap PTEs, such as device-exclusive ones, that actually
1276 * map pages: give up just like the next folio_walk would.
1277 */
1278 if (unlikely(!pte_present(entry)))
1279 goto out_unlock;
1280
1281 anon_exclusive = PageAnonExclusive(&folio->page);
c33c7948 1282 if (pte_write(entry) || pte_dirty(entry) ||
6c287605 1283 anon_exclusive || mm_tlb_flush_pending(mm)) {
40d707f3
AS
1284 swapped = folio_test_swapcache(folio);
1285 flush_cache_page(vma, pvmw.address, folio_pfn(folio));
31dbd01f 1286 /*
25985edc 1287 * Ok this is tricky, when get_user_pages_fast() run it doesn't
31dbd01f 1288 * take any lock, therefore the check that we are going to make
f0953a1b 1289 * with the pagecount against the mapcount is racy and
31dbd01f
IE
1290 * O_DIRECT can happen right after the check.
1291 * So we clear the pte and flush the tlb before the check
1292 * this assure us that no O_DIRECT can happen after the check
1293 * or in the middle of the check.
0f10851e
JG
1294 *
1295 * No need to notify as we are downgrading page table to read
1296 * only not changing it to point to a new page.
1297 *
ee65728e 1298 * See Documentation/mm/mmu_notifier.rst
31dbd01f 1299 */
0f10851e 1300 entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
31dbd01f
IE
1301 /*
1302 * Check that no O_DIRECT or similar I/O is in progress on the
1303 * page
1304 */
40d707f3 1305 if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) {
36eaff33 1306 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
31dbd01f
IE
1307 goto out_unlock;
1308 }
6c287605 1309
e3b4b137
DH
1310 /* See folio_try_share_anon_rmap_pte(): clear PTE first. */
1311 if (anon_exclusive &&
40d707f3 1312 folio_try_share_anon_rmap_pte(folio, &folio->page)) {
6c287605
DH
1313 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1314 goto out_unlock;
1315 }
1316
4e31635c 1317 if (pte_dirty(entry))
40d707f3 1318 folio_mark_dirty(folio);
6a56ccbc
DH
1319 entry = pte_mkclean(entry);
1320
1321 if (pte_write(entry))
1322 entry = pte_wrprotect(entry);
595cd8f2 1323
f7842747 1324 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
31dbd01f 1325 }
c33c7948 1326 *orig_pte = entry;
31dbd01f
IE
1327 err = 0;
1328
1329out_unlock:
36eaff33 1330 page_vma_mapped_walk_done(&pvmw);
6bdb913f 1331out_mn:
ac46d4f3 1332 mmu_notifier_invalidate_range_end(&range);
31dbd01f
IE
1333out:
1334 return err;
1335}
1336
1337/**
1338 * replace_page - replace page in vma by new ksm page
8dd3557a
HD
1339 * @vma: vma that holds the pte pointing to page
1340 * @page: the page we are replacing by kpage
1341 * @kpage: the ksm page we replace page by
31dbd01f
IE
1342 * @orig_pte: the original value of the pte
1343 *
1344 * Returns 0 on success, -EFAULT on failure.
1345 */
8dd3557a
HD
1346static int replace_page(struct vm_area_struct *vma, struct page *page,
1347 struct page *kpage, pte_t orig_pte)
31dbd01f 1348{
97729534 1349 struct folio *kfolio = page_folio(kpage);
31dbd01f 1350 struct mm_struct *mm = vma->vm_mm;
713da0b3 1351 struct folio *folio = page_folio(page);
31dbd01f 1352 pmd_t *pmd;
50722804 1353 pmd_t pmde;
31dbd01f 1354 pte_t *ptep;
e86c59b1 1355 pte_t newpte;
31dbd01f
IE
1356 spinlock_t *ptl;
1357 unsigned long addr;
31dbd01f 1358 int err = -EFAULT;
ac46d4f3 1359 struct mmu_notifier_range range;
31dbd01f 1360
713da0b3 1361 addr = page_address_in_vma(folio, page, vma);
31dbd01f
IE
1362 if (addr == -EFAULT)
1363 goto out;
1364
6219049a
BL
1365 pmd = mm_find_pmd(mm, addr);
1366 if (!pmd)
31dbd01f 1367 goto out;
50722804
ZK
1368 /*
1369 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
1370 * without holding anon_vma lock for write. So when looking for a
1371 * genuine pmde (in which to find pte), test present and !THP together.
1372 */
26e1a0c3 1373 pmde = pmdp_get_lockless(pmd);
50722804
ZK
1374 if (!pmd_present(pmde) || pmd_trans_huge(pmde))
1375 goto out;
31dbd01f 1376
7d4a8be0 1377 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
6f4f13e8 1378 addr + PAGE_SIZE);
ac46d4f3 1379 mmu_notifier_invalidate_range_start(&range);
6bdb913f 1380
31dbd01f 1381 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
04dee9e8
HD
1382 if (!ptep)
1383 goto out_mn;
c33c7948 1384 if (!pte_same(ptep_get(ptep), orig_pte)) {
31dbd01f 1385 pte_unmap_unlock(ptep, ptl);
6bdb913f 1386 goto out_mn;
31dbd01f 1387 }
6c287605 1388 VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
97729534
DH
1389 VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
1390 kfolio);
31dbd01f 1391
e86c59b1
CI
1392 /*
1393 * No need to check ksm_use_zero_pages here: we can only have a
457aef94 1394 * zero_page here if ksm_use_zero_pages was enabled already.
e86c59b1
CI
1395 */
1396 if (!is_zero_pfn(page_to_pfn(kpage))) {
97729534
DH
1397 folio_get(kfolio);
1398 folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
e86c59b1
CI
1399 newpte = mk_pte(kpage, vma->vm_page_prot);
1400 } else {
79271476 1401 /*
1402 * Use pte_mkdirty to mark the zero page mapped by KSM, and then
1403 * we can easily track all KSM-placed zero pages by checking if
1404 * the dirty bit in zero page's PTE is set.
1405 */
1406 newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
c2dc78b8 1407 ksm_map_zero_page(mm);
a38c015f
CI
1408 /*
1409 * We're replacing an anonymous page with a zero page, which is
1410 * not anonymous. We need to do proper accounting otherwise we
1411 * will get wrong values in /proc, and a BUG message in dmesg
1412 * when tearing down the mm.
1413 */
1414 dec_mm_counter(mm, MM_ANONPAGES);
e86c59b1 1415 }
31dbd01f 1416
c33c7948 1417 flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep)));
0f10851e
JG
1418 /*
1419 * No need to notify as we are replacing a read only page with another
1420 * read only page with the same content.
1421 *
ee65728e 1422 * See Documentation/mm/mmu_notifier.rst
0f10851e
JG
1423 */
1424 ptep_clear_flush(vma, addr, ptep);
f7842747 1425 set_pte_at(mm, addr, ptep, newpte);
31dbd01f 1426
18e8612e 1427 folio_remove_rmap_pte(folio, page, vma);
b4e6f66e
MWO
1428 if (!folio_mapped(folio))
1429 folio_free_swap(folio);
1430 folio_put(folio);
31dbd01f
IE
1431
1432 pte_unmap_unlock(ptep, ptl);
1433 err = 0;
6bdb913f 1434out_mn:
ac46d4f3 1435 mmu_notifier_invalidate_range_end(&range);
31dbd01f
IE
1436out:
1437 return err;
1438}
1439
1440/*
1441 * try_to_merge_one_page - take two pages and merge them into one
8dd3557a
HD
1442 * @vma: the vma that holds the pte pointing to page
1443 * @page: the PageAnon page that we want to replace with kpage
b9a25635 1444 * @kpage: the KSM page that we want to map instead of page,
80e14822 1445 * or NULL the first time when we want to use page as kpage.
31dbd01f
IE
1446 *
1447 * This function returns 0 if the pages were merged, -EFAULT otherwise.
1448 */
1449static int try_to_merge_one_page(struct vm_area_struct *vma,
8dd3557a 1450 struct page *page, struct page *kpage)
31dbd01f 1451{
9c0a1b99 1452 struct folio *folio = page_folio(page);
31dbd01f
IE
1453 pte_t orig_pte = __pte(0);
1454 int err = -EFAULT;
1455
db114b83
HD
1456 if (page == kpage) /* ksm page forked */
1457 return 0;
1458
9c0a1b99 1459 if (!folio_test_anon(folio))
31dbd01f
IE
1460 goto out;
1461
31dbd01f 1462 /*
32f51ead 1463 * We need the folio lock to read a stable swapcache flag in
9c0a1b99
MWO
1464 * write_protect_page(). We trylock because we don't want to wait
1465 * here - we prefer to continue scanning and merging different
1466 * pages, then come back to this page when it is unlocked.
31dbd01f 1467 */
9c0a1b99 1468 if (!folio_trylock(folio))
31e855ea 1469 goto out;
f765f540 1470
9c0a1b99 1471 if (folio_test_large(folio)) {
a7306c34 1472 if (split_huge_page(page))
f765f540 1473 goto out_unlock;
9c0a1b99 1474 folio = page_folio(page);
f765f540
KS
1475 }
1476
31dbd01f
IE
1477 /*
1478 * If this anonymous page is mapped only here, its pte may need
1479 * to be write-protected. If it's mapped elsewhere, all of its
1480 * ptes are necessarily already write-protected. But in either
1481 * case, we need to lock and check page_count is not raised.
1482 */
9c0a1b99 1483 if (write_protect_page(vma, folio, &orig_pte) == 0) {
80e14822
HD
1484 if (!kpage) {
1485 /*
9c0a1b99
MWO
1486 * While we hold folio lock, upgrade folio from
1487 * anon to a NULL stable_node with the KSM flag set:
80e14822
HD
1488 * stable_tree_insert() will update stable_node.
1489 */
9c0a1b99
MWO
1490 folio_set_stable_node(folio, NULL);
1491 folio_mark_accessed(folio);
337ed7eb 1492 /*
9c0a1b99 1493 * Page reclaim just frees a clean folio with no dirty
337ed7eb
MK
1494 * ptes: make sure that the ksm page would be swapped.
1495 */
9c0a1b99
MWO
1496 if (!folio_test_dirty(folio))
1497 folio_mark_dirty(folio);
80e14822
HD
1498 err = 0;
1499 } else if (pages_identical(page, kpage))
1500 err = replace_page(vma, page, kpage, orig_pte);
1501 }
31dbd01f 1502
f765f540 1503out_unlock:
9c0a1b99 1504 folio_unlock(folio);
31dbd01f
IE
1505out:
1506 return err;
1507}
1508
ac90c56b
CZ
1509/*
1510 * This function returns 0 if the pages were merged or if they are
1511 * no longer merging candidates (e.g., VMA stale), -EFAULT otherwise.
1512 */
1513static int try_to_merge_with_zero_page(struct ksm_rmap_item *rmap_item,
1514 struct page *page)
1515{
1516 struct mm_struct *mm = rmap_item->mm;
1517 int err = -EFAULT;
1518
1519 /*
1520 * Same checksum as an empty page. We attempt to merge it with the
1521 * appropriate zero page if the user enabled this via sysfs.
1522 */
1523 if (ksm_use_zero_pages && (rmap_item->oldchecksum == zero_checksum)) {
1524 struct vm_area_struct *vma;
1525
1526 mmap_read_lock(mm);
1527 vma = find_mergeable_vma(mm, rmap_item->address);
1528 if (vma) {
1529 err = try_to_merge_one_page(vma, page,
1530 ZERO_PAGE(rmap_item->address));
1531 trace_ksm_merge_one_page(
1532 page_to_pfn(ZERO_PAGE(rmap_item->address)),
1533 rmap_item, mm, err);
1534 } else {
1535 /*
1536 * If the vma is out of date, we do not need to
1537 * continue.
1538 */
1539 err = 0;
1540 }
1541 mmap_read_unlock(mm);
1542 }
1543
1544 return err;
1545}
1546
81464e30
HD
1547/*
1548 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
1549 * but no new kernel page is allocated: kpage must already be a ksm page.
8dd3557a
HD
1550 *
1551 * This function returns 0 if the pages were merged, -EFAULT otherwise.
81464e30 1552 */
21fbd591 1553static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item,
8dd3557a 1554 struct page *page, struct page *kpage)
81464e30 1555{
8dd3557a 1556 struct mm_struct *mm = rmap_item->mm;
81464e30
HD
1557 struct vm_area_struct *vma;
1558 int err = -EFAULT;
1559
d8ed45c5 1560 mmap_read_lock(mm);
85c6e8dd
AA
1561 vma = find_mergeable_vma(mm, rmap_item->address);
1562 if (!vma)
81464e30
HD
1563 goto out;
1564
8dd3557a 1565 err = try_to_merge_one_page(vma, page, kpage);
db114b83
HD
1566 if (err)
1567 goto out;
1568
bc56620b
HD
1569 /* Unstable nid is in union with stable anon_vma: remove first */
1570 remove_rmap_item_from_tree(rmap_item);
1571
c1e8d7c6 1572 /* Must get reference to anon_vma while still holding mmap_lock */
9e60109f
PZ
1573 rmap_item->anon_vma = vma->anon_vma;
1574 get_anon_vma(vma->anon_vma);
81464e30 1575out:
d8ed45c5 1576 mmap_read_unlock(mm);
739100c8
SR
1577 trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page),
1578 rmap_item, mm, err);
81464e30
HD
1579 return err;
1580}
1581
31dbd01f
IE
1582/*
1583 * try_to_merge_two_pages - take two identical pages and prepare them
1584 * to be merged into one page.
1585 *
8dd3557a
HD
1586 * This function returns the kpage if we successfully merged two identical
1587 * pages into one ksm page, NULL otherwise.
31dbd01f 1588 *
80e14822 1589 * Note that this function upgrades page to ksm page: if one of the pages
31dbd01f
IE
1590 * is already a ksm page, try_to_merge_with_ksm_page should be used.
1591 */
98c3ca00 1592static struct folio *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
8dd3557a 1593 struct page *page,
21fbd591 1594 struct ksm_rmap_item *tree_rmap_item,
8dd3557a 1595 struct page *tree_page)
31dbd01f 1596{
80e14822 1597 int err;
31dbd01f 1598
80e14822 1599 err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
31dbd01f 1600 if (!err) {
8dd3557a 1601 err = try_to_merge_with_ksm_page(tree_rmap_item,
80e14822 1602 tree_page, page);
31dbd01f 1603 /*
81464e30
HD
1604 * If that fails, we have a ksm page with only one pte
1605 * pointing to it: so break it.
31dbd01f 1606 */
4035c07a 1607 if (err)
8dd3557a 1608 break_cow(rmap_item);
31dbd01f 1609 }
98c3ca00 1610 return err ? NULL : page_folio(page);
31dbd01f
IE
1611}
1612
2c653d0e 1613static __always_inline
21fbd591 1614bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset)
2c653d0e
AA
1615{
1616 VM_BUG_ON(stable_node->rmap_hlist_len < 0);
1617 /*
1618 * Check that at least one mapping still exists, otherwise
1619 * there's no much point to merge and share with this
1620 * stable_node, as the underlying tree_page of the other
1621 * sharer is going to be freed soon.
1622 */
1623 return stable_node->rmap_hlist_len &&
1624 stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
1625}
1626
1627static __always_inline
21fbd591 1628bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
2c653d0e
AA
1629{
1630 return __is_page_sharing_candidate(stable_node, 0);
1631}
1632
79899cce
AS
1633static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
1634 struct ksm_stable_node **_stable_node,
1635 struct rb_root *root,
1636 bool prune_stale_stable_nodes)
2c653d0e 1637{
21fbd591 1638 struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
2c653d0e 1639 struct hlist_node *hlist_safe;
6f528de2 1640 struct folio *folio, *tree_folio = NULL;
2c653d0e
AA
1641 int found_rmap_hlist_len;
1642
1643 if (!prune_stale_stable_nodes ||
1644 time_before(jiffies, stable_node->chain_prune_time +
1645 msecs_to_jiffies(
1646 ksm_stable_node_chains_prune_millisecs)))
1647 prune_stale_stable_nodes = false;
1648 else
1649 stable_node->chain_prune_time = jiffies;
1650
1651 hlist_for_each_entry_safe(dup, hlist_safe,
1652 &stable_node->hlist, hlist_dup) {
1653 cond_resched();
1654 /*
1655 * We must walk all stable_node_dup to prune the stale
1656 * stable nodes during lookup.
1657 *
6f528de2 1658 * ksm_get_folio can drop the nodes from the
2c653d0e
AA
1659 * stable_node->hlist if they point to freed pages
1660 * (that's why we do a _safe walk). The "dup"
1661 * stable_node parameter itself will be freed from
1662 * under us if it returns NULL.
1663 */
85b67b01 1664 folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK);
6f528de2 1665 if (!folio)
2c653d0e 1666 continue;
a0b856b6
CZ
1667 /* Pick the best candidate if possible. */
1668 if (!found || (is_page_sharing_candidate(dup) &&
1669 (!is_page_sharing_candidate(found) ||
1670 dup->rmap_hlist_len > found_rmap_hlist_len))) {
1671 if (found)
1672 folio_put(tree_folio);
1673 found = dup;
1674 found_rmap_hlist_len = found->rmap_hlist_len;
1675 tree_folio = folio;
1676 /* skip put_page for found candidate */
1677 if (!prune_stale_stable_nodes &&
1678 is_page_sharing_candidate(found))
1679 break;
1680 continue;
2c653d0e 1681 }
6f528de2 1682 folio_put(folio);
2c653d0e
AA
1683 }
1684
80b18dfa 1685 if (found) {
a0b856b6 1686 if (hlist_is_singular_node(&found->hlist_dup, &stable_node->hlist)) {
2c653d0e
AA
1687 /*
1688 * If there's not just one entry it would
1689 * corrupt memory, better BUG_ON. In KSM
1690 * context with no lock held it's not even
1691 * fatal.
1692 */
1693 BUG_ON(stable_node->hlist.first->next);
1694
1695 /*
1696 * There's just one entry and it is below the
1697 * deduplication limit so drop the chain.
1698 */
1699 rb_replace_node(&stable_node->node, &found->node,
1700 root);
1701 free_stable_node(stable_node);
1702 ksm_stable_node_chains--;
1703 ksm_stable_node_dups--;
b4fecc67 1704 /*
0ba1d0f7
AA
1705 * NOTE: the caller depends on the stable_node
1706 * to be equal to stable_node_dup if the chain
1707 * was collapsed.
b4fecc67 1708 */
0ba1d0f7
AA
1709 *_stable_node = found;
1710 /*
f0953a1b 1711 * Just for robustness, as stable_node is
0ba1d0f7
AA
1712 * otherwise left as a stable pointer, the
1713 * compiler shall optimize it away at build
1714 * time.
1715 */
1716 stable_node = NULL;
80b18dfa
AA
1717 } else if (stable_node->hlist.first != &found->hlist_dup &&
1718 __is_page_sharing_candidate(found, 1)) {
2c653d0e 1719 /*
80b18dfa
AA
1720 * If the found stable_node dup can accept one
1721 * more future merge (in addition to the one
1722 * that is underway) and is not at the head of
1723 * the chain, put it there so next search will
1724 * be quicker in the !prune_stale_stable_nodes
1725 * case.
1726 *
1727 * NOTE: it would be inaccurate to use nr > 1
1728 * instead of checking the hlist.first pointer
1729 * directly, because in the
1730 * prune_stale_stable_nodes case "nr" isn't
1731 * the position of the found dup in the chain,
1732 * but the total number of dups in the chain.
2c653d0e
AA
1733 */
1734 hlist_del(&found->hlist_dup);
1735 hlist_add_head(&found->hlist_dup,
1736 &stable_node->hlist);
1737 }
a0b856b6
CZ
1738 } else {
1739 /* Its hlist must be empty if no one found. */
1740 free_stable_node_chain(stable_node, root);
2c653d0e
AA
1741 }
1742
8dc5ffcd 1743 *_stable_node_dup = found;
79899cce 1744 return tree_folio;
2c653d0e
AA
1745}
1746
8dc5ffcd 1747/*
79899cce 1748 * Like for ksm_get_folio, this function can free the *_stable_node and
8dc5ffcd
AA
1749 * *_stable_node_dup if the returned tree_page is NULL.
1750 *
1751 * It can also free and overwrite *_stable_node with the found
1752 * stable_node_dup if the chain is collapsed (in which case
1753 * *_stable_node will be equal to *_stable_node_dup like if the chain
1754 * never existed). It's up to the caller to verify tree_page is not
1755 * NULL before dereferencing *_stable_node or *_stable_node_dup.
1756 *
1757 * *_stable_node_dup is really a second output parameter of this
1758 * function and will be overwritten in all cases, the caller doesn't
1759 * need to initialize it.
1760 */
79899cce
AS
1761static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
1762 struct ksm_stable_node **_stable_node,
1763 struct rb_root *root,
1764 bool prune_stale_stable_nodes)
2c653d0e 1765{
21fbd591 1766 struct ksm_stable_node *stable_node = *_stable_node;
a0b856b6 1767
2c653d0e 1768 if (!is_stable_node_chain(stable_node)) {
a0b856b6
CZ
1769 *_stable_node_dup = stable_node;
1770 return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK);
2c653d0e 1771 }
8dc5ffcd 1772 return stable_node_dup(_stable_node_dup, _stable_node, root,
2c653d0e
AA
1773 prune_stale_stable_nodes);
1774}
1775
79899cce
AS
1776static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d,
1777 struct ksm_stable_node **s_n,
1778 struct rb_root *root)
2c653d0e 1779{
8dc5ffcd 1780 return __stable_node_chain(s_n_d, s_n, root, true);
2c653d0e
AA
1781}
1782
79899cce 1783static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d,
a0b856b6 1784 struct ksm_stable_node **s_n,
79899cce 1785 struct rb_root *root)
2c653d0e 1786{
a0b856b6 1787 return __stable_node_chain(s_n_d, s_n, root, false);
2c653d0e
AA
1788}
1789
31dbd01f 1790/*
8dd3557a 1791 * stable_tree_search - search for page inside the stable tree
31dbd01f
IE
1792 *
1793 * This function checks if there is a page inside the stable tree
1794 * with identical content to the page that we are scanning right now.
1795 *
7b6ba2c7 1796 * This function returns the stable tree node of identical content if found,
98c3ca00 1797 * -EBUSY if the stable node's page is being migrated, NULL otherwise.
31dbd01f 1798 */
98c3ca00 1799static struct folio *stable_tree_search(struct page *page)
31dbd01f 1800{
90bd6fd3 1801 int nid;
ef53d16c 1802 struct rb_root *root;
4146d2d6
HD
1803 struct rb_node **new;
1804 struct rb_node *parent;
a0b856b6 1805 struct ksm_stable_node *stable_node, *stable_node_dup;
21fbd591 1806 struct ksm_stable_node *page_node;
79899cce 1807 struct folio *folio;
31dbd01f 1808
79899cce
AS
1809 folio = page_folio(page);
1810 page_node = folio_stable_node(folio);
4146d2d6
HD
1811 if (page_node && page_node->head != &migrate_nodes) {
1812 /* ksm page forked */
79899cce 1813 folio_get(folio);
98c3ca00 1814 return folio;
08beca44
HD
1815 }
1816
79899cce 1817 nid = get_kpfn_nid(folio_pfn(folio));
ef53d16c 1818 root = root_stable_tree + nid;
4146d2d6 1819again:
ef53d16c 1820 new = &root->rb_node;
4146d2d6 1821 parent = NULL;
90bd6fd3 1822
4146d2d6 1823 while (*new) {
79899cce 1824 struct folio *tree_folio;
31dbd01f
IE
1825 int ret;
1826
08beca44 1827 cond_resched();
21fbd591 1828 stable_node = rb_entry(*new, struct ksm_stable_node, node);
79899cce 1829 tree_folio = chain_prune(&stable_node_dup, &stable_node, root);
79899cce 1830 if (!tree_folio) {
f2e5ff85
AA
1831 /*
1832 * If we walked over a stale stable_node,
79899cce 1833 * ksm_get_folio() will call rb_erase() and it
f2e5ff85
AA
1834 * may rebalance the tree from under us. So
1835 * restart the search from scratch. Returning
1836 * NULL would be safe too, but we'd generate
1837 * false negative insertions just because some
1838 * stable_node was stale.
1839 */
1840 goto again;
1841 }
31dbd01f 1842
79899cce
AS
1843 ret = memcmp_pages(page, &tree_folio->page);
1844 folio_put(tree_folio);
31dbd01f 1845
4146d2d6 1846 parent = *new;
c8d6553b 1847 if (ret < 0)
4146d2d6 1848 new = &parent->rb_left;
c8d6553b 1849 else if (ret > 0)
4146d2d6 1850 new = &parent->rb_right;
c8d6553b 1851 else {
2c653d0e
AA
1852 if (page_node) {
1853 VM_BUG_ON(page_node->head != &migrate_nodes);
1854 /*
2aa33912
DH
1855 * If the mapcount of our migrated KSM folio is
1856 * at most 1, we can merge it with another
1857 * KSM folio where we know that we have space
1858 * for one more mapping without exceeding the
1859 * ksm_max_page_sharing limit: see
1860 * chain_prune(). This way, we can avoid adding
1861 * this stable node to the chain.
2c653d0e 1862 */
2aa33912 1863 if (folio_mapcount(folio) > 1)
2c653d0e
AA
1864 goto chain_append;
1865 }
1866
a0b856b6 1867 if (!is_page_sharing_candidate(stable_node_dup)) {
2c653d0e
AA
1868 /*
1869 * If the stable_node is a chain and
1870 * we got a payload match in memcmp
1871 * but we cannot merge the scanned
1872 * page in any of the existing
1873 * stable_node dups because they're
1874 * all full, we need to wait the
1875 * scanned page to find itself a match
1876 * in the unstable tree to create a
1877 * brand new KSM page to add later to
1878 * the dups of this stable_node.
1879 */
1880 return NULL;
1881 }
1882
c8d6553b
HD
1883 /*
1884 * Lock and unlock the stable_node's page (which
1885 * might already have been migrated) so that page
1886 * migration is sure to notice its raised count.
1887 * It would be more elegant to return stable_node
1888 * than kpage, but that involves more changes.
1889 */
79899cce 1890 tree_folio = ksm_get_folio(stable_node_dup,
85b67b01 1891 KSM_GET_FOLIO_TRYLOCK);
2cee57d1 1892
79899cce 1893 if (PTR_ERR(tree_folio) == -EBUSY)
2cee57d1
YS
1894 return ERR_PTR(-EBUSY);
1895
79899cce 1896 if (unlikely(!tree_folio))
2c653d0e
AA
1897 /*
1898 * The tree may have been rebalanced,
1899 * so re-evaluate parent and new.
1900 */
4146d2d6 1901 goto again;
79899cce 1902 folio_unlock(tree_folio);
2c653d0e
AA
1903
1904 if (get_kpfn_nid(stable_node_dup->kpfn) !=
1905 NUMA(stable_node_dup->nid)) {
79899cce 1906 folio_put(tree_folio);
2c653d0e
AA
1907 goto replace;
1908 }
98c3ca00 1909 return tree_folio;
c8d6553b 1910 }
31dbd01f
IE
1911 }
1912
4146d2d6
HD
1913 if (!page_node)
1914 return NULL;
1915
1916 list_del(&page_node->list);
1917 DO_NUMA(page_node->nid = nid);
1918 rb_link_node(&page_node->node, parent, new);
ef53d16c 1919 rb_insert_color(&page_node->node, root);
2c653d0e
AA
1920out:
1921 if (is_page_sharing_candidate(page_node)) {
79899cce 1922 folio_get(folio);
98c3ca00 1923 return folio;
2c653d0e
AA
1924 } else
1925 return NULL;
4146d2d6
HD
1926
1927replace:
b4fecc67
AA
1928 /*
1929 * If stable_node was a chain and chain_prune collapsed it,
0ba1d0f7
AA
1930 * stable_node has been updated to be the new regular
1931 * stable_node. A collapse of the chain is indistinguishable
1932 * from the case there was no chain in the stable
1933 * rbtree. Otherwise stable_node is the chain and
1934 * stable_node_dup is the dup to replace.
b4fecc67 1935 */
0ba1d0f7 1936 if (stable_node_dup == stable_node) {
b4fecc67
AA
1937 VM_BUG_ON(is_stable_node_chain(stable_node_dup));
1938 VM_BUG_ON(is_stable_node_dup(stable_node_dup));
2c653d0e
AA
1939 /* there is no chain */
1940 if (page_node) {
1941 VM_BUG_ON(page_node->head != &migrate_nodes);
1942 list_del(&page_node->list);
1943 DO_NUMA(page_node->nid = nid);
b4fecc67
AA
1944 rb_replace_node(&stable_node_dup->node,
1945 &page_node->node,
2c653d0e
AA
1946 root);
1947 if (is_page_sharing_candidate(page_node))
79899cce 1948 folio_get(folio);
2c653d0e 1949 else
79899cce 1950 folio = NULL;
2c653d0e 1951 } else {
b4fecc67 1952 rb_erase(&stable_node_dup->node, root);
79899cce 1953 folio = NULL;
2c653d0e 1954 }
4146d2d6 1955 } else {
2c653d0e
AA
1956 VM_BUG_ON(!is_stable_node_chain(stable_node));
1957 __stable_node_dup_del(stable_node_dup);
1958 if (page_node) {
1959 VM_BUG_ON(page_node->head != &migrate_nodes);
1960 list_del(&page_node->list);
1961 DO_NUMA(page_node->nid = nid);
1962 stable_node_chain_add_dup(page_node, stable_node);
1963 if (is_page_sharing_candidate(page_node))
79899cce 1964 folio_get(folio);
2c653d0e 1965 else
79899cce 1966 folio = NULL;
2c653d0e 1967 } else {
79899cce 1968 folio = NULL;
2c653d0e 1969 }
4146d2d6 1970 }
2c653d0e
AA
1971 stable_node_dup->head = &migrate_nodes;
1972 list_add(&stable_node_dup->list, stable_node_dup->head);
98c3ca00 1973 return folio;
2c653d0e
AA
1974
1975chain_append:
b4fecc67
AA
1976 /*
1977 * If stable_node was a chain and chain_prune collapsed it,
0ba1d0f7
AA
1978 * stable_node has been updated to be the new regular
1979 * stable_node. A collapse of the chain is indistinguishable
1980 * from the case there was no chain in the stable
1981 * rbtree. Otherwise stable_node is the chain and
1982 * stable_node_dup is the dup to replace.
b4fecc67 1983 */
0ba1d0f7 1984 if (stable_node_dup == stable_node) {
b4fecc67 1985 VM_BUG_ON(is_stable_node_dup(stable_node_dup));
2c653d0e
AA
1986 /* chain is missing so create it */
1987 stable_node = alloc_stable_node_chain(stable_node_dup,
1988 root);
1989 if (!stable_node)
1990 return NULL;
1991 }
1992 /*
1993 * Add this stable_node dup that was
1994 * migrated to the stable_node chain
1995 * of the current nid for this page
1996 * content.
1997 */
b4fecc67 1998 VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
2c653d0e
AA
1999 VM_BUG_ON(page_node->head != &migrate_nodes);
2000 list_del(&page_node->list);
2001 DO_NUMA(page_node->nid = nid);
2002 stable_node_chain_add_dup(page_node, stable_node);
2003 goto out;
31dbd01f
IE
2004}
2005
2006/*
e850dcf5 2007 * stable_tree_insert - insert stable tree node pointing to new ksm page
31dbd01f
IE
2008 * into the stable tree.
2009 *
7b6ba2c7
HD
2010 * This function returns the stable tree node just allocated on success,
2011 * NULL otherwise.
31dbd01f 2012 */
79899cce 2013static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
31dbd01f 2014{
90bd6fd3
PH
2015 int nid;
2016 unsigned long kpfn;
ef53d16c 2017 struct rb_root *root;
90bd6fd3 2018 struct rb_node **new;
f2e5ff85 2019 struct rb_node *parent;
a0b856b6 2020 struct ksm_stable_node *stable_node, *stable_node_dup;
2c653d0e 2021 bool need_chain = false;
31dbd01f 2022
79899cce 2023 kpfn = folio_pfn(kfolio);
90bd6fd3 2024 nid = get_kpfn_nid(kpfn);
ef53d16c 2025 root = root_stable_tree + nid;
f2e5ff85
AA
2026again:
2027 parent = NULL;
ef53d16c 2028 new = &root->rb_node;
90bd6fd3 2029
31dbd01f 2030 while (*new) {
79899cce 2031 struct folio *tree_folio;
31dbd01f
IE
2032 int ret;
2033
08beca44 2034 cond_resched();
21fbd591 2035 stable_node = rb_entry(*new, struct ksm_stable_node, node);
a0b856b6 2036 tree_folio = chain(&stable_node_dup, &stable_node, root);
79899cce 2037 if (!tree_folio) {
f2e5ff85
AA
2038 /*
2039 * If we walked over a stale stable_node,
79899cce 2040 * ksm_get_folio() will call rb_erase() and it
f2e5ff85
AA
2041 * may rebalance the tree from under us. So
2042 * restart the search from scratch. Returning
2043 * NULL would be safe too, but we'd generate
2044 * false negative insertions just because some
2045 * stable_node was stale.
2046 */
2047 goto again;
2048 }
31dbd01f 2049
79899cce
AS
2050 ret = memcmp_pages(&kfolio->page, &tree_folio->page);
2051 folio_put(tree_folio);
31dbd01f
IE
2052
2053 parent = *new;
2054 if (ret < 0)
2055 new = &parent->rb_left;
2056 else if (ret > 0)
2057 new = &parent->rb_right;
2058 else {
2c653d0e
AA
2059 need_chain = true;
2060 break;
31dbd01f
IE
2061 }
2062 }
2063
2c653d0e
AA
2064 stable_node_dup = alloc_stable_node();
2065 if (!stable_node_dup)
7b6ba2c7 2066 return NULL;
31dbd01f 2067
2c653d0e
AA
2068 INIT_HLIST_HEAD(&stable_node_dup->hlist);
2069 stable_node_dup->kpfn = kpfn;
2c653d0e
AA
2070 stable_node_dup->rmap_hlist_len = 0;
2071 DO_NUMA(stable_node_dup->nid = nid);
2072 if (!need_chain) {
2073 rb_link_node(&stable_node_dup->node, parent, new);
2074 rb_insert_color(&stable_node_dup->node, root);
2075 } else {
2076 if (!is_stable_node_chain(stable_node)) {
21fbd591 2077 struct ksm_stable_node *orig = stable_node;
2c653d0e
AA
2078 /* chain is missing so create it */
2079 stable_node = alloc_stable_node_chain(orig, root);
2080 if (!stable_node) {
2081 free_stable_node(stable_node_dup);
2082 return NULL;
2083 }
2084 }
2085 stable_node_chain_add_dup(stable_node_dup, stable_node);
2086 }
08beca44 2087
90e82349
CZ
2088 folio_set_stable_node(kfolio, stable_node_dup);
2089
2c653d0e 2090 return stable_node_dup;
31dbd01f
IE
2091}
2092
2093/*
8dd3557a
HD
2094 * unstable_tree_search_insert - search for identical page,
2095 * else insert rmap_item into the unstable tree.
31dbd01f
IE
2096 *
2097 * This function searches for a page in the unstable tree identical to the
2098 * page currently being scanned; and if no identical page is found in the
2099 * tree, we insert rmap_item as a new object into the unstable tree.
2100 *
2101 * This function returns pointer to rmap_item found to be identical
2102 * to the currently scanned page, NULL otherwise.
2103 *
2104 * This function does both searching and inserting, because they share
2105 * the same walking algorithm in an rbtree.
2106 */
8dd3557a 2107static
21fbd591 2108struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item,
8dd3557a
HD
2109 struct page *page,
2110 struct page **tree_pagep)
31dbd01f 2111{
90bd6fd3
PH
2112 struct rb_node **new;
2113 struct rb_root *root;
31dbd01f 2114 struct rb_node *parent = NULL;
90bd6fd3
PH
2115 int nid;
2116
2117 nid = get_kpfn_nid(page_to_pfn(page));
ef53d16c 2118 root = root_unstable_tree + nid;
90bd6fd3 2119 new = &root->rb_node;
31dbd01f
IE
2120
2121 while (*new) {
21fbd591 2122 struct ksm_rmap_item *tree_rmap_item;
8dd3557a 2123 struct page *tree_page;
31dbd01f
IE
2124 int ret;
2125
d178f27f 2126 cond_resched();
21fbd591 2127 tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node);
8dd3557a 2128 tree_page = get_mergeable_page(tree_rmap_item);
c8f95ed1 2129 if (!tree_page)
31dbd01f
IE
2130 return NULL;
2131
2132 /*
8dd3557a 2133 * Don't substitute a ksm page for a forked page.
31dbd01f 2134 */
8dd3557a
HD
2135 if (page == tree_page) {
2136 put_page(tree_page);
31dbd01f
IE
2137 return NULL;
2138 }
2139
8dd3557a 2140 ret = memcmp_pages(page, tree_page);
31dbd01f
IE
2141
2142 parent = *new;
2143 if (ret < 0) {
8dd3557a 2144 put_page(tree_page);
31dbd01f
IE
2145 new = &parent->rb_left;
2146 } else if (ret > 0) {
8dd3557a 2147 put_page(tree_page);
31dbd01f 2148 new = &parent->rb_right;
b599cbdf
HD
2149 } else if (!ksm_merge_across_nodes &&
2150 page_to_nid(tree_page) != nid) {
2151 /*
2152 * If tree_page has been migrated to another NUMA node,
2153 * it will be flushed out and put in the right unstable
2154 * tree next time: only merge with it when across_nodes.
2155 */
2156 put_page(tree_page);
2157 return NULL;
31dbd01f 2158 } else {
8dd3557a 2159 *tree_pagep = tree_page;
31dbd01f
IE
2160 return tree_rmap_item;
2161 }
2162 }
2163
7b6ba2c7 2164 rmap_item->address |= UNSTABLE_FLAG;
31dbd01f 2165 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
e850dcf5 2166 DO_NUMA(rmap_item->nid = nid);
31dbd01f 2167 rb_link_node(&rmap_item->node, parent, new);
90bd6fd3 2168 rb_insert_color(&rmap_item->node, root);
31dbd01f 2169
473b0ce4 2170 ksm_pages_unshared++;
31dbd01f
IE
2171 return NULL;
2172}
2173
2174/*
2175 * stable_tree_append - add another rmap_item to the linked list of
2176 * rmap_items hanging off a given node of the stable tree, all sharing
2177 * the same ksm page.
2178 */
21fbd591
QZ
2179static void stable_tree_append(struct ksm_rmap_item *rmap_item,
2180 struct ksm_stable_node *stable_node,
2c653d0e 2181 bool max_page_sharing_bypass)
31dbd01f 2182{
2c653d0e
AA
2183 /*
2184 * rmap won't find this mapping if we don't insert the
2185 * rmap_item in the right stable_node
2186 * duplicate. page_migration could break later if rmap breaks,
2187 * so we can as well crash here. We really need to check for
2188 * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check
457aef94 2189 * for other negative values as an underflow if detected here
2c653d0e
AA
2190 * for the first time (and not when decreasing rmap_hlist_len)
2191 * would be sign of memory corruption in the stable_node.
2192 */
2193 BUG_ON(stable_node->rmap_hlist_len < 0);
2194
2195 stable_node->rmap_hlist_len++;
2196 if (!max_page_sharing_bypass)
2197 /* possibly non fatal but unexpected overflow, only warn */
2198 WARN_ON_ONCE(stable_node->rmap_hlist_len >
2199 ksm_max_page_sharing);
2200
7b6ba2c7 2201 rmap_item->head = stable_node;
31dbd01f 2202 rmap_item->address |= STABLE_FLAG;
7b6ba2c7 2203 hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
e178dfde 2204
7b6ba2c7
HD
2205 if (rmap_item->hlist.next)
2206 ksm_pages_sharing++;
2207 else
2208 ksm_pages_shared++;
76093853 2209
2210 rmap_item->mm->ksm_merging_pages++;
31dbd01f
IE
2211}
2212
2213/*
81464e30
HD
2214 * cmp_and_merge_page - first see if page can be merged into the stable tree;
2215 * if not, compare checksum to previous and if it's the same, see if page can
2216 * be inserted into the unstable tree, or merged with a page already there and
2217 * both transferred to the stable tree.
31dbd01f
IE
2218 *
2219 * @page: the page that we are searching identical page to.
2220 * @rmap_item: the reverse mapping into the virtual address of this page
2221 */
21fbd591 2222static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
31dbd01f 2223{
21fbd591 2224 struct ksm_rmap_item *tree_rmap_item;
8dd3557a 2225 struct page *tree_page = NULL;
21fbd591 2226 struct ksm_stable_node *stable_node;
98c3ca00 2227 struct folio *kfolio;
31dbd01f
IE
2228 unsigned int checksum;
2229 int err;
2c653d0e 2230 bool max_page_sharing_bypass = false;
31dbd01f 2231
4146d2d6
HD
2232 stable_node = page_stable_node(page);
2233 if (stable_node) {
2234 if (stable_node->head != &migrate_nodes &&
2c653d0e
AA
2235 get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
2236 NUMA(stable_node->nid)) {
2237 stable_node_dup_del(stable_node);
4146d2d6
HD
2238 stable_node->head = &migrate_nodes;
2239 list_add(&stable_node->list, stable_node->head);
2240 }
2241 if (stable_node->head != &migrate_nodes &&
2242 rmap_item->head == stable_node)
2243 return;
2c653d0e
AA
2244 /*
2245 * If it's a KSM fork, allow it to go over the sharing limit
2246 * without warnings.
2247 */
2248 if (!is_page_sharing_candidate(stable_node))
2249 max_page_sharing_bypass = true;
d58a361b
CZ
2250 } else {
2251 remove_rmap_item_from_tree(rmap_item);
2252
2253 /*
2254 * If the hash value of the page has changed from the last time
2255 * we calculated it, this page is changing frequently: therefore we
2256 * don't want to insert it in the unstable tree, and we don't want
2257 * to waste our time searching for something identical to it there.
2258 */
2259 checksum = calc_checksum(page);
2260 if (rmap_item->oldchecksum != checksum) {
2261 rmap_item->oldchecksum = checksum;
2262 return;
2263 }
2264
2265 if (!try_to_merge_with_zero_page(rmap_item, page))
2266 return;
4146d2d6 2267 }
31dbd01f 2268
98c3ca00
MWO
2269 /* Start by searching for the folio in the stable tree */
2270 kfolio = stable_tree_search(page);
5c00ff74 2271 if (&kfolio->page == page && rmap_item->head == stable_node) {
98c3ca00 2272 folio_put(kfolio);
4146d2d6
HD
2273 return;
2274 }
2275
2276 remove_rmap_item_from_tree(rmap_item);
2277
98c3ca00
MWO
2278 if (kfolio) {
2279 if (kfolio == ERR_PTR(-EBUSY))
2cee57d1
YS
2280 return;
2281
98c3ca00 2282 err = try_to_merge_with_ksm_page(rmap_item, page, &kfolio->page);
31dbd01f
IE
2283 if (!err) {
2284 /*
2285 * The page was successfully merged:
2286 * add its rmap_item to the stable tree.
2287 */
98c3ca00
MWO
2288 folio_lock(kfolio);
2289 stable_tree_append(rmap_item, folio_stable_node(kfolio),
2c653d0e 2290 max_page_sharing_bypass);
98c3ca00 2291 folio_unlock(kfolio);
31dbd01f 2292 }
98c3ca00 2293 folio_put(kfolio);
31dbd01f
IE
2294 return;
2295 }
2296
8dd3557a
HD
2297 tree_rmap_item =
2298 unstable_tree_search_insert(rmap_item, page, &tree_page);
31dbd01f 2299 if (tree_rmap_item) {
77da2ba0
CI
2300 bool split;
2301
98c3ca00 2302 kfolio = try_to_merge_two_pages(rmap_item, page,
8dd3557a 2303 tree_rmap_item, tree_page);
77da2ba0
CI
2304 /*
2305 * If both pages we tried to merge belong to the same compound
2306 * page, then we actually ended up increasing the reference
2307 * count of the same compound page twice, and split_huge_page
2308 * failed.
2309 * Here we set a flag if that happened, and we use it later to
2310 * try split_huge_page again. Since we call put_page right
2311 * afterwards, the reference count will be correct and
2312 * split_huge_page should succeed.
2313 */
2314 split = PageTransCompound(page)
2315 && compound_head(page) == compound_head(tree_page);
8dd3557a 2316 put_page(tree_page);
98c3ca00 2317 if (kfolio) {
bc56620b
HD
2318 /*
2319 * The pages were successfully merged: insert new
2320 * node in the stable tree and add both rmap_items.
2321 */
98c3ca00
MWO
2322 folio_lock(kfolio);
2323 stable_node = stable_tree_insert(kfolio);
7b6ba2c7 2324 if (stable_node) {
2c653d0e
AA
2325 stable_tree_append(tree_rmap_item, stable_node,
2326 false);
2327 stable_tree_append(rmap_item, stable_node,
2328 false);
7b6ba2c7 2329 }
98c3ca00 2330 folio_unlock(kfolio);
7b6ba2c7 2331
31dbd01f
IE
2332 /*
2333 * If we fail to insert the page into the stable tree,
2334 * we will have 2 virtual addresses that are pointing
2335 * to a ksm page left outside the stable tree,
2336 * in which case we need to break_cow on both.
2337 */
7b6ba2c7 2338 if (!stable_node) {
8dd3557a
HD
2339 break_cow(tree_rmap_item);
2340 break_cow(rmap_item);
31dbd01f 2341 }
77da2ba0
CI
2342 } else if (split) {
2343 /*
2344 * We are here if we tried to merge two pages and
2345 * failed because they both belonged to the same
2346 * compound page. We will split the page now, but no
2347 * merging will take place.
2348 * We do not want to add the cost of a full lock; if
2349 * the page is locked, it is better to skip it and
2350 * perhaps try again later.
2351 */
2352 if (!trylock_page(page))
2353 return;
2354 split_huge_page(page);
2355 unlock_page(page);
31dbd01f 2356 }
31dbd01f
IE
2357 }
2358}
2359
21fbd591
QZ
2360static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
2361 struct ksm_rmap_item **rmap_list,
31dbd01f
IE
2362 unsigned long addr)
2363{
21fbd591 2364 struct ksm_rmap_item *rmap_item;
31dbd01f 2365
6514d511
HD
2366 while (*rmap_list) {
2367 rmap_item = *rmap_list;
93d17715 2368 if ((rmap_item->address & PAGE_MASK) == addr)
31dbd01f 2369 return rmap_item;
31dbd01f
IE
2370 if (rmap_item->address > addr)
2371 break;
6514d511 2372 *rmap_list = rmap_item->rmap_list;
31dbd01f 2373 remove_rmap_item_from_tree(rmap_item);
31dbd01f
IE
2374 free_rmap_item(rmap_item);
2375 }
2376
2377 rmap_item = alloc_rmap_item();
2378 if (rmap_item) {
2379 /* It has already been zeroed */
58730ab6 2380 rmap_item->mm = mm_slot->slot.mm;
cb4df4ca 2381 rmap_item->mm->ksm_rmap_items++;
31dbd01f 2382 rmap_item->address = addr;
6514d511
HD
2383 rmap_item->rmap_list = *rmap_list;
2384 *rmap_list = rmap_item;
31dbd01f
IE
2385 }
2386 return rmap_item;
2387}
2388
5e924ff5
SR
2389/*
2390 * Calculate skip age for the ksm page age. The age determines how often
2391 * de-duplicating has already been tried unsuccessfully. If the age is
2392 * smaller, the scanning of this page is skipped for less scans.
2393 *
2394 * @age: rmap_item age of page
2395 */
2396static unsigned int skip_age(rmap_age_t age)
2397{
2398 if (age <= 3)
2399 return 1;
2400 if (age <= 5)
2401 return 2;
2402 if (age <= 8)
2403 return 4;
2404
2405 return 8;
2406}
2407
2408/*
2409 * Determines if a page should be skipped for the current scan.
2410 *
76f1a826 2411 * @folio: folio containing the page to check
5e924ff5
SR
2412 * @rmap_item: associated rmap_item of page
2413 */
76f1a826 2414static bool should_skip_rmap_item(struct folio *folio,
5e924ff5
SR
2415 struct ksm_rmap_item *rmap_item)
2416{
2417 rmap_age_t age;
2418
2419 if (!ksm_smart_scan)
2420 return false;
2421
2422 /*
2423 * Never skip pages that are already KSM; pages cmp_and_merge_page()
2424 * will essentially ignore them, but we still have to process them
2425 * properly.
2426 */
76f1a826 2427 if (folio_test_ksm(folio))
5e924ff5
SR
2428 return false;
2429
2430 age = rmap_item->age;
2431 if (age != U8_MAX)
2432 rmap_item->age++;
2433
2434 /*
2435 * Smaller ages are not skipped, they need to get a chance to go
2436 * through the different phases of the KSM merging.
2437 */
2438 if (age < 3)
2439 return false;
2440
2441 /*
2442 * Are we still allowed to skip? If not, then don't skip it
2443 * and determine how much more often we are allowed to skip next.
2444 */
2445 if (!rmap_item->remaining_skips) {
2446 rmap_item->remaining_skips = skip_age(age);
2447 return false;
2448 }
2449
2450 /* Skip this page */
e5a68991 2451 ksm_pages_skipped++;
5e924ff5
SR
2452 rmap_item->remaining_skips--;
2453 remove_rmap_item_from_tree(rmap_item);
2454 return true;
2455}
2456
21fbd591 2457static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
31dbd01f
IE
2458{
2459 struct mm_struct *mm;
58730ab6
QZ
2460 struct ksm_mm_slot *mm_slot;
2461 struct mm_slot *slot;
31dbd01f 2462 struct vm_area_struct *vma;
21fbd591 2463 struct ksm_rmap_item *rmap_item;
a5f18ba0 2464 struct vma_iterator vmi;
90bd6fd3 2465 int nid;
31dbd01f 2466
58730ab6 2467 if (list_empty(&ksm_mm_head.slot.mm_node))
31dbd01f
IE
2468 return NULL;
2469
58730ab6
QZ
2470 mm_slot = ksm_scan.mm_slot;
2471 if (mm_slot == &ksm_mm_head) {
4e5fa4f5 2472 advisor_start_scan();
739100c8
SR
2473 trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
2474
2919bfd0 2475 /*
1fec6890
MWO
2476 * A number of pages can hang around indefinitely in per-cpu
2477 * LRU cache, raised page count preventing write_protect_page
2919bfd0
HD
2478 * from merging them. Though it doesn't really matter much,
2479 * it is puzzling to see some stuck in pages_volatile until
2480 * other activity jostles them out, and they also prevented
2481 * LTP's KSM test from succeeding deterministically; so drain
2482 * them here (here rather than on entry to ksm_do_scan(),
2483 * so we don't IPI too often when pages_to_scan is set low).
2484 */
2485 lru_add_drain_all();
2486
4146d2d6
HD
2487 /*
2488 * Whereas stale stable_nodes on the stable_tree itself
2489 * get pruned in the regular course of stable_tree_search(),
2490 * those moved out to the migrate_nodes list can accumulate:
2491 * so prune them once before each full scan.
2492 */
2493 if (!ksm_merge_across_nodes) {
21fbd591 2494 struct ksm_stable_node *stable_node, *next;
72556a4c 2495 struct folio *folio;
4146d2d6 2496
03640418
GT
2497 list_for_each_entry_safe(stable_node, next,
2498 &migrate_nodes, list) {
72556a4c 2499 folio = ksm_get_folio(stable_node,
85b67b01 2500 KSM_GET_FOLIO_NOLOCK);
72556a4c
AS
2501 if (folio)
2502 folio_put(folio);
4146d2d6
HD
2503 cond_resched();
2504 }
2505 }
2506
ef53d16c 2507 for (nid = 0; nid < ksm_nr_node_ids; nid++)
90bd6fd3 2508 root_unstable_tree[nid] = RB_ROOT;
31dbd01f
IE
2509
2510 spin_lock(&ksm_mmlist_lock);
58730ab6
QZ
2511 slot = list_entry(mm_slot->slot.mm_node.next,
2512 struct mm_slot, mm_node);
2513 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
2514 ksm_scan.mm_slot = mm_slot;
31dbd01f 2515 spin_unlock(&ksm_mmlist_lock);
2b472611
HD
2516 /*
2517 * Although we tested list_empty() above, a racing __ksm_exit
2518 * of the last mm on the list may have removed it since then.
2519 */
58730ab6 2520 if (mm_slot == &ksm_mm_head)
2b472611 2521 return NULL;
31dbd01f
IE
2522next_mm:
2523 ksm_scan.address = 0;
58730ab6 2524 ksm_scan.rmap_list = &mm_slot->rmap_list;
31dbd01f
IE
2525 }
2526
58730ab6 2527 slot = &mm_slot->slot;
31dbd01f 2528 mm = slot->mm;
a5f18ba0
MWO
2529 vma_iter_init(&vmi, mm, ksm_scan.address);
2530
d8ed45c5 2531 mmap_read_lock(mm);
9ba69294 2532 if (ksm_test_exit(mm))
a5f18ba0 2533 goto no_vmas;
9ba69294 2534
a5f18ba0 2535 for_each_vma(vmi, vma) {
31dbd01f
IE
2536 if (!(vma->vm_flags & VM_MERGEABLE))
2537 continue;
2538 if (ksm_scan.address < vma->vm_start)
2539 ksm_scan.address = vma->vm_start;
2540 if (!vma->anon_vma)
2541 ksm_scan.address = vma->vm_end;
2542
2543 while (ksm_scan.address < vma->vm_end) {
b1d3e9bb
DH
2544 struct page *tmp_page = NULL;
2545 struct folio_walk fw;
2546 struct folio *folio;
2547
9ba69294
HD
2548 if (ksm_test_exit(mm))
2549 break;
b1d3e9bb
DH
2550
2551 folio = folio_walk_start(&fw, vma, ksm_scan.address, 0);
2552 if (folio) {
2553 if (!folio_is_zone_device(folio) &&
2554 folio_test_anon(folio)) {
2555 folio_get(folio);
2556 tmp_page = fw.page;
2557 }
2558 folio_walk_end(&fw, vma);
21ae5b01 2559 }
b1d3e9bb
DH
2560
2561 if (tmp_page) {
2562 flush_anon_page(vma, tmp_page, ksm_scan.address);
2563 flush_dcache_page(tmp_page);
58730ab6 2564 rmap_item = get_next_rmap_item(mm_slot,
6514d511 2565 ksm_scan.rmap_list, ksm_scan.address);
31dbd01f 2566 if (rmap_item) {
6514d511
HD
2567 ksm_scan.rmap_list =
2568 &rmap_item->rmap_list;
5e924ff5 2569
76f1a826 2570 if (should_skip_rmap_item(folio, rmap_item)) {
b1d3e9bb 2571 folio_put(folio);
5e924ff5 2572 goto next_page;
b1d3e9bb 2573 }
5e924ff5 2574
31dbd01f 2575 ksm_scan.address += PAGE_SIZE;
b1d3e9bb
DH
2576 *page = tmp_page;
2577 } else {
2578 folio_put(folio);
2579 }
d8ed45c5 2580 mmap_read_unlock(mm);
31dbd01f
IE
2581 return rmap_item;
2582 }
f7091ed6 2583next_page:
31dbd01f
IE
2584 ksm_scan.address += PAGE_SIZE;
2585 cond_resched();
2586 }
2587 }
2588
9ba69294 2589 if (ksm_test_exit(mm)) {
a5f18ba0 2590no_vmas:
9ba69294 2591 ksm_scan.address = 0;
58730ab6 2592 ksm_scan.rmap_list = &mm_slot->rmap_list;
9ba69294 2593 }
31dbd01f
IE
2594 /*
2595 * Nuke all the rmap_items that are above this current rmap:
2596 * because there were no VM_MERGEABLE vmas with such addresses.
2597 */
420be4ed 2598 remove_trailing_rmap_items(ksm_scan.rmap_list);
31dbd01f
IE
2599
2600 spin_lock(&ksm_mmlist_lock);
58730ab6
QZ
2601 slot = list_entry(mm_slot->slot.mm_node.next,
2602 struct mm_slot, mm_node);
2603 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
cd551f97
HD
2604 if (ksm_scan.address == 0) {
2605 /*
c1e8d7c6 2606 * We've completed a full scan of all vmas, holding mmap_lock
cd551f97
HD
2607 * throughout, and found no VM_MERGEABLE: so do the same as
2608 * __ksm_exit does to remove this mm from all our lists now.
9ba69294
HD
2609 * This applies either when cleaning up after __ksm_exit
2610 * (but beware: we can reach here even before __ksm_exit),
2611 * or when all VM_MERGEABLE areas have been unmapped (and
c1e8d7c6 2612 * mmap_lock then protects against race with MADV_MERGEABLE).
cd551f97 2613 */
58730ab6
QZ
2614 hash_del(&mm_slot->slot.hash);
2615 list_del(&mm_slot->slot.mm_node);
9ba69294
HD
2616 spin_unlock(&ksm_mmlist_lock);
2617
58730ab6 2618 mm_slot_free(mm_slot_cache, mm_slot);
cd551f97 2619 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
d7597f59 2620 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
d8ed45c5 2621 mmap_read_unlock(mm);
9ba69294
HD
2622 mmdrop(mm);
2623 } else {
d8ed45c5 2624 mmap_read_unlock(mm);
7496fea9 2625 /*
3e4e28c5 2626 * mmap_read_unlock(mm) first because after
7496fea9
ZC
2627 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
2628 * already have been freed under us by __ksm_exit()
2629 * because the "mm_slot" is still hashed and
2630 * ksm_scan.mm_slot doesn't point to it anymore.
2631 */
2632 spin_unlock(&ksm_mmlist_lock);
cd551f97 2633 }
31dbd01f
IE
2634
2635 /* Repeat until we've completed scanning the whole list */
58730ab6
QZ
2636 mm_slot = ksm_scan.mm_slot;
2637 if (mm_slot != &ksm_mm_head)
31dbd01f
IE
2638 goto next_mm;
2639
4e5fa4f5
SR
2640 advisor_stop_scan();
2641
739100c8 2642 trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items);
31dbd01f
IE
2643 ksm_scan.seqnr++;
2644 return NULL;
2645}
2646
2647/**
2648 * ksm_do_scan - the ksm scanner main worker function.
b7701a5f 2649 * @scan_npages: number of pages we want to scan before we return.
31dbd01f
IE
2650 */
2651static void ksm_do_scan(unsigned int scan_npages)
2652{
21fbd591 2653 struct ksm_rmap_item *rmap_item;
3f649ab7 2654 struct page *page;
31dbd01f 2655
730cdc2c 2656 while (scan_npages-- && likely(!freezing(current))) {
31dbd01f
IE
2657 cond_resched();
2658 rmap_item = scan_get_next_rmap_item(&page);
2659 if (!rmap_item)
2660 return;
4146d2d6 2661 cmp_and_merge_page(page, rmap_item);
31dbd01f 2662 put_page(page);
730cdc2c 2663 ksm_pages_scanned++;
31dbd01f
IE
2664 }
2665}
2666
6e158384
HD
2667static int ksmd_should_run(void)
2668{
58730ab6 2669 return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node);
6e158384
HD
2670}
2671
31dbd01f
IE
2672static int ksm_scan_thread(void *nothing)
2673{
fcf9a0ef
KT
2674 unsigned int sleep_ms;
2675
878aee7d 2676 set_freezable();
339aa624 2677 set_user_nice(current, 5);
31dbd01f
IE
2678
2679 while (!kthread_should_stop()) {
6e158384 2680 mutex_lock(&ksm_thread_mutex);
ef4d43a8 2681 wait_while_offlining();
6e158384 2682 if (ksmd_should_run())
31dbd01f 2683 ksm_do_scan(ksm_thread_pages_to_scan);
6e158384
HD
2684 mutex_unlock(&ksm_thread_mutex);
2685
2686 if (ksmd_should_run()) {
fcf9a0ef 2687 sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
f55afd95 2688 wait_event_freezable_timeout(ksm_iter_wait,
fcf9a0ef
KT
2689 sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
2690 msecs_to_jiffies(sleep_ms));
31dbd01f 2691 } else {
878aee7d 2692 wait_event_freezable(ksm_thread_wait,
6e158384 2693 ksmd_should_run() || kthread_should_stop());
31dbd01f
IE
2694 }
2695 }
2696 return 0;
2697}
2698
d7597f59
SR
2699static void __ksm_add_vma(struct vm_area_struct *vma)
2700{
2701 unsigned long vm_flags = vma->vm_flags;
2702
2703 if (vm_flags & VM_MERGEABLE)
2704 return;
2705
2706 if (vma_ksm_compatible(vma))
2707 vm_flags_set(vma, VM_MERGEABLE);
2708}
2709
24139c07
DH
2710static int __ksm_del_vma(struct vm_area_struct *vma)
2711{
2712 int err;
2713
2714 if (!(vma->vm_flags & VM_MERGEABLE))
2715 return 0;
2716
2717 if (vma->anon_vma) {
49b06385 2718 err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true);
24139c07
DH
2719 if (err)
2720 return err;
2721 }
2722
2723 vm_flags_clear(vma, VM_MERGEABLE);
2724 return 0;
2725}
d7597f59
SR
2726/**
2727 * ksm_add_vma - Mark vma as mergeable if compatible
2728 *
2729 * @vma: Pointer to vma
2730 */
2731void ksm_add_vma(struct vm_area_struct *vma)
2732{
2733 struct mm_struct *mm = vma->vm_mm;
2734
2735 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2736 __ksm_add_vma(vma);
2737}
2738
2739static void ksm_add_vmas(struct mm_struct *mm)
2740{
2741 struct vm_area_struct *vma;
2742
2743 VMA_ITERATOR(vmi, mm, 0);
2744 for_each_vma(vmi, vma)
2745 __ksm_add_vma(vma);
2746}
2747
24139c07
DH
2748static int ksm_del_vmas(struct mm_struct *mm)
2749{
2750 struct vm_area_struct *vma;
2751 int err;
2752
2753 VMA_ITERATOR(vmi, mm, 0);
2754 for_each_vma(vmi, vma) {
2755 err = __ksm_del_vma(vma);
2756 if (err)
2757 return err;
2758 }
2759 return 0;
2760}
2761
d7597f59
SR
2762/**
2763 * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all
2764 * compatible VMA's
2765 *
2766 * @mm: Pointer to mm
2767 *
2768 * Returns 0 on success, otherwise error code
2769 */
2770int ksm_enable_merge_any(struct mm_struct *mm)
2771{
2772 int err;
2773
2774 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2775 return 0;
2776
2777 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2778 err = __ksm_enter(mm);
2779 if (err)
2780 return err;
2781 }
2782
2783 set_bit(MMF_VM_MERGE_ANY, &mm->flags);
2784 ksm_add_vmas(mm);
2785
2786 return 0;
2787}
2788
24139c07
DH
2789/**
2790 * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm,
2791 * previously enabled via ksm_enable_merge_any().
2792 *
2793 * Disabling merging implies unmerging any merged pages, like setting
2794 * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and
2795 * merging on all compatible VMA's remains enabled.
2796 *
2797 * @mm: Pointer to mm
2798 *
2799 * Returns 0 on success, otherwise error code
2800 */
2801int ksm_disable_merge_any(struct mm_struct *mm)
2802{
2803 int err;
2804
2805 if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2806 return 0;
2807
2808 err = ksm_del_vmas(mm);
2809 if (err) {
2810 ksm_add_vmas(mm);
2811 return err;
2812 }
2813
2814 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
2815 return 0;
2816}
2817
2c281f54
DH
2818int ksm_disable(struct mm_struct *mm)
2819{
2820 mmap_assert_write_locked(mm);
2821
2822 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
2823 return 0;
2824 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2825 return ksm_disable_merge_any(mm);
2826 return ksm_del_vmas(mm);
2827}
2828
f8af4da3
HD
2829int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
2830 unsigned long end, int advice, unsigned long *vm_flags)
2831{
2832 struct mm_struct *mm = vma->vm_mm;
d952b791 2833 int err;
f8af4da3
HD
2834
2835 switch (advice) {
2836 case MADV_MERGEABLE:
d7597f59 2837 if (vma->vm_flags & VM_MERGEABLE)
e1fb4a08 2838 return 0;
d7597f59 2839 if (!vma_ksm_compatible(vma))
74a04967 2840 return 0;
cc2383ec 2841
d952b791
HD
2842 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2843 err = __ksm_enter(mm);
2844 if (err)
2845 return err;
2846 }
f8af4da3
HD
2847
2848 *vm_flags |= VM_MERGEABLE;
2849 break;
2850
2851 case MADV_UNMERGEABLE:
2852 if (!(*vm_flags & VM_MERGEABLE))
2853 return 0; /* just ignore the advice */
2854
d952b791 2855 if (vma->anon_vma) {
49b06385 2856 err = unmerge_ksm_pages(vma, start, end, true);
d952b791
HD
2857 if (err)
2858 return err;
2859 }
f8af4da3
HD
2860
2861 *vm_flags &= ~VM_MERGEABLE;
2862 break;
2863 }
2864
2865 return 0;
2866}
33cf1707 2867EXPORT_SYMBOL_GPL(ksm_madvise);
f8af4da3
HD
2868
2869int __ksm_enter(struct mm_struct *mm)
2870{
21fbd591 2871 struct ksm_mm_slot *mm_slot;
58730ab6 2872 struct mm_slot *slot;
6e158384
HD
2873 int needs_wakeup;
2874
58730ab6 2875 mm_slot = mm_slot_alloc(mm_slot_cache);
31dbd01f
IE
2876 if (!mm_slot)
2877 return -ENOMEM;
2878
58730ab6
QZ
2879 slot = &mm_slot->slot;
2880
6e158384 2881 /* Check ksm_run too? Would need tighter locking */
58730ab6 2882 needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node);
6e158384 2883
31dbd01f 2884 spin_lock(&ksm_mmlist_lock);
58730ab6 2885 mm_slot_insert(mm_slots_hash, mm, slot);
31dbd01f 2886 /*
cbf86cfe
HD
2887 * When KSM_RUN_MERGE (or KSM_RUN_STOP),
2888 * insert just behind the scanning cursor, to let the area settle
31dbd01f
IE
2889 * down a little; when fork is followed by immediate exec, we don't
2890 * want ksmd to waste time setting up and tearing down an rmap_list.
cbf86cfe
HD
2891 *
2892 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
2893 * scanning cursor, otherwise KSM pages in newly forked mms will be
2894 * missed: then we might as well insert at the end of the list.
31dbd01f 2895 */
cbf86cfe 2896 if (ksm_run & KSM_RUN_UNMERGE)
58730ab6 2897 list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node);
cbf86cfe 2898 else
58730ab6 2899 list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node);
31dbd01f
IE
2900 spin_unlock(&ksm_mmlist_lock);
2901
f8af4da3 2902 set_bit(MMF_VM_MERGEABLE, &mm->flags);
f1f10076 2903 mmgrab(mm);
6e158384
HD
2904
2905 if (needs_wakeup)
2906 wake_up_interruptible(&ksm_thread_wait);
2907
739100c8 2908 trace_ksm_enter(mm);
f8af4da3
HD
2909 return 0;
2910}
2911
1c2fb7a4 2912void __ksm_exit(struct mm_struct *mm)
f8af4da3 2913{
21fbd591 2914 struct ksm_mm_slot *mm_slot;
58730ab6 2915 struct mm_slot *slot;
9ba69294 2916 int easy_to_free = 0;
cd551f97 2917
31dbd01f 2918 /*
9ba69294
HD
2919 * This process is exiting: if it's straightforward (as is the
2920 * case when ksmd was never running), free mm_slot immediately.
2921 * But if it's at the cursor or has rmap_items linked to it, use
c1e8d7c6 2922 * mmap_lock to synchronize with any break_cows before pagetables
9ba69294
HD
2923 * are freed, and leave the mm_slot on the list for ksmd to free.
2924 * Beware: ksm may already have noticed it exiting and freed the slot.
31dbd01f 2925 */
9ba69294 2926
cd551f97 2927 spin_lock(&ksm_mmlist_lock);
58730ab6
QZ
2928 slot = mm_slot_lookup(mm_slots_hash, mm);
2929 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
9ba69294 2930 if (mm_slot && ksm_scan.mm_slot != mm_slot) {
6514d511 2931 if (!mm_slot->rmap_list) {
58730ab6
QZ
2932 hash_del(&slot->hash);
2933 list_del(&slot->mm_node);
9ba69294
HD
2934 easy_to_free = 1;
2935 } else {
58730ab6
QZ
2936 list_move(&slot->mm_node,
2937 &ksm_scan.mm_slot->slot.mm_node);
9ba69294 2938 }
cd551f97 2939 }
cd551f97
HD
2940 spin_unlock(&ksm_mmlist_lock);
2941
9ba69294 2942 if (easy_to_free) {
58730ab6 2943 mm_slot_free(mm_slot_cache, mm_slot);
d7597f59 2944 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
9ba69294
HD
2945 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2946 mmdrop(mm);
2947 } else if (mm_slot) {
d8ed45c5
ML
2948 mmap_write_lock(mm);
2949 mmap_write_unlock(mm);
9ba69294 2950 }
739100c8
SR
2951
2952 trace_ksm_exit(mm);
31dbd01f
IE
2953}
2954
96db66d9 2955struct folio *ksm_might_need_to_copy(struct folio *folio,
1486fb50 2956 struct vm_area_struct *vma, unsigned long addr)
5ad64688 2957{
96db66d9 2958 struct page *page = folio_page(folio, 0);
e05b3453 2959 struct anon_vma *anon_vma = folio_anon_vma(folio);
1486fb50 2960 struct folio *new_folio;
5ad64688 2961
1486fb50 2962 if (folio_test_large(folio))
96db66d9 2963 return folio;
1486fb50
KW
2964
2965 if (folio_test_ksm(folio)) {
2966 if (folio_stable_node(folio) &&
cbf86cfe 2967 !(ksm_run & KSM_RUN_UNMERGE))
96db66d9 2968 return folio; /* no need to copy it */
cbf86cfe 2969 } else if (!anon_vma) {
96db66d9 2970 return folio; /* no need to copy it */
1486fb50 2971 } else if (folio->index == linear_page_index(vma, addr) &&
e1c63e11 2972 anon_vma->root == vma->anon_vma->root) {
96db66d9 2973 return folio; /* still no need to copy it */
cbf86cfe 2974 }
f985fc32
ML
2975 if (PageHWPoison(page))
2976 return ERR_PTR(-EHWPOISON);
1486fb50 2977 if (!folio_test_uptodate(folio))
96db66d9 2978 return folio; /* let do_swap_page report the error */
cbf86cfe 2979
6359c39c 2980 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
1486fb50
KW
2981 if (new_folio &&
2982 mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
2983 folio_put(new_folio);
2984 new_folio = NULL;
62fdb163 2985 }
1486fb50 2986 if (new_folio) {
96db66d9
MWO
2987 if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
2988 addr, vma)) {
1486fb50 2989 folio_put(new_folio);
6b970599
KW
2990 return ERR_PTR(-EHWPOISON);
2991 }
1486fb50
KW
2992 folio_set_dirty(new_folio);
2993 __folio_mark_uptodate(new_folio);
2994 __folio_set_locked(new_folio);
4d45c3af
YY
2995#ifdef CONFIG_SWAP
2996 count_vm_event(KSM_SWPIN_COPY);
2997#endif
5ad64688
HD
2998 }
2999
96db66d9 3000 return new_folio;
5ad64688
HD
3001}
3002
6d4675e6 3003void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
e9995ef9 3004{
21fbd591
QZ
3005 struct ksm_stable_node *stable_node;
3006 struct ksm_rmap_item *rmap_item;
e9995ef9
HD
3007 int search_new_forks = 0;
3008
2f031c6f 3009 VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);
9f32624b
JK
3010
3011 /*
3012 * Rely on the page lock to protect against concurrent modifications
3013 * to that page's node of the stable tree.
3014 */
2f031c6f 3015 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
e9995ef9 3016
2f031c6f 3017 stable_node = folio_stable_node(folio);
e9995ef9 3018 if (!stable_node)
1df631ae 3019 return;
e9995ef9 3020again:
b67bfe0d 3021 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
e9995ef9 3022 struct anon_vma *anon_vma = rmap_item->anon_vma;
5beb4930 3023 struct anon_vma_chain *vmac;
e9995ef9
HD
3024 struct vm_area_struct *vma;
3025
ad12695f 3026 cond_resched();
6d4675e6
MK
3027 if (!anon_vma_trylock_read(anon_vma)) {
3028 if (rwc->try_lock) {
3029 rwc->contended = true;
3030 return;
3031 }
3032 anon_vma_lock_read(anon_vma);
3033 }
bf181b9f
ML
3034 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
3035 0, ULONG_MAX) {
1105a2fc
JH
3036 unsigned long addr;
3037
ad12695f 3038 cond_resched();
5beb4930 3039 vma = vmac->vma;
1105a2fc
JH
3040
3041 /* Ignore the stable/unstable/sqnr flags */
cd7fae26 3042 addr = rmap_item->address & PAGE_MASK;
1105a2fc
JH
3043
3044 if (addr < vma->vm_start || addr >= vma->vm_end)
e9995ef9
HD
3045 continue;
3046 /*
3047 * Initially we examine only the vma which covers this
3048 * rmap_item; but later, if there is still work to do,
3049 * we examine covering vmas in other mms: in case they
3050 * were forked from the original since ksmd passed.
3051 */
3052 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
3053 continue;
3054
0dd1c7bb
JK
3055 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
3056 continue;
3057
2f031c6f 3058 if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
b6b19f25 3059 anon_vma_unlock_read(anon_vma);
1df631ae 3060 return;
e9995ef9 3061 }
2f031c6f 3062 if (rwc->done && rwc->done(folio)) {
0dd1c7bb 3063 anon_vma_unlock_read(anon_vma);
1df631ae 3064 return;
0dd1c7bb 3065 }
e9995ef9 3066 }
b6b19f25 3067 anon_vma_unlock_read(anon_vma);
e9995ef9
HD
3068 }
3069 if (!search_new_forks++)
3070 goto again;
e9995ef9
HD
3071}
3072
4248d008
LX
3073#ifdef CONFIG_MEMORY_FAILURE
3074/*
3075 * Collect processes when the error hit an ksm page.
3076 */
68158bfa 3077void collect_procs_ksm(const struct folio *folio, const struct page *page,
b650e1d2 3078 struct list_head *to_kill, int force_early)
4248d008
LX
3079{
3080 struct ksm_stable_node *stable_node;
3081 struct ksm_rmap_item *rmap_item;
4248d008
LX
3082 struct vm_area_struct *vma;
3083 struct task_struct *tsk;
3084
3085 stable_node = folio_stable_node(folio);
3086 if (!stable_node)
3087 return;
3088 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
3089 struct anon_vma *av = rmap_item->anon_vma;
3090
3091 anon_vma_lock_read(av);
d256d1cd 3092 rcu_read_lock();
4248d008
LX
3093 for_each_process(tsk) {
3094 struct anon_vma_chain *vmac;
3095 unsigned long addr;
3096 struct task_struct *t =
3097 task_early_kill(tsk, force_early);
3098 if (!t)
3099 continue;
3100 anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0,
3101 ULONG_MAX)
3102 {
3103 vma = vmac->vma;
3104 if (vma->vm_mm == t->mm) {
3105 addr = rmap_item->address & PAGE_MASK;
3106 add_to_kill_ksm(t, page, vma, to_kill,
3107 addr);
3108 }
3109 }
3110 }
d256d1cd 3111 rcu_read_unlock();
4248d008
LX
3112 anon_vma_unlock_read(av);
3113 }
3114}
3115#endif
3116
52629506 3117#ifdef CONFIG_MIGRATION
19138349 3118void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
e9995ef9 3119{
21fbd591 3120 struct ksm_stable_node *stable_node;
e9995ef9 3121
19138349
MWO
3122 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3123 VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio);
3124 VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio);
e9995ef9 3125
19138349 3126 stable_node = folio_stable_node(folio);
e9995ef9 3127 if (stable_node) {
19138349
MWO
3128 VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
3129 stable_node->kpfn = folio_pfn(newfolio);
c8d6553b 3130 /*
19138349 3131 * newfolio->mapping was set in advance; now we need smp_wmb()
c8d6553b 3132 * to make sure that the new stable_node->kpfn is visible
79899cce 3133 * to ksm_get_folio() before it can see that folio->mapping
32f51ead 3134 * has gone stale (or that the swapcache flag has been cleared).
c8d6553b
HD
3135 */
3136 smp_wmb();
b8b0ff24 3137 folio_set_stable_node(folio, NULL);
e9995ef9
HD
3138 }
3139}
3140#endif /* CONFIG_MIGRATION */
3141
62b61f61 3142#ifdef CONFIG_MEMORY_HOTREMOVE
ef4d43a8
HD
3143static void wait_while_offlining(void)
3144{
3145 while (ksm_run & KSM_RUN_OFFLINE) {
3146 mutex_unlock(&ksm_thread_mutex);
3147 wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
74316201 3148 TASK_UNINTERRUPTIBLE);
ef4d43a8
HD
3149 mutex_lock(&ksm_thread_mutex);
3150 }
3151}
3152
21fbd591 3153static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
2c653d0e
AA
3154 unsigned long start_pfn,
3155 unsigned long end_pfn)
3156{
3157 if (stable_node->kpfn >= start_pfn &&
3158 stable_node->kpfn < end_pfn) {
3159 /*
79899cce 3160 * Don't ksm_get_folio, page has already gone:
2c653d0e
AA
3161 * which is why we keep kpfn instead of page*
3162 */
3163 remove_node_from_stable_tree(stable_node);
3164 return true;
3165 }
3166 return false;
3167}
3168
21fbd591 3169static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node,
2c653d0e
AA
3170 unsigned long start_pfn,
3171 unsigned long end_pfn,
3172 struct rb_root *root)
3173{
21fbd591 3174 struct ksm_stable_node *dup;
2c653d0e
AA
3175 struct hlist_node *hlist_safe;
3176
3177 if (!is_stable_node_chain(stable_node)) {
3178 VM_BUG_ON(is_stable_node_dup(stable_node));
3179 return stable_node_dup_remove_range(stable_node, start_pfn,
3180 end_pfn);
3181 }
3182
3183 hlist_for_each_entry_safe(dup, hlist_safe,
3184 &stable_node->hlist, hlist_dup) {
3185 VM_BUG_ON(!is_stable_node_dup(dup));
3186 stable_node_dup_remove_range(dup, start_pfn, end_pfn);
3187 }
3188 if (hlist_empty(&stable_node->hlist)) {
3189 free_stable_node_chain(stable_node, root);
3190 return true; /* notify caller that tree was rebalanced */
3191 } else
3192 return false;
3193}
3194
ee0ea59c
HD
3195static void ksm_check_stable_tree(unsigned long start_pfn,
3196 unsigned long end_pfn)
62b61f61 3197{
21fbd591 3198 struct ksm_stable_node *stable_node, *next;
62b61f61 3199 struct rb_node *node;
90bd6fd3 3200 int nid;
62b61f61 3201
ef53d16c
HD
3202 for (nid = 0; nid < ksm_nr_node_ids; nid++) {
3203 node = rb_first(root_stable_tree + nid);
ee0ea59c 3204 while (node) {
21fbd591 3205 stable_node = rb_entry(node, struct ksm_stable_node, node);
2c653d0e
AA
3206 if (stable_node_chain_remove_range(stable_node,
3207 start_pfn, end_pfn,
3208 root_stable_tree +
3209 nid))
ef53d16c 3210 node = rb_first(root_stable_tree + nid);
2c653d0e 3211 else
ee0ea59c
HD
3212 node = rb_next(node);
3213 cond_resched();
90bd6fd3 3214 }
ee0ea59c 3215 }
03640418 3216 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
4146d2d6
HD
3217 if (stable_node->kpfn >= start_pfn &&
3218 stable_node->kpfn < end_pfn)
3219 remove_node_from_stable_tree(stable_node);
3220 cond_resched();
3221 }
62b61f61
HD
3222}
3223
3224static int ksm_memory_callback(struct notifier_block *self,
3225 unsigned long action, void *arg)
3226{
3227 struct memory_notify *mn = arg;
62b61f61
HD
3228
3229 switch (action) {
3230 case MEM_GOING_OFFLINE:
3231 /*
ef4d43a8
HD
3232 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
3233 * and remove_all_stable_nodes() while memory is going offline:
3234 * it is unsafe for them to touch the stable tree at this time.
3235 * But unmerge_ksm_pages(), rmap lookups and other entry points
3236 * which do not need the ksm_thread_mutex are all safe.
62b61f61 3237 */
ef4d43a8
HD
3238 mutex_lock(&ksm_thread_mutex);
3239 ksm_run |= KSM_RUN_OFFLINE;
3240 mutex_unlock(&ksm_thread_mutex);
62b61f61
HD
3241 break;
3242
3243 case MEM_OFFLINE:
3244 /*
3245 * Most of the work is done by page migration; but there might
3246 * be a few stable_nodes left over, still pointing to struct
ee0ea59c 3247 * pages which have been offlined: prune those from the tree,
79899cce 3248 * otherwise ksm_get_folio() might later try to access a
ee0ea59c 3249 * non-existent struct page.
62b61f61 3250 */
ee0ea59c
HD
3251 ksm_check_stable_tree(mn->start_pfn,
3252 mn->start_pfn + mn->nr_pages);
e4a9bc58 3253 fallthrough;
62b61f61 3254 case MEM_CANCEL_OFFLINE:
ef4d43a8
HD
3255 mutex_lock(&ksm_thread_mutex);
3256 ksm_run &= ~KSM_RUN_OFFLINE;
62b61f61 3257 mutex_unlock(&ksm_thread_mutex);
ef4d43a8
HD
3258
3259 smp_mb(); /* wake_up_bit advises this */
3260 wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
62b61f61
HD
3261 break;
3262 }
3263 return NOTIFY_OK;
3264}
ef4d43a8
HD
3265#else
3266static void wait_while_offlining(void)
3267{
3268}
62b61f61
HD
3269#endif /* CONFIG_MEMORY_HOTREMOVE */
3270
d21077fb 3271#ifdef CONFIG_PROC_FS
3ab76c76 3272/*
3273 * The process is mergeable only if any VMA is currently
3274 * applicable to KSM.
3275 *
3276 * The mmap lock must be held in read mode.
3277 */
3278bool ksm_process_mergeable(struct mm_struct *mm)
3279{
3280 struct vm_area_struct *vma;
3281
3282 mmap_assert_locked(mm);
3283 VMA_ITERATOR(vmi, mm, 0);
3284 for_each_vma(vmi, vma)
3285 if (vma->vm_flags & VM_MERGEABLE)
3286 return true;
3287
3288 return false;
3289}
3290
d21077fb
SR
3291long ksm_process_profit(struct mm_struct *mm)
3292{
c2dc78b8 3293 return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
d21077fb
SR
3294 mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
3295}
3296#endif /* CONFIG_PROC_FS */
3297
2ffd8679
HD
3298#ifdef CONFIG_SYSFS
3299/*
3300 * This all compiles without CONFIG_SYSFS, but is a waste of space.
3301 */
3302
31dbd01f
IE
3303#define KSM_ATTR_RO(_name) \
3304 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3305#define KSM_ATTR(_name) \
1bad2e5c 3306 static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
31dbd01f
IE
3307
3308static ssize_t sleep_millisecs_show(struct kobject *kobj,
3309 struct kobj_attribute *attr, char *buf)
3310{
ae7a927d 3311 return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs);
31dbd01f
IE
3312}
3313
3314static ssize_t sleep_millisecs_store(struct kobject *kobj,
3315 struct kobj_attribute *attr,
3316 const char *buf, size_t count)
3317{
dfefd226 3318 unsigned int msecs;
31dbd01f
IE
3319 int err;
3320
dfefd226
AD
3321 err = kstrtouint(buf, 10, &msecs);
3322 if (err)
31dbd01f
IE
3323 return -EINVAL;
3324
3325 ksm_thread_sleep_millisecs = msecs;
fcf9a0ef 3326 wake_up_interruptible(&ksm_iter_wait);
31dbd01f
IE
3327
3328 return count;
3329}
3330KSM_ATTR(sleep_millisecs);
3331
3332static ssize_t pages_to_scan_show(struct kobject *kobj,
3333 struct kobj_attribute *attr, char *buf)
3334{
ae7a927d 3335 return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan);
31dbd01f
IE
3336}
3337
3338static ssize_t pages_to_scan_store(struct kobject *kobj,
3339 struct kobj_attribute *attr,
3340 const char *buf, size_t count)
3341{
dfefd226 3342 unsigned int nr_pages;
31dbd01f 3343 int err;
31dbd01f 3344
4e5fa4f5
SR
3345 if (ksm_advisor != KSM_ADVISOR_NONE)
3346 return -EINVAL;
3347
dfefd226
AD
3348 err = kstrtouint(buf, 10, &nr_pages);
3349 if (err)
31dbd01f
IE
3350 return -EINVAL;
3351
3352 ksm_thread_pages_to_scan = nr_pages;
3353
3354 return count;
3355}
3356KSM_ATTR(pages_to_scan);
3357
3358static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
3359 char *buf)
3360{
ae7a927d 3361 return sysfs_emit(buf, "%lu\n", ksm_run);
31dbd01f
IE
3362}
3363
3364static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
3365 const char *buf, size_t count)
3366{
dfefd226 3367 unsigned int flags;
31dbd01f 3368 int err;
31dbd01f 3369
dfefd226
AD
3370 err = kstrtouint(buf, 10, &flags);
3371 if (err)
31dbd01f
IE
3372 return -EINVAL;
3373 if (flags > KSM_RUN_UNMERGE)
3374 return -EINVAL;
3375
3376 /*
3377 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
3378 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
d0f209f6
HD
3379 * breaking COW to free the pages_shared (but leaves mm_slots
3380 * on the list for when ksmd may be set running again).
31dbd01f
IE
3381 */
3382
3383 mutex_lock(&ksm_thread_mutex);
ef4d43a8 3384 wait_while_offlining();
31dbd01f
IE
3385 if (ksm_run != flags) {
3386 ksm_run = flags;
d952b791 3387 if (flags & KSM_RUN_UNMERGE) {
e1e12d2f 3388 set_current_oom_origin();
d952b791 3389 err = unmerge_and_remove_all_rmap_items();
e1e12d2f 3390 clear_current_oom_origin();
d952b791
HD
3391 if (err) {
3392 ksm_run = KSM_RUN_STOP;
3393 count = err;
3394 }
3395 }
31dbd01f
IE
3396 }
3397 mutex_unlock(&ksm_thread_mutex);
3398
3399 if (flags & KSM_RUN_MERGE)
3400 wake_up_interruptible(&ksm_thread_wait);
3401
3402 return count;
3403}
3404KSM_ATTR(run);
3405
90bd6fd3
PH
3406#ifdef CONFIG_NUMA
3407static ssize_t merge_across_nodes_show(struct kobject *kobj,
ae7a927d 3408 struct kobj_attribute *attr, char *buf)
90bd6fd3 3409{
ae7a927d 3410 return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes);
90bd6fd3
PH
3411}
3412
3413static ssize_t merge_across_nodes_store(struct kobject *kobj,
3414 struct kobj_attribute *attr,
3415 const char *buf, size_t count)
3416{
3417 int err;
3418 unsigned long knob;
3419
3420 err = kstrtoul(buf, 10, &knob);
3421 if (err)
3422 return err;
3423 if (knob > 1)
3424 return -EINVAL;
3425
3426 mutex_lock(&ksm_thread_mutex);
ef4d43a8 3427 wait_while_offlining();
90bd6fd3 3428 if (ksm_merge_across_nodes != knob) {
cbf86cfe 3429 if (ksm_pages_shared || remove_all_stable_nodes())
90bd6fd3 3430 err = -EBUSY;
ef53d16c
HD
3431 else if (root_stable_tree == one_stable_tree) {
3432 struct rb_root *buf;
3433 /*
3434 * This is the first time that we switch away from the
3435 * default of merging across nodes: must now allocate
3436 * a buffer to hold as many roots as may be needed.
3437 * Allocate stable and unstable together:
3438 * MAXSMP NODES_SHIFT 10 will use 16kB.
3439 */
bafe1e14
JP
3440 buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
3441 GFP_KERNEL);
ef53d16c
HD
3442 /* Let us assume that RB_ROOT is NULL is zero */
3443 if (!buf)
3444 err = -ENOMEM;
3445 else {
3446 root_stable_tree = buf;
3447 root_unstable_tree = buf + nr_node_ids;
3448 /* Stable tree is empty but not the unstable */
3449 root_unstable_tree[0] = one_unstable_tree[0];
3450 }
3451 }
3452 if (!err) {
90bd6fd3 3453 ksm_merge_across_nodes = knob;
ef53d16c
HD
3454 ksm_nr_node_ids = knob ? 1 : nr_node_ids;
3455 }
90bd6fd3
PH
3456 }
3457 mutex_unlock(&ksm_thread_mutex);
3458
3459 return err ? err : count;
3460}
3461KSM_ATTR(merge_across_nodes);
3462#endif
3463
e86c59b1 3464static ssize_t use_zero_pages_show(struct kobject *kobj,
ae7a927d 3465 struct kobj_attribute *attr, char *buf)
e86c59b1 3466{
ae7a927d 3467 return sysfs_emit(buf, "%u\n", ksm_use_zero_pages);
e86c59b1
CI
3468}
3469static ssize_t use_zero_pages_store(struct kobject *kobj,
3470 struct kobj_attribute *attr,
3471 const char *buf, size_t count)
3472{
3473 int err;
3474 bool value;
3475
3476 err = kstrtobool(buf, &value);
3477 if (err)
3478 return -EINVAL;
3479
3480 ksm_use_zero_pages = value;
3481
3482 return count;
3483}
3484KSM_ATTR(use_zero_pages);
3485
2c653d0e
AA
3486static ssize_t max_page_sharing_show(struct kobject *kobj,
3487 struct kobj_attribute *attr, char *buf)
3488{
ae7a927d 3489 return sysfs_emit(buf, "%u\n", ksm_max_page_sharing);
2c653d0e
AA
3490}
3491
3492static ssize_t max_page_sharing_store(struct kobject *kobj,
3493 struct kobj_attribute *attr,
3494 const char *buf, size_t count)
3495{
3496 int err;
3497 int knob;
3498
3499 err = kstrtoint(buf, 10, &knob);
3500 if (err)
3501 return err;
3502 /*
3503 * When a KSM page is created it is shared by 2 mappings. This
3504 * being a signed comparison, it implicitly verifies it's not
3505 * negative.
3506 */
3507 if (knob < 2)
3508 return -EINVAL;
3509
3510 if (READ_ONCE(ksm_max_page_sharing) == knob)
3511 return count;
3512
3513 mutex_lock(&ksm_thread_mutex);
3514 wait_while_offlining();
3515 if (ksm_max_page_sharing != knob) {
3516 if (ksm_pages_shared || remove_all_stable_nodes())
3517 err = -EBUSY;
3518 else
3519 ksm_max_page_sharing = knob;
3520 }
3521 mutex_unlock(&ksm_thread_mutex);
3522
3523 return err ? err : count;
3524}
3525KSM_ATTR(max_page_sharing);
3526
b348b5fe
SR
3527static ssize_t pages_scanned_show(struct kobject *kobj,
3528 struct kobj_attribute *attr, char *buf)
3529{
3530 return sysfs_emit(buf, "%lu\n", ksm_pages_scanned);
3531}
3532KSM_ATTR_RO(pages_scanned);
3533
b4028260
HD
3534static ssize_t pages_shared_show(struct kobject *kobj,
3535 struct kobj_attribute *attr, char *buf)
3536{
ae7a927d 3537 return sysfs_emit(buf, "%lu\n", ksm_pages_shared);
b4028260
HD
3538}
3539KSM_ATTR_RO(pages_shared);
3540
3541static ssize_t pages_sharing_show(struct kobject *kobj,
3542 struct kobj_attribute *attr, char *buf)
3543{
ae7a927d 3544 return sysfs_emit(buf, "%lu\n", ksm_pages_sharing);
b4028260
HD
3545}
3546KSM_ATTR_RO(pages_sharing);
3547
473b0ce4
HD
3548static ssize_t pages_unshared_show(struct kobject *kobj,
3549 struct kobj_attribute *attr, char *buf)
3550{
ae7a927d 3551 return sysfs_emit(buf, "%lu\n", ksm_pages_unshared);
473b0ce4
HD
3552}
3553KSM_ATTR_RO(pages_unshared);
3554
3555static ssize_t pages_volatile_show(struct kobject *kobj,
3556 struct kobj_attribute *attr, char *buf)
3557{
3558 long ksm_pages_volatile;
3559
3560 ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
3561 - ksm_pages_sharing - ksm_pages_unshared;
3562 /*
3563 * It was not worth any locking to calculate that statistic,
3564 * but it might therefore sometimes be negative: conceal that.
3565 */
3566 if (ksm_pages_volatile < 0)
3567 ksm_pages_volatile = 0;
ae7a927d 3568 return sysfs_emit(buf, "%ld\n", ksm_pages_volatile);
473b0ce4
HD
3569}
3570KSM_ATTR_RO(pages_volatile);
3571
e5a68991
SR
3572static ssize_t pages_skipped_show(struct kobject *kobj,
3573 struct kobj_attribute *attr, char *buf)
3574{
3575 return sysfs_emit(buf, "%lu\n", ksm_pages_skipped);
3576}
3577KSM_ATTR_RO(pages_skipped);
3578
e2942062 3579static ssize_t ksm_zero_pages_show(struct kobject *kobj,
3580 struct kobj_attribute *attr, char *buf)
3581{
c2dc78b8 3582 return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
e2942062 3583}
3584KSM_ATTR_RO(ksm_zero_pages);
3585
d21077fb
SR
3586static ssize_t general_profit_show(struct kobject *kobj,
3587 struct kobj_attribute *attr, char *buf)
3588{
3589 long general_profit;
3590
c2dc78b8 3591 general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
d21077fb
SR
3592 ksm_rmap_items * sizeof(struct ksm_rmap_item);
3593
3594 return sysfs_emit(buf, "%ld\n", general_profit);
3595}
3596KSM_ATTR_RO(general_profit);
3597
2c653d0e
AA
3598static ssize_t stable_node_dups_show(struct kobject *kobj,
3599 struct kobj_attribute *attr, char *buf)
3600{
ae7a927d 3601 return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups);
2c653d0e
AA
3602}
3603KSM_ATTR_RO(stable_node_dups);
3604
3605static ssize_t stable_node_chains_show(struct kobject *kobj,
3606 struct kobj_attribute *attr, char *buf)
3607{
ae7a927d 3608 return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains);
2c653d0e
AA
3609}
3610KSM_ATTR_RO(stable_node_chains);
3611
3612static ssize_t
3613stable_node_chains_prune_millisecs_show(struct kobject *kobj,
3614 struct kobj_attribute *attr,
3615 char *buf)
3616{
ae7a927d 3617 return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
2c653d0e
AA
3618}
3619
3620static ssize_t
3621stable_node_chains_prune_millisecs_store(struct kobject *kobj,
3622 struct kobj_attribute *attr,
3623 const char *buf, size_t count)
3624{
584ff0df 3625 unsigned int msecs;
2c653d0e
AA
3626 int err;
3627
584ff0df
ZB
3628 err = kstrtouint(buf, 10, &msecs);
3629 if (err)
2c653d0e
AA
3630 return -EINVAL;
3631
3632 ksm_stable_node_chains_prune_millisecs = msecs;
3633
3634 return count;
3635}
3636KSM_ATTR(stable_node_chains_prune_millisecs);
3637
473b0ce4
HD
3638static ssize_t full_scans_show(struct kobject *kobj,
3639 struct kobj_attribute *attr, char *buf)
3640{
ae7a927d 3641 return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr);
473b0ce4
HD
3642}
3643KSM_ATTR_RO(full_scans);
3644
5e924ff5
SR
3645static ssize_t smart_scan_show(struct kobject *kobj,
3646 struct kobj_attribute *attr, char *buf)
3647{
3648 return sysfs_emit(buf, "%u\n", ksm_smart_scan);
3649}
3650
3651static ssize_t smart_scan_store(struct kobject *kobj,
3652 struct kobj_attribute *attr,
3653 const char *buf, size_t count)
3654{
3655 int err;
3656 bool value;
3657
3658 err = kstrtobool(buf, &value);
3659 if (err)
3660 return -EINVAL;
3661
3662 ksm_smart_scan = value;
3663 return count;
3664}
3665KSM_ATTR(smart_scan);
3666
66790e9a
SR
3667static ssize_t advisor_mode_show(struct kobject *kobj,
3668 struct kobj_attribute *attr, char *buf)
3669{
3670 const char *output;
3671
3672 if (ksm_advisor == KSM_ADVISOR_NONE)
3673 output = "[none] scan-time";
3674 else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
3675 output = "none [scan-time]";
3676
3677 return sysfs_emit(buf, "%s\n", output);
3678}
3679
3680static ssize_t advisor_mode_store(struct kobject *kobj,
3681 struct kobj_attribute *attr, const char *buf,
3682 size_t count)
3683{
3684 enum ksm_advisor_type curr_advisor = ksm_advisor;
3685
3686 if (sysfs_streq("scan-time", buf))
3687 ksm_advisor = KSM_ADVISOR_SCAN_TIME;
3688 else if (sysfs_streq("none", buf))
3689 ksm_advisor = KSM_ADVISOR_NONE;
3690 else
3691 return -EINVAL;
3692
3693 /* Set advisor default values */
3694 if (curr_advisor != ksm_advisor)
3695 set_advisor_defaults();
3696
3697 return count;
3698}
3699KSM_ATTR(advisor_mode);
3700
3701static ssize_t advisor_max_cpu_show(struct kobject *kobj,
3702 struct kobj_attribute *attr, char *buf)
3703{
3704 return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu);
3705}
3706
3707static ssize_t advisor_max_cpu_store(struct kobject *kobj,
3708 struct kobj_attribute *attr,
3709 const char *buf, size_t count)
3710{
3711 int err;
3712 unsigned long value;
3713
3714 err = kstrtoul(buf, 10, &value);
3715 if (err)
3716 return -EINVAL;
3717
3718 ksm_advisor_max_cpu = value;
3719 return count;
3720}
3721KSM_ATTR(advisor_max_cpu);
3722
3723static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj,
3724 struct kobj_attribute *attr, char *buf)
3725{
3726 return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan);
3727}
3728
3729static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj,
3730 struct kobj_attribute *attr,
3731 const char *buf, size_t count)
3732{
3733 int err;
3734 unsigned long value;
3735
3736 err = kstrtoul(buf, 10, &value);
3737 if (err)
3738 return -EINVAL;
3739
3740 ksm_advisor_min_pages_to_scan = value;
3741 return count;
3742}
3743KSM_ATTR(advisor_min_pages_to_scan);
3744
3745static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj,
3746 struct kobj_attribute *attr, char *buf)
3747{
3748 return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan);
3749}
3750
3751static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj,
3752 struct kobj_attribute *attr,
3753 const char *buf, size_t count)
3754{
3755 int err;
3756 unsigned long value;
3757
3758 err = kstrtoul(buf, 10, &value);
3759 if (err)
3760 return -EINVAL;
3761
3762 ksm_advisor_max_pages_to_scan = value;
3763 return count;
3764}
3765KSM_ATTR(advisor_max_pages_to_scan);
3766
3767static ssize_t advisor_target_scan_time_show(struct kobject *kobj,
3768 struct kobj_attribute *attr, char *buf)
3769{
3770 return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time);
3771}
3772
3773static ssize_t advisor_target_scan_time_store(struct kobject *kobj,
3774 struct kobj_attribute *attr,
3775 const char *buf, size_t count)
3776{
3777 int err;
3778 unsigned long value;
3779
3780 err = kstrtoul(buf, 10, &value);
3781 if (err)
3782 return -EINVAL;
3783 if (value < 1)
3784 return -EINVAL;
3785
3786 ksm_advisor_target_scan_time = value;
3787 return count;
3788}
3789KSM_ATTR(advisor_target_scan_time);
3790
31dbd01f
IE
3791static struct attribute *ksm_attrs[] = {
3792 &sleep_millisecs_attr.attr,
3793 &pages_to_scan_attr.attr,
3794 &run_attr.attr,
b348b5fe 3795 &pages_scanned_attr.attr,
b4028260
HD
3796 &pages_shared_attr.attr,
3797 &pages_sharing_attr.attr,
473b0ce4
HD
3798 &pages_unshared_attr.attr,
3799 &pages_volatile_attr.attr,
e5a68991 3800 &pages_skipped_attr.attr,
e2942062 3801 &ksm_zero_pages_attr.attr,
473b0ce4 3802 &full_scans_attr.attr,
90bd6fd3
PH
3803#ifdef CONFIG_NUMA
3804 &merge_across_nodes_attr.attr,
3805#endif
2c653d0e
AA
3806 &max_page_sharing_attr.attr,
3807 &stable_node_chains_attr.attr,
3808 &stable_node_dups_attr.attr,
3809 &stable_node_chains_prune_millisecs_attr.attr,
e86c59b1 3810 &use_zero_pages_attr.attr,
d21077fb 3811 &general_profit_attr.attr,
5e924ff5 3812 &smart_scan_attr.attr,
66790e9a
SR
3813 &advisor_mode_attr.attr,
3814 &advisor_max_cpu_attr.attr,
3815 &advisor_min_pages_to_scan_attr.attr,
3816 &advisor_max_pages_to_scan_attr.attr,
3817 &advisor_target_scan_time_attr.attr,
31dbd01f
IE
3818 NULL,
3819};
3820
f907c26a 3821static const struct attribute_group ksm_attr_group = {
31dbd01f
IE
3822 .attrs = ksm_attrs,
3823 .name = "ksm",
3824};
2ffd8679 3825#endif /* CONFIG_SYSFS */
31dbd01f
IE
3826
3827static int __init ksm_init(void)
3828{
3829 struct task_struct *ksm_thread;
3830 int err;
3831
e86c59b1
CI
3832 /* The correct value depends on page size and endianness */
3833 zero_checksum = calc_checksum(ZERO_PAGE(0));
3834 /* Default to false for backwards compatibility */
3835 ksm_use_zero_pages = false;
3836
31dbd01f
IE
3837 err = ksm_slab_init();
3838 if (err)
3839 goto out;
3840
31dbd01f
IE
3841 ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
3842 if (IS_ERR(ksm_thread)) {
25acde31 3843 pr_err("ksm: creating kthread failed\n");
31dbd01f 3844 err = PTR_ERR(ksm_thread);
d9f8984c 3845 goto out_free;
31dbd01f
IE
3846 }
3847
2ffd8679 3848#ifdef CONFIG_SYSFS
31dbd01f
IE
3849 err = sysfs_create_group(mm_kobj, &ksm_attr_group);
3850 if (err) {
25acde31 3851 pr_err("ksm: register sysfs failed\n");
2ffd8679 3852 kthread_stop(ksm_thread);
d9f8984c 3853 goto out_free;
31dbd01f 3854 }
c73602ad
HD
3855#else
3856 ksm_run = KSM_RUN_MERGE; /* no way for user to start it */
3857
2ffd8679 3858#endif /* CONFIG_SYSFS */
31dbd01f 3859
62b61f61 3860#ifdef CONFIG_MEMORY_HOTREMOVE
ef4d43a8 3861 /* There is no significance to this priority 100 */
1eeaa4fd 3862 hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI);
62b61f61 3863#endif
31dbd01f
IE
3864 return 0;
3865
d9f8984c 3866out_free:
31dbd01f
IE
3867 ksm_slab_free();
3868out:
3869 return err;
f8af4da3 3870}
a64fb3cd 3871subsys_initcall(ksm_init);