# SPDX-License-Identifier: GPL-2.0-only

menu "Memory Management options"

#
# For some reason microblaze and nios2 hard code SWAP=n. Hopefully we can
# add proper SWAP support to them, in which case this can be removed.
#
config ARCH_NO_SWAP
	bool

config ZPOOL
	bool

menuconfig SWAP
	bool "Support for paging of anonymous memory (swap)"
	depends on MMU && BLOCK && !ARCH_NO_SWAP
	default y
	help
	  This option allows you to choose whether you want to have support
	  for so-called swap devices or swap files in your kernel that are
	  used to provide more virtual memory than the actual RAM present
	  in your computer. If unsure, say Y.

config ZSWAP
	bool "Compressed cache for swap pages"
	depends on SWAP
	select CRYPTO
	select ZPOOL
	help
	  A lightweight compressed cache for swap pages. It takes
	  pages that are in the process of being swapped out and attempts to
	  compress them into a dynamically allocated RAM-based memory pool.
	  This can result in a significant I/O reduction on the swap device
	  and, in the case where decompressing from RAM is faster than swap
	  device reads, can also improve workload performance.

config ZSWAP_DEFAULT_ON
	bool "Enable the compressed cache for swap pages by default"
	depends on ZSWAP
	help
	  If selected, the compressed cache for swap pages will be enabled
	  at boot, otherwise it will be disabled.

	  The selection made here can be overridden by using the kernel
	  command line 'zswap.enabled=' option.

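# Illustrative usage, not part of the upstream Kconfig: whatever default is
# chosen above, zswap can be toggled on the kernel command line or, at
# runtime, through its module parameter, e.g.:
#
#   zswap.enabled=1                                  (boot parameter)
#   echo 1 > /sys/module/zswap/parameters/enabled    (at runtime)
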
config ZSWAP_SHRINKER_DEFAULT_ON
	bool "Shrink the zswap pool on memory pressure"
	depends on ZSWAP
	default n
	help
	  If selected, the zswap shrinker will be enabled, and the pages
	  stored in the zswap pool will become available for reclaim (i.e.,
	  written back to the backing swap device) on memory pressure.

	  This means that zswap writeback could happen even if the pool is
	  not yet full, or the cgroup zswap limit has not been reached,
	  reducing the chance that cold pages will reside in the zswap pool
	  and consume memory indefinitely.

choice
	prompt "Default compressor"
	depends on ZSWAP
	default ZSWAP_COMPRESSOR_DEFAULT_LZO
	help
	  Selects the default compression algorithm for the compressed cache
	  for swap pages.

	  For an overview of what kind of performance can be expected from
	  a particular compression algorithm, please refer to the benchmarks
	  available at the following LWN page:
	  https://lwn.net/Articles/751795/

	  If in doubt, select 'LZO'.

	  The selection made here can be overridden by using the kernel
	  command line 'zswap.compressor=' option.

config ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
	bool "Deflate"
	select CRYPTO_DEFLATE
	help
	  Use the Deflate algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_LZO
	bool "LZO"
	select CRYPTO_LZO
	help
	  Use the LZO algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_842
	bool "842"
	select CRYPTO_842
	help
	  Use the 842 algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_LZ4
	bool "LZ4"
	select CRYPTO_LZ4
	help
	  Use the LZ4 algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
	bool "LZ4HC"
	select CRYPTO_LZ4HC
	help
	  Use the LZ4HC algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_ZSTD
	bool "zstd"
	select CRYPTO_ZSTD
	help
	  Use the zstd algorithm as the default compression algorithm.
endchoice

config ZSWAP_COMPRESSOR_DEFAULT
	string
	depends on ZSWAP
	default "deflate" if ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
	default "lzo" if ZSWAP_COMPRESSOR_DEFAULT_LZO
	default "842" if ZSWAP_COMPRESSOR_DEFAULT_842
	default "lz4" if ZSWAP_COMPRESSOR_DEFAULT_LZ4
	default "lz4hc" if ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
	default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
	default ""

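# Illustrative usage, not part of the upstream Kconfig: the compile-time
# default compressor chosen above can be overridden without rebuilding,
# e.g. (assuming the corresponding crypto algorithm is available):
#
#   zswap.compressor=zstd                                (boot parameter)
#   echo zstd > /sys/module/zswap/parameters/compressor  (at runtime)
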
choice
	prompt "Default allocator"
	depends on ZSWAP
	default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if MMU
	default ZSWAP_ZPOOL_DEFAULT_ZBUD
	help
	  Selects the default allocator for the compressed cache for
	  swap pages.
	  The default is 'zbud' for compatibility; however, please read
	  the description of each of the allocators below before making
	  a choice.

	  The selection made here can be overridden by using the kernel
	  command line 'zswap.zpool=' option.

config ZSWAP_ZPOOL_DEFAULT_ZBUD
	bool "zbud"
	select ZBUD
	help
	  Use the zbud allocator as the default allocator.

config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
	bool "z3fold"
	select Z3FOLD
	help
	  Use the z3fold allocator as the default allocator.

config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
	bool "zsmalloc"
	select ZSMALLOC
	help
	  Use the zsmalloc allocator as the default allocator.
endchoice

config ZSWAP_ZPOOL_DEFAULT
	string
	depends on ZSWAP
	default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
	default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD
	default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
	default ""

config ZBUD
	tristate "2:1 compression allocator (zbud)"
	depends on ZSWAP
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to two compressed pages per physical
	  page. While this design limits storage density, it has simple and
	  deterministic reclaim properties that make it preferable to a higher
	  density approach when reclaim will be used.

config Z3FOLD
	tristate "3:1 compression allocator (z3fold)"
	depends on ZSWAP
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to three compressed pages per physical
	  page. It is a ZBUD derivative, so the simplicity and determinism
	  are still there.

config ZSMALLOC
	tristate
	prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
	depends on MMU
	help
	  zsmalloc is a slab-based memory allocator designed to store
	  pages of various compression levels efficiently. It achieves
	  the highest storage density with the least amount of fragmentation.

config ZSMALLOC_STAT
	bool "Export zsmalloc statistics"
	depends on ZSMALLOC
	select DEBUG_FS
	help
	  This option enables code in zsmalloc to collect various
	  statistics about what's happening in zsmalloc and exports that
	  information to userspace via debugfs.
	  If unsure, say N.

config ZSMALLOC_CHAIN_SIZE
	int "Maximum number of physical pages per zspage"
	default 8
	range 4 16
	depends on ZSMALLOC
	help
	  This option sets the upper limit on the number of physical pages
	  that a zsmalloc page (zspage) can consist of. The optimal zspage
	  chain size is calculated for each size class during the
	  initialization of the pool.

	  Changing this option can alter the characteristics of size classes,
	  such as the number of pages per zspage and the number of objects
	  per zspage. This can also result in different configurations of
	  the pool, as zsmalloc merges size classes with similar
	  characteristics.

	  For more information, see the zsmalloc documentation.

menu "Slab allocator options"

config SLUB
	def_bool y

config SLUB_TINY
	bool "Configure for minimal memory footprint"
	depends on EXPERT
	select SLAB_MERGE_DEFAULT
	help
	  Configures the slab allocator in a way to achieve minimal memory
	  footprint, sacrificing scalability, debugging and other features.
	  This is intended only for the smallest systems that previously used
	  the SLOB allocator and is not recommended for systems with more
	  than 16MB RAM.

	  If unsure, say N.

config SLAB_MERGE_DEFAULT
	bool "Allow slab caches to be merged"
	default y
	help
	  For reduced kernel memory fragmentation, slab caches can be
	  merged when they share the same size and other characteristics.
	  This carries a risk of kernel heap overflows being able to
	  overwrite objects from merged caches (and more easily control
	  cache layout), which makes such heap attacks easier to exploit
	  by attackers. By keeping caches unmerged, these kinds of exploits
	  can usually only damage objects in the same cache. To disable
	  merging at runtime, "slab_nomerge" can be passed on the kernel
	  command line.

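# Illustrative usage, not part of the upstream Kconfig: as noted above,
# merging can be disabled for a single boot regardless of this default by
# appending the following to the kernel command line:
#
#   slab_nomerge
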
config SLAB_FREELIST_RANDOM
	bool "Randomize slab freelist"
	depends on !SLUB_TINY
	help
	  Randomizes the freelist order used when creating new pages. This
	  security feature reduces the predictability of the kernel slab
	  allocator against heap overflows.

config SLAB_FREELIST_HARDENED
	bool "Harden slab freelist metadata"
	depends on !SLUB_TINY
	help
	  Many kernel heap attacks try to target slab cache metadata and
	  other infrastructure. This option makes minor performance
	  sacrifices to harden the kernel slab allocator against common
	  freelist exploit methods.

config SLUB_STATS
	default n
	bool "Enable performance statistics"
	depends on SYSFS && !SLUB_TINY
	help
	  The statistics are useful to debug slab allocation behavior in
	  order to find ways to optimize the allocator. This should never be
	  enabled for production use since keeping statistics slows down
	  the allocator by a few percentage points. The slabinfo command
	  supports the determination of the most active slabs to figure
	  out which slabs are relevant to a particular load.
	  Try running: slabinfo -DA

config SLUB_CPU_PARTIAL
	default y
	depends on SMP && !SLUB_TINY
	bool "Enable per cpu partial caches"
	help
	  Per cpu partial caches accelerate object allocation and freeing
	  that is local to a processor at the price of more indeterminism
	  in the latency of the free. On overflow, these caches will be
	  cleared, which requires the taking of locks that may cause latency
	  spikes. Typically, one would choose no for a realtime system.

config RANDOM_KMALLOC_CACHES
	default n
	depends on !SLUB_TINY
	bool "Randomize slab caches for normal kmalloc"
	help
	  A hardening feature that creates multiple copies of slab caches for
	  normal kmalloc allocation and makes kmalloc randomly pick one based
	  on code address, which makes it more difficult for attackers to
	  spray vulnerable memory objects on the heap for the purpose of
	  exploiting memory vulnerabilities.

	  Currently the number of copies is set to 16, a reasonably large
	  value that effectively diverges the memory objects allocated for
	  different subsystems or modules into different caches, at the
	  expense of a limited degree of memory and CPU overhead that depends
	  on hardware and system workload.

endmenu # Slab allocator options

config SHUFFLE_PAGE_ALLOCATOR
	bool "Page allocator randomization"
	default SLAB_FREELIST_RANDOM && ACPI_NUMA
	help
	  Randomization of the page allocator improves the average
	  utilization of a direct-mapped memory-side-cache. See section
	  5.2.27 Heterogeneous Memory Attribute Table (HMAT) in the ACPI
	  6.2a specification for an example of how a platform advertises
	  the presence of a memory-side-cache. There are also incidental
	  security benefits as it reduces the predictability of page
	  allocations to complement SLAB_FREELIST_RANDOM, but the
	  default granularity of shuffling on MAX_PAGE_ORDER (i.e. 10th
	  order) pages is selected based on cache utilization benefits
	  on x86.

	  While the randomization improves cache utilization, it may
	  negatively impact workloads on platforms without a cache. For
	  this reason, by default, the randomization is not enabled even
	  if SHUFFLE_PAGE_ALLOCATOR=y. The randomization may be force enabled
	  with the 'page_alloc.shuffle' kernel command line parameter.

	  Say Y if unsure.

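# Illustrative usage, not part of the upstream Kconfig: with
# SHUFFLE_PAGE_ALLOCATOR=y, shuffling can be force enabled from the boot
# command line as described above, e.g.:
#
#   page_alloc.shuffle=1
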
config COMPAT_BRK
	bool "Disable heap randomization"
	default y
	help
	  Randomizing heap placement makes heap exploits harder, but it
	  also breaks ancient binaries (including anything libc5 based).
	  This option changes the bootup default to heap randomization
	  disabled, and can be overridden at runtime by setting
	  /proc/sys/kernel/randomize_va_space to 2.

	  On non-ancient distros (post-2000 ones) N is usually a safe choice.

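# Illustrative usage, not part of the upstream Kconfig: the boot default
# chosen here can be reverted at runtime as described above, e.g.:
#
#   echo 2 > /proc/sys/kernel/randomize_va_space
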
config MMAP_ALLOW_UNINITIALIZED
	bool "Allow mmapped anonymous memory to be uninitialized"
	depends on EXPERT && !MMU
	default n
	help
	  Normally, and according to the Linux spec, anonymous memory obtained
	  from mmap() has its contents cleared before it is passed to
	  userspace. Enabling this config option allows you to request that
	  mmap() skip that if it is given the MAP_UNINITIALIZED flag, thus
	  providing a huge performance boost. If this option is not enabled,
	  then the flag will be ignored.

	  This is taken advantage of by uClibc's malloc(), and also by
	  ELF-FDPIC binfmt's brk and stack allocator.

	  Because of the obvious security issues, this option should only be
	  enabled on embedded devices where you control what is run in
	  userspace. Since that isn't generally a problem on no-MMU systems,
	  it is normally safe to say Y here.

	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.

config SELECT_MEMORY_MODEL
	def_bool y
	depends on ARCH_SELECT_MEMORY_MODEL

choice
	prompt "Memory model"
	depends on SELECT_MEMORY_MODEL
	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
	default FLATMEM_MANUAL
	help
	  This option allows you to change some of the ways that
	  Linux manages its memory internally. Most users will
	  only have one option here selected by the architecture
	  configuration. This is normal.

config FLATMEM_MANUAL
	bool "Flat Memory"
	depends on !ARCH_SPARSEMEM_ENABLE || ARCH_FLATMEM_ENABLE
	help
	  This option is best suited for non-NUMA systems with
	  flat address space. FLATMEM is the most efficient
	  system in terms of performance and resource consumption
	  and it is the best option for smaller systems.

	  For systems that have holes in their physical address
	  spaces and for features like NUMA and memory hotplug,
	  choose "Sparse Memory".

	  If unsure, choose this option (Flat Memory) over any other.

config SPARSEMEM_MANUAL
	bool "Sparse Memory"
	depends on ARCH_SPARSEMEM_ENABLE
	help
	  This will be the only option for some systems, including
	  memory hot-plug systems. This is normal.

	  This option provides efficient support for systems with
	  holes in their physical address space and allows memory
	  hot-plug and hot-remove.

	  If unsure, choose "Flat Memory" over this option.

endchoice

config SPARSEMEM
	def_bool y
	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL

config FLATMEM
	def_bool y
	depends on !SPARSEMEM || FLATMEM_MANUAL

#
# SPARSEMEM_EXTREME (which is the default) does some bootmem
# allocations when sparse_init() is called. If this cannot
# be done on your architecture, select this option. However,
# statically allocating the mem_section[] array can potentially
# consume vast quantities of .bss, so be careful.
#
# This option will also potentially produce smaller runtime code
# with gcc 3.4 and later.
#
config SPARSEMEM_STATIC
	bool

#
# Architecture platforms which require a two level mem_section in SPARSEMEM
# must select this option. This is usually for architecture platforms with
# an extremely sparse physical address space.
#
config SPARSEMEM_EXTREME
	def_bool y
	depends on SPARSEMEM && !SPARSEMEM_STATIC

config SPARSEMEM_VMEMMAP_ENABLE
	bool

config SPARSEMEM_VMEMMAP
	bool "Sparse Memory virtual memmap"
	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
	default y
	help
	  SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
	  pfn_to_page and page_to_pfn operations. This is the most
	  efficient option when sufficient kernel resources are available.

#
# Select this config option from the architecture Kconfig if the
# architecture wants to enable the HugeTLB/dev_dax vmemmap optimization
# feature.
#
config ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
	bool

config ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
	bool

config HAVE_MEMBLOCK_PHYS_MAP
	bool

config HAVE_GUP_FAST
	depends on MMU
	bool

# Don't discard allocated memory used to track "memory" and "reserved" memblocks
# after early boot, so it can still be used to test for validity of memory.
# Also, memblocks are updated with memory hot(un)plug.
config ARCH_KEEP_MEMBLOCK
	bool

# Keep arch NUMA mapping infrastructure post-init.
config NUMA_KEEP_MEMINFO
	bool

config MEMORY_ISOLATION
	bool

# IORESOURCE_SYSTEM_RAM regions in the kernel resource tree that are marked
# IORESOURCE_EXCLUSIVE cannot be mapped to user space, for example, via
# /dev/mem.
config EXCLUSIVE_SYSTEM_RAM
	def_bool y
	depends on !DEVMEM || STRICT_DEVMEM

#
# Only to be set on architectures that have completely implemented the
# memory hotplug feature. If you are not sure, don't touch it.
#
config HAVE_BOOTMEM_INFO_NODE
	def_bool n

config ARCH_ENABLE_MEMORY_HOTPLUG
	bool

config ARCH_ENABLE_MEMORY_HOTREMOVE
	bool

# eventually, we can have this option just 'select SPARSEMEM'
menuconfig MEMORY_HOTPLUG
	bool "Memory hotplug"
	select MEMORY_ISOLATION
	depends on SPARSEMEM
	depends on ARCH_ENABLE_MEMORY_HOTPLUG
	depends on 64BIT
	select NUMA_KEEP_MEMINFO if NUMA

if MEMORY_HOTPLUG

config MEMORY_HOTPLUG_DEFAULT_ONLINE
	bool "Online the newly added memory blocks by default"
	depends on MEMORY_HOTPLUG
	help
	  This option sets the default policy for onlining hotplugged memory
	  (/sys/devices/system/memory/auto_online_blocks), which determines
	  what happens to newly added memory regions. The policy setting
	  can always be changed at runtime.
	  See Documentation/admin-guide/mm/memory-hotplug.rst for more information.

	  Say Y here if you want all hot-plugged memory blocks to appear in
	  'online' state by default.
	  Say N here if you want the default policy to keep all hot-plugged
	  memory blocks in 'offline' state.

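# Illustrative usage, not part of the upstream Kconfig: the onlining policy
# can be inspected and changed at runtime through the sysfs file named
# above, e.g.:
#
#   cat /sys/devices/system/memory/auto_online_blocks
#   echo online > /sys/devices/system/memory/auto_online_blocks
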
config MEMORY_HOTREMOVE
	bool "Allow for memory hot remove"
	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
	depends on MIGRATION

config MHP_MEMMAP_ON_MEMORY
	def_bool y
	depends on MEMORY_HOTPLUG && SPARSEMEM_VMEMMAP
	depends on ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE

endif # MEMORY_HOTPLUG

config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
	bool

# Heavily threaded applications may benefit from splitting the mm-wide
# page_table_lock, so that faults on different parts of the user address
# space can be handled with less contention: split it at this NR_CPUS.
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
# SPARC32 allocates multiple pte tables within a single page, and therefore
# a per-page lock leads to problems when multiple tables need to be locked
# at the same time (e.g. copy_page_range()).
# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
config SPLIT_PTLOCK_CPUS
	int
	default "999999" if !MMU
	default "999999" if ARM && !CPU_CACHE_VIPT
	default "999999" if PARISC && !PA20
	default "999999" if SPARC32
	default "4"

config ARCH_ENABLE_SPLIT_PMD_PTLOCK
	bool

#
# support for memory balloon
config MEMORY_BALLOON
	bool

#
# support for memory balloon compaction
config BALLOON_COMPACTION
	bool "Allow for balloon memory compaction/migration"
	default y
	depends on COMPACTION && MEMORY_BALLOON
	help
	  Memory fragmentation introduced by ballooning might significantly
	  reduce the number of 2MB contiguous memory blocks that can be used
	  within a guest, thus imposing performance penalties associated
	  with the reduced number of transparent huge pages that could be used
	  by the guest workload. Allowing compaction & migration of memory
	  pages enlisted as being part of memory balloon devices avoids the
	  aforementioned scenario and helps improve memory defragmentation.

#
# support for memory compaction
config COMPACTION
	bool "Allow for memory compaction"
	default y
	select MIGRATION
	depends on MMU
	help
	  Compaction is the only memory management component to form
	  high order (larger physically contiguous) memory blocks
	  reliably. The page allocator relies on compaction heavily and
	  the lack of the feature can lead to unexpected OOM killer
	  invocations for high order memory requests. You shouldn't
	  disable this option unless there really is a strong reason for
	  it and then we would be really interested to hear about that at
	  linux-mm@kvack.org.

config COMPACT_UNEVICTABLE_DEFAULT
	int
	depends on COMPACTION
	default 0 if PREEMPT_RT
	default 1

#
# support for free page reporting
config PAGE_REPORTING
	bool "Free page reporting"
	help
	  Free page reporting allows for the incremental acquisition of
	  free pages from the buddy allocator for the purpose of reporting
	  those pages to another entity, such as a hypervisor, so that the
	  memory can be freed within the host for other uses.

#
# support for page migration
#
config MIGRATION
	bool "Page migration"
	default y
	depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
	help
	  Allows the migration of the physical location of pages of processes
	  while the virtual addresses are not changed. This is useful in
	  two situations. The first is on NUMA systems to put pages nearer
	  to the processors accessing them. The second is when allocating
	  huge pages, as migration can relocate pages to satisfy a huge page
	  allocation instead of reclaiming.

config DEVICE_MIGRATION
	def_bool MIGRATION && ZONE_DEVICE

config ARCH_ENABLE_HUGEPAGE_MIGRATION
	bool

config ARCH_ENABLE_THP_MIGRATION
	bool

config HUGETLB_PAGE_SIZE_VARIABLE
	def_bool n
	help
	  Allows the pageblock_order value to be dynamic instead of just standard
	  HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available
	  on a platform.

	  Note that the pageblock_order cannot exceed MAX_PAGE_ORDER and will be
	  clamped down to MAX_PAGE_ORDER.

config CONTIG_ALLOC
	def_bool (MEMORY_ISOLATION && COMPACTION) || CMA

config PCP_BATCH_SCALE_MAX
	int "Maximum scale factor of PCP (Per-CPU pageset) batch allocate/free"
	default 5
	range 0 6
	help
	  In the page allocator, PCP (Per-CPU pageset) is refilled and drained
	  in batches. The batch number is scaled automatically to improve page
	  allocation/free throughput. But too large a scale factor may hurt
	  latency. This option sets the upper limit of the scale factor to
	  limit the maximum latency.

config PHYS_ADDR_T_64BIT
	def_bool 64BIT

config BOUNCE
	bool "Enable bounce buffers"
	default y
	depends on BLOCK && MMU && HIGHMEM
	help
	  Enable bounce buffers for devices that cannot access the full range of
	  memory available to the CPU. Enabled by default when HIGHMEM is
	  selected, but you may say n to override this.

config MMU_NOTIFIER
	bool
	select INTERVAL_TREE

config KSM
	bool "Enable KSM for page merging"
	depends on MMU
	select XXHASH
	help
	  Enable Kernel Samepage Merging: KSM periodically scans those areas
	  of an application's address space that an app has advised may be
	  mergeable. When it finds pages of identical content, it replaces
	  the many instances by a single page with that content, so
	  saving memory until one or another app needs to modify the content.
	  Recommended for use with KVM, or with other duplicative applications.
	  See Documentation/mm/ksm.rst for more information: KSM is inactive
	  until a program has madvised that an area is MADV_MERGEABLE, and
	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).

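# Illustrative usage, not part of the upstream Kconfig: an application opts
# a region into KSM with madvise(addr, len, MADV_MERGEABLE), and scanning is
# then started system-wide with:
#
#   echo 1 > /sys/kernel/mm/ksm/run
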
config DEFAULT_MMAP_MIN_ADDR
	int "Low address space to protect from user allocation"
	depends on MMU
	default 4096
	help
	  This is the portion of low virtual memory which should be protected
	  from userspace allocation. Keeping a user from writing to low pages
	  can help reduce the impact of kernel NULL pointer bugs.

	  For most ppc64 and x86 users with lots of address space,
	  a value of 65536 is reasonable and should cause no problems.
	  On arm and other archs it should not be higher than 32768.
	  Programs which use vm86 functionality or have some need to map
	  this low address space will need CAP_SYS_RAWIO or disable this
	  protection by setting the value to 0.

	  This value can be changed after boot using the
	  /proc/sys/vm/mmap_min_addr tunable.

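# Illustrative usage, not part of the upstream Kconfig: the boot-time value
# set here can be inspected or adjusted later via the tunable named above,
# e.g.:
#
#   sysctl vm.mmap_min_addr
#   sysctl -w vm.mmap_min_addr=65536
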
config ARCH_SUPPORTS_MEMORY_FAILURE
	bool

config MEMORY_FAILURE
	depends on MMU
	depends on ARCH_SUPPORTS_MEMORY_FAILURE
	bool "Enable recovery from hardware memory errors"
	select MEMORY_ISOLATION
	select RAS
	help
	  Enables code to recover from some memory failures on systems
	  with MCA recovery. This allows a system to continue running
	  even when some of its memory has uncorrected errors. This requires
	  special hardware support and typically ECC memory.

config HWPOISON_INJECT
	tristate "HWPoison pages injector"
	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
	select PROC_PAGE_MONITOR

config NOMMU_INITIAL_TRIM_EXCESS
	int "Turn on mmap() excess space trimming before booting"
	depends on !MMU
	default 1
	help
	  The NOMMU mmap() frequently needs to allocate large contiguous chunks
	  of memory on which to store mappings, but it can only ask the system
	  allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
	  more than it requires. To deal with this, mmap() is able to trim off
	  the excess and return it to the allocator.

	  If trimming is enabled, the excess is trimmed off and returned to the
	  system allocator, which can cause extra fragmentation, particularly
	  if there are a lot of transient processes.

	  If trimming is disabled, the excess is kept, but not used, which for
	  long-term mappings means that the space is wasted.

	  Trimming can be dynamically controlled through a sysctl option
	  (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
	  excess pages there must be before trimming should occur, or zero if
	  no trimming is to occur.

	  This option specifies the initial value of the above sysctl option.
	  The default of 1 says that all excess pages should be trimmed.

	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.

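# Illustrative usage, not part of the upstream Kconfig: the value chosen
# here seeds the runtime tunable named above, which can be changed later,
# e.g.:
#
#   echo 1 > /proc/sys/vm/nr_trim_pages
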
config ARCH_WANT_GENERAL_HUGETLB
	bool

config ARCH_WANTS_THP_SWAP
	def_bool n

menuconfig TRANSPARENT_HUGEPAGE
	bool "Transparent Hugepage Support"
	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT
	select COMPACTION
	select XARRAY_MULTI
	help
	  Transparent Hugepages allows the kernel to use huge pages and
	  huge tlb entries transparently to the applications whenever possible.
	  This feature can improve computing performance for certain
	  applications by speeding up page faults during memory
	  allocation, by reducing the number of tlb misses and by speeding
	  up the pagetable walking.

	  If memory is constrained on an embedded system, you may want to say N.

if TRANSPARENT_HUGEPAGE

choice
	prompt "Transparent Hugepage Support sysfs defaults"
	depends on TRANSPARENT_HUGEPAGE
	default TRANSPARENT_HUGEPAGE_ALWAYS
	help
	  Selects the sysfs defaults for Transparent Hugepage Support.

	config TRANSPARENT_HUGEPAGE_ALWAYS
		bool "always"
		help
		  Enabling Transparent Hugepage always can increase the
		  memory footprint of applications without a guaranteed
		  benefit but it will work automatically for all applications.

	config TRANSPARENT_HUGEPAGE_MADVISE
		bool "madvise"
		help
		  Enabling Transparent Hugepage madvise will only provide a
		  performance improvement benefit to the applications using
		  madvise(MADV_HUGEPAGE), but it won't risk increasing the
		  memory footprint of applications without a guaranteed
		  benefit.

	config TRANSPARENT_HUGEPAGE_NEVER
		bool "never"
		help
		  Disable Transparent Hugepage by default. It can still be
		  enabled at runtime via sysfs.
endchoice

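# Illustrative usage, not part of the upstream Kconfig: whichever sysfs
# default is selected above, the mode can be changed at runtime, e.g.:
#
#   cat /sys/kernel/mm/transparent_hugepage/enabled
#   echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
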
config THP_SWAP
	def_bool y
	depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP && 64BIT
	help
	  Swap transparent huge pages in one piece, without splitting.
	  XXX: For now, the swap cluster backing a transparent huge page
	  will be split after swapout.

	  For selection by architectures with reasonable THP sizes.

config READ_ONLY_THP_FOR_FS
	bool "Read-only THP for filesystems (EXPERIMENTAL)"
	depends on TRANSPARENT_HUGEPAGE && SHMEM
	help
	  Allow khugepaged to put read-only file-backed pages in THP.

	  This is marked experimental because it is a new feature. Write
	  support of file THPs will be developed in the next few release
	  cycles.

endif # TRANSPARENT_HUGEPAGE

#
# The architecture supports pgtable leaves that are larger than PAGE_SIZE
#
config PGTABLE_HAS_HUGE_LEAVES
	def_bool TRANSPARENT_HUGEPAGE || HUGETLB_PAGE

#
# UP and nommu archs use km based percpu allocator
#
config NEED_PER_CPU_KM
	depends on !SMP || !MMU
	bool
	default y

config NEED_PER_CPU_EMBED_FIRST_CHUNK
	bool

config NEED_PER_CPU_PAGE_FIRST_CHUNK
	bool

config USE_PERCPU_NUMA_NODE_ID
	bool

config HAVE_SETUP_PER_CPU_AREA
	bool

config CMA
	bool "Contiguous Memory Allocator"
	depends on MMU
	select MIGRATION
	select MEMORY_ISOLATION
	help
	  This enables the Contiguous Memory Allocator which allows other
	  subsystems to allocate big physically-contiguous blocks of memory.
	  CMA reserves a region of memory and allows only movable pages to
	  be allocated from it. This way, the kernel can use the memory for
	  pagecache and, when a subsystem requests a contiguous area, the
	  allocated pages are migrated away to serve the contiguous request.

	  If unsure, say "n".

config CMA_DEBUGFS
	bool "CMA debugfs interface"
	depends on CMA && DEBUG_FS
	help
	  Turns on the DebugFS interface for CMA.

config CMA_SYSFS
	bool "CMA information through sysfs interface"
	depends on CMA && SYSFS
	help
	  This option exposes some sysfs attributes to get information
	  from CMA.

config CMA_AREAS
	int "Maximum count of the CMA areas"
	depends on CMA
	default 20 if NUMA
	default 8
	help
	  CMA allows creating CMA areas for a particular purpose, mainly
	  used as device private areas. This parameter sets the maximum
	  number of CMA areas in the system.

	  If unsure, leave the default value "8" in UMA and "20" in NUMA.

config MEM_SOFT_DIRTY
	bool "Track memory changes"
	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
	select PROC_PAGE_MONITOR
	help
	  This option enables memory change tracking by introducing a
	  soft-dirty bit on pte-s. This bit is set when someone writes
	  into a page just as the regular dirty bit, but unlike the latter
	  it can be cleared by hand.

	  See Documentation/admin-guide/mm/soft-dirty.rst for more details.

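# Illustrative usage, not part of the upstream Kconfig: soft-dirty tracking
# is driven per process as described in the referenced document, e.g.
# clearing the bits and later reading them back via pagemap (bit 55):
#
#   echo 4 > /proc/<pid>/clear_refs
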
config GENERIC_EARLY_IOREMAP
	bool

config STACK_MAX_DEFAULT_SIZE_MB
	int "Default maximum user stack size for 32-bit processes (MB)"
	default 100
	range 8 2048
	depends on STACK_GROWSUP && (!64BIT || COMPAT)
	help
	  This is the maximum stack size in Megabytes in the VM layout of 32-bit
	  user processes when the stack grows upwards (currently only on parisc
	  arch) when the RLIMIT_STACK hard limit is unlimited.

	  A sane initial value is 100 MB.

config DEFERRED_STRUCT_PAGE_INIT
	bool "Defer initialisation of struct pages to kthreads"
	depends on SPARSEMEM
	depends on !NEED_PER_CPU_KM
	depends on 64BIT
	select PADATA
	help
	  Ordinarily all struct pages are initialised during early boot in a
	  single thread. On very large machines this can take a considerable
	  amount of time. If this option is set, large machines will bring up
	  a subset of memmap at boot and then initialise the rest in parallel.
	  This has a potential performance impact on tasks running early in the
	  lifetime of the system until these kthreads finish the
	  initialisation.

config PAGE_IDLE_FLAG
	bool
	select PAGE_EXTENSION if !64BIT
	help
	  This adds PG_idle and PG_young flags to 'struct page'. PTE Accessed
	  bit writers can set the state of the bit in the flags so that PTE
	  Accessed bit readers may avoid disturbance.

config IDLE_PAGE_TRACKING
	bool "Enable idle page tracking"
	depends on SYSFS && MMU
	select PAGE_IDLE_FLAG
	help
	  This feature allows estimating the amount of user pages that have
	  not been touched during a given period of time. This information
	  can be useful to tune memory cgroup limits and/or for job placement
	  within a compute cluster.

	  See Documentation/admin-guide/mm/idle_page_tracking.rst for
	  more details.

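# Illustrative usage, not part of the upstream Kconfig: idle page tracking
# is driven from userspace through the bitmap file described in the
# referenced document:
#
#   /sys/kernel/mm/page_idle/bitmap
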
# Architectures which implement cpu_dcache_is_aliasing() to query
# whether the data caches are aliased (VIVT or VIPT with dcache
# aliasing) need to select this.
config ARCH_HAS_CPU_CACHE_ALIASING
	bool

config ARCH_HAS_CACHE_LINE_SIZE
	bool

config ARCH_HAS_CURRENT_STACK_POINTER
	bool
	help
	  In support of HARDENED_USERCOPY performing stack variable lifetime
	  checking, an architecture-agnostic way to find the stack pointer
	  is needed. Once an architecture defines an unsigned long global
	  register alias named "current_stack_pointer", this config can be
	  selected.

config ARCH_HAS_PTE_DEVMAP
	bool

config ARCH_HAS_ZONE_DMA_SET
	bool

config ZONE_DMA
	bool "Support DMA zone" if ARCH_HAS_ZONE_DMA_SET
	default y if ARM64 || X86

config ZONE_DMA32
	bool "Support DMA32 zone" if ARCH_HAS_ZONE_DMA_SET
	depends on !X86_32
	default y if ARM64

config ZONE_DEVICE
	bool "Device memory (pmem, HMM, etc...) hotplug support"
	depends on MEMORY_HOTPLUG
	depends on MEMORY_HOTREMOVE
	depends on SPARSEMEM_VMEMMAP
	depends on ARCH_HAS_PTE_DEVMAP
	select XARRAY_MULTI
	help
	  Device memory hotplug support allows for establishing pmem,
	  or other device driver discovered memory regions, in the
	  memmap. This allows pfn_to_page() lookups of otherwise
	  "device-physical" addresses, which is needed for using a DAX
	  mapping in an O_DIRECT operation, among other things.

	  If FS_DAX is enabled, then say Y.

#
# Helpers to mirror a range of the CPU page tables of a process into device
# page tables.
#
config HMM_MIRROR
	bool
	depends on MMU

config GET_FREE_REGION
	depends on SPARSEMEM
	bool

config DEVICE_PRIVATE
	bool "Unaddressable device memory (GPU memory, ...)"
	depends on ZONE_DEVICE
	select GET_FREE_REGION
	help
	  Allows creation of struct pages to represent unaddressable device
	  memory; i.e., memory that is only accessible from the device (or
	  group of devices). You likely also want to select HMM_MIRROR.

config VMAP_PFN
	bool

config ARCH_USES_HIGH_VMA_FLAGS
	bool

config ARCH_HAS_PKEYS
	bool

config ARCH_USES_PG_ARCH_X
	bool
	help
	  Enable the definition of PG_arch_x page flags with x > 1. Only
	  suitable for 64-bit architectures with CONFIG_FLATMEM or
	  CONFIG_SPARSEMEM_VMEMMAP enabled, otherwise there may not be
	  enough room for additional bits in page->flags.

config VM_EVENT_COUNTERS
	default y
	bool "Enable VM event counters for /proc/vmstat" if EXPERT
	help
	  VM event counters are needed for event counts to be shown.
	  This option allows the disabling of the VM event counters
	  on EXPERT systems. /proc/vmstat will only show page counts
	  if VM event counters are disabled.

config PERCPU_STATS
	bool "Collect percpu memory statistics"
	help
	  This feature collects and exposes statistics via debugfs. The
	  information includes global and per chunk statistics, which can
	  be used to help understand percpu memory usage.

config GUP_TEST
	bool "Enable infrastructure for get_user_pages()-related unit tests"
	depends on DEBUG_FS
	help
	  Provides /sys/kernel/debug/gup_test, which in turn provides a way
	  to make ioctl calls that can launch kernel-based unit tests for
	  the get_user_pages*() and pin_user_pages*() family of API calls.

	  These tests include benchmark testing of the _fast variants of
	  get_user_pages*() and pin_user_pages*(), as well as smoke tests of
	  the non-_fast variants.

	  There is also a sub-test that allows running dump_page() on any
	  of up to eight pages (selected by command line args) within the
	  range of user-space addresses. These pages are either pinned via
	  pin_user_pages*(), or pinned via get_user_pages*(), as specified
	  by other command line arguments.

	  See tools/testing/selftests/mm/gup_test.c

comment "GUP_TEST needs to have DEBUG_FS enabled"
	depends on !GUP_TEST && !DEBUG_FS

config GUP_GET_PXX_LOW_HIGH
	bool

config DMAPOOL_TEST
	tristate "Enable a module to run time tests on dma_pool"
	depends on HAS_DMA
	help
	  Provides a test module that will allocate and free many blocks of
	  various sizes and report how long it takes. This is intended to
	  provide a consistent way to measure how changes to the
	  dma_pool_alloc/free routines affect performance.

config ARCH_HAS_PTE_SPECIAL
	bool

#
# Some architectures require a special hugepage directory format to
# support multiple hugepage sizes. For example a4fe3ce76
# "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
# introduced it on powerpc. This allows for more flexible hugepage
# pagetable layouts.
#
config ARCH_HAS_HUGEPD
	bool

config MAPPING_DIRTY_HELPERS
	bool

config KMAP_LOCAL
	bool

config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
	bool

# struct io_mapping based helper. Selected by drivers that need them
config IO_MAPPING
	bool

config MEMFD_CREATE
	bool "Enable memfd_create() system call" if EXPERT

config SECRETMEM
	default y
	bool "Enable memfd_secret() system call" if EXPERT
	depends on ARCH_HAS_SET_DIRECT_MAP
	help
	  Enable the memfd_secret() system call with the ability to create
	  memory areas visible only in the context of the owning process and
	  not mapped to other processes and other kernel page tables.

config ANON_VMA_NAME
	bool "Anonymous VMA name support"
	depends on PROC_FS && ADVISE_SYSCALLS && MMU
	help
	  Allow naming anonymous virtual memory areas.

	  This feature allows assigning names to virtual memory areas. Assigned
	  names can later be retrieved from /proc/pid/maps and /proc/pid/smaps
	  and help identify individual anonymous memory areas.
	  Assigning a name to an anonymous virtual memory area might prevent
	  that area from being merged with adjacent virtual memory areas due to
	  the difference in their names.

config HAVE_ARCH_USERFAULTFD_WP
	bool
	help
	  Arch has userfaultfd write protection support

config HAVE_ARCH_USERFAULTFD_MINOR
	bool
	help
	  Arch has userfaultfd minor fault support

menuconfig USERFAULTFD
	bool "Enable userfaultfd() system call"
	depends on MMU
	help
	  Enable the userfaultfd() system call that allows intercepting
	  and handling page faults in userland.

if USERFAULTFD

config PTE_MARKER_UFFD_WP
	bool "Userfaultfd write protection support for shmem/hugetlbfs"
	default y
	depends on HAVE_ARCH_USERFAULTFD_WP
	help
	  Allows creating marker PTEs for userfaultfd write protection
	  purposes. It is required to enable userfaultfd write protection on
	  file-backed memory types like shmem and hugetlbfs.

endif # USERFAULTFD

# multi-gen LRU {
config LRU_GEN
	bool "Multi-Gen LRU"
	depends on MMU
	# make sure folio->flags has enough spare bits
	depends on 64BIT || !SPARSEMEM || SPARSEMEM_VMEMMAP
	help
	  A high performance LRU implementation to overcommit memory. See
	  Documentation/admin-guide/mm/multigen_lru.rst for details.

config LRU_GEN_ENABLED
	bool "Enable by default"
	depends on LRU_GEN
	help
	  This option enables the multi-gen LRU by default.

config LRU_GEN_STATS
	bool "Full stats for debugging"
	depends on LRU_GEN
	help
	  Do not enable this option unless you plan to look at historical
	  stats from evicted generations for debugging purposes.

	  This option has a per-memcg and per-node memory overhead.

config LRU_GEN_WALKS_MMU
	def_bool y
	depends on LRU_GEN && ARCH_HAS_HW_PTE_YOUNG
# }

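# Illustrative usage, not part of the upstream Kconfig: even without
# LRU_GEN_ENABLED, the multi-gen LRU can be switched on at runtime, e.g.:
#
#   echo y > /sys/kernel/mm/lru_gen/enabled
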
config ARCH_SUPPORTS_PER_VMA_LOCK
	def_bool n

config PER_VMA_LOCK
	def_bool y
	depends on ARCH_SUPPORTS_PER_VMA_LOCK && MMU && SMP
	help
	  Allow per-vma locking during page fault handling.

	  This feature allows locking each virtual memory area separately when
	  handling page faults instead of taking mmap_lock.

config LOCK_MM_AND_FIND_VMA
	bool
	depends on !STACK_GROWSUP

config IOMMU_MM_DATA
	bool

config EXECMEM
	bool

source "mm/damon/Kconfig"

endmenu