1 # SPDX-License-Identifier: GPL-2.0-only
3 menu "Memory Management options"
5 config SELECT_MEMORY_MODEL
7 depends on ARCH_SELECT_MEMORY_MODEL
11 depends on SELECT_MEMORY_MODEL
12 default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
13 default FLATMEM_MANUAL
15 This option allows you to change some of the ways that
16 Linux manages its memory internally. Most users will
17 only have one option here selected by the architecture
18 configuration. This is normal.
22 depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE
24 This option is best suited for non-NUMA systems with
25 flat address space. The FLATMEM is the most efficient
26 system in terms of performance and resource consumption
27 and it is the best option for smaller systems.
29 For systems that have holes in their physical address
30 spaces and for features like NUMA and memory hotplug,
31 choose "Sparse Memory".
33 If unsure, choose this option (Flat Memory) over any other.
35 config DISCONTIGMEM_MANUAL
36 bool "Discontiguous Memory"
37 depends on ARCH_DISCONTIGMEM_ENABLE
39 This option provides enhanced support for discontiguous
40 memory systems, over FLATMEM. These systems have holes
41 in their physical address spaces, and this option provides
42 more efficient handling of these holes.
44 Although "Discontiguous Memory" is still used by several
45 architectures, it is considered deprecated in favor of
48 If unsure, choose "Sparse Memory" over this option.
50 config SPARSEMEM_MANUAL
52 depends on ARCH_SPARSEMEM_ENABLE
54 This will be the only option for some systems, including
55 memory hot-plug systems. This is normal.
57 This option provides efficient support for systems with
58 holes in their physical address space and allows memory
59 hot-plug and hot-remove.
61 If unsure, choose "Flat Memory" over this option.
67 depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL
71 depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL
75 depends on (!DISCONTIGMEM && !SPARSEMEM) || FLATMEM_MANUAL
77 config FLAT_NODE_MEM_MAP
82 # Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
83 # to represent different areas of memory. This variable allows
84 # those dependencies to exist individually.
86 config NEED_MULTIPLE_NODES
88 depends on DISCONTIGMEM || NUMA
91 # SPARSEMEM_EXTREME (which is the default) does some bootmem
92 # allocations when sparse_init() is called. If this cannot
93 # be done on your architecture, select this option. However,
94 # statically allocating the mem_section[] array can potentially
95 # consume vast quantities of .bss, so be careful.
97 # This option will also potentially produce smaller runtime code
98 # with gcc 3.4 and later.
100 config SPARSEMEM_STATIC
104 # Architecture platforms which require a two level mem_section in SPARSEMEM
105 # must select this option. This is usually for architecture platforms with
106 # an extremely sparse physical address space.
108 config SPARSEMEM_EXTREME
110 depends on SPARSEMEM && !SPARSEMEM_STATIC
112 config SPARSEMEM_VMEMMAP_ENABLE
115 config SPARSEMEM_VMEMMAP
116 bool "Sparse Memory virtual memmap"
117 depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
120 SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
121 pfn_to_page and page_to_pfn operations. This is the most
122 efficient option when sufficient kernel resources are available.
124 config HAVE_MEMBLOCK_PHYS_MAP
131 # Don't discard allocated memory used to track "memory" and "reserved" memblocks
132 # after early boot, so it can still be used to test for validity of memory.
133 # Also, memblocks are updated with memory hot(un)plug.
134 config ARCH_KEEP_MEMBLOCK
137 # Keep arch NUMA mapping infrastructure post-init.
138 config NUMA_KEEP_MEMINFO
141 config MEMORY_ISOLATION
145 # Only be set on architectures that have completely implemented memory hotplug
146 # feature. If you are not sure, don't touch it.
148 config HAVE_BOOTMEM_INFO_NODE
151 # eventually, we can have this option just 'select SPARSEMEM'
152 config MEMORY_HOTPLUG
153 bool "Allow for memory hot-add"
154 select MEMORY_ISOLATION
155 depends on SPARSEMEM || X86_64_ACPI_NUMA
156 depends on ARCH_ENABLE_MEMORY_HOTPLUG
157 depends on 64BIT || BROKEN
158 select NUMA_KEEP_MEMINFO if NUMA
160 config MEMORY_HOTPLUG_SPARSE
162 depends on SPARSEMEM && MEMORY_HOTPLUG
164 config MEMORY_HOTPLUG_DEFAULT_ONLINE
165 bool "Online the newly added memory blocks by default"
166 depends on MEMORY_HOTPLUG
168 This option sets the default policy setting for memory hotplug
169 onlining policy (/sys/devices/system/memory/auto_online_blocks) which
170 determines what happens to newly added memory regions. Policy setting
171 can always be changed at runtime.
172 See Documentation/admin-guide/mm/memory-hotplug.rst for more information.
174 Say Y here if you want all hot-plugged memory blocks to appear in
175 'online' state by default.
176 Say N here if you want the default policy to keep all hot-plugged
177 memory blocks in 'offline' state.
# Allow previously hot-added memory to be offlined and removed again.
# On X86_64/PPC64 this also selects HAVE_BOOTMEM_INFO_NODE, per the
# select condition below.
179 config MEMORY_HOTREMOVE
180 	bool "Allow for memory hot remove"
181 	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
182 	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
185 # Heavily threaded applications may benefit from splitting the mm-wide
186 # page_table_lock, so that faults on different parts of the user address
187 # space can be handled with less contention: split it at this NR_CPUS.
188 # Default to 4 for wider testing, though 8 might be more appropriate.
189 # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
190 # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
191 # SPARC32 allocates multiple pte tables within a single page, and therefore
192 # a per-page lock leads to problems when multiple tables need to be locked
193 # at the same time (e.g. copy_page_range()).
194 # DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
196 config SPLIT_PTLOCK_CPUS
198 default "999999" if !MMU
199 default "999999" if ARM && !CPU_CACHE_VIPT
200 default "999999" if PARISC && !PA20
201 default "999999" if SPARC32
204 config ARCH_ENABLE_SPLIT_PMD_PTLOCK
208 # support for memory balloon
209 config MEMORY_BALLOON
213 # support for memory balloon compaction
214 config BALLOON_COMPACTION
215 bool "Allow for balloon memory compaction/migration"
217 depends on COMPACTION && MEMORY_BALLOON
219 Memory fragmentation introduced by ballooning might reduce
220 significantly the number of 2MB contiguous memory blocks that can be
221 used within a guest, thus imposing performance penalties associated
222 with the reduced number of transparent huge pages that could be used
223 by the guest workload. Allowing the compaction & migration for memory
224 pages enlisted as being part of memory balloon devices avoids the
225 aforementioned scenario and helps improve memory defragmentation.
228 # support for memory compaction
230 bool "Allow for memory compaction"
235 Compaction is the only memory management component to form
236 high order (larger physically contiguous) memory blocks
237 reliably. The page allocator relies on compaction heavily and
238 the lack of the feature can lead to unexpected OOM killer
239 invocations for high order memory requests. You shouldn't
240 disable this option unless there really is a strong reason for
241 it and then we would be really interested to hear about that at
245 # support for free page reporting
246 config PAGE_REPORTING
247 bool "Free page reporting"
250 Free page reporting allows for the incremental acquisition of
251 free pages from the buddy allocator for the purpose of reporting
252 those pages to another entity, such as a hypervisor, so that the
253 memory can be freed within the host for other uses.
256 # support for page migration
259 bool "Page migration"
261 depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
263 Allows the migration of the physical location of pages of processes
264 while the virtual addresses are not changed. This is useful in
265 two situations. The first is on NUMA systems to put pages nearer
266 to the processors accessing them. The second is when allocating huge
267 pages as migration can relocate pages to satisfy a huge page
268 allocation instead of reclaiming.
270 config ARCH_ENABLE_HUGEPAGE_MIGRATION
273 config ARCH_ENABLE_THP_MIGRATION
276 config HUGETLB_PAGE_SIZE_VARIABLE
279 Allows the pageblock_order value to be dynamic instead of just standard
280 HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available
284 def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
286 config PHYS_ADDR_T_64BIT
290 bool "Enable bounce buffers"
292 depends on BLOCK && MMU && HIGHMEM
294 Enable bounce buffers for devices that cannot access the full range of
295 memory available to the CPU. Enabled by default when HIGHMEM is
296 selected, but you may say n to override this.
301 An architecture should select this if it implements the
302 deprecated interface virt_to_bus(). All new architectures
303 should probably not select this.
312 bool "Enable KSM for page merging"
316 Enable Kernel Samepage Merging: KSM periodically scans those areas
317 of an application's address space that an app has advised may be
318 mergeable. When it finds pages of identical content, it replaces
319 the many instances by a single page with that content, so
320 saving memory until one or another app needs to modify the content.
321 Recommended for use with KVM, or with other duplicative applications.
322 See Documentation/vm/ksm.rst for more information: KSM is inactive
323 until a program has madvised that an area is MADV_MERGEABLE, and
324 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
326 config DEFAULT_MMAP_MIN_ADDR
327 int "Low address space to protect from user allocation"
331 This is the portion of low virtual memory which should be protected
332 from userspace allocation. Keeping a user from writing to low pages
333 can help reduce the impact of kernel NULL pointer bugs.
335 For most ia64, ppc64 and x86 users with lots of address space
336 a value of 65536 is reasonable and should cause no problems.
337 On arm and other archs it should not be higher than 32768.
338 Programs which use vm86 functionality or have some need to map
339 this low address space will need CAP_SYS_RAWIO or disable this
340 protection by setting the value to 0.
342 This value can be changed after boot using the
343 /proc/sys/vm/mmap_min_addr tunable.
345 config ARCH_SUPPORTS_MEMORY_FAILURE
348 config MEMORY_FAILURE
350 depends on ARCH_SUPPORTS_MEMORY_FAILURE
351 bool "Enable recovery from hardware memory errors"
352 select MEMORY_ISOLATION
355 Enables code to recover from some memory failures on systems
356 with MCA recovery. This allows a system to continue running
357 even when some of its memory has uncorrected errors. This requires
358 special hardware support and typically ECC memory.
# Debugging aid (note the DEBUG_KERNEL dependency): a module that
# injects hwpoison state into pages to exercise the MEMORY_FAILURE
# handling paths. Needs PROC_FS and pulls in PROC_PAGE_MONITOR.
360 config HWPOISON_INJECT
361 	tristate "HWPoison pages injector"
362 	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
363 	select PROC_PAGE_MONITOR
365 config NOMMU_INITIAL_TRIM_EXCESS
366 int "Turn on mmap() excess space trimming before booting"
370 The NOMMU mmap() frequently needs to allocate large contiguous chunks
371 of memory on which to store mappings, but it can only ask the system
372 allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
373 more than it requires. To deal with this, mmap() is able to trim off
374 the excess and return it to the allocator.
376 If trimming is enabled, the excess is trimmed off and returned to the
377 system allocator, which can cause extra fragmentation, particularly
378 if there are a lot of transient processes.
380 If trimming is disabled, the excess is kept, but not used, which for
381 long-term mappings means that the space is wasted.
383 Trimming can be dynamically controlled through a sysctl option
384 (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
385 excess pages there must be before trimming should occur, or zero if
386 no trimming is to occur.
388 This option specifies the initial value of this option. The default
389 of 1 says that all excess pages should be trimmed.
391 See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
393 config TRANSPARENT_HUGEPAGE
394 bool "Transparent Hugepage Support"
395 depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
399 Transparent Hugepages allows the kernel to use huge pages and
400 huge tlb transparently to the applications whenever possible.
401 This feature can improve computing performance to certain
402 applications by speeding up page faults during memory
403 allocation, by reducing the number of tlb misses and by speeding
404 up the pagetable walking.
406 If memory constrained on embedded, you may want to say N.
409 prompt "Transparent Hugepage Support sysfs defaults"
410 depends on TRANSPARENT_HUGEPAGE
411 default TRANSPARENT_HUGEPAGE_ALWAYS
413 Selects the sysfs defaults for Transparent Hugepage Support.
415 config TRANSPARENT_HUGEPAGE_ALWAYS
418 Enabling Transparent Hugepage always, can increase the
419 memory footprint of applications without a guaranteed
420 benefit but it will work automatically for all applications.
422 config TRANSPARENT_HUGEPAGE_MADVISE
425 Enabling Transparent Hugepage madvise, will only provide a
426 performance improvement benefit to the applications using
427 madvise(MADV_HUGEPAGE) but it won't risk increasing the
428 memory footprint of applications without a guaranteed
432 config ARCH_WANTS_THP_SWAP
437 depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP
439 Swap transparent huge pages in one piece, without splitting.
440 XXX: For now, swap cluster backing transparent huge page
441 will be split after swapout.
443 For selection by architectures with reasonable THP sizes.
446 # UP and nommu archs use km based percpu allocator
448 config NEED_PER_CPU_KM
454 bool "Enable cleancache driver to cache clean pages if tmem is present"
456 Cleancache can be thought of as a page-granularity victim cache
457 for clean pages that the kernel's pageframe replacement algorithm
458 (PFRA) would like to keep around, but can't since there isn't enough
459 memory. So when the PFRA "evicts" a page, it first attempts to use
460 cleancache code to put the data contained in that page into
461 "transcendent memory", memory that is not directly accessible or
462 addressable by the kernel and is of unknown and possibly
463 time-varying size. And when a cleancache-enabled
464 filesystem wishes to access a page in a file on disk, it first
465 checks cleancache to see if it already contains it; if it does,
466 the page is copied into the kernel and a disk access is avoided.
467 When a transcendent memory driver is available (such as zcache or
468 Xen transcendent memory), a significant I/O reduction
469 may be achieved. When none is available, all cleancache calls
470 are reduced to a single pointer-compare-against-NULL resulting
471 in a negligible performance hit.
473 If unsure, say Y to enable cleancache
476 bool "Enable frontswap to cache swap pages if tmem is present"
479 Frontswap is so named because it can be thought of as the opposite
480 of a "backing" store for a swap device. The data is stored into
481 "transcendent memory", memory that is not directly accessible or
482 addressable by the kernel and is of unknown and possibly
483 time-varying size. When space in transcendent memory is available,
484 a significant swap I/O reduction may be achieved. When none is
485 available, all frontswap calls are reduced to a single pointer-
486 compare-against-NULL resulting in a negligible performance hit
487 and swap data is stored as normal on the matching swap device.
489 If unsure, say Y to enable frontswap.
492 bool "Contiguous Memory Allocator"
495 select MEMORY_ISOLATION
497 This enables the Contiguous Memory Allocator which allows other
498 subsystems to allocate big physically-contiguous blocks of memory.
499 CMA reserves a region of memory and allows only movable pages to
500 be allocated from it. This way, the kernel can use the memory for
501 pagecache and when a subsystem requests for contiguous area, the
502 allocated pages are migrated away to serve the contiguous request.
507 bool "CMA debug messages (DEVELOPMENT)"
508 depends on DEBUG_KERNEL && CMA
510 Turns on debug messages in CMA. This produces KERN_DEBUG
511 messages for every CMA call as well as various messages while
512 processing calls such as dma_alloc_from_contiguous().
513 This option does not affect warning and error messages.
516 bool "CMA debugfs interface"
517 depends on CMA && DEBUG_FS
519 Turns on the DebugFS interface for CMA.
522 bool "CMA information through sysfs interface"
523 depends on CMA && SYSFS
525 This option exposes some sysfs attributes to get information
529 int "Maximum count of the CMA areas"
534 CMA allows creating CMA areas for a particular purpose, mainly,
535 used as device private area. This parameter sets the maximum
536 number of CMA areas in the system.
538 If unsure, leave the default value "7" in UMA and "19" in NUMA.
540 config MEM_SOFT_DIRTY
541 bool "Track memory changes"
542 depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
543 select PROC_PAGE_MONITOR
545 This option enables memory changes tracking by introducing a
546 soft-dirty bit on pte-s. This bit is set when someone writes
547 into a page just as regular dirty bit, but unlike the latter
548 it can be cleared by hands.
550 See Documentation/admin-guide/mm/soft-dirty.rst for more details.
553 bool "Compressed cache for swap pages (EXPERIMENTAL)"
554 depends on FRONTSWAP && CRYPTO=y
557 A lightweight compressed cache for swap pages. It takes
558 pages that are in the process of being swapped out and attempts to
559 compress them into a dynamically allocated RAM-based memory pool.
560 This can result in a significant I/O reduction on swap device and,
561 in the case where decompressing from RAM is faster than swap device
562 reads, can also improve workload performance.
564 This is marked experimental because it is a new feature (as of
565 v3.11) that interacts heavily with memory reclaim. While these
566 interactions don't cause any known issues on simple memory setups,
567 they have not been fully explored on the large set of potential
568 configurations and workloads that exist.
571 prompt "Compressed cache for swap pages default compressor"
573 default ZSWAP_COMPRESSOR_DEFAULT_LZO
575 Selects the default compression algorithm for the compressed cache
578 For an overview what kind of performance can be expected from
579 a particular compression algorithm please refer to the benchmarks
580 available at the following LWN page:
581 https://lwn.net/Articles/751795/
583 If in doubt, select 'LZO'.
585 The selection made here can be overridden by using the kernel
586 command line 'zswap.compressor=' option.
588 config ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
590 select CRYPTO_DEFLATE
592 Use the Deflate algorithm as the default compression algorithm.
594 config ZSWAP_COMPRESSOR_DEFAULT_LZO
598 Use the LZO algorithm as the default compression algorithm.
600 config ZSWAP_COMPRESSOR_DEFAULT_842
604 Use the 842 algorithm as the default compression algorithm.
606 config ZSWAP_COMPRESSOR_DEFAULT_LZ4
610 Use the LZ4 algorithm as the default compression algorithm.
612 config ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
616 Use the LZ4HC algorithm as the default compression algorithm.
618 config ZSWAP_COMPRESSOR_DEFAULT_ZSTD
622 Use the zstd algorithm as the default compression algorithm.
625 config ZSWAP_COMPRESSOR_DEFAULT
628 default "deflate" if ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
629 default "lzo" if ZSWAP_COMPRESSOR_DEFAULT_LZO
630 default "842" if ZSWAP_COMPRESSOR_DEFAULT_842
631 default "lz4" if ZSWAP_COMPRESSOR_DEFAULT_LZ4
632 default "lz4hc" if ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
633 default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
637 prompt "Compressed cache for swap pages default allocator"
639 default ZSWAP_ZPOOL_DEFAULT_ZBUD
641 Selects the default allocator for the compressed cache for
643 The default is 'zbud' for compatibility, however please do
644 read the description of each of the allocators below before
645 making the right choice.
647 The selection made here can be overridden by using the kernel
648 command line 'zswap.zpool=' option.
650 config ZSWAP_ZPOOL_DEFAULT_ZBUD
654 Use the zbud allocator as the default allocator.
656 config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
660 Use the z3fold allocator as the default allocator.
662 config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
666 Use the zsmalloc allocator as the default allocator.
669 config ZSWAP_ZPOOL_DEFAULT
672 default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
673 default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD
674 default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
677 config ZSWAP_DEFAULT_ON
678 bool "Enable the compressed cache for swap pages by default"
681 If selected, the compressed cache for swap pages will be enabled
682 at boot, otherwise it will be disabled.
684 The selection made here can be overridden by using the kernel
685 command line 'zswap.enabled=' option.
688 tristate "Common API for compressed memory storage"
690 Compressed memory storage API. This allows using either zbud or
694 tristate "Low (Up to 2x) density storage for compressed pages"
696 A special purpose allocator for storing compressed pages.
697 It is designed to store up to two compressed pages per physical
698 page. While this design limits storage density, it has simple and
699 deterministic reclaim properties that make it preferable to a higher
700 density approach when reclaim will be used.
703 tristate "Up to 3x density storage for compressed pages"
706 A special purpose allocator for storing compressed pages.
707 It is designed to store up to three compressed pages per physical
708 page. It is a ZBUD derivative so the simplicity and determinism are
712 tristate "Memory allocator for compressed pages"
715 zsmalloc is a slab-based memory allocator designed to store
716 compressed RAM pages. zsmalloc uses virtual memory mapping
717 in order to reduce fragmentation. However, this results in a
718 non-standard allocator interface where a handle, not a pointer, is
719 returned by an alloc(). This handle must be mapped in order to
720 access the allocated space.
723 bool "Export zsmalloc statistics"
727 This option enables code in the zsmalloc to collect various
728 statistics about what's happening in zsmalloc and exports that
729 information to userspace via debugfs.
732 config GENERIC_EARLY_IOREMAP
735 config STACK_MAX_DEFAULT_SIZE_MB
736 int "Default maximum user stack size for 32-bit processes (MB)"
739 depends on STACK_GROWSUP && (!64BIT || COMPAT)
741 This is the maximum stack size in Megabytes in the VM layout of 32-bit
742 user processes when the stack grows upwards (currently only on parisc
743 arch) when the RLIMIT_STACK hard limit is unlimited.
745 A sane initial value is 100 MB.
747 config DEFERRED_STRUCT_PAGE_INIT
748 bool "Defer initialisation of struct pages to kthreads"
750 depends on !NEED_PER_CPU_KM
754 Ordinarily all struct pages are initialised during early boot in a
755 single thread. On very large machines this can take a considerable
756 amount of time. If this option is set, large machines will bring up
757 a subset of memmap at boot and then initialise the rest in parallel.
758 This has a potential performance impact on tasks running early in the
759 lifetime of the system until these kthreads finish the
762 config IDLE_PAGE_TRACKING
763 bool "Enable idle page tracking"
764 depends on SYSFS && MMU
765 select PAGE_EXTENSION if !64BIT
767 This feature allows estimating the amount of user pages that have
768 not been touched during a given period of time. This information can
769 be useful to tune memory cgroup limits and/or for job placement
770 within a compute cluster.
772 See Documentation/admin-guide/mm/idle_page_tracking.rst for
775 config ARCH_HAS_PTE_DEVMAP
779 bool "Device memory (pmem, HMM, etc...) hotplug support"
780 depends on MEMORY_HOTPLUG
781 depends on MEMORY_HOTREMOVE
782 depends on SPARSEMEM_VMEMMAP
783 depends on ARCH_HAS_PTE_DEVMAP
787 Device memory hotplug support allows for establishing pmem,
788 or other device driver discovered memory regions, in the
789 memmap. This allows pfn_to_page() lookups of otherwise
790 "device-physical" addresses which is needed for using a DAX
791 mapping in an O_DIRECT operation, among other things.
793 If FS_DAX is enabled, then say Y.
795 config DEV_PAGEMAP_OPS
799 # Helpers to mirror range of the CPU page tables of a process into device page
806 config DEVICE_PRIVATE
807 bool "Unaddressable device memory (GPU memory, ...)"
808 depends on ZONE_DEVICE
809 select DEV_PAGEMAP_OPS
812 Allows creation of struct pages to represent unaddressable device
813 memory; i.e., memory that is only accessible from the device (or
814 group of devices). You likely also want to select HMM_MIRROR.
819 config ARCH_USES_HIGH_VMA_FLAGS
821 config ARCH_HAS_PKEYS
825 bool "Collect percpu memory statistics"
827 This feature collects and exposes statistics via debugfs. The
828 information includes global and per chunk statistics, which can
829 be used to help understand percpu memory usage.
832 bool "Enable infrastructure for get_user_pages()-related unit tests"
835 Provides /sys/kernel/debug/gup_test, which in turn provides a way
836 to make ioctl calls that can launch kernel-based unit tests for
837 the get_user_pages*() and pin_user_pages*() family of API calls.
839 These tests include benchmark testing of the _fast variants of
840 get_user_pages*() and pin_user_pages*(), as well as smoke tests of
841 the non-_fast variants.
843 There is also a sub-test that allows running dump_page() on any
844 of up to eight pages (selected by command line args) within the
845 range of user-space addresses. These pages are either pinned via
846 pin_user_pages*(), or pinned via get_user_pages*(), as specified
847 by other command line arguments.
849 See tools/testing/selftests/vm/gup_test.c
851 comment "GUP_TEST needs to have DEBUG_FS enabled"
852 depends on !GUP_TEST && !DEBUG_FS
854 config GUP_GET_PTE_LOW_HIGH
857 config READ_ONLY_THP_FOR_FS
858 bool "Read-only THP for filesystems (EXPERIMENTAL)"
859 depends on TRANSPARENT_HUGEPAGE && SHMEM
862 Allow khugepaged to put read-only file-backed pages in THP.
864 This is marked experimental because it is a new feature. Write
865 support of file THPs will be developed in the next few release
868 config ARCH_HAS_PTE_SPECIAL
872 # Some architectures require a special hugepage directory format that is
873 # required to support multiple hugepage sizes. For example a4fe3ce76
874 # "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
875 # introduced it on powerpc. This allows for a more flexible hugepage
878 config ARCH_HAS_HUGEPD
881 config MAPPING_DIRTY_HELPERS
887 # struct io_mapping based helper. Selected by drivers that need them