/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H

/*
 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
 *
 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
 */
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

#include <asm/page.h>
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
#include <asm/tlbbatch.h>
#endif
#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
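
/*
 * Editorial note (not in the original header): with split PTE locks enabled,
 * each page-table page carries its own spinlock instead of all walkers
 * serializing on mm->page_table_lock. ALLOC_SPLIT_PTLOCKS is true when a
 * spinlock no longer fits in the word that struct page reserves for it
 * (e.g. with lock debugging enabled), so the ptlock must be allocated
 * separately instead of being embedded.
 */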
/*
 * When updating this, please also update struct resident_page_types[] in
 * kernel/fork.c
 */
enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};
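
/*
 * Editorial sketch (not part of the original header): these indices select a
 * per-mm RSS counter, read through the helpers in <linux/mm.h>, e.g.:
 *
 *	unsigned long anon = get_mm_counter(mm, MM_ANONPAGES);
 *	unsigned long file = get_mm_counter(mm, MM_FILEPAGES);
 */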

struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
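
/*
 * Editorial note (not in the original header): struct task_struct embeds one
 * of these as ->task_frag; networking reaches it via sk_page_frag() to
 * satisfy small per-task page-fragment allocations.
 */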

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * The arch code makes the following promise: generic code can modify a
	 * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
	 * needed barriers), then call arch_tlbbatch_flush(), and the entries
	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
	 * returns.
	 */
	struct arch_tlbflush_unmap_batch arch;

	/* True if a flush is needed. */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
#endif
};
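
/*
 * Illustrative sketch (editorial, not part of the original header) of how an
 * unmap path would use the batch under the promise documented above; tlb_ubc
 * names a hypothetical pointer to this struct:
 *
 *	pteval = ptep_get_and_clear(mm, addr, pte);
 *	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
 *	tlb_ubc->flush_required = true;
 *	if (pte_dirty(pteval))
 *		tlb_ubc->writable = true;
 *	...
 *	arch_tlbbatch_flush(&tlb_ubc->arch);	(flushes on all CPUs before returning)
 */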

#endif /* _LINUX_MM_TYPES_TASK_H */