/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H

/*
 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
 *
 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
 */

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

#include <asm/page.h>

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
#include <asm/tlbbatch.h>
#endif

#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
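
/*
 * Illustrative sketch, not part of the original header: how
 * ALLOC_SPLIT_PTLOCKS is typically consumed. When a spinlock no longer fits
 * in a machine word (e.g. with lock debugging enabled), the split
 * page-table lock is allocated out of line instead of being embedded in
 * struct page. The hypothetical container below only demonstrates the
 * pattern; the real fields live in struct page in mm_types.h.
 */
#if 0	/* example only */
struct example_ptl_holder {
#if ALLOC_SPLIT_PTLOCKS
	spinlock_t *ptl;	/* lock too big: allocated separately */
#else
	spinlock_t ptl;		/* lock fits: embedded directly */
#endif
};
#endif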

/*
 * The per task VMA cache array:
 */
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

struct vmacache {
	u64 seqnum;
	struct vm_area_struct *vmas[VMACACHE_SIZE];
};
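
/*
 * Illustrative sketch, not part of the original header: how the cache above
 * is typically indexed. A lookup hashes the faulting address down to one of
 * the VMACACHE_SIZE slots and validates seqnum against the owning mm before
 * trusting a cached VMA (compare vmacache_find() in mm/vmacache.c; the
 * helper below is a simplified assumption, not the kernel's exact code,
 * which scans all slots).
 */
#if 0	/* example only */
#define VMACACHE_HASH(addr)	((addr >> PAGE_SHIFT) & VMACACHE_MASK)

static inline struct vm_area_struct *
example_vmacache_lookup(struct vmacache *cache, u64 mm_seqnum,
			unsigned long addr)
{
	if (cache->seqnum != mm_seqnum)	/* mm changed: cache is stale */
		return NULL;

	/* Caller must still check that the VMA actually covers addr. */
	return cache->vmas[VMACACHE_HASH(addr)];
}
#endif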

enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};

#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* Per-thread cached RSS counter information */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTE_PTLOCKS */

struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
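
/*
 * Illustrative sketch, not part of the original header: the intended split
 * between the two structures above. Hot paths bump the plain ints in the
 * per-thread task_rss_stat; once 'events' crosses a threshold, the deltas
 * are folded into the shared atomic counters in mm_rss_stat. The kernel
 * does this in sync_mm_rss()/check_sync_rss_stat() in mm/memory.c; the
 * helper and threshold name below are illustrative assumptions.
 */
#if 0	/* example only */
#define EXAMPLE_RSS_EVENTS_THRESH	64

static inline void example_sync_rss(struct task_rss_stat *tstat,
				    struct mm_rss_stat *mstat)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (tstat->count[i]) {
			atomic_long_add(tstat->count[i], &mstat->count[i]);
			tstat->count[i] = 0;
		}
	}
	tstat->events = 0;
}
#endif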

struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
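
/*
 * Illustrative sketch, not part of the original header: a page_frag carves
 * small allocations out of a single page, tracked only by an offset and a
 * usable size, which is why 16 bits suffice unless the page size or word
 * size demands 32. Simplified from the way socket code consumes a
 * page_frag (see skb_page_frag_refill() in net/core/sock.c); refcounting
 * and refill are omitted, and the helper name is hypothetical.
 */
#if 0	/* example only */
static inline void *example_page_frag_alloc(struct page_frag *pf,
					    unsigned int fragsz)
{
	if (!pf->page || pf->offset + fragsz > pf->size)
		return NULL;	/* caller refills with a fresh page */

	pf->offset += fragsz;
	return page_address(pf->page) + pf->offset - fragsz;
}
#endif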

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * The arch code makes the following promise: generic code can modify a
	 * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
	 * needed barriers), then call arch_tlbbatch_flush(), and the entries
	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
	 * returns.
	 */
	struct arch_tlbflush_unmap_batch arch;

	/* True if a flush is needed. */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
#endif
};
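
/*
 * Illustrative sketch, not part of the original header: the contract the
 * comment above describes, as seen from the reclaim side. The sequence is
 * condensed from set_tlb_ubc_flush_pending()/try_to_unmap_flush() in
 * mm/rmap.c; the wrapper function itself is hypothetical.
 */
#if 0	/* example only */
static inline void example_batched_unmap(struct tlbflush_unmap_batch *batch,
					 struct mm_struct *mm,
					 bool pte_was_dirty)
{
	/* 1. The caller has already cleared the PTE. */

	/* 2. Queue the mm; arch_tlbbatch_add_mm() supplies the barriers. */
	arch_tlbbatch_add_mm(&batch->arch, mm);
	batch->flush_required = true;

	/* 3. A dirty PTE must be flushed before any I/O is started. */
	if (pte_was_dirty)
		batch->writable = true;

	/*
	 * 4. Later, before I/O or freeing the page, one call flushes every
	 *    queued entry on all CPUs:
	 *
	 *	arch_tlbbatch_flush(&batch->arch);
	 */
}
#endif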

#endif /* _LINUX_MM_TYPES_TASK_H */