Merge tag 'mm-hotfixes-stable-2023-05-03-16-27' of git://git.kernel.org/pub/scm/linux...
[linux-block.git] / include / linux / mm_types_task.h
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
2e58f173
IM
2#ifndef _LINUX_MM_TYPES_TASK_H
3#define _LINUX_MM_TYPES_TASK_H
4
9e7d2e44
IM
5/*
6 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
7 *
8 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
9 */
10
2e58f173
IM
11#include <linux/types.h>
12#include <linux/threads.h>
13#include <linux/atomic.h>
dcc2dc45 14#include <linux/cpumask.h>
2e58f173
IM
15
16#include <asm/page.h>
17
e73ad5ff
AL
18#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
19#include <asm/tlbbatch.h>
20#endif
21
9e7d2e44
IM
/*
 * Split page-table lock configuration.
 *
 * USE_SPLIT_PTE_PTLOCKS: use a per-page-table lock for PTE tables (rather
 * than the single mm-wide page_table_lock) once the system is configured
 * for at least CONFIG_SPLIT_PTLOCK_CPUS CPUs.
 *
 * USE_SPLIT_PMD_PTLOCKS: additionally split the lock at the PMD level, but
 * only on architectures that opted in via
 * CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK.
 *
 * ALLOC_SPLIT_PTLOCKS: true when a spinlock is too large to be embedded in
 * the page-table page's struct page and must be allocated separately
 * (e.g. with lockdep or certain debug spinlock configs inflating
 * SPINLOCK_SIZE).
 */
#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
26
8495f7e6
SPP
/*
 * Indices of the per-mm resident-memory counters (mm->rss_stat and the
 * per-task cached counterparts).
 *
 * When updating this, please also update struct resident_page_types[] in
 * kernel/fork.c
 */
enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS	/* Number of counter slots, not a counter itself */
};
38
9e7d2e44
IM
/*
 * A handle to a sub-page region used by the page-fragment allocator:
 * @page is the backing page, @offset/@size describe the fragment within it.
 *
 * 16-bit fields suffice only when the offset/size of any fragment is
 * guaranteed to fit in 16 bits, i.e. on 32-bit systems with pages smaller
 * than 64KiB; otherwise 32-bit fields are used.
 */
struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
49
dcc2dc45
IM
/*
 * Track pages that require TLB flushes.
 *
 * Embedded in struct task_struct; only carries state when the architecture
 * supports batched unmap TLB flushing. With the option disabled the struct
 * is empty and costs nothing.
 */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * The arch code makes the following promise: generic code can modify a
	 * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
	 * needed barriers), then call arch_tlbbatch_flush(), and the entries
	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
	 * returns.
	 */
	struct arch_tlbflush_unmap_batch arch;

	/* True if a flush is needed. */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
#endif
};
73
2e58f173 74#endif /* _LINUX_MM_TYPES_TASK_H */