/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

typedef int vm_fault_t;

struct address_space;
struct mem_cgroup;
struct hmm;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * If you allocate the page using alloc_pages(), you can use some of the
 * space in struct page for your own purposes.  The five words in the main
 * union are available, except for bit 0 of the first word which must be
 * kept clear.  Many users use this word to store a pointer to an object
 * which is guaranteed to be aligned.  If you use the same storage as
 * page->mapping, you must restore it to NULL before freeing the page.
 *
 * If your page will not be mapped to userspace, you can also use the four
 * bytes in the mapcount union, but you must call page_mapcount_reset()
 * before freeing it.
 *
 * If you want to use the refcount field, it must be used in such a way
 * that other CPUs temporarily incrementing and then decrementing the
 * refcount does not cause problems.  On receiving the page from
 * alloc_pages(), the refcount will be positive.
 *
 * If you allocate pages of order > 0, you can use some of the fields
 * in each subpage, but you may need to restore some of their values
 * afterwards.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and
 * counters.  That requires that freelist & counters be adjacent and
 * double-word aligned.  We align all struct pages to double-word
 * boundaries, and ensure that 'freelist' is aligned within the
 * struct.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment
#endif

struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	/*
	 * Five words (20/40 bytes) are available in this union.
	 * WARNING: bit 0 of the first word is used for PageTail(). That
	 * means the other users of this union MUST NOT use the bit to
	 * avoid collision and false-positive PageTail().
	 */
	union {
		struct {	/* Page cache and anonymous pages */
			/**
			 * @lru: Pageout list, eg. active_list protected by
			 * zone_lru_lock.  Sometimes used as a generic list
			 * by the page owner.
			 */
			struct list_head lru;
			/* See page-flags.h for PAGE_MAPPING_FLAGS */
			struct address_space *mapping;
			pgoff_t index;		/* Our offset within mapping. */
			/**
			 * @private: Mapping-private opaque data.
			 * Usually used for buffer_heads if PagePrivate.
			 * Used for swp_entry_t if PageSwapCache.
			 * Indicates order in the buddy system if PageBuddy.
			 */
			unsigned long private;
		};
		struct {	/* page_pool used by netstack */
			/**
			 * @dma_addr: might require a 64-bit value even on
			 * 32-bit architectures.
			 */
			dma_addr_t dma_addr;
		};
		struct {	/* slab, slob and slub */
			union {
				struct list_head slab_list;	/* uses lru */
				struct {	/* Partial pages */
					struct page *next;
#ifdef CONFIG_64BIT
					int pages;	/* Nr of pages left */
					int pobjects;	/* Approximate count */
#else
					short int pages;
					short int pobjects;
#endif
				};
			};
			struct kmem_cache *slab_cache; /* not slob */
			/* Double-word boundary */
			void *freelist;		/* first free object */
			union {
				void *s_mem;	/* slab: first object */
				unsigned long counters;		/* SLUB */
				struct {			/* SLUB */
					unsigned inuse:16;
					unsigned objects:15;
					unsigned frozen:1;
				};
			};
		};
		struct {	/* Tail pages of compound page */
			unsigned long compound_head;	/* Bit zero is set */

			/* First tail page only */
			unsigned char compound_dtor;
			unsigned char compound_order;
			atomic_t compound_mapcount;
		};
		struct {	/* Second tail page of compound page */
			unsigned long _compound_pad_1;	/* compound_head */
			unsigned long _compound_pad_2;
			struct list_head deferred_list;
		};
		struct {	/* Page table pages */
			unsigned long _pt_pad_1;	/* compound_head */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
			unsigned long _pt_pad_2;	/* mapping */
			union {
				struct mm_struct *pt_mm; /* x86 pgds only */
				atomic_t pt_frag_refcount; /* powerpc */
			};
#if ALLOC_SPLIT_PTLOCKS
			spinlock_t *ptl;
#else
			spinlock_t ptl;
#endif
		};
		struct {	/* ZONE_DEVICE pages */
			/** @pgmap: Points to the hosting device page map. */
			struct dev_pagemap *pgmap;
			unsigned long hmm_data;
			unsigned long _zd_pad_1;	/* uses mapping */
		};

		/** @rcu_head: You can use this to free a page by RCU. */
		struct rcu_head rcu_head;
	};

	union {		/* This union is 4 bytes in size. */
		/*
		 * If the page can be mapped to userspace, encodes the number
		 * of times this page is referenced by a page table.
		 */
		atomic_t _mapcount;

		/*
		 * If the page is neither PageSlab nor mappable to userspace,
		 * the value stored here may help determine what this page
		 * is used for.  See page-flags.h for a list of page types
		 * which are currently stored here.
		 */
		unsigned int page_type;

		unsigned int active;		/* SLAB */
		int units;			/* SLOB */
	};

	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
	atomic_t _refcount;

#ifdef CONFIG_MEMCG
	struct mem_cgroup *mem_cgroup;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
} _struct_page_alignment;
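
/*
 * Illustrative sketch, kept out of the build: a caller that owns a page from
 * alloc_pages() may reuse page->private as scratch space, following the rules
 * in the comment above struct page.  The helpers below are hypothetical
 * examples, not existing kernel API.
 */
#if 0	/* example only */
static struct page *my_alloc_tagged_page(unsigned long tag, gfp_t gfp)
{
	struct page *page = alloc_pages(gfp, 0);

	if (page)
		page->private = tag;	/* one of the five available words */
	return page;
}

static void my_free_tagged_page(struct page *page)
{
	page->private = 0;		/* restore before freeing */
	__free_pages(page, 0);
}
#endif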

/*
 * Used for sizing the vmemmap region on some architectures
 */
#define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

struct page_frag_cache {
	void *va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/*
	 * We maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_refcount every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
	bool pfmemalloc;
};
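
/*
 * Illustrative sketch, kept out of the build: page_frag_cache is normally
 * driven through page_frag_alloc()/page_frag_free() (declared elsewhere, in
 * linux/gfp.h at the time of writing).  The static cache and the 256-byte
 * fragment size below are arbitrary example choices.
 */
#if 0	/* example only */
static struct page_frag_cache my_frag_cache;

static void *my_alloc_frag(void)
{
	/* Carves a 256-byte fragment out of the cached page(s). */
	return page_frag_alloc(&my_frag_cache, 256, GFP_ATOMIC);
}

static void my_free_frag(void *data)
{
	page_frag_free(data);
}
#endif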

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

	atomic_long_t swap_readahead_info;
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

struct kioctx_table;
struct mm_struct {
	struct {
		struct vm_area_struct *mmap;		/* list of VMAs */
		struct rb_root mm_rb;
		u64 vmacache_seqnum;			/* per-thread vmacache */
#ifdef CONFIG_MMU
		unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
		unsigned long mmap_base;	/* base of mmap area */
		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
		/* Base addresses for compatible mmap() */
		unsigned long mmap_compat_base;
		unsigned long mmap_compat_legacy_base;
#endif
		unsigned long task_size;	/* size of task vm space */
		unsigned long highest_vm_end;	/* highest vma end address */
		pgd_t * pgd;

		/**
		 * @mm_users: The number of users including userspace.
		 *
		 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
		 * drops to 0 (i.e. when the task exits and there are no other
		 * temporary reference holders), we also release a reference on
		 * @mm_count (which may then free the &struct mm_struct if
		 * @mm_count also drops to 0).
		 */
		atomic_t mm_users;

		/**
		 * @mm_count: The number of references to &struct mm_struct
		 * (@mm_users count as 1).
		 *
		 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
		 * &struct mm_struct is freed.
		 */
		atomic_t mm_count;

#ifdef CONFIG_MMU
		atomic_long_t pgtables_bytes;	/* PTE page table pages */
#endif
		int map_count;			/* number of VMAs */

		spinlock_t page_table_lock; /* Protects page tables and some
					     * counters
					     */
		struct rw_semaphore mmap_sem;

		struct list_head mmlist; /* List of maybe swapped mm's.  These
					  * are globally strung together off
					  * init_mm.mmlist, and are protected
					  * by mmlist_lock
					  */


		unsigned long hiwater_rss; /* High-watermark of RSS usage */
		unsigned long hiwater_vm;  /* High-water virtual memory usage */

		unsigned long total_vm;	   /* Total pages mapped */
		unsigned long locked_vm;   /* Pages that have PG_mlocked set */
		unsigned long pinned_vm;   /* Refcount permanently increased */
		unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
		unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
		unsigned long stack_vm;	   /* VM_STACK */
		unsigned long def_flags;

		spinlock_t arg_lock; /* protect the below fields */
		unsigned long start_code, end_code, start_data, end_data;
		unsigned long start_brk, brk, start_stack;
		unsigned long arg_start, arg_end, env_start, env_end;

		unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

		/*
		 * Special counters, in some configurations protected by the
		 * page_table_lock, in other configurations by being atomic.
		 */
		struct mm_rss_stat rss_stat;

		struct linux_binfmt *binfmt;

		/* Architecture-specific MM context */
		mm_context_t context;

		unsigned long flags; /* Must use atomic bitops to access */

		struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_MEMBARRIER
		atomic_t membarrier_state;
#endif
#ifdef CONFIG_AIO
		spinlock_t			ioctx_lock;
		struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
		/*
		 * "owner" points to a task that is regarded as the canonical
		 * user/owner of this mm. All of the following must be true in
		 * order for it to be changed:
		 *
		 * current == mm->owner
		 * current->mm != mm
		 * new_owner->mm == mm
		 * new_owner->alloc_lock is held
		 */
		struct task_struct __rcu *owner;
#endif
		struct user_namespace *user_ns;

		/* store ref to file /proc/<pid>/exe symlink points to */
		struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
		struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
		/*
		 * numa_next_scan is the next time that the PTEs will be marked
		 * pte_numa. NUMA hinting faults will gather statistics and
		 * migrate pages to new nodes if necessary.
		 */
		unsigned long numa_next_scan;

		/* Restart point for scanning and setting pte_numa */
		unsigned long numa_scan_offset;

		/* numa_scan_seq prevents two threads setting pte_numa */
		int numa_scan_seq;
#endif
		/*
		 * An operation with batched TLB flushing is going on. Anything
		 * that can move process memory needs to flush the TLB when
		 * moving a PROT_NONE or PROT_NUMA mapped page.
		 */
		atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
		/* See flush_tlb_batched_pending() */
		bool tlb_flush_batched;
#endif
		struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
		atomic_long_t hugetlb_usage;
#endif
		struct work_struct async_put_work;

#if IS_ENABLED(CONFIG_HMM)
		/* HMM needs to track a few things per mm */
		struct hmm *hmm;
#endif
	} __randomize_layout;

	/*
	 * The mm_cpumask needs to be at the end of mm_struct, because it
	 * is dynamically sized based on nr_cpu_ids.
	 */
	unsigned long cpu_bitmap[];
};
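
/*
 * Illustrative sketch, kept out of the build: the refcounting rules documented
 * at @mm_users and @mm_count above, combined with a VMA walk.  get_task_mm()
 * and mmput() live in linux/sched/mm.h; the walk itself only relies on fields
 * defined in this file.  The function below is a hypothetical example, not
 * existing kernel API.
 */
#if 0	/* example only */
static unsigned long my_count_vmas(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);	/* takes an mm_users ref */
	struct vm_area_struct *vma;
	unsigned long nr = 0;

	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);			/* protects the VMA list */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		nr++;
	up_read(&mm->mmap_sem);

	mmput(mm);					/* drops the mm_users ref */
	return nr;
}
#endif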

extern struct mm_struct init_mm;

/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
	unsigned long cpu_bitmap = (unsigned long)mm;

	cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
	cpumask_clear((struct cpumask *)cpu_bitmap);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return (struct cpumask *)&mm->cpu_bitmap;
}

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
				unsigned long start, unsigned long end);
extern void tlb_finish_mmu(struct mmu_gather *tlb,
				unsigned long start, unsigned long end);

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
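
/*
 * Illustrative sketch, kept out of the build: the protocol described in
 * inc_tlb_flush_pending() above - bump the counter before rewriting PTEs
 * under the PTL, flush, then drop it.  flush_tlb_range() is assumed to come
 * from asm/tlbflush.h; the function itself is a hypothetical example.
 */
#if 0	/* example only */
static void my_change_protection(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	inc_tlb_flush_pending(mm);		/* visible before the PTL unlock */

	/* ... walk the page tables under the PTL and rewrite the PTEs ... */

	flush_tlb_range(vma, start, end);	/* orders the decrement below */
	dec_tlb_flush_pending(mm);
}
#endif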

struct vm_fault;

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping.  If used, .pages is not checked.
	 */
	vm_fault_t (*fault)(const struct vm_special_mapping *sm,
				struct vm_area_struct *vma,
				struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
				struct vm_area_struct *new_vma);
};
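
/*
 * Illustrative sketch, kept out of the build: a minimal special mapping backed
 * by a single page and resolved through a .fault handler, in the style of the
 * vdso mappings.  Such a mapping would normally be installed with
 * _install_special_mapping() (declared in linux/mm.h); my_page, my_mapping and
 * my_special_fault are hypothetical names.
 */
#if 0	/* example only */
static struct page *my_page;

static vm_fault_t my_special_fault(const struct vm_special_mapping *sm,
				   struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	if (vmf->pgoff != 0)
		return VM_FAULT_SIGBUS;	/* only one page is backed */

	get_page(my_page);
	vmf->page = my_page;
	return 0;
}

static const struct vm_special_mapping my_mapping = {
	.name	= "[my_special]",
	/* .pages is unused because .fault is provided */
	.fault	= my_special_fault,
};
#endif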
enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

/*
 * A swap entry has to fit into a "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;
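
/*
 * Illustrative sketch, kept out of the build: swp_entry_t round-trips through
 * the helpers in linux/swapops.h (swp_entry(), swp_type(), swp_offset()),
 * which pack a swap type and offset into the single unsigned long above.
 * The values here are arbitrary examples.
 */
#if 0	/* example only */
static void my_swp_entry_demo(void)
{
	swp_entry_t entry = swp_entry(1, 0x1234);	/* type 1, offset 0x1234 */

	WARN_ON(swp_type(entry) != 1);
	WARN_ON(swp_offset(entry) != 0x1234);
}
#endif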

#endif /* _LINUX_MM_TYPES_H */