/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_UNMAP		1

/**
 * struct movable_operations - Driver page migration
 * @isolate_page:
 * The VM calls this function to prepare the page to be moved. The page
 * is locked and the driver should not unlock it. The driver should
 * return ``true`` if the page is movable and ``false`` if it is not
 * currently movable. After this function returns, the VM uses the
 * page->lru field, so the driver must preserve any information which
 * is usually stored here.
 *
 * @migrate_page:
 * After isolation, the VM calls this function with the isolated
 * @src page. The driver should copy the contents of the
 * @src page to the @dst page and set up the fields of @dst page.
 * Both pages are locked.
 * If page migration is successful, the driver should call
 * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
 * If the driver cannot migrate the page at the moment, it can return
 * -EAGAIN. The VM interprets this as a temporary migration failure and
 * will retry it later. Any other error value is a permanent migration
 * failure and migration will not be retried.
 * The driver shouldn't touch the @src->lru field while in the
 * migrate_page() function. It may write to @dst->lru.
 *
 * @putback_page:
 * If migration fails on the isolated page, the VM informs the driver
 * that the page is no longer a candidate for migration by calling
 * this function. The driver should put the isolated page back into
 * its own data structure.
 */
struct movable_operations {
	bool (*isolate_page)(struct page *, isolate_mode_t);
	int (*migrate_page)(struct page *dst, struct page *src,
			enum migrate_mode);
	void (*putback_page)(struct page *);
};
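
/*
 * Example: a minimal movable_operations implementation for a hypothetical
 * driver that hands pages out from a private pool. This is an illustrative
 * sketch, not in-tree code; foo_lookup(), foo_remove(), foo_reinsert() and
 * foo_copy() stand in for driver-private bookkeeping helpers.
 *
 *	static bool foo_isolate_page(struct page *page, isolate_mode_t mode)
 *	{
 *		// Page is locked by the VM. Detach it from the driver's own
 *		// lists so the VM is free to reuse page->lru.
 *		if (!foo_lookup(page))
 *			return false;	// not currently movable
 *		foo_remove(page);
 *		return true;
 *	}
 *
 *	static int foo_migrate_page(struct page *dst, struct page *src,
 *				    enum migrate_mode mode)
 *	{
 *		foo_copy(dst, src);		// copy contents + driver state
 *		__ClearPageMovable(src);	// src no longer driver-movable
 *		return MIGRATEPAGE_SUCCESS;	// or -EAGAIN to be retried
 *	}
 *
 *	static void foo_putback_page(struct page *page)
 *	{
 *		foo_reinsert(page);	// migration failed; take the page back
 *	}
 *
 *	static const struct movable_operations foo_mops = {
 *		.isolate_page	= foo_isolate_page,
 *		.migrate_page	= foo_migrate_page,
 *		.putback_page	= foo_putback_page,
 *	};
 *
 * Each pooled page would be registered with __SetPageMovable(page, &foo_mops)
 * (declared below under CONFIG_COMPACTION) while the page is locked.
 */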

/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

void putback_movable_pages(struct list_head *l);
int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count);
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason,
		unsigned int *ret_succeeded);
struct page *alloc_migration_target(struct page *page, unsigned long private);
bool isolate_movable_page(struct page *page, isolate_mode_t mode);
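
/*
 * Example: the common migrate_pages() calling pattern, moving already
 * isolated pages to a preferred node. A hedged sketch: target_nid is a
 * made-up variable, and the migration_target_control fields shown are the
 * ones defined in mm/internal.h.
 *
 *	struct migration_target_control mtc = {
 *		.nid		= target_nid,
 *		.gfp_mask	= GFP_USER | __GFP_MOVABLE,
 *	};
 *	LIST_HEAD(pagelist);
 *	int ret;
 *
 *	// ... isolate the pages to be moved onto &pagelist ...
 *
 *	ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC,
 *			    MR_MEMORY_HOTPLUG, NULL);
 *	if (ret)
 *		putback_movable_pages(&pagelist);	// return the leftovers
 */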

int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src);
void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
		spinlock_t *ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);
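
/*
 * Example: filesystems whose folios carry no private data can use
 * migrate_folio() directly as their migration callback; it performs
 * folio_migrate_mapping() plus the flags/contents copy for the common
 * case. A sketch for a hypothetical "foofs":
 *
 *	static const struct address_space_operations foofs_aops = {
 *		.read_folio	= foofs_read_folio,
 *		.writepages	= foofs_writepages,
 *		.migrate_folio	= migrate_folio,
 *	};
 */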

#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason, unsigned int *ret_succeeded)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return false; }

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page);
void __SetPageMovable(struct page *page, const struct movable_operations *ops);
void __ClearPageMovable(struct page *page);
#else
static inline bool PageMovable(struct page *page) { return false; }
static inline void __SetPageMovable(struct page *page,
		const struct movable_operations *ops)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

static inline bool folio_test_movable(struct folio *folio)
{
	return PageMovable(&folio->page);
}

static inline
const struct movable_operations *folio_movable_ops(struct folio *folio)
{
	VM_BUG_ON(!__folio_test_movable(folio));

	return (const struct movable_operations *)
		((unsigned long)folio->mapping - PAGE_MAPPING_MOVABLE);
}

static inline
const struct movable_operations *page_movable_ops(struct page *page)
{
	VM_BUG_ON(!__PageMovable(page));

	return (const struct movable_operations *)
		((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
}
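
/*
 * Note on the decoding above: for driver-movable pages, page->mapping does
 * not point at an address_space. __SetPageMovable() (see mm/compaction.c)
 * stashes the ops pointer there with the PAGE_MAPPING_MOVABLE bit set,
 * roughly:
 *
 *	page->mapping = (void *)((unsigned long)ops | PAGE_MAPPING_MOVABLE);
 *
 * so the helpers above recover the pointer by masking that bit back off.
 */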

#ifdef CONFIG_NUMA_BALANCING
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
			   int node);
#else
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long might not have
 * enough bits to store both the physical address and the flags. So far we
 * have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
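
/*
 * Example: during a device migration, a driver typically checks the src
 * entry's flags and encodes its newly allocated destination page into the
 * matching dst entry. A sketch (dpage is a driver-allocated page):
 *
 *	if (src[i] & MIGRATE_PFN_MIGRATE) {
 *		struct page *spage = migrate_pfn_to_page(src[i]);
 *
 *		// ... copy spage's contents into dpage ...
 *		dst[i] = migrate_pfn(page_to_pfn(dpage));
 *	}
 */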

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
	MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The caller must not modify the src array after migrate_vma_setup()
	 * returns, and must not change the dst array after
	 * migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;

	/*
	 * Set to vmf->page if this is being called to migrate a page as part
	 * of a migrate_to_ram() callback.
	 */
	struct page		*fault_page;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
			unsigned long npages);
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
			unsigned long npages);
void migrate_device_finalize(unsigned long *src_pfns,
			unsigned long *dst_pfns, unsigned long npages);
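
/*
 * Example: the driver-side migrate_vma flow described in
 * Documentation/mm/hmm.rst, sketched for a hypothetical driver moving one
 * range into device memory; foo_drv, foo_alloc_device_page() and
 * foo_copy_to_dev() are made-up stand-ins.
 *
 *	unsigned long src[NPAGES], dst[NPAGES];
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= start + (NPAGES << PAGE_SHIFT),
 *		.src		= src,
 *		.dst		= dst,
 *		.pgmap_owner	= foo_drv,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *	unsigned long i;
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *
 *	for (i = 0; i < args.npages; i++) {
 *		struct page *spage = migrate_pfn_to_page(src[i]);
 *		struct page *dpage;
 *
 *		if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;	// this entry cannot be migrated
 *		dpage = foo_alloc_device_page();
 *		if (spage)
 *			foo_copy_to_dev(dpage, spage);
 *		dst[i] = migrate_pfn(page_to_pfn(dpage));
 *	}
 *
 *	migrate_vma_pages(&args);	// replace CPU mappings with dst pages
 *	migrate_vma_finalize(&args);	// drop references, unlock pages
 */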

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */