mm, migration: add destination page freeing callback
[linux-2.6-block.git] / include / linux / migrate.h
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

/*
 * Callbacks supplied by callers of migrate_pages(): new_page_t allocates
 * the destination page for a given source page; free_page_t releases a
 * destination page again (per the "destination page freeing callback"
 * changelog, presumably when the page ends up unused -- see callers).
 * @private is the caller's opaque cookie, passed through verbatim to both.
 */
typedef struct page *new_page_t(struct page *page, unsigned long private,
				int **reason);
typedef void free_page_t(struct page *page, unsigned long private);
95a402c3 11
78bd5209
RA
/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 *
 * The balloon page migration introduces this special case where a 'distinct'
 * return code is used to flag a successful page migration to unmap_and_move().
 * This approach is necessary because page migration can race against balloon
 * deflation procedure, and for such case we could introduce a nasty page leak
 * if a successfully migrated balloon page gets released concurrently with
 * migration's unmap_and_move() wrap-up steps.
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for balloon page
					   * successful migration case.
					   */
7b2a2d4a
MG
/*
 * Why a set of pages is being migrated; passed as the "reason" argument
 * to migrate_pages() below (e.g. for accounting/tracing by the caller).
 */
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};
78bd5209 37
#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
/*
 * Migrate the pages on list @l.  @new allocates a destination page for each
 * source page and @free releases one again (the "destination page freeing
 * callback" -- see typedefs above); both receive @private verbatim.
 * @reason is one of the MR_* values from enum migrate_reason.
 */
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
#else

/*
 * !CONFIG_MIGRATION: stub out the API so callers compile unchanged.
 * Functions returning a status report -ENOSYS; void ones are no-ops.
 */

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL

#endif /* CONFIG_MIGRATION */
7039e1db
PZ
89
#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
#else
/*
 * !CONFIG_NUMA_BALANCING stubs: predicates report "not migrating"/"not
 * rate-limited", waits are no-ops, and migration attempts fail -EAGAIN.
 */
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
{
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline bool migrate_ratelimited(int node)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */
b32967ff 114
220018d3
MG
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
/* Stub when THP NUMA balancing is not built in: migration attempts fail. */
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_MIGRATE_H */