Commit | Line | Data |
---|---|---|
b20a3503 CL |
1 | #ifndef _LINUX_MIGRATE_H |
2 | #define _LINUX_MIGRATE_H | |
3 | ||
b20a3503 | 4 | #include <linux/mm.h> |
906e0be1 CL |
5 | #include <linux/mempolicy.h> |
6 | #include <linux/pagemap.h> | |
b20a3503 | 7 | |
742755a1 | 8 | typedef struct page *new_page_t(struct page *, unsigned long private, int **); |
95a402c3 | 9 | |
906e0be1 | 10 | #ifdef CONFIG_MIGRATION |
0dc952dc CL |
11 | /* Check if a vma is migratable */ |
12 | static inline int vma_migratable(struct vm_area_struct *vma) | |
13 | { | |
14 | if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED)) | |
15 | return 0; | |
906e0be1 CL |
16 | /* |
17 | * Migration allocates pages in the highest zone. If we cannot | |
18 | * do so then migration (at least from node to node) is not | |
19 | * possible. | |
20 | */ | |
21 | if (vma->vm_file && | |
22 | gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) | |
23 | < policy_zone) | |
24 | return 0; | |
0dc952dc CL |
25 | return 1; |
26 | } | |
27 | ||
b20a3503 CL |
28 | extern int isolate_lru_page(struct page *p, struct list_head *pagelist); |
29 | extern int putback_lru_pages(struct list_head *l); | |
2d1db3b1 CL |
30 | extern int migrate_page(struct address_space *, |
31 | struct page *, struct page *); | |
95a402c3 CL |
32 | extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long); |
33 | ||
2d1db3b1 CL |
34 | extern int fail_migrate_page(struct address_space *, |
35 | struct page *, struct page *); | |
b20a3503 CL |
36 | |
37 | extern int migrate_prep(void); | |
7b2259b3 CL |
38 | extern int migrate_vmas(struct mm_struct *mm, |
39 | const nodemask_t *from, const nodemask_t *to, | |
40 | unsigned long flags); | |
b20a3503 | 41 | #else |
/* Without CONFIG_MIGRATION no vma is ever migratable. */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	return 0;
}
b20a3503 CL |
44 | |
45 | static inline int isolate_lru_page(struct page *p, struct list_head *list) | |
46 | { return -ENOSYS; } | |
47 | static inline int putback_lru_pages(struct list_head *l) { return 0; } | |
95a402c3 CL |
48 | static inline int migrate_pages(struct list_head *l, new_page_t x, |
49 | unsigned long private) { return -ENOSYS; } | |
b20a3503 | 50 | |
/*
 * NOTE(review): no CONFIG_MIGRATION counterpart for migrate_pages_to is
 * declared in this header — this stub appears to be dead code; confirm
 * there are no remaining callers before removing it.
 */
static inline int migrate_pages_to(struct list_head *pagelist,
				   struct vm_area_struct *vma, int dest)
{
	return 0;
}
53 | ||
b20a3503 CL |
54 | static inline int migrate_prep(void) { return -ENOSYS; } |
55 | ||
7b2259b3 CL |
56 | static inline int migrate_vmas(struct mm_struct *mm, |
57 | const nodemask_t *from, const nodemask_t *to, | |
58 | unsigned long flags) | |
59 | { | |
60 | return -ENOSYS; | |
61 | } | |
62 | ||
b20a3503 CL |
63 | /* Possible settings for the migrate_page() method in address_space_operations */ |
64 | #define migrate_page NULL | |
65 | #define fail_migrate_page NULL | |
66 | ||
67 | #endif /* CONFIG_MIGRATION */ | |
68 | #endif /* _LINUX_MIGRATE_H */ |