Commit | Line | Data |
---|---|---|
f8af4da3 HD |
1 | #ifndef __LINUX_KSM_H |
2 | #define __LINUX_KSM_H | |
3 | /* | |
4 | * Memory merging support. | |
5 | * | |
6 | * This code enables dynamic sharing of identical pages found in different | |
7 | * memory areas, even if they are not shared by fork(). | |
8 | */ | |
9 | ||
10 | #include <linux/bitops.h> | |
11 | #include <linux/mm.h> | |
12 | #include <linux/sched.h> | |
9a840895 | 13 | #include <linux/vmstat.h> |
f8af4da3 | 14 | |
9ba69294 HD |
15 | struct mmu_gather; |
16 | ||
f8af4da3 HD |
17 | #ifdef CONFIG_KSM |
18 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | |
19 | unsigned long end, int advice, unsigned long *vm_flags); | |
20 | int __ksm_enter(struct mm_struct *mm); | |
9ba69294 HD |
21 | void __ksm_exit(struct mm_struct *mm, |
22 | struct mmu_gather **tlbp, unsigned long end); | |
f8af4da3 HD |
23 | |
24 | static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) | |
25 | { | |
26 | if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) | |
27 | return __ksm_enter(mm); | |
28 | return 0; | |
29 | } | |
30 | ||
9ba69294 HD |
31 | /* |
32 | * For KSM to handle OOM without deadlock when it's breaking COW in a | |
33 | * likely victim of the OOM killer, exit_mmap() has to serialize with | |
34 | * ksm_exit() after freeing mm's pages but before freeing its page tables. | |
35 | * That leaves a window in which KSM might refault pages which have just | |
36 | * been finally unmapped: guard against that with ksm_test_exit(), and | |
37 | * use it after getting mmap_sem in ksm.c, to check if mm is exiting. | |
38 | */ | |
39 | static inline bool ksm_test_exit(struct mm_struct *mm) | |
40 | { | |
41 | return atomic_read(&mm->mm_users) == 0; | |
42 | } | |
43 | ||
44 | static inline void ksm_exit(struct mm_struct *mm, | |
45 | struct mmu_gather **tlbp, unsigned long end) | |
f8af4da3 HD |
46 | { |
47 | if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) | |
9ba69294 | 48 | __ksm_exit(mm, tlbp, end); |
f8af4da3 | 49 | } |
9a840895 HD |
50 | |
51 | /* | |
52 | * A KSM page is one of those write-protected "shared pages" or "merged pages" | |
53 | * which KSM maps into multiple mms, wherever identical anonymous page content | |
54 | * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma. | |
55 | */ | |
56 | static inline int PageKsm(struct page *page) | |
57 | { | |
58 | return ((unsigned long)page->mapping == PAGE_MAPPING_ANON); | |
59 | } | |
60 | ||
61 | /* | |
62 | * But we have to avoid the checking which page_add_anon_rmap() performs. | |
63 | */ | |
64 | static inline void page_add_ksm_rmap(struct page *page) | |
65 | { | |
66 | if (atomic_inc_and_test(&page->_mapcount)) { | |
67 | page->mapping = (void *) PAGE_MAPPING_ANON; | |
68 | __inc_zone_page_state(page, NR_ANON_PAGES); | |
69 | } | |
70 | } | |
f8af4da3 HD |
71 | #else /* !CONFIG_KSM */ |
72 | ||
/* CONFIG_KSM disabled: madvise merge requests are accepted as no-ops. */
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}
78 | ||
/* CONFIG_KSM disabled: nothing to propagate across fork(). */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
83 | ||
9ba69294 HD |
/*
 * CONFIG_KSM disabled: no mm is ever tracked by KSM, so there is no
 * exit race to guard against; report "not exiting" unconditionally.
 * Return the bool literal false rather than integer 0 to match the
 * declared return type.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return false;
}
88 | ||
/* CONFIG_KSM disabled: nothing to tear down at mm exit. */
static inline void ksm_exit(struct mm_struct *mm,
		struct mmu_gather **tlbp, unsigned long end)
{
}
9a840895 HD |
93 | |
/* CONFIG_KSM disabled: no page can ever be a KSM page. */
static inline int PageKsm(struct page *page)
{
	return 0;
}
98 | ||
99 | /* No stub required for page_add_ksm_rmap(page) */ | |
f8af4da3 HD |
100 | #endif /* !CONFIG_KSM */ |
101 | ||
102 | #endif |