/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mmu_notifier.h>
#include <linux/ksm.h>

#include <asm/tlbflush.h>

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 */
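
/*
 * Illustrative aside (added commentary, not part of the original file or
 * of the kernel build): pages only reach the trees described above after
 * an application opts a range in with madvise(2).  A minimal userspace
 * sketch, assuming a CONFIG_KSM kernel and a libc exposing MADV_MERGEABLE:
 *
 *	#include <sys/mman.h>
 *
 *	size_t len = 16 * 4096;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED)
 *		madvise(buf, len, MADV_MERGEABLE);
 *
 * madvise(..., MADV_UNMERGEABLE) undoes this, breaking COW on any merged
 * pages in the range: see ksm_madvise() below.
 */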

/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct list_head rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_item: the current rmap that we are scanning inside the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item *rmap_item;
	unsigned long seqnr;
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @link: link into mm_slot's rmap_list (rmap_list is per mm)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb_node of this rmap_item in either unstable or stable tree
 * @next: next rmap_item hanging off the same node of the stable tree
 * @prev: previous rmap_item hanging off the same node of the stable tree
 */
struct rmap_item {
	struct list_head link;
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	union {
		unsigned int oldchecksum;	/* when unstable */
		struct rmap_item *next;		/* when stable */
	};
	union {
		struct rb_node node;		/* when tree node */
		struct rmap_item *prev;		/* in stable list */
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define NODE_FLAG	0x100	/* is a node of unstable or stable tree */
#define STABLE_FLAG	0x200	/* is a node or list item of stable tree */
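
/*
 * Added note (editorial, not from the original authors): addresses are
 * page aligned, so their low bits are free and KSM packs the state above
 * into them.  For an item at (hypothetical) address 0xb7501000 inserted
 * into the unstable tree during full scan number 5, for example:
 *
 *	rmap_item->address == 0xb7501000 | NODE_FLAG | (5 & SEQNR_MASK)
 *	rmap_item->address & PAGE_MASK	 recovers the address itself
 *	rmap_item->address & STABLE_FLAG is 0: not in the stable tree
 *
 * remove_rmap_item_from_tree() below relies on exactly this encoding.
 */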

/* The stable and unstable tree heads */
static struct rb_root root_stable_tree = RB_ROOT;
static struct rb_root root_unstable_tree = RB_ROOT;

#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash;

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Limit on the number of unswappable pages used */
static unsigned long ksm_max_kernel_pages;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs;

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
static unsigned int ksm_run;

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free;

	return 0;

out_free:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static int __init mm_slots_hash_init(void)
{
	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
				GFP_KERNEL);
	if (!mm_slots_hash)
		return -ENOMEM;
	return 0;
}

static void __init mm_slots_hash_free(void)
{
	kfree(mm_slots_hash);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	hlist_for_each_entry(mm_slot, node, bucket, link) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	mm_slot->mm = mm;
	INIT_LIST_HEAD(&mm_slot->rmap_list);
	hlist_add_head(&mm_slot->link, bucket);
}
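
/*
 * Added note (editorial): both helpers above derive the bucket from the
 * mm_struct pointer itself.  Dividing by sizeof(struct mm_struct) first
 * strips the low bits, which vary little between slab-allocated
 * mm_structs, giving a better spread over the MM_SLOTS_HASH_HEADS
 * buckets than taking the pointer modulo the table size directly.
 */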

static inline int in_stable_tree(struct rmap_item *rmap_item)
{
	return rmap_item->address & STABLE_FLAG;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET);
		if (!page)
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma->vm_mm, vma, addr,
							FAULT_FLAG_WRITE);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static void break_cow(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		goto out;
	break_ksm(vma, addr);
out:
	up_read(&mm->mmap_sem);
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (!page)
		goto out;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}

/*
 * get_ksm_page: checks if the page at the virtual address in rmap_item
 * is still PageKsm, in which case we can trust the content of the page,
 * and it returns that page, with its refcount raised; but NULL if the
 * page has been zapped.
 */
static struct page *get_ksm_page(struct rmap_item *rmap_item)
{
	struct page *page;

	page = get_mergeable_page(rmap_item);
	if (page && !PageKsm(page)) {
		put_page(page);
		page = NULL;
	}
	return page;
}

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (in_stable_tree(rmap_item)) {
		struct rmap_item *next_item = rmap_item->next;

		if (rmap_item->address & NODE_FLAG) {
			if (next_item) {
				rb_replace_node(&rmap_item->node,
						&next_item->node,
						&root_stable_tree);
				next_item->address |= NODE_FLAG;
				ksm_pages_sharing--;
			} else {
				rb_erase(&rmap_item->node, &root_stable_tree);
				ksm_pages_shared--;
			}
		} else {
			struct rmap_item *prev_item = rmap_item->prev;

			BUG_ON(prev_item->next != rmap_item);
			prev_item->next = next_item;
			if (next_item) {
				BUG_ON(next_item->prev != rmap_item);
				next_item->prev = rmap_item->prev;
			}
			ksm_pages_sharing--;
		}

		rmap_item->next = NULL;

	} else if (rmap_item->address & NODE_FLAG) {
		unsigned char age;
		/*
		 * ksm_thread can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But __ksm_exit has to be careful: do the rb_erase
		 * if it's interrupting a scan, and this rmap_item was
		 * inserted by this scan rather than left from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node, &root_unstable_tree);
		ksm_pages_unshared--;
	}

	rmap_item->address &= PAGE_MASK;

	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
				       struct list_head *cur)
{
	struct rmap_item *rmap_item;

	while (cur != &mm_slot->rmap_list) {
		rmap_item = list_entry(cur, struct rmap_item, link);
		cur = cur->next;
		remove_rmap_item_from_tree(rmap_item);
		list_del(&rmap_item->link);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	while (mm_slot != &ksm_mm_head) {
		mm = mm_slot->mm;
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err) {
				up_read(&mm->mmap_sem);
				goto out;
			}
		}
		remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
		up_read(&mm->mmap_sem);

		spin_lock(&ksm_mmlist_lock);
		mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		spin_unlock(&ksm_mmlist_lock);
	}

	ksm_scan.seqnr = 0;
out:
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page, KM_USER0);
	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
	kunmap_atomic(addr, KM_USER0);
	return checksum;
}

static int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1, KM_USER0);
	addr2 = kmap_atomic(page2, KM_USER1);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2, KM_USER1);
	kunmap_atomic(addr1, KM_USER0);
	return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}
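
/*
 * Added note (editorial): the sign of memcmp_pages() doubles as the
 * ordering relation for both rbtrees - stable_tree_search(),
 * stable_tree_insert() and unstable_tree_search_insert() all descend
 * left on a negative result and right on a positive one, so "sorted by
 * content" in the comments above means sorted by this byte-wise
 * comparison of the whole page.
 */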

static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	pte_t *ptep;
	spinlock_t *ptl;
	int swapped;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto out;

	if (pte_write(*ptep)) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, addr, page_to_pfn(page));
		/*
		 * Ok this is tricky: when get_user_pages_fast() runs, it
		 * doesn't take any lock, therefore the check that we are
		 * going to make with the page count against the mapcount
		 * is racy, and O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check:
		 * this assures us that no O_DIRECT can happen after the
		 * check or in the middle of the check.
		 */
		entry = ptep_clear_flush(vma, addr, ptep);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on
		 * the page
		 */
		if ((page_mapcount(page) + 2 + swapped) != page_count(page)) {
			set_pte_at_notify(mm, addr, ptep, entry);
			goto out_unlock;
		}
		entry = pte_wrprotect(entry);
		set_pte_at_notify(mm, addr, ptep, entry);
	}
	*orig_pte = *ptep;
	err = 0;

out_unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return err;
}

/**
 * replace_page - replace page in vma by new ksm page
 * @vma: vma that holds the pte pointing to oldpage
 * @oldpage: the page we are replacing by newpage
 * @newpage: the ksm page we replace oldpage by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
			struct page *newpage, pte_t orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long addr;
	pgprot_t prot;
	int err = -EFAULT;

	prot = vm_get_page_prot(vma->vm_flags & ~VM_WRITE);

	addr = page_address_in_vma(oldpage, vma);
	if (addr == -EFAULT)
		goto out;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		goto out;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_same(*ptep, orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out;
	}

	get_page(newpage);
	page_add_ksm_rmap(newpage);

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(newpage, prot));

	page_remove_rmap(oldpage);
	put_page(oldpage);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out:
	return err;
}

/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing into oldpage
 * @oldpage: the page that we want to replace with newpage
 * @newpage: the page that we want to map instead of oldpage
 *
 * Note:
 * oldpage should be a PageAnon page, while newpage should be a PageKsm page,
 * or a newly allocated kernel page which page_add_ksm_rmap will make PageKsm.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *oldpage,
				 struct page *newpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (!(vma->vm_flags & VM_MERGEABLE))
		goto out;

	if (!PageAnon(oldpage))
		goto out;

	get_page(newpage);
	get_page(oldpage);

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(oldpage))
		goto out_putpage;
	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, oldpage, &orig_pte)) {
		unlock_page(oldpage);
		goto out_putpage;
	}
	unlock_page(oldpage);

	if (pages_identical(oldpage, newpage))
		err = replace_page(vma, oldpage, newpage, orig_pte);

out_putpage:
	put_page(oldpage);
	put_page(newpage);
out:
	return err;
}

/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 */
static int try_to_merge_with_ksm_page(struct mm_struct *mm1,
				      unsigned long addr1,
				      struct page *page1,
				      struct page *kpage)
{
	struct vm_area_struct *vma;
	int err = -EFAULT;

	down_read(&mm1->mmap_sem);
	vma = find_vma(mm1, addr1);
	if (!vma || vma->vm_start > addr1)
		goto out;

	err = try_to_merge_one_page(vma, page1, kpage);
out:
	up_read(&mm1->mmap_sem);
	return err;
}

/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns 0 if we successfully mapped two identical pages
 * into one page, -EFAULT otherwise.
 *
 * Note that this function allocates a new kernel page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
				  struct page *page1, struct mm_struct *mm2,
				  unsigned long addr2, struct page *page2)
{
	struct vm_area_struct *vma;
	struct page *kpage;
	int err = -EFAULT;

	/*
	 * The number of nodes in the stable tree
	 * is the number of kernel pages that we hold.
	 */
	if (ksm_max_kernel_pages &&
	    ksm_max_kernel_pages <= ksm_pages_shared)
		return err;

	kpage = alloc_page(GFP_HIGHUSER);
	if (!kpage)
		return err;

	down_read(&mm1->mmap_sem);
	vma = find_vma(mm1, addr1);
	if (!vma || vma->vm_start > addr1) {
		up_read(&mm1->mmap_sem);
		goto out;
	}

	copy_user_highpage(kpage, page1, addr1, vma);
	err = try_to_merge_one_page(vma, page1, kpage);
	up_read(&mm1->mmap_sem);

	if (!err) {
		err = try_to_merge_with_ksm_page(mm2, addr2, page2, kpage);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
		if (err)
			break_cow(mm1, addr1);
	}
out:
	put_page(kpage);
	return err;
}

/*
 * stable_tree_search - search page inside the stable tree
 * @page: the page that we are searching identical pages to.
 * @page2: pointer into identical page that we are holding inside the stable
 *	   tree that we have found.
 * @rmap_item: the reverse mapping item
 *
 * This function checks if there is a page inside the stable tree
 * with identical content to the page that we are scanning right now.
 *
 * This function returns a pointer to the identical rmap_item if found,
 * NULL otherwise.
 */
static struct rmap_item *stable_tree_search(struct page *page,
					    struct page **page2,
					    struct rmap_item *rmap_item)
{
	struct rb_node *node = root_stable_tree.rb_node;

	while (node) {
		struct rmap_item *tree_rmap_item, *next_rmap_item;
		int ret;

		tree_rmap_item = rb_entry(node, struct rmap_item, node);
		while (tree_rmap_item) {
			BUG_ON(!in_stable_tree(tree_rmap_item));
			cond_resched();
			page2[0] = get_ksm_page(tree_rmap_item);
			if (page2[0])
				break;
			next_rmap_item = tree_rmap_item->next;
			remove_rmap_item_from_tree(tree_rmap_item);
			tree_rmap_item = next_rmap_item;
		}
		if (!tree_rmap_item)
			return NULL;

		ret = memcmp_pages(page, page2[0]);

		if (ret < 0) {
			put_page(page2[0]);
			node = node->rb_left;
		} else if (ret > 0) {
			put_page(page2[0]);
			node = node->rb_right;
		} else {
			return tree_rmap_item;
		}
	}

	return NULL;
}

/*
 * stable_tree_insert - insert rmap_item pointing to new ksm page
 * into the stable tree.
 *
 * @page: the page that we are searching identical page to inside the stable
 *	  tree.
 * @rmap_item: pointer to the reverse mapping item.
 *
 * This function returns rmap_item on success, NULL otherwise.
 */
static struct rmap_item *stable_tree_insert(struct page *page,
					    struct rmap_item *rmap_item)
{
	struct rb_node **new = &root_stable_tree.rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct rmap_item *tree_rmap_item, *next_rmap_item;
		struct page *tree_page;
		int ret;

		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
		while (tree_rmap_item) {
			BUG_ON(!in_stable_tree(tree_rmap_item));
			cond_resched();
			tree_page = get_ksm_page(tree_rmap_item);
			if (tree_page)
				break;
			next_rmap_item = tree_rmap_item->next;
			remove_rmap_item_from_tree(tree_rmap_item);
			tree_rmap_item = next_rmap_item;
		}
		if (!tree_rmap_item)
			return NULL;

		ret = memcmp_pages(page, tree_page);
		put_page(tree_page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			/*
			 * It is not a bug that stable_tree_search() didn't
			 * find this node: because at that time our page was
			 * not yet write-protected, so may have changed since.
			 */
			return NULL;
		}
	}

	rmap_item->address |= NODE_FLAG | STABLE_FLAG;
	rmap_item->next = NULL;
	rb_link_node(&rmap_item->node, parent, new);
	rb_insert_color(&rmap_item->node, &root_stable_tree);

	ksm_pages_shared++;
	return rmap_item;
}

/*
 * unstable_tree_search_insert - search and insert items into the unstable tree.
 *
 * @page: the page that we are going to search for identical page or to insert
 *	  into the unstable tree
 * @page2: pointer into identical page that was found inside the unstable tree
 * @rmap_item: the reverse mapping item of page
 *
 * This function searches for a page in the unstable tree identical to the
 * page currently being scanned; and if no identical page is found in the
 * tree, we insert rmap_item as a new object into the unstable tree.
 *
 * This function returns a pointer to the rmap_item found to be identical
 * to the currently scanned page, NULL otherwise.
 *
 * This function does both searching and inserting, because they share
 * the same walking algorithm in an rbtree.
 */
static struct rmap_item *unstable_tree_search_insert(struct page *page,
						     struct page **page2,
						     struct rmap_item *rmap_item)
{
	struct rb_node **new = &root_unstable_tree.rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct rmap_item *tree_rmap_item;
		int ret;

		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
		page2[0] = get_mergeable_page(tree_rmap_item);
		if (!page2[0])
			return NULL;

		/*
		 * Don't substitute an unswappable ksm page
		 * just for one good swappable forked page.
		 */
		if (page == page2[0]) {
			put_page(page2[0]);
			return NULL;
		}

		ret = memcmp_pages(page, page2[0]);

		parent = *new;
		if (ret < 0) {
			put_page(page2[0]);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(page2[0]);
			new = &parent->rb_right;
		} else {
			return tree_rmap_item;
		}
	}

	rmap_item->address |= NODE_FLAG;
	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
	rb_link_node(&rmap_item->node, parent, new);
	rb_insert_color(&rmap_item->node, &root_unstable_tree);

	ksm_pages_unshared++;
	return NULL;
}

/*
 * stable_tree_append - add another rmap_item to the linked list of
 * rmap_items hanging off a given node of the stable tree, all sharing
 * the same ksm page.
 */
static void stable_tree_append(struct rmap_item *rmap_item,
			       struct rmap_item *tree_rmap_item)
{
	rmap_item->next = tree_rmap_item->next;
	rmap_item->prev = tree_rmap_item;

	if (tree_rmap_item->next)
		tree_rmap_item->next->prev = rmap_item;

	tree_rmap_item->next = rmap_item;
	rmap_item->address |= STABLE_FLAG;

	ksm_pages_sharing++;
}

/*
 * cmp_and_merge_page - first see if page can be merged into the stable tree;
 * if not, compare checksum to previous and if it's the same, see if page can
 * be inserted into the unstable tree, or merged with a page already there and
 * both transferred to the stable tree.
 *
 * @page: the page that we are searching identical page to.
 * @rmap_item: the reverse mapping into the virtual address of this page
 */
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
{
	struct page *page2[1];
	struct rmap_item *tree_rmap_item;
	unsigned int checksum;
	int err;

	if (in_stable_tree(rmap_item))
		remove_rmap_item_from_tree(rmap_item);

	/* We first start with searching the page inside the stable tree */
	tree_rmap_item = stable_tree_search(page, page2, rmap_item);
	if (tree_rmap_item) {
		if (page == page2[0])			/* forked */
			err = 0;
		else
			err = try_to_merge_with_ksm_page(rmap_item->mm,
							 rmap_item->address,
							 page, page2[0]);
		put_page(page2[0]);

		if (!err) {
			/*
			 * The page was successfully merged:
			 * add its rmap_item to the stable tree.
			 */
			stable_tree_append(rmap_item, tree_rmap_item);
		}
		return;
	}

	/*
	 * A ksm page might have got here by fork, but its other
	 * references have already been removed from the stable tree.
	 * Or it might be left over from a break_ksm which failed
	 * when the mem_cgroup had reached its limit: try again now.
	 */
	if (PageKsm(page))
		break_cow(rmap_item->mm, rmap_item->address);

	/*
	 * If the hash value of the page changed since the last time we
	 * calculated it, the page is changing frequently: therefore we
	 * don't want to insert it into the unstable tree, and we don't
	 * want to waste our time searching for something identical to
	 * it there.
	 */
	checksum = calc_checksum(page);
	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;
		return;
	}

	tree_rmap_item = unstable_tree_search_insert(page, page2, rmap_item);
	if (tree_rmap_item) {
		err = try_to_merge_two_pages(rmap_item->mm,
					     rmap_item->address, page,
					     tree_rmap_item->mm,
					     tree_rmap_item->address, page2[0]);
		/*
		 * As soon as we merge this page, we want to remove the
		 * rmap_item of the page we have merged with from the unstable
		 * tree, and insert it instead as new node in the stable tree.
		 */
		if (!err) {
			rb_erase(&tree_rmap_item->node, &root_unstable_tree);
			tree_rmap_item->address &= ~NODE_FLAG;
			ksm_pages_unshared--;

			/*
			 * If we fail to insert the page into the stable tree,
			 * we will have 2 virtual addresses that are pointing
			 * to a ksm page left outside the stable tree,
			 * in which case we need to break_cow on both.
			 */
			if (stable_tree_insert(page2[0], tree_rmap_item))
				stable_tree_append(rmap_item, tree_rmap_item);
			else {
				break_cow(tree_rmap_item->mm,
						tree_rmap_item->address);
				break_cow(rmap_item->mm, rmap_item->address);
			}
		}

		put_page(page2[0]);
	}
}

static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
					    struct list_head *cur,
					    unsigned long addr)
{
	struct rmap_item *rmap_item;

	while (cur != &mm_slot->rmap_list) {
		rmap_item = list_entry(cur, struct rmap_item, link);
		if ((rmap_item->address & PAGE_MASK) == addr) {
			if (!in_stable_tree(rmap_item))
				remove_rmap_item_from_tree(rmap_item);
			return rmap_item;
		}
		if (rmap_item->address > addr)
			break;
		cur = cur->next;
		remove_rmap_item_from_tree(rmap_item);
		list_del(&rmap_item->link);
		free_rmap_item(rmap_item);
	}

	rmap_item = alloc_rmap_item();
	if (rmap_item) {
		/* It has already been zeroed */
		rmap_item->mm = mm_slot->mm;
		rmap_item->address = addr;
		list_add_tail(&rmap_item->link, cur);
	}
	return rmap_item;
}

static struct rmap_item *scan_get_next_rmap_item(struct page **page)
{
	struct mm_struct *mm;
	struct mm_slot *slot;
	struct vm_area_struct *vma;
	struct rmap_item *rmap_item;

	if (list_empty(&ksm_mm_head.mm_list))
		return NULL;

	slot = ksm_scan.mm_slot;
	if (slot == &ksm_mm_head) {
		root_unstable_tree = RB_ROOT;

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
		ksm_scan.mm_slot = slot;
		spin_unlock(&ksm_mmlist_lock);
next_mm:
		ksm_scan.address = 0;
		ksm_scan.rmap_item = list_entry(&slot->rmap_list,
						struct rmap_item, link);
	}

	mm = slot->mm;
	down_read(&mm->mmap_sem);
	for (vma = find_vma(mm, ksm_scan.address); vma; vma = vma->vm_next) {
		if (!(vma->vm_flags & VM_MERGEABLE))
			continue;
		if (ksm_scan.address < vma->vm_start)
			ksm_scan.address = vma->vm_start;
		if (!vma->anon_vma)
			ksm_scan.address = vma->vm_end;

		while (ksm_scan.address < vma->vm_end) {
			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
			if (*page && PageAnon(*page)) {
				flush_anon_page(vma, *page, ksm_scan.address);
				flush_dcache_page(*page);
				rmap_item = get_next_rmap_item(slot,
					ksm_scan.rmap_item->link.next,
					ksm_scan.address);
				if (rmap_item) {
					ksm_scan.rmap_item = rmap_item;
					ksm_scan.address += PAGE_SIZE;
				} else
					put_page(*page);
				up_read(&mm->mmap_sem);
				return rmap_item;
			}
			if (*page)
				put_page(*page);
			ksm_scan.address += PAGE_SIZE;
			cond_resched();
		}
	}

	/*
	 * Nuke all the rmap_items that are above this current rmap:
	 * because there were no VM_MERGEABLE vmas with such addresses.
	 */
	remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next);

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(slot->mm_list.next,
						struct mm_slot, mm_list);
	if (ksm_scan.address == 0) {
		/*
		 * We've completed a full scan of all vmas, holding mmap_sem
		 * throughout, and found no VM_MERGEABLE: so do the same as
		 * __ksm_exit does to remove this mm from all our lists now.
		 */
		hlist_del(&slot->link);
		list_del(&slot->mm_list);
		free_mm_slot(slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
	}
	spin_unlock(&ksm_mmlist_lock);
	up_read(&mm->mmap_sem);

	/* Repeat until we've completed scanning the whole list */
	slot = ksm_scan.mm_slot;
	if (slot != &ksm_mm_head)
		goto next_mm;

	/*
	 * Bump seqnr here rather than at top, so that __ksm_exit
	 * can skip rb_erase on unstable tree until we run again.
	 */
	ksm_scan.seqnr++;
	return NULL;
}

/**
 * ksm_do_scan - the ksm scanner main worker function.
 * @scan_npages: number of pages we want to scan before we return.
 */
static void ksm_do_scan(unsigned int scan_npages)
{
	struct rmap_item *rmap_item;
	struct page *page;

	while (scan_npages--) {
		cond_resched();
		rmap_item = scan_get_next_rmap_item(&page);
		if (!rmap_item)
			return;
		if (!PageKsm(page) || !in_stable_tree(rmap_item))
			cmp_and_merge_page(page, rmap_item);
		else if (page_mapcount(page) == 1) {
			/*
			 * Replace now-unshared ksm page by ordinary page.
			 */
			break_cow(rmap_item->mm, rmap_item->address);
			remove_rmap_item_from_tree(rmap_item);
			rmap_item->oldchecksum = calc_checksum(page);
		}
		put_page(page);
	}
}

static int ksmd_should_run(void)
{
	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
}

static int ksm_scan_thread(void *nothing)
{
	set_user_nice(current, 5);

	while (!kthread_should_stop()) {
		mutex_lock(&ksm_thread_mutex);
		if (ksmd_should_run())
			ksm_do_scan(ksm_thread_pages_to_scan);
		mutex_unlock(&ksm_thread_mutex);

		if (ksmd_should_run()) {
			schedule_timeout_interruptible(
				msecs_to_jiffies(ksm_thread_sleep_millisecs));
		} else {
			wait_event_interruptible(ksm_thread_wait,
				ksmd_should_run() || kthread_should_stop());
		}
	}
	return 0;
}

int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	switch (advice) {
	case MADV_MERGEABLE:
		/*
		 * Be somewhat over-protective for now!
		 */
		if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
				 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
				 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
				 VM_MIXEDMAP | VM_SAO))
			return 0;		/* just ignore the advice */

		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
			err = __ksm_enter(mm);
			if (err)
				return err;
		}

		*vm_flags |= VM_MERGEABLE;
		break;

	case MADV_UNMERGEABLE:
		if (!(*vm_flags & VM_MERGEABLE))
			return 0;		/* just ignore the advice */

		if (vma->anon_vma) {
			err = unmerge_ksm_pages(vma, start, end);
			if (err)
				return err;
		}

		*vm_flags &= ~VM_MERGEABLE;
		break;
	}

	return 0;
}

int __ksm_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int needs_wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* Check ksm_run too?  Would need tighter locking */
	needs_wakeup = list_empty(&ksm_mm_head.mm_list);

	spin_lock(&ksm_mmlist_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little; when fork is followed by immediate exec, we don't
	 * want ksmd to waste time setting up and tearing down an rmap_list.
	 */
	list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
	spin_unlock(&ksm_mmlist_lock);

	set_bit(MMF_VM_MERGEABLE, &mm->flags);

	if (needs_wakeup)
		wake_up_interruptible(&ksm_thread_wait);

	return 0;
}

void __ksm_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	/*
	 * This process is exiting: doesn't hold and doesn't need mmap_sem;
	 * but we do need to exclude ksmd and other exiters while we modify
	 * the various lists and trees.
	 */
	mutex_lock(&ksm_thread_mutex);
	spin_lock(&ksm_mmlist_lock);
	mm_slot = get_mm_slot(mm);
	if (!list_empty(&mm_slot->rmap_list)) {
		spin_unlock(&ksm_mmlist_lock);
		remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
		spin_lock(&ksm_mmlist_lock);
	}

	if (ksm_scan.mm_slot == mm_slot) {
		ksm_scan.mm_slot = list_entry(
			mm_slot->mm_list.next, struct mm_slot, mm_list);
		ksm_scan.address = 0;
		ksm_scan.rmap_item = list_entry(
			&ksm_scan.mm_slot->rmap_list, struct rmap_item, link);
		if (ksm_scan.mm_slot == &ksm_mm_head)
			ksm_scan.seqnr++;
	}

	hlist_del(&mm_slot->link);
	list_del(&mm_slot->mm_list);
	spin_unlock(&ksm_mmlist_lock);

	free_mm_slot(mm_slot);
	clear_bit(MMF_VM_MERGEABLE, &mm->flags);
	mutex_unlock(&ksm_thread_mutex);
}

#define KSM_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static ssize_t sleep_millisecs_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	ksm_thread_sleep_millisecs = msecs;

	return count;
}
KSM_ATTR(sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = strict_strtoul(buf, 10, &nr_pages);
	if (err || nr_pages > UINT_MAX)
		return -EINVAL;

	ksm_thread_pages_to_scan = nr_pages;

	return count;
}
KSM_ATTR(pages_to_scan);

static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%u\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	int err;
	unsigned long flags;

	err = strict_strtoul(buf, 10, &flags);
	if (err || flags > UINT_MAX)
		return -EINVAL;
	if (flags > KSM_RUN_UNMERGE)
		return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the unswappable pages_shared (but leaves
	 * mm_slots on the list for when ksmd may be set running again).
	 */

	mutex_lock(&ksm_thread_mutex);
	if (ksm_run != flags) {
		ksm_run = flags;
		if (flags & KSM_RUN_UNMERGE) {
			err = unmerge_and_remove_all_rmap_items();
			if (err) {
				ksm_run = KSM_RUN_STOP;
				count = err;
			}
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	if (flags & KSM_RUN_MERGE)
		wake_up_interruptible(&ksm_thread_wait);

	return count;
}
KSM_ATTR(run);
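
/*
 * Added example (editorial; illustrative, assuming root and a CONFIG_KSM
 * kernel): the attributes in this file appear under /sys/kernel/mm/ksm/
 * (see ksm_attr_group below), so ksmd can be tuned and started from a
 * shell:
 *
 *	echo 100 > /sys/kernel/mm/ksm/pages_to_scan
 *	echo 20 > /sys/kernel/mm/ksm/sleep_millisecs
 *	echo 1 > /sys/kernel/mm/ksm/run		(KSM_RUN_MERGE: start ksmd)
 *	cat /sys/kernel/mm/ksm/pages_sharing	(page slots sharing ksm pages)
 *	echo 2 > /sys/kernel/mm/ksm/run		(KSM_RUN_UNMERGE: undo merges)
 */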

static ssize_t max_kernel_pages_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = strict_strtoul(buf, 10, &nr_pages);
	if (err)
		return -EINVAL;

	ksm_max_kernel_pages = nr_pages;

	return count;
}

static ssize_t max_kernel_pages_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_max_kernel_pages);
}
KSM_ATTR(max_kernel_pages);

static ssize_t pages_shared_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

static ssize_t pages_sharing_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);

static ssize_t pages_unshared_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

static ssize_t pages_volatile_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long ksm_pages_volatile;

	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
				- ksm_pages_sharing - ksm_pages_unshared;
	/*
	 * It was not worth any locking to calculate that statistic,
	 * but it might therefore sometimes be negative: conceal that.
	 */
	if (ksm_pages_volatile < 0)
		ksm_pages_volatile = 0;
	return sprintf(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);

static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&max_kernel_pages_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&full_scans_attr.attr,
	NULL,
};

static struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};

static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	err = ksm_slab_init();
	if (err)
		goto out;

	err = mm_slots_hash_init();
	if (err)
		goto out_free1;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		printk(KERN_ERR "ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free2;
	}

	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		printk(KERN_ERR "ksm: register sysfs failed\n");
		goto out_free3;
	}

	return 0;

out_free3:
	kthread_stop(ksm_thread);
out_free2:
	mm_slots_hash_free();
out_free1:
	ksm_slab_free();
out:
	return err;
}
module_init(ksm_init)