bugfix for memory cgroup controller: migration under memory controller fix
author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
           Thu, 7 Feb 2008 08:14:10 +0000 (00:14 -0800)
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>
           Thu, 7 Feb 2008 16:42:19 +0000 (08:42 -0800)
While using the memory controller cgroup, page migration under it works as follows (a rough code sketch follows the list).
==
 1. Uncharge all refs at try_to_unmap() time.
 2. Charge refs again at remove_migration_ptes().
==
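Roughly, in code (an illustrative condensation of the old behavior, not
literal kernel code; the uncharge actually happens via page_remove_rmap()
as the ptes are torn down, and the re-charge inside remove_migration_ptes()):

	static void old_migration_charging(struct page *page,
					   struct page *newpage)
	{
		/* 1. unmapping drops each pte and uncharges the page
		 *    from its cgroup as the mappings go away */
		try_to_unmap(page, 1);

		/* ... page contents are copied to newpage ... */

		/* 2. re-establishing the ptes charges newpage again,
		 *    possibly against a different cgroup than the one
		 *    that held the original charge */
		remove_migration_ptes(page, newpage);
	}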
This is simple but has the following problems.
==
 The page is uncharged and charged back again if *mapped*.
    - This means that the cgroup before migration can be different from
      the one after migration.
    - If the page is not mapped but is charged as page cache, the charge
      is simply ignored (because the page is not mapped, it is never
      uncharged before migration). This is a memory leak.
==
This patch keeps the memory cgroup association intact across page
migration by holding one extra refcount on the page_cgroup for the
duration. Three functions are added; their use in unmap_and_move() is
sketched after the list.

 mem_cgroup_prepare_migration() --- increase refcnt of page->page_cgroup
 mem_cgroup_end_migration()     --- decrease refcnt of page->page_cgroup
 mem_cgroup_page_migration()    --- copy page->page_cgroup from old page
                                    to new page.
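Their intended use, condensed from the mm/migrate.c hunks below (charge,
rc, page and newpage are the locals of unmap_and_move()):

	charge = mem_cgroup_prepare_migration(page);  /* pin page_cgroup */

	try_to_unmap(page, 1);
	if (!page_mapped(page))
		/* on success, move_to_new_page() calls
		 * mem_cgroup_page_migration() before remapping ptes */
		rc = move_to_new_page(newpage, page);

	if (rc) {
		/* migration failed: roll back and drop the pin */
		remove_migration_ptes(page, page);
		if (charge)
			mem_cgroup_end_migration(page);
	} else if (charge)
		/* page_cgroup moved; drop the pin via the new page */
		mem_cgroup_end_migration(newpage);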

During migration:
  - the old page is under PG_locked.
  - the new page is under PG_locked, too.
  - neither the old page nor the new page is on the LRU.

These three facts guarantee that the page_cgroup handoff has no race;
expressed as assertions, they read as shown below.
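(Illustrative only; the patch itself does not add these checks.)

	/* both pages locked, both off the LRU, for the whole handoff */
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageLocked(newpage));
	VM_BUG_ON(PageLRU(page));
	VM_BUG_ON(PageLRU(newpage));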

Tested and working well on an x86_64 fake-NUMA box.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c
mm/migrate.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 42536c737d9c412be88aac0d3959f17e0622ff05..4ec712967f7cdd717b05074cdccfa20c7669cf0d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -60,6 +60,10 @@ static inline void mem_cgroup_uncharge_page(struct page *page)
        mem_cgroup_uncharge(page_get_page_cgroup(page));
 }
 
+extern int mem_cgroup_prepare_migration(struct page *page);
+extern void mem_cgroup_end_migration(struct page *page);
+extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
+
 #else /* CONFIG_CGROUP_MEM_CONT */
 static inline void mm_init_cgroup(struct mm_struct *mm,
                                        struct task_struct *p)
@@ -117,6 +121,21 @@ static inline int task_in_mem_cgroup(struct task_struct *task,
        return 1;
 }
 
+static inline int mem_cgroup_prepare_migration(struct page *page)
+{
+       return 0;
+}
+
+static inline void mem_cgroup_end_migration(struct page *page)
+{
+}
+
+static inline void
+mem_cgroup_page_migration(struct page *page, struct page *newpage)
+{
+}
+
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3270ce7375dbf03d5837c74f5aa74f3a32d9bf67..128f45c16fa6626ac33813fdaefc239c18e9967f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -492,6 +492,49 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
                }
        }
 }
+/*
+ * Returns non-zero if a page (under migration) has valid page_cgroup member.
+ * Refcnt of page_cgroup is incremented.
+ */
+
+int mem_cgroup_prepare_migration(struct page *page)
+{
+       struct page_cgroup *pc;
+       int ret = 0;
+       lock_page_cgroup(page);
+       pc = page_get_page_cgroup(page);
+       if (pc && atomic_inc_not_zero(&pc->ref_cnt))
+               ret = 1;
+       unlock_page_cgroup(page);
+       return ret;
+}
+
+void mem_cgroup_end_migration(struct page *page)
+{
+       struct page_cgroup *pc = page_get_page_cgroup(page);
+       mem_cgroup_uncharge(pc);
+}
+/*
+ * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
+ * And no race with uncharge() routines because page_cgroup for *page*
+ * has extra one reference by mem_cgroup_prepare_migration.
+ */
+
+void mem_cgroup_page_migration(struct page *page, struct page *newpage)
+{
+       struct page_cgroup *pc;
+retry:
+       pc = page_get_page_cgroup(page);
+       if (!pc)
+               return;
+       if (clear_page_cgroup(page, pc) != pc)
+               goto retry;
+       pc->page = newpage;
+       lock_page_cgroup(newpage);
+       page_assign_page_cgroup(newpage, pc);
+       unlock_page_cgroup(newpage);
+       return;
+}
 
 int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
 {
diff --git a/mm/migrate.c b/mm/migrate.c
index 7637941446971fc9c31de48066926f944cd6f8d6..a73504ff5ab982007b2e0355e49f0dedcac65899 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -593,9 +593,10 @@ static int move_to_new_page(struct page *newpage, struct page *page)
        else
                rc = fallback_migrate_page(mapping, newpage, page);
 
-       if (!rc)
+       if (!rc) {
+               mem_cgroup_page_migration(page, newpage);
                remove_migration_ptes(page, newpage);
-       else
+       } else
                newpage->mapping = NULL;
 
        unlock_page(newpage);
@@ -614,6 +615,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
        int *result = NULL;
        struct page *newpage = get_new_page(page, private, &result);
        int rcu_locked = 0;
+       int charge = 0;
 
        if (!newpage)
                return -ENOMEM;
@@ -673,14 +675,19 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                goto rcu_unlock;
        }
 
+       charge = mem_cgroup_prepare_migration(page);
        /* Establish migration ptes or remove ptes */
        try_to_unmap(page, 1);
 
        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page);
 
-       if (rc)
+       if (rc) {
                remove_migration_ptes(page, page);
+               if (charge)
+                       mem_cgroup_end_migration(page);
+       } else if (charge)
+               mem_cgroup_end_migration(newpage);
 rcu_unlock:
        if (rcu_locked)
                rcu_read_unlock();