memcg: create extensible page stat update routines
author Greg Thelen <gthelen@google.com>
Thu, 13 Jan 2011 23:47:37 +0000 (15:47 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 14 Jan 2011 01:32:50 +0000 (17:32 -0800)
Replace the mem_cgroup_update_file_mapped() memcg statistic
update routine with two new routines:
* mem_cgroup_inc_page_stat()
* mem_cgroup_dec_page_stat()

As before, only the file_mapped statistic is managed.  However, these more
general interfaces make it easier to add new statistics; the memcg dirty
page accounting work adds its statistics through them.
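
For illustration, the mm/rmap.c hunk below shows how a call site migrates
to the new interface; a minimal before/after sketch:

	/* before: direction passed as a signed count */
	mem_cgroup_update_file_mapped(page, 1);
	mem_cgroup_update_file_mapped(page, -1);

	/* after: direction encoded in the routine name */
	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
	mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);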

Signed-off-by: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrea Righi <arighi@develer.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c
mm/rmap.c

index 159a0762aeafe28d70ed674c22105a18256c4a44..067115ce6b3ed81f6662fe4d70d332298aa7cb17 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -25,6 +25,11 @@ struct page_cgroup;
 struct page;
 struct mm_struct;
 
+/* Stats that can be updated by kernel. */
+enum mem_cgroup_page_stat_item {
+       MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
+};
+
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
@@ -121,7 +126,22 @@ static inline bool mem_cgroup_disabled(void)
        return false;
 }
 
-void mem_cgroup_update_file_mapped(struct page *page, int val);
+void mem_cgroup_update_page_stat(struct page *page,
+                                enum mem_cgroup_page_stat_item idx,
+                                int val);
+
+static inline void mem_cgroup_inc_page_stat(struct page *page,
+                                           enum mem_cgroup_page_stat_item idx)
+{
+       mem_cgroup_update_page_stat(page, idx, 1);
+}
+
+static inline void mem_cgroup_dec_page_stat(struct page *page,
+                                           enum mem_cgroup_page_stat_item idx)
+{
+       mem_cgroup_update_page_stat(page, idx, -1);
+}
+
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                                gfp_t gfp_mask);
 u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
@@ -293,8 +313,13 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline void mem_cgroup_update_file_mapped(struct page *page,
-                                                       int val)
+static inline void mem_cgroup_inc_page_stat(struct page *page,
+                                           enum mem_cgroup_page_stat_item idx)
+{
+}
+
+static inline void mem_cgroup_dec_page_stat(struct page *page,
+                                           enum mem_cgroup_page_stat_item idx)
 {
 }
 
index 741206ffdacec6cc5f8cd4ccedbde93e88490452..3d8a0c79dece40cb1c5a4ed3b9eb61da43a65c13 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1600,7 +1600,8 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
  * possibility of race condition. If there is, we take a lock.
  */
 
-static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
+void mem_cgroup_update_page_stat(struct page *page,
+                                enum mem_cgroup_page_stat_item idx, int val)
 {
        struct mem_cgroup *mem;
        struct page_cgroup *pc = lookup_page_cgroup(page);
@@ -1623,30 +1624,27 @@ static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
                        goto out;
        }
 
-       this_cpu_add(mem->stat->count[idx], val);
-
        switch (idx) {
-       case MEM_CGROUP_STAT_FILE_MAPPED:
+       case MEMCG_NR_FILE_MAPPED:
                if (val > 0)
                        SetPageCgroupFileMapped(pc);
                else if (!page_mapped(page))
                        ClearPageCgroupFileMapped(pc);
+               idx = MEM_CGROUP_STAT_FILE_MAPPED;
                break;
        default:
                BUG();
        }
 
+       this_cpu_add(mem->stat->count[idx], val);
+
 out:
        if (unlikely(need_unlock))
                unlock_page_cgroup(pc);
        rcu_read_unlock();
        return;
 }
-
-void mem_cgroup_update_file_mapped(struct page *page, int val)
-{
-       mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val);
-}
+EXPORT_SYMBOL(mem_cgroup_update_page_stat);
 
 /*
  * size of first charge trial. "32" comes from vmscan.c's magic value.
index c30f33854f9751ff27dac942bbcdbbdfb30bd200..f21f4a1d6a1ce144d2ce45c30123eb2010f93bb8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -937,7 +937,7 @@ void page_add_file_rmap(struct page *page)
 {
        if (atomic_inc_and_test(&page->_mapcount)) {
                __inc_zone_page_state(page, NR_FILE_MAPPED);
-               mem_cgroup_update_file_mapped(page, 1);
+               mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
        }
 }
 
@@ -979,7 +979,7 @@ void page_remove_rmap(struct page *page)
                                              NR_ANON_TRANSPARENT_HUGEPAGES);
        } else {
                __dec_zone_page_state(page, NR_FILE_MAPPED);
-               mem_cgroup_update_file_mapped(page, -1);
+               mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
        }
        /*
         * It would be tidy to reset the PageAnon mapping here,
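
A note on the mm/memcontrol.c hunk: this_cpu_add() moves below the switch
because the switch now also translates the caller-visible
mem_cgroup_page_stat_item value into the internal per-cpu counter index
(MEMCG_NR_FILE_MAPPED -> MEM_CGROUP_STAT_FILE_MAPPED) before the counter is
touched.  That translation step is what makes the interface extensible.  A
hedged sketch of how a later statistic would be wired up, where
MEMCG_NR_FILE_DIRTY, MEM_CGROUP_STAT_FILE_DIRTY and the
SetPageCgroupFileDirty()/ClearPageCgroupFileDirty() helpers are hypothetical
names used only for illustration (the real dirty accounting patches define
their own):

	/* 1) extend the caller-visible enum in include/linux/memcontrol.h */
	enum mem_cgroup_page_stat_item {
		MEMCG_NR_FILE_MAPPED,	/* # of pages charged as file rss */
		MEMCG_NR_FILE_DIRTY,	/* hypothetical: # of dirty file pages */
	};

	/* 2) add a case to the switch in mem_cgroup_update_page_stat() */
	case MEMCG_NR_FILE_DIRTY:
		if (val > 0)
			SetPageCgroupFileDirty(pc);	/* hypothetical flag helper */
		else
			ClearPageCgroupFileDirty(pc);
		idx = MEM_CGROUP_STAT_FILE_DIRTY;	/* hypothetical counter index */
		break;

	/* 3) callers need only the existing generic wrappers */
	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_DIRTY);

No change to mem_cgroup_inc_page_stat()/mem_cgroup_dec_page_stat() themselves
is required.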