mm: workingset: make shadow node shrinker memcg aware
author		Vladimir Davydov <vdavydov@virtuozzo.com>
		Thu, 17 Mar 2016 21:18:42 +0000 (14:18 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Thu, 17 Mar 2016 22:09:34 +0000 (15:09 -0700)
The workingset code was recently made memcg aware, but the shadow node
shrinker is still global.  As a result, one small cgroup can consume all
the memory available for shadow nodes, possibly hurting other cgroups by
reclaiming their shadow nodes, even though the reclaim distances stored
in its own shadow nodes have no effect.  To avoid this, make the shadow
node shrinker memcg aware.
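
For orientation, a minimal sketch of the shape such a shrinker takes
once both flags are set (names hypothetical, not part of this patch):
the shrinker core then calls the callbacks once per (node, memcg) pair
and identifies the target via sc->nid and sc->memcg.

    #include <linux/shrinker.h>
    #include <linux/list_lru.h>

    /* hypothetical memcg-aware list_lru holding the shrinkable objects */
    static struct list_lru example_lru;

    static unsigned long example_count(struct shrinker *shrinker,
                                       struct shrink_control *sc)
    {
            /* per-cgroup, per-node count rather than a global one */
            return list_lru_shrink_count(&example_lru, sc);
    }

    static unsigned long example_scan(struct shrinker *shrinker,
                                      struct shrink_control *sc)
    {
            /* a real implementation walks the lru and frees objects */
            return SHRINK_STOP;
    }

    static struct shrinker example_shrinker = {
            .count_objects = example_count,
            .scan_objects  = example_scan,
            .seeks         = DEFAULT_SEEKS,
            .flags         = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
    };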

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c
mm/workingset.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index bc8e4e22f58f10ce73e831d19a35c0ee057a65a3..1191d79aa495e698fdd233e91c2b2b2e317cba9d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -403,6 +403,9 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                int nr_pages);
 
+unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+                                          int nid, unsigned int lru_mask);
+
 static inline
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
@@ -661,6 +664,13 @@ mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
 {
 }
 
+static inline unsigned long
+mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+                            int nid, unsigned int lru_mask)
+{
+       return 0;
+}
+
 static inline void
 mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
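
The stub on the !CONFIG_MEMCG side (second hunk) returns 0 so callers
need no #ifdefs.  lru_mask is a bitmask over enum lru_list; a caller
wanting the file-backed pages charged to a cgroup on one node would do
something like this (sketch):

    /* active + inactive file pages of this memcg on node nid */
    unsigned long file_pages;

    file_pages = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE);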
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 341bf86d26c258ddb77717b0e1514e3d05c1eb10..ae8b81c55685794c48821986a7817fa42e4354c1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -638,9 +638,8 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
        __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 }
 
-static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
-                                                 int nid,
-                                                 unsigned int lru_mask)
+unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+                                          int nid, unsigned int lru_mask)
 {
        unsigned long nr = 0;
        int zid;
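
The diff context cuts the body short.  In kernels of this era the
function simply sums the requested LRU lists over every zone of the
node; roughly (a sketch, with mem_cgroup_zone_nr_lru_pages being the
file's internal per-zone helper):

    unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
                                               int nid, unsigned int lru_mask)
    {
            unsigned long nr = 0;
            int zid;

            /* accumulate the masked LRU counts across all zones of nid */
            for (zid = 0; zid < MAX_NR_ZONES; zid++)
                    nr += mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
                                                       lru_mask);
            return nr;
    }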
diff --git a/mm/workingset.c b/mm/workingset.c
index 68e8cd94ebe48e4ecca8c92df8212179e6e16524..8a75f8d2916af99a1efb6101666038dcd4309c04 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -349,8 +349,12 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
        shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
        local_irq_enable();
 
-       pages = node_page_state(sc->nid, NR_ACTIVE_FILE) +
-               node_page_state(sc->nid, NR_INACTIVE_FILE);
+       if (memcg_kmem_enabled())
+               pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
+                                                    LRU_ALL_FILE);
+       else
+               pages = node_page_state(sc->nid, NR_ACTIVE_FILE) +
+                       node_page_state(sc->nid, NR_INACTIVE_FILE);
 
        /*
         * Active cache pages are limited to 50% of memory, and shadow
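
The hunk ends inside the comment; the remainder of count_shadow_nodes()
(not shown here) turns pages into a cap on the number of shadow nodes
and reports only the excess to the shrinker core, roughly as below.
some_shift is a stand-in, since the actual cap arithmetic is not
visible in this excerpt:

    /* continuation sketch of count_shadow_nodes(), not verbatim */
    unsigned long max_nodes;

    max_nodes = pages >> some_shift;    /* hypothetical shift */
    if (shadow_nodes <= max_nodes)
            return 0;
    return shadow_nodes - max_nodes;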
@@ -460,7 +464,7 @@ static struct shrinker workingset_shadow_shrinker = {
        .count_objects = count_shadow_nodes,
        .scan_objects = scan_shadow_nodes,
        .seeks = DEFAULT_SEEKS,
-       .flags = SHRINKER_NUMA_AWARE,
+       .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
 };
 
 /*
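
Setting SHRINKER_MEMCG_AWARE only yields per-cgroup counts if the
list_lru behind the shrinker tracks its objects per memcg as well.
With the list_lru API of this era that is requested at init time;
continuing the hypothetical example_lru/example_shrinker sketch from
above:

    static int __init example_init(void)
    {
            static struct lock_class_key example_key;   /* hypothetical */
            int ret;

            /* 'true' asks the list_lru to account objects per memcg */
            ret = __list_lru_init(&example_lru, true, &example_key);
            if (ret)
                    return ret;

            ret = register_shrinker(&example_shrinker);
            if (ret)
                    list_lru_destroy(&example_lru);
            return ret;
    }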