mm: memcontrol: quarantine the mem_cgroup_[node_]nr_lru_pages() API
author     Johannes Weiner <hannes@cmpxchg.org>
           Tue, 14 May 2019 00:18:11 +0000 (17:18 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 14 May 2019 16:47:47 +0000 (09:47 -0700)
Only memcg_numa_stat_show() uses those wrappers and the LRU bitmasks;
group them together next to it, under CONFIG_NUMA.
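
For context, a condensed sketch of the sole consumer (abridged from
memcg_numa_stat_show() in mm/memcontrol.c; the hierarchical_* rows and
the hierarchy walk are omitted): each row of memory.numa_stat is one
LRU bitmask, summed cgroup-wide and then per NUMA node.

    /* Abridged sketch, not the full function */
    static const struct numa_stat {
            const char *name;
            unsigned int lru_mask;
    } stats[] = {
            { "total",       LRU_ALL },
            { "file",        LRU_ALL_FILE },
            { "anon",        LRU_ALL_ANON },
            { "unevictable", BIT(LRU_UNEVICTABLE) },
    };

    for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
            seq_printf(m, "%s=%lu", stat->name,
                       mem_cgroup_nr_lru_pages(memcg, stat->lru_mask));
            for_each_node_state(nid, N_MEMORY)
                    seq_printf(m, " N%d=%lu", nid,
                               mem_cgroup_node_nr_lru_pages(memcg, nid,
                                                            stat->lru_mask));
            seq_putc(m, '\n');
    }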

Link: http://lkml.kernel.org/r/20190228163020.24100-7-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <guro@fb.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mmzone.h
mm/memcontrol.c

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fba7741533bec2535ef9552ed9bf07e25ff984f8..5a4aedc160bdfde21d59a8e8aff107feb8fca1cf 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -247,11 +247,6 @@ struct lruvec {
 #endif
 };
 
-/* Mask used at gathering information at once (see memcontrol.c) */
-#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
-#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
-#define LRU_ALL             ((1 << NR_LRU_LISTS) - 1)
-
 /* Isolate unmapped file */
 #define ISOLATE_UNMAPPED       ((__force isolate_mode_t)0x2)
 /* Isolate for asynchronous migration */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d29417b93a8b4e2c77c6d2bbab763493c74f456b..287933005e11d693eb59b47a0f75905c320ecb50 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -725,37 +725,6 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
        __this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
 }
 
-static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
-                                          int nid, unsigned int lru_mask)
-{
-       struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
-       unsigned long nr = 0;
-       enum lru_list lru;
-
-       VM_BUG_ON((unsigned)nid >= nr_node_ids);
-
-       for_each_lru(lru) {
-               if (!(BIT(lru) & lru_mask))
-                       continue;
-               nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
-       }
-       return nr;
-}
-
-static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
-                       unsigned int lru_mask)
-{
-       unsigned long nr = 0;
-       enum lru_list lru;
-
-       for_each_lru(lru) {
-               if (!(BIT(lru) & lru_mask))
-                       continue;
-               nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
-       }
-       return nr;
-}
-
 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
                                       enum mem_cgroup_events_target target)
 {
@@ -3338,6 +3307,42 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
 #endif
 
 #ifdef CONFIG_NUMA
+
+#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
+#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
+#define LRU_ALL             ((1 << NR_LRU_LISTS) - 1)
+
+static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+                                          int nid, unsigned int lru_mask)
+{
+       struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
+       unsigned long nr = 0;
+       enum lru_list lru;
+
+       VM_BUG_ON((unsigned)nid >= nr_node_ids);
+
+       for_each_lru(lru) {
+               if (!(BIT(lru) & lru_mask))
+                       continue;
+               nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
+       }
+       return nr;
+}
+
+static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
+                                            unsigned int lru_mask)
+{
+       unsigned long nr = 0;
+       enum lru_list lru;
+
+       for_each_lru(lru) {
+               if (!(BIT(lru) & lru_mask))
+                       continue;
+               nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
+       }
+       return nr;
+}
+
 static int memcg_numa_stat_show(struct seq_file *m, void *v)
 {
        struct numa_stat {