mm: memcontrol: separate kmem code from legacy tcp accounting code
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bee6b1c9fdce1d7906d87e353ffb106ad9b61d02..7f8219b58e0c4938471e16f1907e88ca2e0e3539 100644
@@ -382,14 +382,11 @@ struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
 {
        struct mem_cgroup *memcg;
 
-       rcu_read_lock();
-
        memcg = page->mem_cgroup;
 
        if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
                memcg = root_mem_cgroup;
 
-       rcu_read_unlock();
        return &memcg->css;
 }
 
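With the rcu_read_lock()/rcu_read_unlock() pair dropped, the lookup becomes a plain read of page->mem_cgroup; presumably the callers already guarantee the page's memcg binding is stable without RCU, though the rationale is not part of this excerpt. Applied, the function reads:

    struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
    {
            struct mem_cgroup *memcg;

            memcg = page->mem_cgroup;

            if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
                    memcg = root_mem_cgroup;

            return &memcg->css;
    }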
@@ -2381,7 +2378,7 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
        struct page_counter *counter;
        int ret;
 
-       if (!memcg_kmem_is_active(memcg))
+       if (!memcg_kmem_online(memcg))
                return 0;
 
        if (!page_counter_try_charge(&memcg->kmem, nr_pages, &counter))
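memcg_kmem_online() and the kmem_state field it tests live in include/linux/memcontrol.h, which is not part of this excerpt. A sketch of what the companion header change presumably looks like, with KMEM_NONE assumed as the zero-initialized starting state (the BUG_ON(memcg->kmem_state) below relies on it):

    /* sketch, not from this diff; names inferred from the hunks below */
    enum memcg_kmem_state {
            KMEM_NONE,              /* never onlined */
            KMEM_ALLOCATED,         /* offlined, kmem caches not yet destroyed */
            KMEM_ONLINE,            /* kmem accounting active */
    };

    static inline bool memcg_kmem_online(struct mem_cgroup *memcg)
    {
            return memcg->kmem_state == KMEM_ONLINE;
    }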
@@ -2864,15 +2861,13 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-static int memcg_activate_kmem(struct mem_cgroup *memcg,
-                              unsigned long nr_pages)
+static int memcg_online_kmem(struct mem_cgroup *memcg)
 {
        int err = 0;
        int memcg_id;
 
        BUG_ON(memcg->kmemcg_id >= 0);
-       BUG_ON(memcg->kmem_acct_activated);
-       BUG_ON(memcg->kmem_acct_active);
+       BUG_ON(memcg->kmem_state);
 
        /*
         * For simplicity, we won't allow this to be disabled.  It also can't
@@ -2900,23 +2895,15 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
                goto out;
        }
 
-       /*
-        * We couldn't have accounted to this cgroup, because it hasn't got
-        * activated yet, so this should succeed.
-        */
-       err = page_counter_limit(&memcg->kmem, nr_pages);
-       VM_BUG_ON(err);
-
        static_branch_inc(&memcg_kmem_enabled_key);
        /*
-        * A memory cgroup is considered kmem-active as soon as it gets
+        * A memory cgroup is considered kmem-online as soon as it gets
         * kmemcg_id. Setting the id after enabling static branching will
         * guarantee no one starts accounting before all call sites are
         * patched.
         */
        memcg->kmemcg_id = memcg_id;
-       memcg->kmem_acct_activated = true;
-       memcg->kmem_acct_active = true;
+       memcg->kmem_state = KMEM_ONLINE;
 out:
        return err;
 }
@@ -2927,10 +2914,14 @@ static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
        int ret;
 
        mutex_lock(&memcg_limit_mutex);
-       if (!memcg_kmem_is_active(memcg))
-               ret = memcg_activate_kmem(memcg, limit);
-       else
-               ret = page_counter_limit(&memcg->kmem, limit);
+       /* Top-level cgroup doesn't propagate from root */
+       if (!memcg_kmem_online(memcg)) {
+               ret = memcg_online_kmem(memcg);
+               if (ret)
+                       goto out;
+       }
+       ret = page_counter_limit(&memcg->kmem, limit);
+out:
        mutex_unlock(&memcg_limit_mutex);
        return ret;
 }
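Note how the page_counter_limit() call deleted from memcg_online_kmem() above resurfaces here: the limit is now applied by the caller whether or not onlining just happened. With the hunk applied:

    static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
                                       unsigned long limit)
    {
            int ret;

            mutex_lock(&memcg_limit_mutex);
            /* Top-level cgroup doesn't propagate from root */
            if (!memcg_kmem_online(memcg)) {
                    ret = memcg_online_kmem(memcg);
                    if (ret)
                            goto out;
            }
            ret = page_counter_limit(&memcg->kmem, limit);
    out:
            mutex_unlock(&memcg_limit_mutex);
            return ret;
    }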
@@ -2945,20 +2936,78 @@ static int memcg_propagate_kmem(struct mem_cgroup *memcg)
 
        mutex_lock(&memcg_limit_mutex);
        /*
-        * If the parent cgroup is not kmem-active now, it cannot be activated
-        * after this point, because it has at least one child already.
+        * If the parent cgroup is not kmem-online now, it cannot be
+        * onlined after this point, because it has at least one child
+        * already.
         */
-       if (memcg_kmem_is_active(parent))
-               ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX);
+       if (memcg_kmem_online(parent))
+               ret = memcg_online_kmem(memcg);
        mutex_unlock(&memcg_limit_mutex);
        return ret;
 }
+
+static void memcg_offline_kmem(struct mem_cgroup *memcg)
+{
+       struct cgroup_subsys_state *css;
+       struct mem_cgroup *parent, *child;
+       int kmemcg_id;
+
+       if (memcg->kmem_state != KMEM_ONLINE)
+               return;
+       /*
+        * Clear the online state before clearing memcg_caches array
+        * entries. The slab_mutex in memcg_deactivate_kmem_caches()
+        * guarantees that no cache will be created for this cgroup
+        * after we are done (see memcg_create_kmem_cache()).
+        */
+       memcg->kmem_state = KMEM_ALLOCATED;
+
+       memcg_deactivate_kmem_caches(memcg);
+
+       kmemcg_id = memcg->kmemcg_id;
+       BUG_ON(kmemcg_id < 0);
+
+       parent = parent_mem_cgroup(memcg);
+       if (!parent)
+               parent = root_mem_cgroup;
+
+       /*
+        * Change kmemcg_id of this cgroup and all its descendants to the
+        * parent's id, and then move all entries from this cgroup's list_lrus
+        * to ones of the parent. After we have finished, all list_lrus
+        * corresponding to this cgroup are guaranteed to remain empty. The
+        * ordering is imposed by list_lru_node->lock taken by
+        * memcg_drain_all_list_lrus().
+        */
+       css_for_each_descendant_pre(css, &memcg->css) {
+               child = mem_cgroup_from_css(css);
+               BUG_ON(child->kmemcg_id != kmemcg_id);
+               child->kmemcg_id = parent->kmemcg_id;
+               if (!memcg->use_hierarchy)
+                       break;
+       }
+       memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
+
+       memcg_free_cache_id(kmemcg_id);
+}
+
+static void memcg_free_kmem(struct mem_cgroup *memcg)
+{
+       if (memcg->kmem_state == KMEM_ALLOCATED) {
+               memcg_destroy_kmem_caches(memcg);
+               static_branch_dec(&memcg_kmem_enabled_key);
+               WARN_ON(page_counter_read(&memcg->kmem));
+       }
+}
 #else
 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
                                   unsigned long limit)
 {
        return -EINVAL;
 }
+static void memcg_offline_kmem(struct mem_cgroup *memcg)
+{
+}
 #endif /* CONFIG_MEMCG_KMEM */
 
 /*
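Taken together, the new helpers turn the old activate/deactivate/destroy trio into an explicit state machine. A summary of the transitions as the hunks above define them (KMEM_NONE is the assumed initial value; see the sketch earlier):

    /*
     * memcg_online_kmem():   KMEM_NONE      -> KMEM_ONLINE
     *                        (id allocated, static key bumped)
     * memcg_offline_kmem():  KMEM_ONLINE    -> KMEM_ALLOCATED
     *                        (caches deactivated, list_lrus reparented,
     *                         kmemcg_id freed)
     * memcg_free_kmem():     KMEM_ALLOCATED -> final teardown
     *                        (caches destroyed, static key dropped)
     */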
@@ -3494,16 +3543,17 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
 swap_buffers:
        /* Swap primary and spare array */
        thresholds->spare = thresholds->primary;
-       /* If all events are unregistered, free the spare array */
-       if (!new) {
-               kfree(thresholds->spare);
-               thresholds->spare = NULL;
-       }
 
        rcu_assign_pointer(thresholds->primary, new);
 
        /* To be sure that nobody uses thresholds */
        synchronize_rcu();
+
+       /* If all events are unregistered, free the spare array */
+       if (!new) {
+               kfree(thresholds->spare);
+               thresholds->spare = NULL;
+       }
 unlock:
        mutex_unlock(&memcg->thresholds_lock);
 }
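This hunk is an ordering fix rather than part of the rename: thresholds->spare holds the just-retired primary array, so freeing it before synchronize_rcu() returns could pull it out from under a concurrent RCU reader. Applied, the tail of the function waits for readers before freeing:

    swap_buffers:
            /* Swap primary and spare array */
            thresholds->spare = thresholds->primary;

            rcu_assign_pointer(thresholds->primary, new);

            /* To be sure that nobody uses thresholds */
            synchronize_rcu();

            /* If all events are unregistered, free the spare array */
            if (!new) {
                    kfree(thresholds->spare);
                    thresholds->spare = NULL;
            }
    unlock:
            mutex_unlock(&memcg->thresholds_lock);
    }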
@@ -3584,88 +3634,6 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
        return 0;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
-static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
-{
-       int ret;
-
-       ret = memcg_propagate_kmem(memcg);
-       if (ret)
-               return ret;
-
-       return tcp_init_cgroup(memcg, ss);
-}
-
-static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
-{
-       struct cgroup_subsys_state *css;
-       struct mem_cgroup *parent, *child;
-       int kmemcg_id;
-
-       if (!memcg->kmem_acct_active)
-               return;
-
-       /*
-        * Clear the 'active' flag before clearing memcg_caches arrays entries.
-        * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
-        * guarantees no cache will be created for this cgroup after we are
-        * done (see memcg_create_kmem_cache()).
-        */
-       memcg->kmem_acct_active = false;
-
-       memcg_deactivate_kmem_caches(memcg);
-
-       kmemcg_id = memcg->kmemcg_id;
-       BUG_ON(kmemcg_id < 0);
-
-       parent = parent_mem_cgroup(memcg);
-       if (!parent)
-               parent = root_mem_cgroup;
-
-       /*
-        * Change kmemcg_id of this cgroup and all its descendants to the
-        * parent's id, and then move all entries from this cgroup's list_lrus
-        * to ones of the parent. After we have finished, all list_lrus
-        * corresponding to this cgroup are guaranteed to remain empty. The
-        * ordering is imposed by list_lru_node->lock taken by
-        * memcg_drain_all_list_lrus().
-        */
-       css_for_each_descendant_pre(css, &memcg->css) {
-               child = mem_cgroup_from_css(css);
-               BUG_ON(child->kmemcg_id != kmemcg_id);
-               child->kmemcg_id = parent->kmemcg_id;
-               if (!memcg->use_hierarchy)
-                       break;
-       }
-       memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
-
-       memcg_free_cache_id(kmemcg_id);
-}
-
-static void memcg_destroy_kmem(struct mem_cgroup *memcg)
-{
-       if (memcg->kmem_acct_activated) {
-               memcg_destroy_kmem_caches(memcg);
-               static_branch_dec(&memcg_kmem_enabled_key);
-               WARN_ON(page_counter_read(&memcg->kmem));
-       }
-       tcp_destroy_cgroup(memcg);
-}
-#else
-static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
-{
-       return 0;
-}
-
-static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
-{
-}
-
-static void memcg_destroy_kmem(struct mem_cgroup *memcg)
-{
-}
-#endif
-
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
@@ -4276,9 +4244,14 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
        }
        mutex_unlock(&memcg_create_mutex);
 
-       ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
+#ifdef CONFIG_MEMCG_KMEM
+       ret = memcg_propagate_kmem(memcg);
+       if (ret)
+               return ret;
+       ret = tcp_init_cgroup(memcg);
        if (ret)
                return ret;
+#endif
 
 #ifdef CONFIG_INET
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
@@ -4314,7 +4287,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 
        vmpressure_cleanup(&memcg->vmpressure);
 
-       memcg_deactivate_kmem(memcg);
+       memcg_offline_kmem(memcg);
 
        wb_memcg_offline(memcg);
 }
@@ -4330,11 +4303,16 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
-       memcg_destroy_kmem(memcg);
 #ifdef CONFIG_INET
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
                static_branch_dec(&memcg_sockets_enabled_key);
 #endif
+
+#ifdef CONFIG_MEMCG_KMEM
+       memcg_free_kmem(memcg);
+       tcp_destroy_cgroup(memcg);
+#endif
+
        __mem_cgroup_free(memcg);
 }