mm, memcg: avoid stale protection values when cgroup is above protection
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 749d239c62b2b714fc73aac9db6044a4ab575065..9f0811d242551aba2b620232f8ad5b1354e669ca 100644 (file)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2331,7 +2331,8 @@ out:
                unsigned long protection;
 
                lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
-               protection = mem_cgroup_protection(memcg,
+               protection = mem_cgroup_protection(sc->target_mem_cgroup,
+                                                  memcg,
                                                   sc->memcg_low_reclaim);
 
                if (protection) {
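
The hunk above threads the reclaim root (sc->target_mem_cgroup) into
mem_cgroup_protection(). Effective protection values (memory.emin and
memory.elow) are only recalculated for cgroups sitting below the reclaim
root; when memcg is itself the target of reclaim, whatever effective values
it carries can be stale leftovers from an earlier reclaim cycle with a
different root, and must not be honored. The helper's body is not part of
this diff; a plausible sketch of its shape after gaining the root argument:

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
						  struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	if (mem_cgroup_disabled())
		return 0;

	/*
	 * No protection applies to the reclaim target itself: its
	 * effective values are not recalculated for this cycle, so
	 * anything stored could be stale.
	 */
	if (root == memcg)
		return 0;

	/* In low reclaim, only the memory.min floor still protects. */
	if (in_low_reclaim)
		return READ_ONCE(memcg->memory.emin);

	return max(READ_ONCE(memcg->memory.emin),
		   READ_ONCE(memcg->memory.elow));
}

With that guard, the caller above sees protection == 0 whenever the scanned
memcg is the reclaim root, and the proportional scan logic treats it as
unprotected instead of skipping it on a stale value.
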
@@ -4222,7 +4223,8 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
         * unmapped file backed pages.
         */
        if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
-           node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
+           node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
+           pgdat->min_slab_pages)
                return NODE_RECLAIM_FULL;
 
        /*
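This second hunk follows the conversion of the slab vmstat counters to byte
granularity: NR_SLAB_RECLAIMABLE became NR_SLAB_RECLAIMABLE_B, a byte-based
item, while pgdat->min_slab_pages remains a page count, so the read has to
go through node_page_state_pages(), which always yields pages. Neither
accessor's body appears in this diff; below is a sketch of the plausible
split, assuming the node-level counters for the _B items are still kept in
page units and only per-memcg accounting is sub-page:

unsigned long node_page_state_pages(struct pglist_data *pgdat,
				    enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);

	/* Unsynced per-cpu deltas can leave a transient negative sum. */
	if (x < 0)
		x = 0;

	return x;
}

unsigned long node_page_state(struct pglist_data *pgdat,
			      enum node_stat_item item)
{
	/* Byte-based (_B) items must be read via the _pages variant. */
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return node_page_state_pages(pgdat, item);
}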