Merge tag 'nios2-v5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/lftan...
[linux-2.6-block.git] / mm / vmscan.c
index c27dd62ed594205f6fd225be2883c4d1a4ad15da..e5d52d6a24aff1c7fccd292bb8a2455e1eec1d52 100644 (file)
@@ -171,11 +171,22 @@ int vm_swappiness = 60;
  */
 unsigned long vm_total_pages;
 
+static void set_task_reclaim_state(struct task_struct *task,
+                                  struct reclaim_state *rs)
+{
+       /* Check for an overwrite */
+       WARN_ON_ONCE(rs && task->reclaim_state);
+
+       /* Check for the nulling of an already-nulled member */
+       WARN_ON_ONCE(!rs && !task->reclaim_state);
+
+       task->reclaim_state = rs;
+}
+
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
-#ifdef CONFIG_MEMCG_KMEM
-
+#ifdef CONFIG_MEMCG
 /*
  * We allow subsystems to populate their shrinker-related
  * LRU lists before register_shrinker_prepared() is called
@@ -227,30 +238,7 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
        idr_remove(&shrinker_idr, id);
        up_write(&shrinker_rwsem);
 }
-#else /* CONFIG_MEMCG_KMEM */
-static int prealloc_memcg_shrinker(struct shrinker *shrinker)
-{
-       return 0;
-}
-
-static void unregister_memcg_shrinker(struct shrinker *shrinker)
-{
-}
-#endif /* CONFIG_MEMCG_KMEM */
-
-static void set_task_reclaim_state(struct task_struct *task,
-                                  struct reclaim_state *rs)
-{
-       /* Check for an overwrite */
-       WARN_ON_ONCE(rs && task->reclaim_state);
-
-       /* Check for the nulling of an already-nulled member */
-       WARN_ON_ONCE(!rs && !task->reclaim_state);
 
-       task->reclaim_state = rs;
-}
-
-#ifdef CONFIG_MEMCG
 static bool global_reclaim(struct scan_control *sc)
 {
        return !sc->target_mem_cgroup;
@@ -305,6 +293,15 @@ static bool memcg_congested(pg_data_t *pgdat,
 
 }
 #else
+static int prealloc_memcg_shrinker(struct shrinker *shrinker)
+{
+       return 0;
+}
+
+static void unregister_memcg_shrinker(struct shrinker *shrinker)
+{
+}
+
 static bool global_reclaim(struct scan_control *sc)
 {
        return true;
@@ -591,7 +588,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
        return freed;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                        struct mem_cgroup *memcg, int priority)
 {
@@ -599,7 +596,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
        unsigned long ret, freed = 0;
        int i;
 
-       if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
+       if (!mem_cgroup_online(memcg))
                return 0;
 
        if (!down_read_trylock(&shrinker_rwsem))
@@ -625,6 +622,11 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                        continue;
                }
 
+               /* Call non-slab shrinkers even though kmem is disabled */
+               if (!memcg_kmem_enabled() &&
+                   !(shrinker->flags & SHRINKER_NONSLAB))
+                       continue;
+
                ret = do_shrink_slab(&sc, shrinker, priority);
                if (ret == SHRINK_EMPTY) {
                        clear_bit(i, map->map);
@@ -661,13 +663,13 @@ unlock:
        up_read(&shrinker_rwsem);
        return freed;
 }
-#else /* CONFIG_MEMCG_KMEM */
+#else /* CONFIG_MEMCG */
 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                        struct mem_cgroup *memcg, int priority)
 {
        return 0;
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG */
 
 /**
  * shrink_slab - shrink slab caches
@@ -1121,7 +1123,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                      struct scan_control *sc,
                                      enum ttu_flags ttu_flags,
                                      struct reclaim_stat *stat,
-                                     bool force_reclaim)
+                                     bool ignore_references)
 {
        LIST_HEAD(ret_pages);
        LIST_HEAD(free_pages);
@@ -1135,7 +1137,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                struct address_space *mapping;
                struct page *page;
                int may_enter_fs;
-               enum page_references references = PAGEREF_RECLAIM_CLEAN;
+               enum page_references references = PAGEREF_RECLAIM;
                bool dirty, writeback;
                unsigned int nr_pages;
 
@@ -1266,7 +1268,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        }
                }
 
-               if (!force_reclaim)
+               if (!ignore_references)
                        references = page_check_references(page, sc);
 
                switch (references) {
@@ -1487,10 +1489,9 @@ free_it:
                 * Is there need to periodically free_page_list? It would
                 * appear not as the counts should be low
                 */
-               if (unlikely(PageTransHuge(page))) {
-                       mem_cgroup_uncharge(page);
+               if (unlikely(PageTransHuge(page)))
                        (*get_compound_page_dtor(page))(page);
-               } else
+               else
                        list_add(&page->lru, &free_pages);
                continue;
 
@@ -1911,7 +1912,6 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&pgdat->lru_lock);
-                               mem_cgroup_uncharge(page);
                                (*get_compound_page_dtor(page))(page);
                                spin_lock_irq(&pgdat->lru_lock);
                        } else
@@ -2145,6 +2145,62 @@ static void shrink_active_list(unsigned long nr_to_scan,
                        nr_deactivate, nr_rotated, sc->priority, file);
 }
 
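+/*
+ * Try to reclaim the pages on @page_list right away and return the
+ * number of pages reclaimed.
+ */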
+unsigned long reclaim_pages(struct list_head *page_list)
+{
+       int nid = -1;
+       unsigned long nr_reclaimed = 0;
+       LIST_HEAD(node_page_list);
+       struct reclaim_stat dummy_stat;
+       struct page *page;
+       struct scan_control sc = {
+               .gfp_mask = GFP_KERNEL,
+               .priority = DEF_PRIORITY,
+               .may_writepage = 1,
+               .may_unmap = 1,
+               .may_swap = 1,
+       };
+
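+       /*
+        * shrink_page_list() reclaims against a single node, so collect
+        * runs of same-node pages into node_page_list before each call.
+        */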
+       while (!list_empty(page_list)) {
+               page = lru_to_page(page_list);
+               if (nid == -1) {
+                       nid = page_to_nid(page);
+                       INIT_LIST_HEAD(&node_page_list);
+               }
+
+               if (nid == page_to_nid(page)) {
+                       ClearPageActive(page);
+                       list_move(&page->lru, &node_page_list);
+                       continue;
+               }
+
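+               /*
+                * This page is from a different node: reclaim the batch
+                * gathered so far; the page stays on page_list and seeds
+                * the next batch.
+                */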
+               nr_reclaimed += shrink_page_list(&node_page_list,
+                                               NODE_DATA(nid),
+                                               &sc, 0,
+                                               &dummy_stat, false);
+               while (!list_empty(&node_page_list)) {
+                       page = lru_to_page(&node_page_list);
+                       list_del(&page->lru);
+                       putback_lru_page(page);
+               }
+
+               nid = -1;
+       }
+
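+       /* Flush the last per-node batch, if any */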
+       if (!list_empty(&node_page_list)) {
+               nr_reclaimed += shrink_page_list(&node_page_list,
+                                               NODE_DATA(nid),
+                                               &sc, 0,
+                                               &dummy_stat, false);
+               while (!list_empty(&node_page_list)) {
+                       page = lru_to_page(&node_page_list);
+                       list_del(&page->lru);
+                       putback_lru_page(page);
+               }
+       }
+
+       return nr_reclaimed;
+}
+
 /*
  * The inactive anon list should be small enough that the VM never has
  * to do too much work.