index cd3a5e64cea9be1f1b1759f056c35c0bf3ad2811..a7251a8ed53297a7ec129b6254a5229995d86fc3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,6 +31,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
+#include <linux/hugetlb.h>
 
 #include "internal.h"
 
@@ -42,7 +43,7 @@ int page_cluster;
 
 static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
 
 /*
  * This path almost never happens for VM activity - pages are normally
@@ -75,7 +76,14 @@ static void __put_compound_page(struct page *page)
 {
        compound_page_dtor *dtor;
 
-       __page_cache_release(page);
+       /*
+        * __page_cache_release() is supposed to be called for thp, not for
+        * hugetlb. This is because a hugetlb page never has PageLRU set
+        * (it is never placed on any LRU list), and no memcg routines should
+        * be called for hugetlb (it has a separate hugetlb_cgroup).
+        */
+       if (!PageHuge(page))
+               __page_cache_release(page);
        dtor = get_compound_page_dtor(page);
        (*dtor)(page);
 }
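
The guard added above can be illustrated with a small standalone sketch.
This is a hedged userspace model, not kernel code: struct page_model, the
is_huge flag, and the *_model helpers are stand-ins for PageHuge(),
__page_cache_release(), and get_compound_page_dtor(); only the ordering
of the two steps is the point.

#include <stdbool.h>
#include <stdio.h>

struct page_model {
	bool is_huge;                               /* models PageHuge(page) */
	void (*compound_dtor)(struct page_model *); /* models the compound dtor */
};

static void page_cache_release_model(struct page_model *page)
{
	/* models __page_cache_release(): LRU removal and memcg uncharge */
	printf("releasing LRU/memcg state\n");
}

static void put_compound_page_model(struct page_model *page)
{
	/*
	 * hugetlb pages are never on an LRU list and are charged to
	 * hugetlb_cgroup rather than memcg, so the LRU/memcg release
	 * step is skipped for them; the destructor still always runs.
	 */
	if (!page->is_huge)
		page_cache_release_model(page);
	page->compound_dtor(page);
}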
@@ -743,7 +751,7 @@ void lru_cache_add_active_or_unevictable(struct page *page,
 * be written out by flusher threads, as this is much more effective
  * than the single-page writeout from reclaim.
  */
-static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
                              void *arg)
 {
        int lru, file;
@@ -811,36 +819,36 @@ void lru_add_drain_cpu(int cpu)
                local_irq_restore(flags);
        }
 
-       pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+       pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
        if (pagevec_count(pvec))
-               pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+               pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 
        activate_page_drain(cpu);
 }
 
 /**
- * deactivate_page - forcefully deactivate a page
+ * deactivate_file_page - forcefully deactivate a file page
  * @page: page to deactivate
  *
 * This function hints to the VM that @page is a good reclaim candidate,
  * for example if its invalidation fails due to the page being dirty
  * or under writeback.
  */
-void deactivate_page(struct page *page)
+void deactivate_file_page(struct page *page)
 {
        /*
-        * In a workload with many unevictable page such as mprotect, unevictable
-        * page deactivation for accelerating reclaim is pointless.
+        * In a workload with many unevictable pages, such as one using
+        * mprotect, deactivating unevictable pages to accelerate reclaim
+        * is pointless.
         */
        if (PageUnevictable(page))
                return;
 
        if (likely(get_page_unless_zero(page))) {
-               struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+               struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
 
                if (!pagevec_add(pvec, page))
-                       pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-               put_cpu_var(lru_deactivate_pvecs);
+                       pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+               put_cpu_var(lru_deactivate_file_pvecs);
        }
 }
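
deactivate_file_page() above uses the kernel's per-CPU pagevec batching
idiom: pages accumulate in a small per-CPU array, and only when
pagevec_add() reports the array full is the whole batch moved under one
lock. The sketch below is a userspace model of that idiom; PVEC_SIZE_MODEL
and the *_model types are illustrative assumptions, not the kernel's
definitions.

#include <stddef.h>

#define PVEC_SIZE_MODEL 14	/* illustrative batch size */

struct pagevec_model {
	size_t nr;
	void *pages[PVEC_SIZE_MODEL];
};

/* mirrors pagevec_add(): returns the space left after adding */
static size_t pvec_add_model(struct pagevec_model *pvec, void *page)
{
	pvec->pages[pvec->nr++] = page;
	return PVEC_SIZE_MODEL - pvec->nr;
}

/* one lock acquisition covers the whole batch in the real code */
static void pvec_flush_model(struct pagevec_model *pvec,
			     void (*move_fn)(void *page))
{
	for (size_t i = 0; i < pvec->nr; i++)
		move_fn(pvec->pages[i]);
	pvec->nr = 0;
}

static void deactivate_file_page_model(struct pagevec_model *pvec,
				       void *page,
				       void (*move_fn)(void *page))
{
	if (!pvec_add_model(pvec, page))	/* batch full: flush it */
		pvec_flush_model(pvec, move_fn);
}

Because a page can sit in a partially filled batch until the next flush,
the drain paths above must also flush any non-empty pagevec.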
 
@@ -872,7 +880,7 @@ void lru_add_drain_all(void)
 
                if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
                    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
-                   pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
+                   pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
                    need_activate_page_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
                        schedule_work_on(cpu, work);
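
The hunk above keeps the same shape after the rename: lru_add_drain_all()
inspects each CPU's pending batches and schedules drain work only where
something is queued. Below is a userspace sketch of that dispatch pattern,
with the per-CPU state and work queueing reduced to illustrative stand-ins
(NCPUS_MODEL and the *_model names are assumptions, not kernel API).

#include <stddef.h>

#define NCPUS_MODEL 4

struct cpu_batches_model {
	size_t lru_add;
	size_t lru_rotate;
	size_t lru_deactivate_file;
};

static struct cpu_batches_model cpus_model[NCPUS_MODEL];

static void schedule_drain_model(int cpu)
{
	/* stands in for INIT_WORK() + schedule_work_on(cpu, work) */
	(void)cpu;
}

static void drain_all_model(void)
{
	for (int cpu = 0; cpu < NCPUS_MODEL; cpu++) {
		struct cpu_batches_model *b = &cpus_model[cpu];

		/* skip CPUs with nothing batched, as the kernel does */
		if (b->lru_add || b->lru_rotate || b->lru_deactivate_file)
			schedule_drain_model(cpu);
	}
}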