mm: move most file-based accounting to the node
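
The hunks below move the file-based dirty/writeback counters (NR_FILE_DIRTY,
NR_UNSTABLE_NFS, NR_WRITEBACK) from per-zone to per-node accounting: readers
switch from zone_page_state()/global_page_state() to
node_page_state()/global_node_page_state(), and the per-zone walk in
node_dirty_ok() collapses into direct node lookups. A new per-zone counter,
NR_ZONE_WRITE_PENDING, is updated alongside the node counters so zone-level
code can still tell how many of a zone's pages are dirty or under writeback.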
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f7c0fb993fb992c2f04bc6e80fa75be0c6043ed2..f97591d9fa001792ab92e77f855fc3d0026d5530 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -498,20 +498,12 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
  */
 bool node_dirty_ok(struct pglist_data *pgdat)
 {
-       int z;
        unsigned long limit = node_dirty_limit(pgdat);
        unsigned long nr_pages = 0;
 
-       for (z = 0; z < MAX_NR_ZONES; z++) {
-               struct zone *zone = pgdat->node_zones + z;
-
-               if (!populated_zone(zone))
-                       continue;
-
-               nr_pages += zone_page_state(zone, NR_FILE_DIRTY);
-               nr_pages += zone_page_state(zone, NR_UNSTABLE_NFS);
-               nr_pages += zone_page_state(zone, NR_WRITEBACK);
-       }
+       nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
+       nr_pages += node_page_state(pgdat, NR_UNSTABLE_NFS);
+       nr_pages += node_page_state(pgdat, NR_WRITEBACK);
 
        return nr_pages <= limit;
 }
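
With per-node counters, a node's dirty state is three O(1) reads instead of a
walk over pgdat->node_zones. A minimal sketch of the read pattern, assuming
the post-patch node_page_state() interface (the helper name is invented for
illustration, not part of the patch):

	/* Illustrative only: the totals node_dirty_ok() now compares
	 * against node_dirty_limit(), read straight from pgdat.
	 */
	static unsigned long node_write_pages(struct pglist_data *pgdat)
	{
		return node_page_state(pgdat, NR_FILE_DIRTY) +
		       node_page_state(pgdat, NR_UNSTABLE_NFS) +
		       node_page_state(pgdat, NR_WRITEBACK);
	}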
@@ -1601,10 +1593,10 @@ static void balance_dirty_pages(struct address_space *mapping,
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 */
-               nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
-                                       global_page_state(NR_UNSTABLE_NFS);
+               nr_reclaimable = global_node_page_state(NR_FILE_DIRTY) +
+                                       global_node_page_state(NR_UNSTABLE_NFS);
                gdtc->avail = global_dirtyable_memory();
-               gdtc->dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
+               gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);
 
                domain_dirty_limits(gdtc);
 
@@ -1941,8 +1933,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
         * as we're trying to decide whether to put more under writeback.
         */
        gdtc->avail = global_dirtyable_memory();
-       gdtc->dirty = global_page_state(NR_FILE_DIRTY) +
-                     global_page_state(NR_UNSTABLE_NFS);
+       gdtc->dirty = global_node_page_state(NR_FILE_DIRTY) +
+                     global_node_page_state(NR_UNSTABLE_NFS);
        domain_dirty_limits(gdtc);
 
        if (gdtc->dirty > gdtc->bg_thresh)
@@ -1986,8 +1978,8 @@ void throttle_vm_writeout(gfp_t gfp_mask)
                  */
                 dirty_thresh += dirty_thresh / 10;      /* wheeee... */
 
-                if (global_page_state(NR_UNSTABLE_NFS) +
-                       global_page_state(NR_WRITEBACK) <= dirty_thresh)
+                if (global_node_page_state(NR_UNSTABLE_NFS) +
+                       global_node_page_state(NR_WRITEBACK) <= dirty_thresh)
                                break;
                 congestion_wait(BLK_RW_ASYNC, HZ/10);
 
@@ -2015,8 +2007,8 @@ int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
 void laptop_mode_timer_fn(unsigned long data)
 {
        struct request_queue *q = (struct request_queue *)data;
-       int nr_pages = global_page_state(NR_FILE_DIRTY) +
-               global_page_state(NR_UNSTABLE_NFS);
+       int nr_pages = global_node_page_state(NR_FILE_DIRTY) +
+               global_node_page_state(NR_UNSTABLE_NFS);
        struct bdi_writeback *wb;
 
        /*
@@ -2467,7 +2459,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
                wb = inode_to_wb(inode);
 
                mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
-               __inc_zone_page_state(page, NR_FILE_DIRTY);
+               __inc_node_page_state(page, NR_FILE_DIRTY);
+               __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                __inc_zone_page_state(page, NR_DIRTIED);
                __inc_wb_stat(wb, WB_RECLAIMABLE);
                __inc_wb_stat(wb, WB_DIRTIED);
@@ -2488,7 +2481,8 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 {
        if (mapping_cap_account_dirty(mapping)) {
                mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
-               dec_zone_page_state(page, NR_FILE_DIRTY);
+               dec_node_page_state(page, NR_FILE_DIRTY);
+               dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                dec_wb_stat(wb, WB_RECLAIMABLE);
                task_io_account_cancelled_write(PAGE_SIZE);
        }
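
The dirty-accounting hunks all follow one pairing: the node counter feeds the
dirty-throttling totals used above, while NR_ZONE_WRITE_PENDING tracks the
same pages at zone granularity. A sketch of the repeated pattern, using the
__inc_node_page_state()/__inc_zone_page_state() helpers seen in the hunks
(the wrapper name is invented for illustration):

	/* Illustrative only: one node-level update paired with one
	 * zone-level update, as in account_page_dirtied() above.
	 */
	static inline void mark_page_dirty_stats(struct page *page)
	{
		__inc_node_page_state(page, NR_FILE_DIRTY);	    /* node total   */
		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING); /* zone's share */
	}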
@@ -2744,7 +2738,8 @@ int clear_page_dirty_for_io(struct page *page)
                wb = unlocked_inode_to_wb_begin(inode, &locked);
                if (TestClearPageDirty(page)) {
                        mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
-                       dec_zone_page_state(page, NR_FILE_DIRTY);
+                       dec_node_page_state(page, NR_FILE_DIRTY);
+                       dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                        dec_wb_stat(wb, WB_RECLAIMABLE);
                        ret = 1;
                }
@@ -2790,7 +2785,8 @@ int test_clear_page_writeback(struct page *page)
        }
        if (ret) {
                mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
-               dec_zone_page_state(page, NR_WRITEBACK);
+               dec_node_page_state(page, NR_WRITEBACK);
+               dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                inc_zone_page_state(page, NR_WRITTEN);
        }
        unlock_page_memcg(page);
@@ -2844,7 +2840,8 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
        }
        if (!ret) {
                mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
-               inc_zone_page_state(page, NR_WRITEBACK);
+               inc_node_page_state(page, NR_WRITEBACK);
+               inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
        }
        unlock_page_memcg(page);
        return ret;