Merge tag 'nfsd-4.8' of git://linux-nfs.org/~bfields/linux
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f7c0fb993fb992c2f04bc6e80fa75be0c6043ed2..f4cd7d8005c9071dc1caf77a155eb3f2dc611b61 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -310,19 +310,21 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
        for_each_node_state(node, N_HIGH_MEMORY) {
                for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
                        struct zone *z;
-                       unsigned long dirtyable;
+                       unsigned long nr_pages;
 
                        if (!is_highmem_idx(i))
                                continue;
 
                        z = &NODE_DATA(node)->node_zones[i];
-                       dirtyable = zone_page_state(z, NR_FREE_PAGES) +
-                               zone_page_state(z, NR_ZONE_LRU_FILE);
+                       if (!populated_zone(z))
+                               continue;
 
+                       nr_pages = zone_page_state(z, NR_FREE_PAGES);
                        /* watch for underflows */
-                       dirtyable -= min(dirtyable, high_wmark_pages(z));
-
-                       x += dirtyable;
+                       nr_pages -= min(nr_pages, high_wmark_pages(z));
+                       nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
+                       nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
+                       x += nr_pages;
                }
        }
 
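The hunk above reorders the per-zone arithmetic: the underflow guard now clamps only the free-page term against the zone's high watermark, and the file LRU pages are added afterwards from the new per-zone NR_ZONE_INACTIVE_FILE/NR_ZONE_ACTIVE_FILE counters (a populated_zone() check also skips empty zones). A minimal userspace sketch of that arithmetic follows; the struct and helper names are made up for illustration, with fixed values standing in for zone_page_state() reads.

#include <stdio.h>

/* Hypothetical stand-ins for the per-zone vmstat counters. */
struct zone_counters {
	unsigned long nr_free;          /* NR_FREE_PAGES */
	unsigned long nr_inactive_file; /* NR_ZONE_INACTIVE_FILE */
	unsigned long nr_active_file;   /* NR_ZONE_ACTIVE_FILE */
	unsigned long high_wmark;       /* high_wmark_pages(z) */
};

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Mirrors the per-zone body of highmem_dirtyable_memory() after the patch. */
static unsigned long zone_dirtyable(const struct zone_counters *z)
{
	unsigned long nr_pages = z->nr_free;

	/* watch for underflows: only free pages above the watermark count */
	nr_pages -= min_ul(nr_pages, z->high_wmark);
	nr_pages += z->nr_inactive_file;
	nr_pages += z->nr_active_file;
	return nr_pages;
}

int main(void)
{
	struct zone_counters z = { .nr_free = 1000, .nr_inactive_file = 4000,
				   .nr_active_file = 2000, .high_wmark = 1500 };

	/* free < watermark, so only the file LRU pages count: prints 6000 */
	printf("dirtyable = %lu\n", zone_dirtyable(&z));
	return 0;
}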
@@ -498,20 +500,12 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
  */
 bool node_dirty_ok(struct pglist_data *pgdat)
 {
-       int z;
        unsigned long limit = node_dirty_limit(pgdat);
        unsigned long nr_pages = 0;
 
-       for (z = 0; z < MAX_NR_ZONES; z++) {
-               struct zone *zone = pgdat->node_zones + z;
-
-               if (!populated_zone(zone))
-                       continue;
-
-               nr_pages += zone_page_state(zone, NR_FILE_DIRTY);
-               nr_pages += zone_page_state(zone, NR_UNSTABLE_NFS);
-               nr_pages += zone_page_state(zone, NR_WRITEBACK);
-       }
+       nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
+       nr_pages += node_page_state(pgdat, NR_UNSTABLE_NFS);
+       nr_pages += node_page_state(pgdat, NR_WRITEBACK);
 
        return nr_pages <= limit;
 }
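With dirty, unstable-NFS and writeback accounting moved to the node level, node_dirty_ok() no longer needs to walk the node's zones: one node_page_state() read per counter replaces the per-zone loop. A hedged userspace model of the resulting check, with plain parameters standing in for the node counters:

#include <stdio.h>

/* Userspace model of the simplified node_dirty_ok(); the function and
 * parameter names here are invented for illustration. */
static int node_dirty_ok_model(unsigned long nr_file_dirty,
			       unsigned long nr_unstable_nfs,
			       unsigned long nr_writeback,
			       unsigned long limit)
{
	unsigned long nr_pages = 0;

	nr_pages += nr_file_dirty;   /* node_page_state(pgdat, NR_FILE_DIRTY) */
	nr_pages += nr_unstable_nfs; /* node_page_state(pgdat, NR_UNSTABLE_NFS) */
	nr_pages += nr_writeback;    /* node_page_state(pgdat, NR_WRITEBACK) */

	return nr_pages <= limit;
}

int main(void)
{
	printf("%d\n", node_dirty_ok_model(500, 50, 200, 1000)); /* 1: under limit */
	printf("%d\n", node_dirty_ok_model(900, 50, 200, 1000)); /* 0: over limit */
	return 0;
}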
@@ -1601,10 +1595,10 @@ static void balance_dirty_pages(struct address_space *mapping,
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 */
-               nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
-                                       global_page_state(NR_UNSTABLE_NFS);
+               nr_reclaimable = global_node_page_state(NR_FILE_DIRTY) +
+                                       global_node_page_state(NR_UNSTABLE_NFS);
                gdtc->avail = global_dirtyable_memory();
-               gdtc->dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
+               gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);
 
                domain_dirty_limits(gdtc);
 
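balance_dirty_pages() builds its global picture the same way, just from node-level counters now: nr_reclaimable is dirty pages plus NFS-unstable pages (written to the server but not yet on stable storage), and gdtc->dirty adds pages already under writeback. A small sketch of that bookkeeping, using stand-in values for the global_node_page_state() reads:

#include <stdio.h>

int main(void)
{
	/* Stand-ins for global_node_page_state(); values are illustrative. */
	unsigned long nr_file_dirty = 700;
	unsigned long nr_unstable_nfs = 100; /* at the server, not yet on disk */
	unsigned long nr_writeback = 200;

	unsigned long nr_reclaimable = nr_file_dirty + nr_unstable_nfs;
	unsigned long dirty = nr_reclaimable + nr_writeback; /* gdtc->dirty */

	printf("reclaimable=%lu dirty=%lu\n", nr_reclaimable, dirty);
	return 0;
}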
@@ -1941,8 +1935,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
         * as we're trying to decide whether to put more under writeback.
         */
        gdtc->avail = global_dirtyable_memory();
-       gdtc->dirty = global_page_state(NR_FILE_DIRTY) +
-                     global_page_state(NR_UNSTABLE_NFS);
+       gdtc->dirty = global_node_page_state(NR_FILE_DIRTY) +
+                     global_node_page_state(NR_UNSTABLE_NFS);
        domain_dirty_limits(gdtc);
 
        if (gdtc->dirty > gdtc->bg_thresh)
@@ -1986,8 +1980,8 @@ void throttle_vm_writeout(gfp_t gfp_mask)
                  */
                 dirty_thresh += dirty_thresh / 10;      /* wheeee... */
 
-                if (global_page_state(NR_UNSTABLE_NFS) +
-                       global_page_state(NR_WRITEBACK) <= dirty_thresh)
+                if (global_node_page_state(NR_UNSTABLE_NFS) +
+                       global_node_page_state(NR_WRITEBACK) <= dirty_thresh)
                                break;
                 congestion_wait(BLK_RW_ASYNC, HZ/10);
 
@@ -2015,8 +2009,8 @@ int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
 void laptop_mode_timer_fn(unsigned long data)
 {
        struct request_queue *q = (struct request_queue *)data;
-       int nr_pages = global_page_state(NR_FILE_DIRTY) +
-               global_page_state(NR_UNSTABLE_NFS);
+       int nr_pages = global_node_page_state(NR_FILE_DIRTY) +
+               global_node_page_state(NR_UNSTABLE_NFS);
        struct bdi_writeback *wb;
 
        /*
@@ -2467,8 +2461,9 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
                wb = inode_to_wb(inode);
 
                mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
-               __inc_zone_page_state(page, NR_FILE_DIRTY);
-               __inc_zone_page_state(page, NR_DIRTIED);
+               __inc_node_page_state(page, NR_FILE_DIRTY);
+               __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
+               __inc_node_page_state(page, NR_DIRTIED);
                __inc_wb_stat(wb, WB_RECLAIMABLE);
                __inc_wb_stat(wb, WB_DIRTIED);
                task_io_account_write(PAGE_SIZE);
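account_page_dirtied() now counts the same page at two granularities: the node-level NR_FILE_DIRTY and NR_DIRTIED counters move with the rest of the file-based accounting, while the new per-zone NR_ZONE_WRITE_PENDING keeps a zone-local view of pages awaiting writeback. A toy userspace model of that double bookkeeping; all type and function names below are invented, and the real kernel updates per-CPU vmstat counters rather than plain fields.

#include <stdio.h>

/* Toy model: one node containing two zones; dirtying a page bumps the
 * node-wide dirty counters and the owning zone's write-pending count. */
struct zone_model { unsigned long nr_zone_write_pending; };
struct node_model {
	unsigned long nr_file_dirty;
	unsigned long nr_dirtied; /* lifetime counter, never decremented here */
	struct zone_model zones[2];
};

static void account_dirtied(struct node_model *n, int zone_idx)
{
	n->nr_file_dirty++;  /* __inc_node_page_state(page, NR_FILE_DIRTY) */
	n->nr_dirtied++;     /* __inc_node_page_state(page, NR_DIRTIED) */
	n->zones[zone_idx].nr_zone_write_pending++; /* __inc_zone_page_state */
}

static void account_cleaned(struct node_model *n, int zone_idx)
{
	n->nr_file_dirty--;  /* dec_node_page_state(page, NR_FILE_DIRTY) */
	n->zones[zone_idx].nr_zone_write_pending--; /* dec_zone_page_state */
}

int main(void)
{
	struct node_model n = { 0 };

	account_dirtied(&n, 0);
	account_dirtied(&n, 1);
	account_cleaned(&n, 0);
	/* prints: node dirty=1, zone0 pending=0, zone1 pending=1 */
	printf("node dirty=%lu, zone0 pending=%lu, zone1 pending=%lu\n",
	       n.nr_file_dirty, n.zones[0].nr_zone_write_pending,
	       n.zones[1].nr_zone_write_pending);
	return 0;
}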
@@ -2488,7 +2483,8 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 {
        if (mapping_cap_account_dirty(mapping)) {
                mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
-               dec_zone_page_state(page, NR_FILE_DIRTY);
+               dec_node_page_state(page, NR_FILE_DIRTY);
+               dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                dec_wb_stat(wb, WB_RECLAIMABLE);
                task_io_account_cancelled_write(PAGE_SIZE);
        }
@@ -2556,7 +2552,7 @@ void account_page_redirty(struct page *page)
 
                wb = unlocked_inode_to_wb_begin(inode, &locked);
                current->nr_dirtied--;
-               dec_zone_page_state(page, NR_DIRTIED);
+               dec_node_page_state(page, NR_DIRTIED);
                dec_wb_stat(wb, WB_DIRTIED);
                unlocked_inode_to_wb_end(inode, locked);
        }
@@ -2744,7 +2740,8 @@ int clear_page_dirty_for_io(struct page *page)
                wb = unlocked_inode_to_wb_begin(inode, &locked);
                if (TestClearPageDirty(page)) {
                        mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
-                       dec_zone_page_state(page, NR_FILE_DIRTY);
+                       dec_node_page_state(page, NR_FILE_DIRTY);
+                       dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                        dec_wb_stat(wb, WB_RECLAIMABLE);
                        ret = 1;
                }
@@ -2790,8 +2787,9 @@ int test_clear_page_writeback(struct page *page)
        }
        if (ret) {
                mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
-               dec_zone_page_state(page, NR_WRITEBACK);
-               inc_zone_page_state(page, NR_WRITTEN);
+               dec_node_page_state(page, NR_WRITEBACK);
+               dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
+               inc_node_page_state(page, NR_WRITTEN);
        }
        unlock_page_memcg(page);
        return ret;
@@ -2844,7 +2842,8 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
        }
        if (!ret) {
                mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
-               inc_zone_page_state(page, NR_WRITEBACK);
+               inc_node_page_state(page, NR_WRITEBACK);
+               inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
        }
        unlock_page_memcg(page);
        return ret;
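The last two hunks keep the writeback transitions symmetric: setting writeback raises node-level NR_WRITEBACK plus zone-level NR_ZONE_WRITE_PENDING, and clearing it drops both and bumps NR_WRITTEN. NR_ZONE_WRITE_PENDING thus tracks pages that are either dirty or under writeback: a page moving dirty -> writeback is decremented by clear_page_dirty_for_io() and re-incremented by __test_set_page_writeback(). A minimal sketch of that counter's lifecycle for one page, modelled as a plain variable:

#include <stdio.h>

int main(void)
{
	unsigned long write_pending = 0;

	write_pending++; /* account_page_dirtied(): page becomes dirty */
	write_pending--; /* clear_page_dirty_for_io(): dirty bit handed off */
	write_pending++; /* __test_set_page_writeback(): now under writeback */
	write_pending--; /* test_clear_page_writeback(): I/O completed */

	printf("write_pending=%lu\n", write_pending); /* back to 0 */
	return 0;
}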