int result;
int ignore_layout = 0;
- if (wbc->range_cyclic) {
- start = mapping->writeback_index << PAGE_SHIFT;
- end = OBD_OBJECT_EOF;
- } else {
- start = wbc->range_start;
- end = wbc->range_end;
- if (end == LLONG_MAX) {
- end = OBD_OBJECT_EOF;
- range_whole = start == 0;
- }
- }
-
+ start = mapping->writeback_index << PAGE_SHIFT;
+ end = OBD_OBJECT_EOF;
mode = CL_FSYNC_NONE;
if (wbc->sync_mode == WB_SYNC_ALL)
mode = CL_FSYNC_LOCAL;
result = 0;
}
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
- if (end == OBD_OBJECT_EOF)
- mapping->writeback_index = 0;
- else
- mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
- }
+ if (end == OBD_OBJECT_EOF)
+ mapping->writeback_index = 0;
+ else
+ mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
+
return result;
}
_enter("");
- if (wbc->range_cyclic) {
- start = mapping->writeback_index;
- end = -1;
- ret = afs_writepages_region(mapping, wbc, start, end, &next);
- if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
- ret = afs_writepages_region(mapping, wbc, 0, start,
- &next);
- mapping->writeback_index = next;
- } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
- end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
- ret = afs_writepages_region(mapping, wbc, 0, end, &next);
- if (wbc->nr_to_write > 0)
- mapping->writeback_index = next;
- } else {
- start = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
- ret = afs_writepages_region(mapping, wbc, start, end, &next);
- }
+ start = mapping->writeback_index;
+ end = -1;
+ ret = afs_writepages_region(mapping, wbc, start, end, &next);
+ if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
+ ret = afs_writepages_region(mapping, wbc, 0, start, &next);
+ mapping->writeback_index = next;
_leave(" = %d", ret);
return ret;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
- .range_cyclic = 1,
};
int ret;
int tag;
pagevec_init(&pvec, 0);
- if (wbc->range_cyclic) {
- index = mapping->writeback_index; /* Start from prev offset */
- end = -1;
- } else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
- scanned = 1;
- }
+ index = mapping->writeback_index; /* Start from prev offset */
+ end = -1;
if (wbc->sync_mode == WB_SYNC_ALL)
tag = PAGECACHE_TAG_TOWRITE;
else
if (!PagePrivate(page))
continue;
- if (!wbc->range_cyclic && page->index > end) {
+ if (page->index > end) {
done = 1;
break;
}
pgoff_t index;
pgoff_t end; /* Inclusive */
pgoff_t done_index;
- int range_whole = 0;
int scanned = 0;
int tag;
return 0;
pagevec_init(&pvec, 0);
- if (wbc->range_cyclic) {
- index = mapping->writeback_index; /* Start from prev offset */
- end = -1;
- } else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = 1;
- scanned = 1;
- }
+ index = mapping->writeback_index; /* Start from prev offset */
+ end = -1;
if (wbc->sync_mode == WB_SYNC_ALL)
tag = PAGECACHE_TAG_TOWRITE;
else
continue;
}
- if (!wbc->range_cyclic && page->index > end) {
+ if (page->index > end) {
done = 1;
unlock_page(page);
continue;
goto retry;
}
- if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
- mapping->writeback_index = done_index;
-
+ mapping->writeback_index = done_index;
btrfs_add_delayed_iput(inode);
return ret;
}
unsigned int wsize = i_blocksize(inode);
struct ceph_osd_request *req = NULL;
struct ceph_writeback_ctl ceph_wbc;
- bool should_loop, range_whole = false;
+ bool should_loop;
bool stop, done = false;
dout("writepages_start %p (mode=%s)\n", inode,
pagevec_init(&pvec, 0);
- start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
+ start_index = mapping->writeback_index;
index = start_index;
retry:
should_loop = false;
if (ceph_wbc.head_snapc && snapc != last_snapc) {
/* where to start/end? */
- if (wbc->range_cyclic) {
- index = start_index;
- end = -1;
- if (index > 0)
- should_loop = true;
- dout(" cyclic, start at %lu\n", index);
- } else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = true;
- dout(" not cyclic, %lu to %lu\n", index, end);
- }
+ index = start_index;
+ end = -1;
+ if (index > 0)
+ should_loop = true;
+ dout(" cyclic, start at %lu\n", index);
} else if (!ceph_wbc.head_snapc) {
/* Do not respect wbc->range_{start,end}. Dirty pages
* in that range can be associated with newer snapc.
goto retry;
}
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = index;
-
+ mapping->writeback_index = index;
out:
ceph_osdc_put_request(req);
ceph_put_snap_context(last_snapc);
break;
}
- if (!wbc->range_cyclic && page->index > end) {
+ if (page->index > end) {
*done = true;
unlock_page(page);
break;
{
struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
struct TCP_Server_Info *server;
- bool done = false, scanned = false, range_whole = false;
+ bool done = false, scanned = false;
pgoff_t end, index;
struct cifs_writedata *wdata;
int rc = 0;
if (cifs_sb->wsize < PAGE_SIZE)
return generic_writepages(mapping, wbc);
- if (wbc->range_cyclic) {
- index = mapping->writeback_index; /* Start from prev offset */
- end = -1;
- } else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = true;
- scanned = true;
- }
+ index = mapping->writeback_index; /* Start from prev offset */
+ end = -1;
server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
while (!done && index <= end) {
goto retry;
}
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = index;
-
+ mapping->writeback_index = index;
return rc;
}
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
- if (wbc->range_cyclic) {
- writeback_index = mapping->writeback_index;
- if (writeback_index)
- cycled = 0;
- mpd.first_page = writeback_index;
- mpd.last_page = -1;
- } else {
- mpd.first_page = wbc->range_start >> PAGE_SHIFT;
- mpd.last_page = wbc->range_end >> PAGE_SHIFT;
- }
+ writeback_index = mapping->writeback_index;
+ if (writeback_index)
+ cycled = 0;
+ mpd.first_page = writeback_index;
+ mpd.last_page = -1;
mpd.inode = inode;
mpd.wbc = wbc;
}
/* Update index */
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- /*
- * Set the writeback_index so that range_cyclic
- * mode will write it back later
- */
- mapping->writeback_index = mpd.first_page;
+	/*
+	 * Set the writeback_index so that the next writepages
+	 * call resumes from where this one left off
+	 */
+ mapping->writeback_index = mpd.first_page;
out_writepages:
trace_ext4_writepages_result(inode, wbc, ret,
pgoff_t done_index;
pgoff_t last_idx = ULONG_MAX;
int cycled;
- int range_whole = 0;
int tag;
pagevec_init(&pvec, 0);
else
clear_inode_flag(mapping->host, FI_HOT_DATA);
- if (wbc->range_cyclic) {
- writeback_index = mapping->writeback_index; /* prev offset */
- index = writeback_index;
- if (index == 0)
- cycled = 1;
- else
- cycled = 0;
- end = -1;
- } else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = 1;
- cycled = 1; /* ignore range_cyclic tests */
- }
+ writeback_index = mapping->writeback_index; /* prev offset */
+ index = writeback_index;
+ if (index == 0)
+ cycled = 1;
+ else
+ cycled = 0;
+ end = -1;
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
end = writeback_index - 1;
goto retry;
}
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = done_index;
+ mapping->writeback_index = done_index;
if (last_idx != ULONG_MAX)
f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
enum writeback_sync_modes sync_mode;
unsigned int tagged_writepages:1;
unsigned int for_kupdate:1;
- unsigned int range_cyclic:1;
unsigned int for_background:1;
unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
unsigned int auto_free:1; /* free on completion */
get_nr_dirty_inodes();
}
-static void wb_start_writeback(struct bdi_writeback *wb, bool range_cyclic,
- enum wb_reason reason)
+static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
{
struct wb_writeback_work *work;
work->sync_mode = WB_SYNC_NONE;
work->nr_pages = wb_split_bdi_pages(wb, get_nr_dirty_pages());
- work->range_cyclic = range_cyclic;
work->reason = reason;
work->auto_free = 1;
work->start_all = 1;
.for_kupdate = work->for_kupdate,
.for_background = work->for_background,
.for_sync = work->for_sync,
- .range_cyclic = work->range_cyclic,
.range_start = 0,
.range_end = LLONG_MAX,
};
struct wb_writeback_work work = {
.nr_pages = nr_pages,
.sync_mode = WB_SYNC_NONE,
- .range_cyclic = 1,
.reason = reason,
};
struct blk_plug plug;
.nr_pages = LONG_MAX,
.sync_mode = WB_SYNC_NONE,
.for_background = 1,
- .range_cyclic = 1,
.reason = WB_REASON_BACKGROUND,
};
.nr_pages = nr_pages,
.sync_mode = WB_SYNC_NONE,
.for_kupdate = 1,
- .range_cyclic = 1,
.reason = WB_REASON_PERIODIC,
};
return;
list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
- wb_start_writeback(wb, true, reason);
+ wb_start_writeback(wb, reason);
}
void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
.sb = sb,
.sync_mode = WB_SYNC_ALL,
.nr_pages = LONG_MAX,
- .range_cyclic = 1,
.done = &done,
.reason = WB_REASON_SYNC,
.for_sync = 1,
pgoff_t end;
pgoff_t done_index;
int cycled;
- int range_whole = 0;
int tag;
pagevec_init(&pvec, 0);
- if (wbc->range_cyclic) {
- writeback_index = mapping->writeback_index; /* prev offset */
- index = writeback_index;
- if (index == 0)
- cycled = 1;
- else
- cycled = 0;
- end = -1;
- } else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = 1;
- cycled = 1; /* ignore range_cyclic tests */
- }
+ writeback_index = mapping->writeback_index; /* prev offset */
+ index = writeback_index;
+ if (index == 0)
+ cycled = 1;
+ else
+ cycled = 0;
+ end = -1;
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
goto retry;
}
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = done_index;
-
+ mapping->writeback_index = done_index;
return ret;
}
unsigned for_background:1; /* A background writeback */
unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */
unsigned for_reclaim:1; /* Invoked from the page allocator */
- unsigned range_cyclic:1; /* range_start is cyclic */
unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
#ifdef CONFIG_CGROUP_WRITEBACK
struct bdi_writeback *wb; /* wb this writeback is issued under */
__entry->range_end = wbc->range_end;
__entry->for_kupdate = wbc->for_kupdate;
__entry->for_reclaim = wbc->for_reclaim;
- __entry->range_cyclic = wbc->range_cyclic;
+ __entry->range_cyclic = 1;
__entry->writeback_index = inode->i_mapping->writeback_index;
__entry->root_objectid =
BTRFS_I(inode)->root->root_key.objectid;
__entry->writeback_index = inode->i_mapping->writeback_index;
__entry->sync_mode = wbc->sync_mode;
__entry->for_kupdate = wbc->for_kupdate;
- __entry->range_cyclic = wbc->range_cyclic;
+ __entry->range_cyclic = 1;
),
TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
__entry->for_background = wbc->for_background;
__entry->tagged_writepages = wbc->tagged_writepages;
__entry->for_reclaim = wbc->for_reclaim;
- __entry->range_cyclic = wbc->range_cyclic;
+ __entry->range_cyclic = 1;
__entry->for_sync = wbc->for_sync;
),
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
__entry->sync_mode = work->sync_mode;
__entry->for_kupdate = work->for_kupdate;
- __entry->range_cyclic = work->range_cyclic;
+ __entry->range_cyclic = 1;
__entry->for_background = work->for_background;
__entry->reason = work->reason;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
__entry->for_kupdate = wbc->for_kupdate;
__entry->for_background = wbc->for_background;
__entry->for_reclaim = wbc->for_reclaim;
- __entry->range_cyclic = wbc->range_cyclic;
+ __entry->range_cyclic = 1;
__entry->range_start = (long)wbc->range_start;
__entry->range_end = (long)wbc->range_end;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
pgoff_t end; /* Inclusive */
pgoff_t done_index;
int cycled;
- int range_whole = 0;
int tag;
pagevec_init(&pvec, 0);
- if (wbc->range_cyclic) {
- writeback_index = mapping->writeback_index; /* prev offset */
- index = writeback_index;
- if (index == 0)
- cycled = 1;
- else
- cycled = 0;
- end = -1;
- } else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = 1;
- cycled = 1; /* ignore range_cyclic tests */
- }
+ writeback_index = mapping->writeback_index; /* prev offset */
+ index = writeback_index;
+ if (index == 0)
+ cycled = 1;
+ else
+ cycled = 0;
+ end = -1;
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
end = writeback_index - 1;
goto retry;
}
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = done_index;
-
+ mapping->writeback_index = done_index;
return ret;
}
EXPORT_SYMBOL(write_cache_pages);