if (!list_empty(&wb->b_more_io)) {
inode = list_entry(wb->b_more_io.prev,
struct inode, i_list);
+ trace_writeback_inode_wait(1);
inode_wait_for_writeback(inode);
+ trace_writeback_inode_wait(0);
}
spin_unlock(&inode_lock);
}
while ((work = get_next_work_item(bdi, wb)) != NULL) {
struct wb_writeback_args args = work->args;
+ long pgs;
/*
* Override sync mode, in case we must wait for completion
if (args.sync_mode == WB_SYNC_NONE)
wb_clear_pending(wb, work);
- wrote += wb_writeback(wb, &args);
+ pgs = wb_writeback(wb, &args);
+ wrote += pgs;
+
+ trace_writeback_done(work, pgs);
/*
* This is a data integrity writeback, so only do the
__entry->range_cyclic, __entry->for_background)
);
+/*
+ * Fired after wb_writeback() finishes a queued work item; records a
+ * trace-only identifier for the bdi_work and how many pages that pass
+ * wrote (the value added to 'wrote' by the caller).
+ *
+ * NOTE(review): the call site traces after wb_clear_pending(); confirm
+ * 'work' cannot have been freed by then before dereferencing-style use.
+ * (Here only the pointer value is hashed, not dereferenced.)
+ */
+TRACE_EVENT(writeback_done,
+
+ TP_PROTO(struct bdi_work *work, long pages),
+
+ TP_ARGS(work, pages),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, work)
+ __field(long, pages)
+ ),
+
+ TP_fast_assign(
+ /* low 16 bits of the pointer act as a compact per-work id */
+ __entry->work = (unsigned long) work & 0xffff;
+ __entry->pages = pages;
+
+ ),
+
+ TP_printk("work=%x pages=%ld", __entry->work, __entry->pages)
+);
+
TRACE_EVENT(writeback_clear,
TP_PROTO(struct bdi_work *work),
__entry->start ? "registered" : "unregistered")
);
+/*
+ * Brackets a blocking inode_wait_for_writeback() call: emitted with
+ * start=1 immediately before the wait and start=0 immediately after,
+ * so trace consumers can measure time spent waiting on I_SYNC.
+ */
+TRACE_EVENT(writeback_inode_wait,
+
+ TP_PROTO(int start),
+
+ TP_ARGS(start),
+
+ TP_STRUCT__entry(
+ __field(int, start)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ ),
+
+ /* nonzero -> "start", zero -> "end" */
+ TP_printk("%s", __entry->start ? "start" : "end")
+);
+
+/*
+ * Emitted from balance_dirty_pages() ("bdp") just before it calls
+ * writeback_inodes_wbc(); records the bdi's reclaimable page count and
+ * the per-bdi dirty threshold that triggered the writeback attempt.
+ */
+TRACE_EVENT(writeback_bdp_start,
+
+ TP_PROTO(unsigned long reclaim, unsigned long thresh),
+
+ TP_ARGS(reclaim, thresh),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, reclaim)
+ __field(unsigned long, thresh)
+ ),
+
+ TP_fast_assign(
+ __entry->reclaim = reclaim;
+ __entry->thresh = thresh;
+ ),
+
+ TP_printk("reclaimable=%lu, threshold=%lu", __entry->reclaim,
+ __entry->thresh)
+);
+
+/*
+ * Emitted from balance_dirty_pages() right after writeback_inodes_wbc()
+ * returns; 'wrote' is the pages actually written by that pass
+ * (write_chunk - wbc.nr_to_write). Pairs with writeback_bdp_start.
+ */
+TRACE_EVENT(writeback_bdp_end,
+
+ TP_PROTO(unsigned long wrote),
+
+ TP_ARGS(wrote),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, wrote)
+ ),
+
+ TP_fast_assign(
+ __entry->wrote = wrote;
+ ),
+
+ TP_printk("wrote=%lu", __entry->wrote)
+);
+
#endif /* _TRACE_WRITEBACK_H */
/* This part must be outside protection */
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
+#include <trace/events/writeback.h>
/*
* After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
* up.
*/
if (bdi_nr_reclaimable > bdi_thresh) {
+ unsigned long wrote;
+
+ trace_writeback_bdp_start(bdi_nr_reclaimable,
+ bdi_thresh);
writeback_inodes_wbc(&wbc);
- pages_written += write_chunk - wbc.nr_to_write;
+ wrote = write_chunk - wbc.nr_to_write;
+ trace_writeback_bdp_end(wrote);
+ pages_written += wrote;
get_dirty_limits(&background_thresh, &dirty_thresh,
&bdi_thresh, bdi);
}