Right now we only do that when the job writes; do it for the
initial setup as well. Otherwise the file is initially zero filled,
which might not be what you want.
Also fixes a bug with compression if compress_chunk isn't set.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 	}

 	b = malloc(td->o.max_bs[DDIR_WRITE]);
-	memset(b, 0, td->o.max_bs[DDIR_WRITE]);

 	left = f->real_file_size;
 	while (left && !td->terminate) {
 		if (bs > left)
 			bs = left;

+		fill_io_buffer(td, b, bs, bs);
+
 		r = write(f->fd, b, bs);
 		if (r > 0) {
-/*
- * "randomly" fill the buffer contents
- */
-void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
-		      unsigned int min_write, unsigned int max_bs)
+void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
+		    unsigned int max_bs)
 {
-	io_u->buf_filled_len = 0;
-
 	if (!td->o.zero_buffers) {
 		unsigned int perc = td->o.compress_percentage;

 		if (perc) {
 			unsigned int seg = min_write;

 			seg = min(min_write, td->o.compress_chunk);
+			if (!seg)
+				seg = min_write;
+
-			fill_random_buf_percentage(&td->buf_state, io_u->buf,
+			fill_random_buf_percentage(&td->buf_state, buf,
						   perc, seg, max_bs);
 		} else
-			fill_random_buf(&td->buf_state, io_u->buf, max_bs);
+			fill_random_buf(&td->buf_state, buf, max_bs);
 	} else
-		memset(io_u->buf, 0, max_bs);
+		memset(buf, 0, max_bs);
+}
+
+/*
+ * "randomly" fill the buffer contents
+ */
+void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
+		      unsigned int min_write, unsigned int max_bs)
+{
+	io_u->buf_filled_len = 0;
+	fill_io_buffer(td, io_u->buf, min_write, max_bs);
+}
 extern void io_u_queued(struct thread_data *, struct io_u *);
 extern void io_u_log_error(struct thread_data *, struct io_u *);
 extern void io_u_mark_depth(struct thread_data *, unsigned int);
+extern void fill_io_buffer(struct thread_data *, void *, unsigned int, unsigned int);
 extern void io_u_fill_buffer(struct thread_data *td, struct io_u *, unsigned int, unsigned int);
 void io_u_mark_complete(struct thread_data *, unsigned int);
 void io_u_mark_submit(struct thread_data *, unsigned int);