projects
/
fio.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Add iops rate to ETA display
[fio.git]
/
io_u.c
diff --git
a/io_u.c
b/io_u.c
index 27014c8aad503b4cb8ed7eba374a954b6b7c6a2d..40fd1968eb08b4a9e8d1bcd6108ab7b6f780d1fe 100644
(file)
--- a/
io_u.c
+++ b/
io_u.c
@@ -7,6 +7,7 @@
#include "fio.h"
#include "hash.h"
#include "fio.h"
#include "hash.h"
+#include "verify.h"
struct io_completion_data {
int nr; /* input */
struct io_completion_data {
int nr; /* input */
@@ -95,7 +96,7 @@
static unsigned long long last_block(struct thread_data *td, struct fio_file *f,
if (max_size > f->real_file_size)
max_size = f->real_file_size;
if (max_size > f->real_file_size)
max_size = f->real_file_size;
-	max_blocks = max_size / (unsigned long long) td->o.min_bs[ddir];
+	max_blocks = max_size / (unsigned long long) td->o.ba[ddir];
if (!max_blocks)
return 0;
if (!max_blocks)
return 0;
@@ -113,7 +114,8 @@
static int get_next_free_block(struct thread_data *td, struct fio_file *f,
i = f->last_free_lookup;
*b = (i * BLOCKS_PER_MAP);
i = f->last_free_lookup;
*b = (i * BLOCKS_PER_MAP);
- while ((*b) * min_bs < f->real_file_size) {
+ while ((*b) * min_bs < f->real_file_size &&
+ (*b) * min_bs < f->io_size) {
if (f->file_map[i] != (unsigned int) -1) {
*b += ffz(f->file_map[i]);
if (*b > last_block(td, f, ddir))
if (f->file_map[i] != (unsigned int) -1) {
*b += ffz(f->file_map[i]);
if (*b > last_block(td, f, ddir))
@@ -212,7 +214,7 @@
static int get_next_offset(struct thread_data *td, struct io_u *io_u)
b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir];
}
b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir];
}
-	io_u->offset = b * td->o.min_bs[ddir];
+	io_u->offset = b * td->o.ba[ddir];
if (io_u->offset >= f->io_size) {
dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
io_u->offset, f->io_size);
if (io_u->offset >= f->io_size) {
dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
io_u->offset, f->io_size);
@@ -248,7 +250,7 @@
static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
buflen = minbs;
else {
r = os_random_long(&td->bsrange_state);
buflen = minbs;
else {
r = os_random_long(&td->bsrange_state);
-		if (!td->o.bssplit_nr) {
+		if (!td->o.bssplit_nr[ddir]) {
buflen = 1 + (unsigned int) ((double) maxbs *
(r / (OS_RAND_MAX + 1.0)));
if (buflen < minbs)
buflen = 1 + (unsigned int) ((double) maxbs *
(r / (OS_RAND_MAX + 1.0)));
if (buflen < minbs)
@@ -257,8 +259,8 @@
static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
long perc = 0;
unsigned int i;
long perc = 0;
unsigned int i;
-		for (i = 0; i < td->o.bssplit_nr; i++) {
-			struct bssplit *bsp = &td->o.bssplit[i];
+		for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
+			struct bssplit *bsp = &td->o.bssplit[ddir][i];
buflen = bsp->bs;
perc += bsp->perc;
buflen = bsp->bs;
perc += bsp->perc;
@@ -317,7 +319,6 @@
static enum fio_ddir get_rw_ddir(struct thread_data *td)
* Check if it's time to seed a new data direction.
*/
if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
* Check if it's time to seed a new data direction.
*/
if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
- unsigned long long max_bytes;
enum fio_ddir ddir;
/*
enum fio_ddir ddir;
/*
@@ -326,16 +327,6 @@
static enum fio_ddir get_rw_ddir(struct thread_data *td)
* ranges too much
*/
ddir = get_rand_ddir(td);
* ranges too much
*/
ddir = get_rand_ddir(td);
- max_bytes = td->this_io_bytes[ddir];
- if (max_bytes >=
- (td->o.size * td->o.rwmix[ddir] / 100)) {
- if (!td->rw_end_set[ddir]) {
- td->rw_end_set[ddir] = 1;
- fio_gettime(&td->rw_end[ddir], NULL);
- }
-
- ddir ^= 1;
- }
if (ddir != td->rwmix_ddir)
set_rwmix_bytes(td);
if (ddir != td->rwmix_ddir)
set_rwmix_bytes(td);
@@ -620,8 +611,8 @@
static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
/*
* Get next file to service by choosing one at random
*/
/*
* Get next file to service by choosing one at random
*/
-static struct fio_file *get_next_file_rand(struct thread_data *td, int goodf,
-					   int badf)
+static struct fio_file *get_next_file_rand(struct thread_data *td,
+					   enum fio_file_flags goodf,
+					   enum fio_file_flags badf)
{
struct fio_file *f;
int fno;
{
struct fio_file *f;
int fno;
@@ -633,10 +624,10 @@
static struct fio_file *get_next_file_rand(struct thread_data *td, int goodf,
fno = (unsigned int) ((double) td->o.nr_files
* (r / (OS_RAND_MAX + 1.0)));
f = td->files[fno];
fno = (unsigned int) ((double) td->o.nr_files
* (r / (OS_RAND_MAX + 1.0)));
f = td->files[fno];
-		if (f->flags & FIO_FILE_DONE)
+		if (fio_file_done(f))
continue;
continue;
-		if (!(f->flags & FIO_FILE_OPEN)) {
+		if (!fio_file_open(f)) {
int err;
err = td_io_open_file(td, f);
int err;
err = td_io_open_file(td, f);
@@ -673,18 +664,19 @@
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
td->next_file = 0;
dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
td->next_file = 0;
dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
-		if (f->flags & FIO_FILE_DONE) {
+		if (fio_file_done(f)) {
f = NULL;
continue;
}
f = NULL;
continue;
}
-		if (!(f->flags & FIO_FILE_OPEN)) {
+		if (!fio_file_open(f)) {
int err;
err = td_io_open_file(td, f);
if (err) {
dprint(FD_FILE, "error %d on open of %s\n",
err, f->file_name);
int err;
err = td_io_open_file(td, f);
if (err) {
dprint(FD_FILE, "error %d on open of %s\n",
err, f->file_name);
+ f = NULL;
continue;
}
opened = 1;
continue;
}
opened = 1;
@@ -719,7 +711,7 @@
static struct fio_file *get_next_file(struct thread_data *td)
}
f = td->file_service_file;
}
f = td->file_service_file;
-	if (f && (f->flags & FIO_FILE_OPEN) && !(f->flags & FIO_FILE_CLOSING)) {
+	if (f && fio_file_open(f) && !fio_file_closing(f)) {
if (td->o.file_service_type == FIO_FSERVICE_SEQ)
goto out;
if (td->file_service_left--)
if (td->o.file_service_type == FIO_FSERVICE_SEQ)
goto out;
if (td->file_service_left--)
@@ -728,9 +720,9 @@
static struct fio_file *get_next_file(struct thread_data *td)
if (td->o.file_service_type == FIO_FSERVICE_RR ||
td->o.file_service_type == FIO_FSERVICE_SEQ)
if (td->o.file_service_type == FIO_FSERVICE_RR ||
td->o.file_service_type == FIO_FSERVICE_SEQ)
-		f = get_next_file_rr(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
+		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
else
else
-		f = get_next_file_rand(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
+		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);
td->file_service_file = f;
td->file_service_left = td->file_service_nr - 1;
td->file_service_file = f;
td->file_service_left = td->file_service_nr - 1;
@@ -748,27 +740,16 @@
static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
if (!f)
return 1;
if (!f)
return 1;
-set_file:
io_u->file = f;
get_file(f);
if (!fill_io_u(td, io_u))
break;
io_u->file = f;
get_file(f);
if (!fill_io_u(td, io_u))
break;
- /*
- * optimization to prevent close/open of the same file. This
- * way we preserve queueing etc.
- */
- if (td->o.nr_files == 1 && td->o.time_based) {
- put_file_log(td, f);
- fio_file_reset(f);
- goto set_file;
- }
-
put_file_log(td, f);
td_io_close_file(td, f);
io_u->file = NULL;
put_file_log(td, f);
td_io_close_file(td, f);
io_u->file = NULL;
-	f->flags |= FIO_FILE_DONE;
+	fio_file_set_done(f);
td->nr_done_files++;
dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name, td->nr_done_files, td->o.nr_files);
} while (1);
td->nr_done_files++;
dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name, td->nr_done_files, td->o.nr_files);
} while (1);
@@ -838,7 +819,7 @@
struct io_u *get_io_u(struct thread_data *td)
}
f = io_u->file;
}
f = io_u->file;
-	assert(f->flags & FIO_FILE_OPEN);
+	assert(fio_file_open(f));
if (io_u->ddir != DDIR_SYNC) {
if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
if (io_u->ddir != DDIR_SYNC) {
if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
@@ -848,7 +829,7 @@
struct io_u *get_io_u(struct thread_data *td)
f->last_pos = io_u->offset + io_u->buflen;
f->last_pos = io_u->offset + io_u->buflen;
-		if (td->o.verify != VERIFY_NONE)
+		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_WRITE)
populate_verify_io_u(td, io_u);
else if (td->o.refill_buffers && io_u->ddir == DDIR_WRITE)
io_u_fill_buffer(td, io_u, io_u->xfer_buflen);
populate_verify_io_u(td, io_u);
else if (td->o.refill_buffers && io_u->ddir == DDIR_WRITE)
io_u_fill_buffer(td, io_u, io_u->xfer_buflen);
@@ -857,7 +838,6 @@
struct io_u *get_io_u(struct thread_data *td)
/*
* Set io data pointers.
*/
/*
* Set io data pointers.
*/
- io_u->endpos = io_u->offset + io_u->buflen;
io_u->xfer_buf = io_u->buf;
io_u->xfer_buflen = io_u->buflen;
io_u->xfer_buf = io_u->buf;
io_u->xfer_buflen = io_u->buflen;
@@ -927,11 +907,11 @@
static void io_completed(struct thread_data *td, struct io_u *io_u,
&icd->time);
if (!td->o.disable_clat) {
&icd->time);
if (!td->o.disable_clat) {
-		add_clat_sample(td, idx, usec);
+		add_clat_sample(td, idx, usec, bytes);
io_u_mark_latency(td, usec);
}
if (!td->o.disable_bw)
io_u_mark_latency(td, usec);
}
if (!td->o.disable_bw)
-		add_bw_sample(td, idx, &icd->time);
+		add_bw_sample(td, idx, bytes, &icd->time);
}
if (td_write(td) && idx == DDIR_WRITE &&
}
if (td_write(td) && idx == DDIR_WRITE &&
@@ -1036,7 +1016,7 @@
void io_u_queued(struct thread_data *td, struct io_u *io_u)
unsigned long slat_time;
slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
unsigned long slat_time;
slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
-		add_slat_sample(td, io_u->ddir, slat_time);
+		add_slat_sample(td, io_u->ddir, io_u->xfer_buflen, slat_time);
}
}
}
}