X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=9488adeeffe43983a03a0b8089eb400afe7db96c;hp=5a3ca7444946751f4e208a32affdad5fb98051b9;hb=38fdef226f1a1fa053ab53005abbc184143bff65;hpb=0ce8b119b65849e537cab628a176a0ec4238aab0

diff --git a/io_u.c b/io_u.c
index 5a3ca744..9488adee 100644
--- a/io_u.c
+++ b/io_u.c
@@ -13,6 +13,7 @@ struct io_completion_data {
 	int nr;				/* input */
+	int account;			/* input */
 
 	int error;			/* output */
 	unsigned long bytes_done[2];	/* output */
@@ -168,9 +169,16 @@ static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
 		goto ffz;
 
 	do {
-		r = os_random_long(&td->random_state);
+		if (td->o.use_os_rand) {
+			r = os_random_long(&td->random_state);
+			*b = (lastb - 1) * (r / ((unsigned long long) OS_RAND_MAX + 1.0));
+		} else {
+			r = __rand(&td->__random_state);
+			*b = (lastb - 1) * (r / ((unsigned long long) FRAND_MAX + 1.0));
+		}
+
 		dprint(FD_RANDOM, "off rand %llu\n", r);
-		*b = (lastb - 1) * (r / ((unsigned long long) OS_RAND_MAX + 1.0));
+
 
 		/*
 		 * if we are not maintaining a random map, we are done.
@@ -203,7 +211,10 @@ static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
 		if (!get_next_free_block(td, f, ddir, b))
 			goto ret;
 
-		r = os_random_long(&td->random_state);
+		if (td->o.use_os_rand)
+			r = os_random_long(&td->random_state);
+		else
+			r = __rand(&td->__random_state);
 	} while (--loops);
 
 	/*
@@ -239,7 +250,16 @@ static int get_next_seq_block(struct thread_data *td, struct fio_file *f,
 	assert(ddir_rw(ddir));
 
 	if (f->last_pos < f->real_file_size) {
-		*b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir];
+		unsigned long long pos;
+
+		if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
+			f->last_pos = f->real_file_size;
+
+		pos = f->last_pos - f->file_offset;
+		if (pos)
+			pos += td->o.ddir_seq_add;
+
+		*b = pos / td->o.min_bs[ddir];
 		return 0;
 	}
 
@@ -331,12 +351,20 @@ static int get_next_offset(struct thread_data *td, struct io_u *io_u)
 	return __get_next_offset(td, io_u);
 }
 
+static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
+			    unsigned int buflen)
+{
+	struct fio_file *f = io_u->file;
+
+	return io_u->offset + buflen <= f->io_size + td->o.start_offset;
+}
+
 static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
 {
 	const int ddir = io_u->ddir;
 	unsigned int uninitialized_var(buflen);
 	unsigned int minbs, maxbs;
-	long r;
+	unsigned long r, rand_max;
 
 	assert(ddir_rw(ddir));
 
@@ -344,12 +372,22 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
 	maxbs = td->o.max_bs[ddir];
 
 	if (minbs == maxbs)
-		buflen = minbs;
-	else {
-		r = os_random_long(&td->bsrange_state);
+		return minbs;
+
+	if (td->o.use_os_rand)
+		rand_max = OS_RAND_MAX;
+	else
+		rand_max = FRAND_MAX;
+
+	do {
+		if (td->o.use_os_rand)
+			r = os_random_long(&td->bsrange_state);
+		else
+			r = __rand(&td->__bsrange_state);
+
 		if (!td->o.bssplit_nr[ddir]) {
 			buflen = 1 + (unsigned int) ((double) maxbs *
-					(r / (OS_RAND_MAX + 1.0)));
+					(r / (rand_max + 1.0)));
 			if (buflen < minbs)
 				buflen = minbs;
 		} else {
@@ -361,19 +399,16 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
 
 			buflen = bsp->bs;
 			perc += bsp->perc;
-			if (r <= ((OS_RAND_MAX / 100L) * perc))
+			if ((r <= ((rand_max / 100L) * perc)) &&
+			    io_u_fits(td, io_u, buflen))
 				break;
 		}
 	}
+
 	if (!td->o.bs_unaligned && is_power_of_2(minbs))
 		buflen = (buflen + minbs - 1) & ~(minbs - 1);
-	}
 
-	if (io_u->offset + buflen > io_u->file->real_file_size) {
-		dprint(FD_IO, "lower buflen %u -> %u (ddir=%d)\n", buflen,
-						minbs, ddir);
-		buflen = minbs;
-	}
+	} while (!io_u_fits(td, io_u, buflen));
 
 	return buflen;
 }
@@ -404,10 +439,16 @@ static void set_rwmix_bytes(struct thread_data *td)
 
 static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
 {
 	unsigned int v;
-	long r;
+	unsigned long r;
+
+	if (td->o.use_os_rand) {
+		r = os_random_long(&td->rwmix_state);
+		v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
+	} else {
+		r = __rand(&td->__rwmix_state);
+		v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
+	}
 
-	r = os_random_long(&td->rwmix_state);
-	v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
 	if (v <= td->o.rwmix[DDIR_READ])
 		return DDIR_READ;
@@ -450,6 +491,16 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 	} else
 		usec = td->rate_pending_usleep[ddir];
 
+	/*
+	 * We are going to sleep, ensure that we flush anything pending as
+	 * not to skew our latency numbers
+	 */
+	if (td->cur_depth) {
+		int fio_unused ret;
+
+		ret = io_u_queued_complete(td, td->cur_depth, NULL);
+	}
+
 	fio_gettime(&t, NULL);
 	usec_sleep(td, usec);
 	usec = utime_since_now(&t);
@@ -823,11 +874,19 @@ static struct fio_file *get_next_file_rand(struct thread_data *td,
 	int fno;
 
 	do {
-		long r = os_random_long(&td->next_file_state);
 		int opened = 0;
+		unsigned long r;
+
+		if (td->o.use_os_rand) {
+			r = os_random_long(&td->next_file_state);
+			fno = (unsigned int) ((double) td->o.nr_files
+				* (r / (OS_RAND_MAX + 1.0)));
+		} else {
+			r = __rand(&td->__next_file_state);
+			fno = (unsigned int) ((double) td->o.nr_files
+				* (r / (FRAND_MAX + 1.0)));
+		}
 
-		fno = (unsigned int) ((double) td->o.nr_files
-				* (r / (OS_RAND_MAX + 1.0)));
 		f = td->files[fno];
 		if (fio_file_done(f))
 			continue;
@@ -1062,6 +1121,44 @@ static int check_get_verify(struct thread_data *td, struct io_u *io_u)
 	return 0;
 }
 
+/*
+ * Fill offset and start time into the buffer content, to prevent too
+ * easy compressible data for simple de-dupe attempts. Do this for every
+ * 512b block in the range, since that should be the smallest block size
+ * we can expect from a device.
+ */
+static void small_content_scramble(struct io_u *io_u)
+{
+	unsigned int i, nr_blocks = io_u->buflen / 512;
+	unsigned long long boffset;
+	unsigned int offset;
+	void *p, *end;
+
+	if (!nr_blocks)
+		return;
+
+	p = io_u->xfer_buf;
+	boffset = io_u->offset;
+
+	for (i = 0; i < nr_blocks; i++) {
+		/*
+		 * Fill the byte offset into a "random" start offset of
+		 * the buffer, given by the product of the usec time
+		 * and the actual offset.
+		 */
+		offset = (io_u->start_time.tv_usec ^ boffset) & 511;
+		offset &= ~(sizeof(unsigned long long) - 1);
+		if (offset >= 512 - sizeof(unsigned long long))
+			offset -= sizeof(unsigned long long);
+		memcpy(p + offset, &boffset, sizeof(boffset));
+
+		end = p + 512 - sizeof(io_u->start_time);
+		memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
+		p += 512;
+		boffset += 512;
+	}
+}
+
 /*
  * Return an io_u to be processed. Gets a buflen and offset, sets direction,
  * etc. The returned io_u is fully ready to be prepped and submitted.
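The scrambling helper added in the hunk above is self-contained enough to try outside of fio. Below is a minimal standalone sketch of the same placement logic: struct io_u with its xfer_buf, offset and start_time fields is replaced by plain function arguments, and scramble_512b_blocks() and main() are illustrative names of my own, not fio code.

#include <stdio.h>
#include <string.h>
#include <sys/time.h>

static void scramble_512b_blocks(char *buf, unsigned long long file_off,
				 unsigned int buflen,
				 const struct timeval *now)
{
	unsigned int i, offset, nr_blocks = buflen / 512;
	unsigned long long boffset = file_off;
	char *p = buf, *end;

	for (i = 0; i < nr_blocks; i++) {
		/*
		 * The usec part of the timestamp xor'ed with the block
		 * offset picks an 8-byte-aligned spot inside the block
		 * where the offset itself is written.
		 */
		offset = (now->tv_usec ^ boffset) & 511;
		offset &= ~(sizeof(unsigned long long) - 1);
		if (offset >= 512 - sizeof(unsigned long long))
			offset -= sizeof(unsigned long long);
		memcpy(p + offset, &boffset, sizeof(boffset));

		/* the timestamp itself goes at the block tail */
		end = p + 512 - sizeof(*now);
		memcpy(end, now, sizeof(*now));
		p += 512;
		boffset += 512;
	}
}

int main(void)
{
	static char buf[4096];	/* zero-filled: trivially de-dupable */
	struct timeval now;

	gettimeofday(&now, NULL);
	scramble_512b_blocks(buf, 1048576ULL, sizeof(buf), &now);

	/* after scrambling, adjacent 512b blocks no longer match */
	printf("blocks 0 and 1 differ: %s\n",
	       memcmp(buf, buf + 512, 512) ? "yes" : "no");
	return 0;
}

The design touches only a handful of bytes per 512-byte block, so it stays cheap, while the per-block offset and timestamp guarantee that no two blocks of an otherwise untouched buffer are identical, which is what defeats naive de-dupe.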
@@ -1070,6 +1167,7 @@ struct io_u *get_io_u(struct thread_data *td)
 {
 	struct fio_file *f;
 	struct io_u *io_u;
+	int do_scramble = 0;
 
 	io_u = __get_io_u(td);
 	if (!io_u) {
@@ -1111,11 +1209,14 @@ struct io_u *get_io_u(struct thread_data *td)
 		f->last_start = io_u->offset;
 		f->last_pos = io_u->offset + io_u->buflen;
 
-		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_WRITE)
-			populate_verify_io_u(td, io_u);
-		else if (td->o.refill_buffers && io_u->ddir == DDIR_WRITE)
-			io_u_fill_buffer(td, io_u, io_u->xfer_buflen);
-		else if (io_u->ddir == DDIR_READ) {
+		if (io_u->ddir == DDIR_WRITE) {
+			if (td->o.verify != VERIFY_NONE)
+				populate_verify_io_u(td, io_u);
+			else if (td->o.refill_buffers)
+				io_u_fill_buffer(td, io_u, io_u->xfer_buflen);
+			else if (td->o.scramble_buffers)
+				do_scramble = 1;
+		} else if (io_u->ddir == DDIR_READ) {
 			/*
 			 * Reset the buf_filled parameters so next time if the
 			 * buffer is used for writes it is refilled.
@@ -1135,6 +1236,8 @@ out:
 	if (!td_io_prep(td, io_u)) {
 		if (!td->o.disable_slat)
 			fio_gettime(&io_u->start_time, NULL);
+		if (do_scramble)
+			small_content_scramble(io_u);
 		return io_u;
 	}
 err_put:
@@ -1164,6 +1267,36 @@ void io_u_log_error(struct thread_data *td, struct io_u *io_u)
 		td_verror(td, io_u->error, "io_u error");
 }
 
+static void account_io_completion(struct thread_data *td, struct io_u *io_u,
+				  struct io_completion_data *icd,
+				  const enum fio_ddir idx, unsigned int bytes)
+{
+	unsigned long uninitialized_var(lusec);
+
+	if (!icd->account)
+		return;
+
+	if (!td->o.disable_clat || !td->o.disable_bw)
+		lusec = utime_since(&io_u->issue_time, &icd->time);
+
+	if (!td->o.disable_lat) {
+		unsigned long tusec;
+
+		tusec = utime_since(&io_u->start_time, &icd->time);
+		add_lat_sample(td, idx, tusec, bytes);
+	}
+
+	if (!td->o.disable_clat) {
+		add_clat_sample(td, idx, lusec, bytes);
+		io_u_mark_latency(td, lusec);
+	}
+
+	if (!td->o.disable_bw)
+		add_bw_sample(td, idx, bytes, &icd->time);
+
+	add_iops_sample(td, idx, &icd->time);
+}
+
 static void io_completed(struct thread_data *td, struct io_u *io_u,
 			 struct io_completion_data *icd)
 {
@@ -1201,6 +1334,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 		int ret;
 
 		td->io_blocks[idx]++;
+		td->this_io_blocks[idx]++;
 		td->io_bytes[idx] += bytes;
 		td->this_io_bytes[idx] += bytes;
 
@@ -1216,25 +1350,9 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 			}
 		}
 
-		if (ramp_time_over(td)) {
-			unsigned long uninitialized_var(lusec);
-
-			if (!td->o.disable_clat || !td->o.disable_bw)
-				lusec = utime_since(&io_u->issue_time,
-							&icd->time);
-			if (!td->o.disable_lat) {
-				unsigned long tusec;
+		if (ramp_time_over(td) && td->runstate == TD_RUNNING) {
+			account_io_completion(td, io_u, icd, idx, bytes);
 
-				tusec = utime_since(&io_u->start_time,
-							&icd->time);
-				add_lat_sample(td, idx, tusec, bytes);
-			}
-			if (!td->o.disable_clat) {
-				add_clat_sample(td, idx, lusec, bytes);
-				io_u_mark_latency(td, lusec);
-			}
-			if (!td->o.disable_bw)
-				add_bw_sample(td, idx, bytes, &icd->time);
 			if (__should_check_rate(td, idx)) {
 				td->rate_pending_usleep[idx] =
 					((td->this_io_bytes[idx] *
@@ -1284,6 +1402,7 @@ static void init_icd(struct thread_data *td, struct io_completion_data *icd,
 		fio_gettime(&icd->time, NULL);
 
 	icd->nr = nr;
+	icd->account = 1;
 
 	icd->error = 0;
 	icd->bytes_done[0] = icd->bytes_done[1] = 0;
@@ -1302,6 +1421,8 @@ static void ios_completed(struct thread_data *td,
 
 		if (!(io_u->flags & IO_U_F_FREE_DEF))
 			put_io_u(td, io_u);
+
+		icd->account = 0;
 	}
 }
@@ -1392,7 +1513,7 @@ void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
 	io_u->buf_filled_len = 0;
 
 	if (!td->o.zero_buffers)
-		fill_random_buf(io_u->buf, max_bs);
+		fill_random_buf(&td->buf_state, io_u->buf, max_bs);
 	else
 		memset(io_u->buf, 0, max_bs);
 }
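A pattern that repeats throughout this patch is the branch between the two random sources: os_random_long() with OS_RAND_MAX when td->o.use_os_rand is set, fio's internal __rand() with FRAND_MAX otherwise, with the raw value then normalized by r / (MAX + 1.0). Below is a minimal standalone sketch of that scaling idiom as get_rand_ddir() uses it; the C library's rand()/RAND_MAX stands in for either fio generator, and pick_ddir() and read_perc are illustrative names of my own, not fio's.

#include <stdio.h>
#include <stdlib.h>

enum fio_ddir { DDIR_READ, DDIR_WRITE };

static enum fio_ddir pick_ddir(unsigned int read_perc)
{
	unsigned long r = rand();
	unsigned int v;

	/* the patch's scaling idiom: v is uniform over 1..100 */
	v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
	if (v <= read_perc)
		return DDIR_READ;

	return DDIR_WRITE;
}

int main(void)
{
	unsigned long reads = 0, i, n = 100000;

	srand(1);
	for (i = 0; i < n; i++)
		if (pick_ddir(70) == DDIR_READ)
			reads++;

	/* expect roughly 70% reads for a read percentage of 70 */
	printf("reads: %.1f%%\n", 100.0 * reads / n);
	return 0;
}

Because both generators go through the same r / (MAX + 1.0) normalization into [0, 1), the resulting distribution is the same whichever source is selected, which is presumably why the patch can toggle between them behind a single option without changing any of the downstream scaling arithmetic.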