X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=backend.c;h=808e4362d1e369a16029bc10b8a7bd9c0bce4804;hb=84f07387d900ae9e06ce7b3eadf13273a577b7ca;hp=e20a2e07766eabf4ea0f86dfde6db40e9ed621f7;hpb=548b363c08875165a018788195e8fd2304c2ce24;p=fio.git

diff --git a/backend.c b/backend.c
index e20a2e07..808e4362 100644
--- a/backend.c
+++ b/backend.c
@@ -393,7 +393,7 @@ static bool break_on_this_error(struct thread_data *td, enum fio_ddir ddir,
 		td_clear_error(td);
 		*retptr = 0;
 		return false;
-	} else if (td->o.fill_device && err == ENOSPC) {
+	} else if (td->o.fill_device && (err == ENOSPC || err == EDQUOT)) {
 		/*
 		 * We expect to hit this error if
 		 * fill_device option is set.
@@ -858,14 +858,15 @@ static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
 	return 0;
 }
 
-static void handle_thinktime(struct thread_data *td, enum fio_ddir ddir)
+static void handle_thinktime(struct thread_data *td, enum fio_ddir ddir,
+			     struct timespec *time)
 {
 	unsigned long long b;
 	uint64_t total;
 	int left;
 
-	b = ddir_rw_sum(td->io_blocks);
-	if (b % td->o.thinktime_blocks)
+	b = ddir_rw_sum(td->thinktime_blocks_counter);
+	if (b % td->o.thinktime_blocks || !b)
 		return;
 
 	io_u_quiesce(td);
@@ -898,6 +899,9 @@ static void handle_thinktime(struct thread_data *td, enum fio_ddir ddir)
 		/* adjust for rate_process=poisson */
 		td->last_usec[ddir] += total;
 	}
+
+	if (time && should_check_rate(td))
+		fio_gettime(time, NULL);
 }
 
 /*
@@ -1076,6 +1080,10 @@ reap:
 		}
 		if (ret < 0)
 			break;
+
+		if (ddir_rw(ddir) && td->o.thinktime)
+			handle_thinktime(td, ddir, &comp_time);
+
 		if (!ddir_rw_sum(td->bytes_done) &&
 		    !td_ioengine_flagged(td, FIO_NOIO))
 			continue;
@@ -1090,9 +1098,6 @@ reap:
 		}
 		if (!in_ramp_time(td) && td->o.latency_target)
 			lat_target_check(td);
-
-		if (ddir_rw(ddir) && td->o.thinktime)
-			handle_thinktime(td, ddir);
 	}
 
 	check_update_rusage(td);
@@ -1100,7 +1105,7 @@ reap:
 	if (td->trim_entries)
 		log_err("fio: %lu trim entries leaked?\n", td->trim_entries);
 
-	if (td->o.fill_device && td->error == ENOSPC) {
+	if (td->o.fill_device && (td->error == ENOSPC || td->error == EDQUOT)) {
 		td->error = 0;
 		fio_mark_td_terminate(td);
 	}
@@ -1115,7 +1120,8 @@ reap:
 
 		if (i) {
 			ret = io_u_queued_complete(td, i);
-			if (td->o.fill_device && td->error == ENOSPC)
+			if (td->o.fill_device &&
+			    (td->error == ENOSPC || td->error == EDQUOT))
 				td->error = 0;
 		}
 
@@ -1336,22 +1342,19 @@ int init_io_u_buffers(struct thread_data *td)
 	return 0;
 }
 
+#ifdef FIO_HAVE_IOSCHED_SWITCH
 /*
- * This function is Linux specific.
+ * These functions are Linux specific.
  * FIO_HAVE_IOSCHED_SWITCH enabled currently means it's Linux.
  */
-static int switch_ioscheduler(struct thread_data *td)
+static int set_ioscheduler(struct thread_data *td, struct fio_file *file)
 {
-#ifdef FIO_HAVE_IOSCHED_SWITCH
 	char tmp[256], tmp2[128], *p;
 	FILE *f;
 	int ret;
 
-	if (td_ioengine_flagged(td, FIO_DISKLESSIO))
-		return 0;
-
-	assert(td->files && td->files[0]);
-	sprintf(tmp, "%s/queue/scheduler", td->files[0]->du->sysfs_root);
+	assert(file->du && file->du->sysfs_root);
+	sprintf(tmp, "%s/queue/scheduler", file->du->sysfs_root);
 
 	f = fopen(tmp, "r+");
 	if (!f) {
@@ -1404,7 +1407,7 @@ static int switch_ioscheduler(struct thread_data *td)
 
 	sprintf(tmp2, "[%s]", td->o.ioscheduler);
 	if (!strstr(tmp, tmp2)) {
-		log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
+		log_err("fio: unable to set io scheduler to %s\n", td->o.ioscheduler);
 		td_verror(td, EINVAL, "iosched_switch");
 		fclose(f);
 		return 1;
@@ -1412,11 +1415,55 @@ static int switch_ioscheduler(struct thread_data *td)
 
 	fclose(f);
 	return 0;
+}
+
+static int switch_ioscheduler(struct thread_data *td)
+{
+	struct fio_file *f;
+	unsigned int i;
+	int ret = 0;
+
+	if (td_ioengine_flagged(td, FIO_DISKLESSIO))
+		return 0;
+
+	assert(td->files && td->files[0]);
+
+	for_each_file(td, f, i) {
+
+		/* Only consider regular files and block device files */
+		switch (f->filetype) {
+		case FIO_TYPE_FILE:
+		case FIO_TYPE_BLOCK:
+			/*
+			 * Make sure that the device hosting the file could
+			 * be determined.
+			 */
+			if (!f->du)
+				continue;
+			break;
+		case FIO_TYPE_CHAR:
+		case FIO_TYPE_PIPE:
+		default:
+			continue;
+		}
+
+		ret = set_ioscheduler(td, f);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 #else
+
+static int switch_ioscheduler(struct thread_data *td)
+{
 	return 0;
-#endif
 }
+
+#endif /* FIO_HAVE_IOSCHED_SWITCH */
+
 static bool keep_running(struct thread_data *td)
 {
 	unsigned long long limit;
@@ -1744,6 +1791,11 @@ static void *thread_main(void *data)
 	if (rate_submit_init(td, sk_out))
 		goto err;
 
+	if (td->o.thinktime_blocks_type == THINKTIME_BLOCKS_TYPE_COMPLETE)
+		td->thinktime_blocks_counter = td->io_blocks;
+	else
+		td->thinktime_blocks_counter = td->io_issues;
+
 	set_epoch_time(td, o->log_unix_epoch);
 	fio_getrusage(&td->ru_start);
 	memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch));
@@ -2527,6 +2579,7 @@ int fio_backend(struct sk_out *sk_out)
 	for_each_td(td, i) {
 		steadystate_free(td);
 		fio_options_free(td);
+		fio_dump_options_free(td);
 		if (td->rusage_sem) {
 			fio_sem_remove(td->rusage_sem);
 			td->rusage_sem = NULL;
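
Note on the thinktime hunks above: the trigger now counts either issued or completed blocks (thread_main() points td->thinktime_blocks_counter at td->io_issues or td->io_blocks depending on thinktime_blocks_type), and the added "|| !b" guard keeps a counter of zero from satisfying the modulo test, so no thinktime pause can fire before any I/O has happened. The standalone C sketch below is illustration only, not fio code; thinktime_due() is a hypothetical helper that just mirrors the shape of the patched condition.

/*
 * Illustrative sketch only, not fio source: thinktime_due() mirrors the
 * patched check in handle_thinktime().  With the "|| !b" guard a counter
 * of zero no longer passes, so the pause only fires on non-zero multiples
 * of the configured block interval.
 */
#include <stdio.h>
#include <stdbool.h>

static bool thinktime_due(unsigned long long blocks, unsigned long long every)
{
	/* Same shape as the patched test: skip unless a non-zero multiple. */
	if (blocks % every || !blocks)
		return false;
	return true;
}

int main(void)
{
	const unsigned long long every = 4;	/* stands in for o.thinktime_blocks */

	for (unsigned long long b = 0; b <= 8; b++)
		printf("blocks=%llu -> %s\n", b,
		       thinktime_due(b, every) ? "pause" : "no pause");
	return 0;
}

With every = 4 this prints "pause" only for blocks = 4 and 8; blocks = 0 is skipped by the new guard.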