#include "err.h"
#include "lib/tp.h"
#include "workqueue.h"
+#include "lib/mountcheck.h"
static pthread_t helper_thread;
static pthread_mutex_t helper_lock;
if (spent < td->o.ratecycle)
return 0;
- if (td->o.rate[ddir]) {
+ if (td->o.rate[ddir] || td->o.ratemin[ddir]) {
/*
* check bandwidth specified rate
*/
log_err("%s: min iops rate %u not met,"
" got %lu\n", td->o.name,
rate_iops_min, rate);
+ return 1;
}
}
}
return 0;
}
+/*
+ * We need to update the runtime consistently in ms, but keep a running
+ * tally of the current elapsed time in microseconds for sub millisecond
+ * updates.
+ */
+static inline void update_runtime(struct thread_data *td,
+ unsigned long long *elapsed_us,
+ const enum fio_ddir ddir)
+{
+ /* verify_only write jobs account no write runtime; skip entirely */
+ if (ddir == DDIR_WRITE && td_write(td) && td->o.verify_only)
+ return;
+
+ /*
+ * Back out the previously-added rounded-up ms value, fold the newly
+ * elapsed microseconds into the running usec tally, then re-add the
+ * rounded-up ms total. Re-deriving from the usec tally each time
+ * avoids accumulating per-update rounding error in runtime[].
+ */
+ td->ts.runtime[ddir] -= (elapsed_us[ddir] + 999) / 1000;
+ elapsed_us[ddir] += utime_since_now(&td->start);
+ td->ts.runtime[ddir] += (elapsed_us[ddir] + 999) / 1000;
+}
+
static int break_on_this_error(struct thread_data *td, enum fio_ddir ddir,
int *retptr)
{
return bytes >= limit || exceeds_number_ios(td);
}
+/*
+ * Compute the time offset (in usec from job start) at which the next
+ * IO should be issued for this data direction to satisfy the
+ * configured byte rate. Returns 0 when no byte rate is set.
+ */
+static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
+{
+ uint64_t issued_bytes, rate_bps;
+
+ assert(!(td->flags & TD_F_CHILD));
+ issued_bytes = td->rate_io_issue_bytes[ddir];
+ rate_bps = td->rate_bps[ddir];
+ if (!rate_bps)
+ return 0;
+
+ /*
+ * Split into whole seconds and remainder so the intermediate
+ * products stay within 64 bits for realistic rates.
+ */
+ return (issued_bytes / rate_bps) * 1000000 +
+ (issued_bytes % rate_bps) * 1000000 / rate_bps;
+}
+
/*
* Main IO worker function. It retrieves io_u's to process and queues
* and reaps them, checking for rate and errors along the way.
if (td->error)
break;
ret = workqueue_enqueue(&td->io_wq, io_u);
+
+ if (should_check_rate(td))
+ td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
+
} else {
ret = td_io_queue(td, io_u);
- if (io_queue_event(td, io_u, &ret, ddir, &bytes_issued, 1, &comp_time))
+ if (should_check_rate(td))
+ td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
+
+ if (io_queue_event(td, io_u, &ret, ddir, &bytes_issued, 0, &comp_time))
break;
/*
}
if (!in_ramp_time(td) && td->o.latency_target)
lat_target_check(td);
-
+
if (td->o.thinktime) {
unsigned long long b;
/*
* Read back and check that the selected scheduler is now the default.
*/
+ memset(tmp, 0, sizeof(tmp));
ret = fread(tmp, sizeof(tmp), 1, f);
if (ferror(f) || ret < 0) {
td_verror(td, errno, "fread");
fclose(f);
return 1;
}
- tmp[sizeof(tmp) - 1] = '\0';
+ /*
+ * either a list of io schedulers or "none\n" is expected.
+ */
+ tmp[strlen(tmp) - 1] = '\0';
sprintf(tmp2, "[%s]", td->o.ioscheduler);
static int exec_string(struct thread_options *o, const char *string, const char *mode)
{
- int ret, newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
+ size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
+ int ret;
char *str;
str = malloc(newlen);
*/
static void *thread_main(void *data)
{
- unsigned long long elapsed;
+ unsigned long long elapsed_us[DDIR_RWDIR_CNT] = { 0, };
struct thread_data *td = data;
struct thread_options *o = &td->o;
pthread_condattr_t attr;
check_update_rusage(td);
fio_mutex_down(stat_mutex);
- if (td_read(td) && td->io_bytes[DDIR_READ]) {
- elapsed = mtime_since_now(&td->start);
- td->ts.runtime[DDIR_READ] += elapsed;
- }
- if (td_write(td) && td->io_bytes[DDIR_WRITE]) {
- elapsed = mtime_since_now(&td->start);
- td->ts.runtime[DDIR_WRITE] += elapsed;
- }
- if (td_trim(td) && td->io_bytes[DDIR_TRIM]) {
- elapsed = mtime_since_now(&td->start);
- td->ts.runtime[DDIR_TRIM] += elapsed;
- }
+ if (td_read(td) && td->io_bytes[DDIR_READ])
+ update_runtime(td, elapsed_us, DDIR_READ);
+ if (td_write(td) && td->io_bytes[DDIR_WRITE])
+ update_runtime(td, elapsed_us, DDIR_WRITE);
+ if (td_trim(td) && td->io_bytes[DDIR_TRIM])
+ update_runtime(td, elapsed_us, DDIR_TRIM);
fio_gettime(&td->start, NULL);
fio_mutex_up(stat_mutex);
check_update_rusage(td);
fio_mutex_down(stat_mutex);
- td->ts.runtime[DDIR_READ] += mtime_since_now(&td->start);
+ update_runtime(td, elapsed_us, DDIR_READ);
fio_gettime(&td->start, NULL);
fio_mutex_up(stat_mutex);
if (is_backend) {
void *data;
+ int ver;
ret = fio_server_get_verify_state(td->o.name,
- td->thread_number - 1, &data);
+ td->thread_number - 1, &data, &ver);
if (!ret)
- verify_convert_assign_state(td, data);
+ verify_convert_assign_state(td, data, ver);
} else
ret = verify_load_state(td, "local");
usleep(usecs);
}
+/*
+ * Refuse to run a write workload against a block device that is
+ * currently mounted, unless the user explicitly opted in with the
+ * 'allow_mounted_write' option. Returns 1 if the job must abort,
+ * 0 if it is safe (or irrelevant, i.e. not a write job).
+ */
+static int check_mount_writes(struct thread_data *td)
+{
+ struct fio_file *f;
+ unsigned int i;
+
+ if (!td_write(td) || td->o.allow_mounted_write)
+ return 0;
+
+ /* only block devices can clobber a live filesystem underneath it */
+ for_each_file(td, f, i) {
+ if (f->filetype != FIO_TYPE_BD)
+ continue;
+ if (device_is_mounted(f->file_name))
+ goto mounted;
+ }
+
+ return 0;
+mounted:
+ /* terminate with '\n' like every other log_err() message */
+ log_err("fio: %s appears mounted, and 'allow_mounted_write' isn't set. Aborting.\n", f->file_name);
+ return 1;
+}
+
/*
* Main function for kicking off and reaping jobs, as needed.
*/
nr_thread = nr_process = 0;
for_each_td(td, i) {
+ if (check_mount_writes(td))
+ return;
if (td->o.use_thread)
nr_thread++;
else
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
struct io_log *log = agg_io_log[i];
- flush_log(log);
+ flush_log(log, 0);
free_log(log);
}
}