static char run_str[MAX_JOBS + 1];
int shm_id = 0;
static struct timeval genesis;
+static int temp_stall_ts;
static void print_thread_status(void);
extern unsigned long long mlock_size;
/*
- * thread life cycle
+ * Thread life cycle. Once a thread has a runstate beyond TD_INITIALIZED, it
+ * will never go back. It may cycle between running/verifying/fsyncing.
+ * Once the thread reaches TD_EXITED, it is just waiting for the core to
+ * reap it.
*/
enum {
TD_NOT_CREATED = 0,
TD_CREATED,
TD_INITIALIZED,
TD_RUNNING,
TD_VERIFYING,
+ TD_FSYNCING,
TD_EXITED,
TD_REAPED,
};
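/*
 * Illustrative sketch, not part of the patch: the lifecycle above expressed
 * as a forward-only transition check. Names here are example-local; only
 * running/verifying/fsyncing may alternate, everything else moves forward
 * until the thread is reaped.
 */
#include <stdio.h>

enum {
	S_NOT_CREATED = 0, S_CREATED, S_INITIALIZED,
	S_RUNNING, S_VERIFYING, S_FSYNCING, S_EXITED, S_REAPED,
};

static int valid_transition(int from, int to)
{
	if (from >= S_RUNNING && from <= S_FSYNCING &&
	    to >= S_RUNNING && to <= S_FSYNCING)
		return 1;	/* running <-> verifying <-> fsyncing may cycle */
	return to > from;	/* otherwise only forward progress is allowed */
}

int main(void)
{
	printf("%d %d\n", valid_transition(S_VERIFYING, S_RUNNING),	/* 1 */
			  valid_transition(S_EXITED, S_RUNNING));	/* 0 */
	return 0;
}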
#define should_fsync(td) ((td_write(td) || td_rw(td)) && (!(td)->odirect || (td)->override_sync))
-static sem_t startup_sem;
+static volatile int startup_sem;
#define TERMINATE_ALL (-1)
#define JOB_START_TIMEOUT (5 * 1000)
}
}
+/*
+ * The ->file_map[] contains a map of blocks we have or have not done io
+ * to yet. Used to make sure we cover the entire range in a fair fashion.
+ */
static int random_map_free(struct thread_data *td, unsigned long long block)
{
unsigned int idx = RAND_MAP_IDX(td, block);
unsigned int bit = RAND_MAP_BIT(td, block);
return (td->file_map[idx] & (1UL << bit)) == 0;
}
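/*
 * Illustrative sketch, not part of the patch: how a block number maps onto
 * a word/bit pair in a bitmap like ->file_map[]. BLOCKS_PER_MAP and the
 * macro names below are assumptions for the example, not fio's definitions.
 */
#include <stdio.h>

#define BLOCKS_PER_MAP	(8 * sizeof(unsigned long))
#define MAP_IDX(block)	((block) / BLOCKS_PER_MAP)
#define MAP_BIT(block)	((block) & (BLOCKS_PER_MAP - 1))

int main(void)
{
	unsigned long map[4] = { 0, };		/* covers 4 * BLOCKS_PER_MAP blocks */
	unsigned long long block = 70;

	map[MAP_IDX(block)] |= 1UL << MAP_BIT(block);	/* mark the block used */

	printf("block %llu -> word %llu, bit %llu, free=%d\n", block,
	       (unsigned long long) MAP_IDX(block),
	       (unsigned long long) MAP_BIT(block),
	       (map[MAP_IDX(block)] & (1UL << MAP_BIT(block))) == 0);
	return 0;
}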
+/*
+ * Return the next free block in the map.
+ */
static int get_next_free_block(struct thread_data *td, unsigned long long *b)
{
int i;
return 1;
}
+/*
+ * Mark a given offset as used in the map.
+ */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
unsigned long long block = io_u->offset / (unsigned long long) td->min_bs;
io_u->buflen = blocks * td->min_bs;
}
+/*
+ * For random io, generate a random new block and see if it's used. Repeat
+ * until we find a free one. For sequential io, just return the end of
+ * the last io issued.
+ */
static int get_next_offset(struct thread_data *td, unsigned long long *offset)
{
unsigned long long b, rb;
int loops = 50;
do {
- lrand48_r(&td->random_state, &r);
+ r = os_random_long(&td->random_state);
b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX+1.0));
rb = b + (td->file_offset / td->min_bs);
loops--;
if (td->min_bs == td->max_bs)
buflen = td->min_bs;
else {
- lrand48_r(&td->bsrange_state, &r);
+ r = os_random_long(&td->bsrange_state);
buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
}
return buflen;
}
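/*
 * Illustrative sketch, not part of the patch: the random block / block size
 * selection described above. A random value is scaled into the block range
 * and retried a bounded number of times while the block is in use; the
 * buffer length is rounded up to a multiple of min_bs (assumed to be a
 * power of two). All names are local to the example.
 */
#include <stdio.h>
#include <stdlib.h>

static int block_used(unsigned long long b)
{
	return b % 3 == 0;	/* stand-in for the real file_map lookup */
}

int main(void)
{
	unsigned long long max_blocks = 1000, b;
	unsigned int min_bs = 4096, max_bs = 65536, buflen;
	int loops = 50;
	long r;

	srand48(0x1234);
	do {
		r = lrand48();
		b = (max_blocks - 1) * r / (unsigned long long) (RAND_MAX + 1.0);
	} while (block_used(b) && --loops);

	r = lrand48();
	buflen = 1 + (unsigned int) ((double) (max_bs - 1) * r / (RAND_MAX + 1.0));
	buflen = (buflen + min_bs - 1) & ~(min_bs - 1);

	printf("block %llu, buflen %u\n", b, buflen);
	return 0;
}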
+/*
+ * Check if we are above the minimum rate given.
+ */
static int check_min_rate(struct thread_data *td, struct timeval *now)
{
unsigned long spent;
rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
if (rate < td->ratemin) {
- printf("%s: min rate %d not met, got %ldKiB/sec\n", td->name, td->ratemin, rate);
+ fprintf(f_out, "%s: min rate %d not met, got %ldKiB/sec\n", td->name, td->ratemin, rate);
if (rate_quit)
terminate_threads(td->groupid);
return 1;
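/*
 * Illustrative sketch, not part of the patch: the rate check above. With the
 * time between samples measured in milliseconds, a byte delta divided by the
 * millisecond delta is roughly a KiB/sec figure, which is what gets compared
 * against the configured minimum. Values are made up for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long bytes_now = 52428800, bytes_then = 41943040;
	unsigned long spent_ms = 2000;				/* 2s between samples */
	unsigned long rate = (bytes_now - bytes_then) / spent_ms;	/* ~KiB/sec */
	unsigned long ratemin = 10240;				/* 10MiB/sec floor */

	if (rate < ratemin)
		printf("min rate %lu not met, got %luKiB/sec\n", ratemin, rate);
	else
		printf("rate ok: %luKiB/sec\n", rate);
	return 0;
}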
double r;
while (len) {
- drand48_r(&td->verify_state, &r);
+ r = os_random_double(&td->verify_state);
/*
* lrand48_r seems to be broken and only fills the bottom
int i;
for (i = 0; i < len; i++)
- printf("%02x", p[i]);
- printf("\n");
+ fprintf(f_out, "%02x", p[i]);
+ fprintf(f_out, "\n");
}
static int verify_io_u_crc32(struct verify_header *hdr, struct io_u *io_u)
c = crc32(p, hdr->len - sizeof(*hdr));
if (c != hdr->crc32) {
- fprintf(stderr, "crc32: verify failed at %llu/%u\n", io_u->offset, io_u->buflen);
- fprintf(stderr, "crc32: wanted %lx, got %lx\n", hdr->crc32, c);
+ log_err("crc32: verify failed at %llu/%u\n", io_u->offset, io_u->buflen);
+ log_err("crc32: wanted %lx, got %lx\n", hdr->crc32, c);
return 1;
}
md5_update(&md5_ctx, p, hdr->len - sizeof(*hdr));
if (memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash))) {
- fprintf(stderr, "md5: verify failed at %llu/%u\n", io_u->offset, io_u->buflen);
+ log_err("md5: verify failed at %llu/%u\n", io_u->offset, io_u->buflen);
hexdump(hdr->md5_digest, sizeof(hdr->md5_digest));
hexdump(md5_ctx.hash, sizeof(md5_ctx.hash));
return 1;
else if (hdr->verify_type == VERIFY_CRC32)
ret = verify_io_u_crc32(hdr, io_u);
else {
- fprintf(stderr, "Bad verify type %d\n", hdr->verify_type);
+ log_err("Bad verify type %d\n", hdr->verify_type);
ret = 1;
}
memcpy(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
}
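/*
 * Illustrative sketch, not part of the patch: the store-and-verify pattern
 * used by the crc32/md5 code above. Each written block is prefixed with a
 * small header carrying a checksum of the payload; verification reads the
 * block back, recomputes the checksum and compares. A trivial additive
 * checksum and a made-up header layout stand in for the real ones.
 */
#include <stdio.h>
#include <string.h>

struct example_hdr {
	unsigned int len;
	unsigned int sum;
};

static unsigned int checksum(const unsigned char *p, unsigned int len)
{
	unsigned int s = 0;

	while (len--)
		s += *p++;
	return s;
}

int main(void)
{
	struct {
		struct example_hdr hdr;
		unsigned char payload[56];
	} block;

	memset(block.payload, 0xaa, sizeof(block.payload));
	block.hdr.len = sizeof(block);
	block.hdr.sum = checksum(block.payload, sizeof(block.payload));

	/* ... the block would be written out here and read back before verify ... */

	if (checksum(block.payload, block.hdr.len - sizeof(block.hdr)) != block.hdr.sum)
		fprintf(stderr, "verify failed\n");
	else
		printf("verify ok\n");
	return 0;
}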
+/*
+ * Return the data direction for the next io_u. If the job is a
+ * mixed read/write workload, check the rwmix cycle and switch if
+ * necessary.
+ */
static int get_rw_ddir(struct thread_data *td)
{
if (td_rw(td)) {
* Check if it's time to seed a new data direction.
*/
if (elapsed >= td->rwmixcycle) {
- unsigned long v;
+ int v;
long r;
- lrand48_r(&td->random_state, &r);
- v = 100UL * r / (unsigned long) (RAND_MAX + 1.0);
+ r = os_random_long(&td->rwmix_state);
+ v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
if (v < td->rwmixread)
td->rwmix_ddir = DDIR_READ;
else
return io_u;
}
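/*
 * Illustrative sketch, not part of the patch: the rwmix split described in
 * the comment above. A value in [1,100] is drawn from the mix state and
 * compared against the configured read percentage; over many draws the
 * read/write ratio converges on rwmixread. Names are example-local.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int rwmixread = 70;	/* aim for 70% reads */
	int reads = 0, writes = 0, i;

	srand48(0xbeef);
	for (i = 0; i < 10000; i++) {
		long r = lrand48();
		int v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));

		if (v < rwmixread)
			reads++;
		else
			writes++;
	}
	printf("reads %d, writes %d (~%d%% reads)\n", reads, writes,
	       100 * reads / (reads + writes));
	return 0;
}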
+/*
+ * Return an io_u to be processed. Gets a buflen and offset, sets direction,
+ * etc. The returned io_u is fully ready to be prepped and submitted.
+ */
static struct io_u *get_io_u(struct thread_data *td)
{
struct io_u *io_u;
static inline void td_set_runstate(struct thread_data *td, int runstate)
{
- td->old_runstate = td->runstate;
td->runstate = runstate;
}
}
}
+/*
+ * When the job exits, we can cancel the in-flight IO if we are using async
+ * io. Attempt to do so.
+ */
static void cleanup_pending_aio(struct thread_data *td)
{
struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
return ret;
}
+/*
+ * The main verify engine. Runs over the writes we previously submitted,
+ * reads the blocks back in, and checks the crc/md5 of the data.
+ */
static void do_verify(struct thread_data *td)
{
struct timeval t;
td_set_runstate(td, TD_RUNNING);
}
+/*
+ * Main IO worker function. It retrieves io_u's to process, queues and
+ * reaps them, and checks for rate and errors along the way.
+ */
static void do_io(struct thread_data *td)
{
struct io_completion_data icd;
struct timeval s, e;
unsigned long usec;
+ td_set_runstate(td, TD_RUNNING);
+
while (td->this_io_bytes[td->ddir] < td->io_size) {
struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
struct timespec *timeout;
if (td->cur_depth)
cleanup_pending_aio(td);
- if (should_fsync(td) && td->end_fsync)
+ if (should_fsync(td) && td->end_fsync) {
+ td_set_runstate(td, TD_FSYNCING);
sync_td(td);
+ }
}
static void cleanup_io(struct thread_data *td)
else if (td->io_engine == FIO_SPLICEIO)
return fio_spliceio_init(td);
else {
- fprintf(stderr, "bad io_engine %d\n", td->io_engine);
+ log_err("bad io_engine %d\n", td->io_engine);
return 1;
}
}
} else if (td->mem_type == MEM_MMAP)
munmap(td->orig_buffer, td->orig_buffer_size);
else
- fprintf(stderr, "Bad memory type %d\n", td->mem_type);
+ log_err("Bad memory type %d\n", td->mem_type);
td->orig_buffer = NULL;
}
return 0;
}
-static void cleanup_allocs(struct thread_data *td)
-{
- if (td->directory)
- free(td->directory);
- if (td->iolog_file)
- free(td->iolog_file);
- if (td->exec_prerun)
- free(td->exec_prerun);
- if (td->exec_postrun)
- free(td->exec_postrun);
- if (td->ioscheduler)
- free(td->ioscheduler);
- if (td->sysfs_root)
- free(td->sysfs_root);
-}
-
-static int create_file(struct thread_data *td, unsigned long long size,
- int extend)
+static int create_file(struct thread_data *td, unsigned long long size)
{
unsigned long long left;
unsigned int bs;
- int r, oflags;
char *b;
+ int r;
/*
* unless specifically asked for overwrite, let normal io extend it
*/
- if (td_write(td) && !td->overwrite)
+ if (!td->overwrite) {
+ td->real_file_size = size;
return 0;
+ }
if (!size) {
- fprintf(stderr, "Need size for create\n");
+ log_err("Need size for create\n");
td_verror(td, EINVAL);
return 1;
}
- if (!extend) {
- oflags = O_CREAT | O_TRUNC;
- printf("%s: Laying out IO file (%LuMiB)\n", td->name, size >> 20);
- } else {
- oflags = O_APPEND;
- printf("%s: Extending IO file (%Lu -> %LuMiB)\n", td->name, (td->file_size - size) >> 20, td->file_size >> 20);
- }
+ temp_stall_ts = 1;
+ fprintf(f_out, "%s: Laying out IO file (%LuMiB)\n", td->name, size >> 20);
- td->fd = open(td->file_name, O_WRONLY | oflags, 0644);
+ td->fd = open(td->file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (td->fd < 0) {
td_verror(td, errno);
- return 1;
+ goto done_noclose;
}
- if (!extend && ftruncate(td->fd, td->file_size) == -1) {
+ if (ftruncate(td->fd, td->file_size) == -1) {
td_verror(td, errno);
- return 1;
+ goto done;
}
td->io_size = td->file_size;
else if (td->create_fsync)
fsync(td->fd);
+ free(b);
+done:
close(td->fd);
td->fd = -1;
- free(b);
+done_noclose:
+ temp_stall_ts = 0;
return 0;
}
{
struct stat st;
- if (fstat(td->fd, &st) == -1) {
- td_verror(td, errno);
- return 1;
- }
+ if (td->overwrite) {
+ if (fstat(td->fd, &st) == -1) {
+ td_verror(td, errno);
+ return 1;
+ }
- td->real_file_size = st.st_size;
+ td->real_file_size = st.st_size;
- if (!td->file_size || td->file_size > td->real_file_size)
- td->file_size = td->real_file_size;
+ if (!td->file_size || td->file_size > td->real_file_size)
+ td->file_size = td->real_file_size;
+ }
td->file_size -= td->file_offset;
return 0;
return ret;
if (td->file_offset > td->real_file_size) {
- fprintf(stderr, "%s: offset extends end (%Lu > %Lu)\n", td->name, td->file_offset, td->real_file_size);
+ log_err("%s: offset extends end (%Lu > %Lu)\n", td->name, td->file_offset, td->real_file_size);
return 1;
}
td->io_size = td->file_size;
if (td->io_size == 0) {
- fprintf(stderr, "%s: no io blocks\n", td->name);
+ log_err("%s: no io blocks\n", td->name);
td_verror(td, EINVAL);
return 1;
}
td_verror(td, ENOENT);
return 1;
}
- if (create_file(td, td->file_size, 0))
+ if (create_file(td, td->file_size))
+ return 1;
+ } else if (td->filetype == FIO_TYPE_FILE &&
+ st.st_size < (off_t) td->file_size) {
+ if (create_file(td, td->file_size))
return 1;
- } else if (td->filetype == FIO_TYPE_FILE) {
- if (st.st_size < (off_t) td->file_size) {
- if (create_file(td, td->file_size - st.st_size, 1))
- return 1;
- }
}
if (td->odirect)
- flags |= O_DIRECT;
+ flags |= OS_O_DIRECT;
if (td_write(td) || td_rw(td)) {
if (td->filetype == FIO_TYPE_FILE) {
sprintf(tmp2, "[%s]", td->ioscheduler);
if (!strstr(tmp, tmp2)) {
- fprintf(stderr, "fio: io scheduler %s not found\n", td->ioscheduler);
+ log_err("fio: io scheduler %s not found\n", td->ioscheduler);
td_verror(td, EINVAL);
fclose(f);
return 1;
memset(td->file_map, 0, td->num_maps * sizeof(long));
}
+/*
+ * Entry point for the thread based jobs. The process based jobs end up
+ * here as well, after a little setup.
+ */
static void *thread_main(void *data)
{
struct thread_data *td = data;
}
}
- if (nice(td->nice) < 0) {
+ if (nice(td->nice) == -1) {
td_verror(td, errno);
goto err;
}
goto err;
td_set_runstate(td, TD_INITIALIZED);
- sem_post(&startup_sem);
- sem_wait(&td->mutex);
+ fio_sem_up(&startup_sem);
+ fio_sem_down(&td->mutex);
if (!td->create_serialize && setup_file(td))
goto err;
}
if (td->mmap)
munmap(td->mmap, td->file_size);
- cleanup_allocs(td);
cleanup_io(td);
cleanup_io_u(td);
td_set_runstate(td, TD_EXITED);
}
+/*
+ * We cannot pass the td data into a forked process, so attach to the
+ * shared memory segment and pass the td to the thread worker.
+ */
static void *fork_main(int shmid, int offset)
{
struct thread_data *td;
return NULL;
}
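/*
 * Illustrative sketch, not part of the patch: handing per-job data to a
 * forked worker through SysV shared memory, much like fork_main() does with
 * shm_id and an offset. Error handling is trimmed and the job struct is
 * made up for the example.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/wait.h>
#include <unistd.h>

struct job {
	int id;
	char name[16];
};

int main(void)
{
	int shmid = shmget(IPC_PRIVATE, 4 * sizeof(struct job), IPC_CREAT | 0600);
	struct job *jobs = shmat(shmid, NULL, 0);

	jobs[2].id = 2;
	strcpy(jobs[2].name, "job2");

	if (fork() == 0) {
		/* child: attach the segment and index to its own job */
		struct job *j = shmat(shmid, NULL, 0);

		printf("child sees job %d (%s)\n", j[2].id, j[2].name);
		_exit(0);
	}
	wait(NULL);
	shmctl(shmid, IPC_RMID, NULL);
	return 0;
}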
+/*
+ * Sets the status of the 'td' in the printed status map.
+ */
static void check_str_update(struct thread_data *td)
{
char c = run_str[td->thread_number - 1];
- if (td->runstate == td->old_runstate)
- return;
-
switch (td->runstate) {
case TD_REAPED:
c = '_';
case TD_VERIFYING:
c = 'V';
break;
+ case TD_FSYNCING:
+ c = 'F';
+ break;
case TD_CREATED:
c = 'C';
break;
c = 'P';
break;
default:
- printf("state %d\n", td->runstate);
+ log_err("state %d\n", td->runstate);
}
run_str[td->thread_number - 1] = c;
- td->old_runstate = td->runstate;
}
+/*
+ * Convert seconds to a printable string.
+ */
static void eta_to_str(char *str, int eta_sec)
{
unsigned int d, h, m, s;
str += sprintf(str, "%02ds", s);
}
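/*
 * Illustrative sketch, not part of the patch: breaking a second count into
 * days/hours/minutes/seconds and building the kind of ETA string produced
 * above. Formatting details are assumptions for the example.
 */
#include <stdio.h>

int main(void)
{
	char str[32], *p = str;
	int eta_sec = 93784;		/* 1d 2h 3m 4s */
	unsigned int d, h, m, s;

	s = eta_sec % 60;
	eta_sec /= 60;
	m = eta_sec % 60;
	eta_sec /= 60;
	h = eta_sec % 24;
	d = eta_sec / 24;

	if (d)
		p += sprintf(p, "%02ud:", d);
	if (d || h)
		p += sprintf(p, "%02uh:", h);
	if (d || h || m)
		p += sprintf(p, "%02um:", m);
	p += sprintf(p, "%02us", s);

	printf("%s\n", str);		/* 01d:02h:03m:04s */
	return 0;
}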
+/*
+ * Best effort calculation of the estimated pending runtime of a job.
+ */
static int thread_eta(struct thread_data *td, unsigned long elapsed)
{
unsigned long long bytes_total, bytes_done;
else if (t_eta)
eta_sec = t_eta;
else
- eta_sec = INT_MAX;
+ eta_sec = 0;
} else {
/*
- * thread is already done
+ * thread is already done or waiting for fsync
*/
eta_sec = 0;
}
return eta_sec;
}
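/*
 * Illustrative sketch, not part of the patch: the core of the ETA estimate
 * for a running job - scale the elapsed time by total/done bytes and
 * subtract the time already spent. Values are made up for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long bytes_total = 1ULL << 30;	/* 1GiB job */
	unsigned long long bytes_done = 256ULL << 20;	/* 256MiB done so far */
	unsigned long elapsed = 30;			/* seconds running */
	double perc = (double) bytes_done / (double) bytes_total;
	int eta_sec = (int) (elapsed / perc - elapsed);

	printf("%.0f%% done, eta %d sec\n", perc * 100.0, eta_sec);
	return 0;
}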
+/*
+ * Print status of the jobs we know about. This includes rate estimates,
+ * ETA, thread state, etc.
+ */
static void print_thread_status(void)
{
unsigned long elapsed = time_since_now(&genesis);
- int i, nr_running, t_rate, m_rate, *eta_secs, eta_sec;
+ int i, nr_running, nr_pending, t_rate, m_rate, *eta_secs, eta_sec;
char eta_str[32];
double perc = 0.0;
+ if (temp_stall_ts || terse_output)
+ return;
+
eta_secs = malloc(thread_number * sizeof(int));
memset(eta_secs, 0, thread_number * sizeof(int));
- nr_running = t_rate = m_rate = 0;
+ nr_pending = nr_running = t_rate = m_rate = 0;
for (i = 0; i < thread_number; i++) {
struct thread_data *td = &threads[i];
- if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING){
+ if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING ||
+ td->runstate == TD_FSYNCING) {
nr_running++;
t_rate += td->rate;
m_rate += td->ratemin;
- }
+ } else if (td->runstate < TD_RUNNING)
+ nr_pending++;
if (elapsed >= 3)
eta_secs[i] = thread_eta(td, elapsed);
eta_to_str(eta_str, eta_sec);
}
- printf("Threads now running (%d)", nr_running);
+ if (!nr_running && !nr_pending)
+ return;
+
+ printf("Threads running: %d", nr_running);
if (m_rate || t_rate)
printf(", commitrate %d/%dKiB/sec", t_rate, m_rate);
- if (eta_sec != INT_MAX) {
+ if (eta_sec != INT_MAX && nr_running) {
perc *= 100.0;
printf(": [%s] [%3.2f%% done] [eta %s]", run_str, perc,eta_str);
}
free(eta_secs);
}
+/*
+ * Run over the job map and reap the threads that have exited, if any.
+ */
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
int i;
static void *fio_pin_memory(void)
{
- long pagesize, pages;
+ unsigned long long phys_mem;
void *ptr;
if (!mlock_size)
/*
* Don't allow mlock of more than real_mem-128MB
*/
- pagesize = sysconf(_SC_PAGESIZE);
- pages = sysconf(_SC_PHYS_PAGES);
- if (pages != -1 && pagesize != -1) {
- unsigned long long real_mem = pages * pagesize;
-
- if ((mlock_size + 128 * 1024 * 1024) > real_mem) {
- mlock_size = real_mem - 128 * 1024 * 1024;
- printf("fio: limiting mlocked memory to %lluMiB\n",
- mlock_size >> 20);
+ phys_mem = os_phys_mem();
+ if (phys_mem) {
+ if ((mlock_size + 128 * 1024 * 1024) > phys_mem) {
+ mlock_size = phys_mem - 128 * 1024 * 1024;
+ fprintf(f_out, "fio: limiting mlocked memory to %lluMiB\n", mlock_size >> 20);
}
}
return ptr;
}
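/*
 * Illustrative sketch, not part of the patch: clamping a requested mlock
 * size against physical memory while leaving 128MiB of headroom, using
 * sysconf() as one possible backend for something like os_phys_mem().
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long long mlock_size = 8ULL << 30;	/* requested: 8GiB */
	long pages = sysconf(_SC_PHYS_PAGES);
	long pagesize = sysconf(_SC_PAGESIZE);

	if (pages > 0 && pagesize > 0) {
		unsigned long long phys_mem;

		phys_mem = (unsigned long long) pages * pagesize;
		if (mlock_size + (128ULL << 20) > phys_mem) {
			mlock_size = phys_mem - (128ULL << 20);
			printf("limiting mlocked memory to %lluMiB\n", mlock_size >> 20);
		}
	}
	return 0;
}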
+/*
+ * Main function for kicking off and reaping jobs, as needed.
+ */
static void run_threads(void)
{
struct thread_data *td;
mlocked_mem = fio_pin_memory();
- printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
- fflush(stdout);
+ if (!terse_output) {
+ printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
+ fflush(stdout);
+ }
signal(SIGINT, sig_handler);
signal(SIGALRM, sig_handler);
*/
td_set_runstate(td, TD_CREATED);
map[this_jobs++] = td;
- sem_init(&startup_sem, 0, 1);
+ fio_sem_init(&startup_sem, 1);
nr_started++;
if (td->use_thread) {
}
} else {
if (fork())
- sem_wait(&startup_sem);
+ fio_sem_down(&startup_sem);
else {
fork_main(shm_id, i);
exit(0);
* Wait for the started threads to transition to
* TD_INITIALIZED.
*/
- printf("fio: Waiting for threads to initialize...\n");
gettimeofday(&this_start, NULL);
left = this_jobs;
while (left) {
}
if (left) {
- fprintf(stderr, "fio: %d jobs failed to start\n", left);
+ log_err("fio: %d jobs failed to start\n", left);
for (i = 0; i < this_jobs; i++) {
td = map[i];
if (!td)
/*
* start created threads (TD_INITIALIZED -> TD_RUNNING).
*/
- printf("fio: Go for launch\n");
for (i = 0; i < thread_number; i++) {
td = &threads[i];
m_rate += td->ratemin;
t_rate += td->rate;
todo--;
- sem_post(&td->mutex);
+ fio_sem_up(&td->mutex);
}
reap_threads(&nr_running, &t_rate, &m_rate);
return 1;
if (!thread_number) {
- printf("Nothing to do\n");
+ log_err("Nothing to do\n");
return 1;
}
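/*
 * Illustrative sketch, not part of the patch: the two-step startup handshake
 * run_threads() and thread_main() perform, here with plain POSIX semaphores
 * standing in for the fio_sem_* wrappers. The worker signals that it is
 * initialized, then blocks on its own mutex until the starter releases it.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t startup_sem, job_mutex;

static void *worker(void *data)
{
	(void) data;
	/* ... per-job initialization would happen here ... */
	sem_post(&startup_sem);		/* tell the starter we are initialized */
	sem_wait(&job_mutex);		/* wait for the go-ahead */
	printf("worker running\n");
	return NULL;
}

int main(void)
{
	pthread_t thread;

	sem_init(&startup_sem, 0, 0);
	sem_init(&job_mutex, 0, 0);

	pthread_create(&thread, NULL, worker, NULL);
	sem_wait(&startup_sem);		/* worker reached the initialized state */
	/* ... more jobs could be created here before any of them start ... */
	sem_post(&job_mutex);		/* initialized -> running */
	pthread_join(thread, NULL);
	return 0;
}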