TD_INITIALIZED,
TD_RUNNING,
TD_VERIFYING,
+ TD_FSYNCING,
TD_EXITED,
TD_REAPED,
};
#define should_fsync(td) ((td_write(td) || td_rw(td)) && (!(td)->odirect || (td)->override_sync))
-static sem_t startup_sem;
+static volatile int startup_sem;
#define TERMINATE_ALL (-1)
#define JOB_START_TIMEOUT (5 * 1000)
int loops = 50;
do {
- lrand48_r(&td->random_state, &r);
+ r = os_random_long(&td->random_state);
b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX+1.0));
rb = b + (td->file_offset / td->min_bs);
loops--;
if (td->min_bs == td->max_bs)
buflen = td->min_bs;
else {
- lrand48_r(&td->bsrange_state, &r);
+ r = os_random_long(&td->bsrange_state);
buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
}
double r;
while (len) {
- drand48_r(&td->verify_state, &r);
+ r = os_random_double(&td->verify_state);
/*
* lrand48_r seems to be broken and only fill the bottom
* Check if it's time to seed a new data direction.
*/
if (elapsed >= td->rwmixcycle) {
- unsigned long v;
+ int v;
long r;
- lrand48_r(&td->random_state, &r);
- v = 100UL * r / (unsigned long) (RAND_MAX + 1.0);
+ r = os_random_long(&td->rwmix_state);
+ v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
if (v < td->rwmixread)
td->rwmix_ddir = DDIR_READ;
else
struct timeval s, e;
unsigned long usec;
+ td_set_runstate(td, TD_RUNNING);
+
while (td->this_io_bytes[td->ddir] < td->io_size) {
struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
struct timespec *timeout;
if (td->cur_depth)
cleanup_pending_aio(td);
- if (should_fsync(td) && td->end_fsync)
+ if (should_fsync(td) && td->end_fsync) {
+ td_set_runstate(td, TD_FSYNCING);
sync_td(td);
+ }
}
static void cleanup_io(struct thread_data *td)
return 0;
}
-static void cleanup_allocs(struct thread_data *td)
-{
- if (td->directory)
- free(td->directory);
- if (td->iolog_file)
- free(td->iolog_file);
- if (td->exec_prerun)
- free(td->exec_prerun);
- if (td->exec_postrun)
- free(td->exec_postrun);
- if (td->ioscheduler)
- free(td->ioscheduler);
- if (td->sysfs_root)
- free(td->sysfs_root);
-}
-
static int create_file(struct thread_data *td, unsigned long long size,
int extend)
{
}
if (td->odirect)
- flags |= O_DIRECT;
+ flags |= OS_O_DIRECT;
if (td_write(td) || td_rw(td)) {
if (td->filetype == FIO_TYPE_FILE) {
goto err;
td_set_runstate(td, TD_INITIALIZED);
- sem_post(&startup_sem);
- sem_wait(&td->mutex);
+ fio_sem_up(&startup_sem);
+ fio_sem_down(&td->mutex);
if (!td->create_serialize && setup_file(td))
goto err;
}
if (td->mmap)
munmap(td->mmap, td->file_size);
- cleanup_allocs(td);
cleanup_io(td);
cleanup_io_u(td);
td_set_runstate(td, TD_EXITED);
case TD_VERIFYING:
c = 'V';
break;
+ case TD_FSYNCING:
+ c = 'F';
+ break;
case TD_CREATED:
c = 'C';
break;
eta_sec = INT_MAX;
} else {
/*
- * thread is already done
+ * thread is already done or waiting for fsync
*/
eta_sec = 0;
}
for (i = 0; i < thread_number; i++) {
struct thread_data *td = &threads[i];
- if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING){
+ if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING ||
+ td->runstate == TD_FSYNCING) {
nr_running++;
t_rate += td->rate;
m_rate += td->ratemin;
*/
td_set_runstate(td, TD_CREATED);
map[this_jobs++] = td;
- sem_init(&startup_sem, 0, 1);
+ fio_sem_init(&startup_sem, 1);
nr_started++;
if (td->use_thread) {
}
} else {
if (fork())
- sem_wait(&startup_sem);
+ fio_sem_down(&startup_sem);
else {
fork_main(shm_id, i);
exit(0);
m_rate += td->ratemin;
t_rate += td->rate;
todo--;
- sem_post(&td->mutex);
+ fio_sem_up(&td->mutex);
}
reap_threads(&nr_running, &t_rate, &m_rate);