int temp_stall_ts;
char *fio_inst_prefix = _INST_PREFIX;
-extern unsigned long long mlock_size;
-
#define should_fsync(td) ((td_write(td) || td_rw(td)) && (!(td)->odirect || (td)->override_sync))
static volatile int startup_sem;
static void terminate_threads(int group_id)
{
+ struct thread_data *td;
int i;
- for (i = 0; i < thread_number; i++) {
- struct thread_data *td = &threads[i];
-
+ for_each_td(td, i) {
if (group_id == TERMINATE_ALL || group_id == td->groupid) {
td->terminate = 1;
td->start_delay = 0;
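
[review note: the for_each_td() iterator this patch switches to isn't shown in any hunk here; presumably it lives in fio.h and just wraps the open-coded loop it replaces. A sketch of what it would expand to, assuming the threads array and thread_number global seen elsewhere in this file:

	#define for_each_td(td, i)	\
		for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)
]
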
rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
if (rate < td->ratemin) {
fprintf(f_out, "%s: min rate %d not met, got %ldKiB/sec\n", td->name, td->ratemin, rate);
- if (rate_quit)
- terminate_threads(td->groupid);
return 1;
}
}
* Check if it's time to seed a new data direction.
*/
if (elapsed >= td->rwmixcycle) {
- int v;
+ unsigned int v;
long r;
r = os_random_long(&td->rwmix_state);
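 
[review note: the signedness fix matters because the random draw is folded into a 1..100 percentage before the read/write split is chosen, so unsigned matches the range v can actually take. A sketch of the lines that presumably follow, with rwmixread, rwmix_ddir and the DDIR_* constants assumed from context rather than shown in this patch:

	v = 1 + (unsigned int) (100.0 * (r / (RAND_MAX + 1.0)));
	if (v <= td->rwmixread)
		td->rwmix_ddir = DDIR_READ;
	else
		td->rwmix_ddir = DDIR_WRITE;
]
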
static struct fio_file *get_next_file(struct thread_data *td)
{
- int old_next_file = td->next_file;
+ unsigned int old_next_file = td->next_file;
struct fio_file *f;
do {
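
[review note: same story for next_file, which is a wrapping index, so unsigned is the honest type. The loop body is cut off here, but presumably it round-robins the file list until it wraps back to old_next_file, something like this sketch (td->files, td->nr_files and f->fd are assumed, not shown in this hunk):

	do {
		f = &td->files[td->next_file];
		if (++td->next_file >= td->nr_files)
			td->next_file = 0;
		if (f->fd != -1)
			return f;
	} while (td->next_file != old_next_file);

	return NULL;
]
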
struct timeval s, e;
unsigned long usec;
struct fio_file *f;
- int i;
+ int i, ret = 0;
td_set_runstate(td, TD_RUNNING);
while (td->this_io_bytes[td->ddir] < td->io_size) {
struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
struct timespec *timeout;
- int ret, min_evts = 0;
+ int min_evts = 0;
struct io_u *io_u;
if (td->terminate)
min_evts = 1;
}
+
ret = td_io_getevents(td, min_evts, td->cur_depth, timeout);
if (ret < 0) {
- td_verror(td, ret);
+ td_verror(td, -ret);
break;
} else if (!ret)
continue;
rate_throttle(td, usec, icd.bytes_done[td->ddir]);
if (check_min_rate(td, &e)) {
+ if (rate_quit)
+ terminate_threads(td->groupid);
td_verror(td, ENOMEM);
break;
}
td_io_sync(td, f);
}
- if (td->cur_depth)
- cleanup_pending_aio(td);
- if (should_fsync(td) && td->end_fsync) {
- td_set_runstate(td, TD_FSYNCING);
- for_each_file(td, f, i)
- td_io_sync(td, f);
+ if (!ret) {
+ if (td->cur_depth)
+ cleanup_pending_aio(td);
+ if (should_fsync(td) && td->end_fsync) {
+ td_set_runstate(td, TD_FSYNCING);
+ for_each_file(td, f, i)
+ td_io_sync(td, f);
+ }
}
}
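
[review note: one behavioral change worth flagging: with the new if (!ret) guard, the trailing cleanup_pending_aio() and end_fsync pass only run when the loop exited cleanly, presumably so an error exit doesn't issue a final fsync over files the run already gave up on.]
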
free(io_u);
}
- if (td->mem_type == MEM_MALLOC)
- free(td->orig_buffer);
- else if (td->mem_type == MEM_SHM) {
- struct shmid_ds sbuf;
-
- shmdt(td->orig_buffer);
- shmctl(td->shm_id, IPC_RMID, &sbuf);
- } else if (td->mem_type == MEM_MMAP)
- munmap(td->orig_buffer, td->orig_buffer_size);
- else
- log_err("Bad memory type %d\n", td->mem_type);
-
- td->orig_buffer = NULL;
+ free_io_mem(td);
}
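
[review note: the memory.c half of this refactor isn't in the patch; presumably free_io_mem() carries the deleted logic over verbatim, along the lines of:

	void free_io_mem(struct thread_data *td)
	{
		if (td->mem_type == MEM_MALLOC)
			free(td->orig_buffer);
		else if (td->mem_type == MEM_SHM) {
			struct shmid_ds sbuf;

			/* detach, then mark the segment for removal */
			shmdt(td->orig_buffer);
			shmctl(td->shm_id, IPC_RMID, &sbuf);
		} else if (td->mem_type == MEM_MMAP)
			munmap(td->orig_buffer, td->orig_buffer_size);
		else
			log_err("Bad memory type %d\n", td->mem_type);

		td->orig_buffer = NULL;
	}
]
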
static int init_io_u(struct thread_data *td)
td->orig_buffer_size = td->max_bs * max_units + MASK;
- if (td->mem_type == MEM_MALLOC)
- td->orig_buffer = malloc(td->orig_buffer_size);
- else if (td->mem_type == MEM_SHM) {
- td->shm_id = shmget(IPC_PRIVATE, td->orig_buffer_size, IPC_CREAT | 0600);
- if (td->shm_id < 0) {
- td_verror(td, errno);
- perror("shmget");
- return 1;
- }
-
- td->orig_buffer = shmat(td->shm_id, NULL, 0);
- if (td->orig_buffer == (void *) -1) {
- td_verror(td, errno);
- perror("shmat");
- td->orig_buffer = NULL;
- return 1;
- }
- } else if (td->mem_type == MEM_MMAP) {
- td->orig_buffer = mmap(NULL, td->orig_buffer_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | OS_MAP_ANON, 0, 0);
- if (td->orig_buffer == MAP_FAILED) {
- td_verror(td, errno);
- perror("mmap");
- td->orig_buffer = NULL;
- return 1;
- }
- }
+ if (allocate_io_mem(td))
+ return 1;
p = ALIGN(td->orig_buffer);
for (i = 0; i < max_units; i++) {
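
[review note: likewise allocate_io_mem(): judging by the call site it must return non-zero on failure, so the deleted block presumably moves over as:

	int allocate_io_mem(struct thread_data *td)
	{
		if (td->mem_type == MEM_MALLOC)
			td->orig_buffer = malloc(td->orig_buffer_size);
		else if (td->mem_type == MEM_SHM) {
			td->shm_id = shmget(IPC_PRIVATE, td->orig_buffer_size,
						IPC_CREAT | 0600);
			if (td->shm_id < 0) {
				td_verror(td, errno);
				perror("shmget");
				return 1;
			}
			td->orig_buffer = shmat(td->shm_id, NULL, 0);
			if (td->orig_buffer == (void *) -1) {
				td_verror(td, errno);
				perror("shmat");
				td->orig_buffer = NULL;
				return 1;
			}
		} else if (td->mem_type == MEM_MMAP) {
			td->orig_buffer = mmap(NULL, td->orig_buffer_size,
						PROT_READ | PROT_WRITE,
						MAP_PRIVATE | OS_MAP_ANON, 0, 0);
			if (td->orig_buffer == MAP_FAILED) {
				td_verror(td, errno);
				perror("mmap");
				td->orig_buffer = NULL;
				return 1;
			}
		}

		return 0;
	}

Note the malloc() case still has no NULL check, same as the code it replaces.]
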
*/
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
+ struct thread_data *td;
int i, cputhreads;
/*
* reap exited threads (TD_EXITED -> TD_REAPED)
*/
- for (i = 0, cputhreads = 0; i < thread_number; i++) {
- struct thread_data *td = &threads[i];
-
- if (td->io_ops->flags & FIO_CPUIO)
+ cputhreads = 0;
+ for_each_td(td, i) {
+ /*
+ * ->io_ops is NULL for a thread that has closed its
+ * io engine
+ */
+ if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
cputhreads++;
if (td->runstate != TD_EXITED)
terminate_threads(TERMINATE_ALL);
}
-static void fio_unpin_memory(void *pinned)
-{
- if (pinned) {
- if (munlock(pinned, mlock_size) < 0)
- perror("munlock");
- munmap(pinned, mlock_size);
- }
-}
-
-static void *fio_pin_memory(void)
-{
- unsigned long long phys_mem;
- void *ptr;
-
- if (!mlock_size)
- return NULL;
-
- /*
- * Don't allow mlock of more than real_mem-128MB
- */
- phys_mem = os_phys_mem();
- if (phys_mem) {
- if ((mlock_size + 128 * 1024 * 1024) > phys_mem) {
- mlock_size = phys_mem - 128 * 1024 * 1024;
- fprintf(f_out, "fio: limiting mlocked memory to %lluMiB\n", mlock_size >> 20);
- }
- }
-
- ptr = mmap(NULL, mlock_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | OS_MAP_ANON, 0, 0);
- if (!ptr) {
- perror("malloc locked mem");
- return NULL;
- }
- if (mlock(ptr, mlock_size) < 0) {
- munmap(ptr, mlock_size);
- perror("mlock");
- return NULL;
- }
-
- return ptr;
-}
-
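
[review note: fio_pin_memory()/fio_unpin_memory() leave this file together with the mlock_size extern removed at the top. Their new home isn't shown, but the updated call sites below imply signatures roughly like:

	/* inferred from the new call sites; the pinned pointer is
	 * presumably tracked inside memory.c now */
	int fio_pin_memory(void);	/* non-zero on failure */
	void fio_unpin_memory(void);
]
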
/*
* Main function for kicking off and reaping jobs, as needed.
*/
struct thread_data *td;
unsigned long spent;
int i, todo, nr_running, m_rate, t_rate, nr_started;
- void *mlocked_mem;
- mlocked_mem = fio_pin_memory();
+ if (fio_pin_memory())
+ return;
if (!terse_output) {
printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
nr_started = 0;
m_rate = t_rate = 0;
- for (i = 0; i < thread_number; i++) {
- td = &threads[i];
-
+ for_each_td(td, i) {
print_status_init(td->thread_number - 1);
init_disk_util(td);
/*
* create threads (TD_NOT_CREATED -> TD_CREATED)
*/
- for (i = 0; i < thread_number; i++) {
- td = &threads[i];
-
+ for_each_td(td, i) {
if (td->runstate != TD_NOT_CREATED)
continue;
/*
* start created threads (TD_INITIALIZED -> TD_RUNNING).
*/
- for (i = 0; i < thread_number; i++) {
- td = &threads[i];
-
+ for_each_td(td, i) {
if (td->runstate != TD_INITIALIZED)
continue;
}
update_io_ticks();
- fio_unpin_memory(mlocked_mem);
+ fio_unpin_memory();
}
int main(int argc, char *argv[])