static void disk_util_timer_arm(void);
static void print_thread_status(void);
+extern unsigned long long mlock_size;
+
/*
* thread life cycle
*/
if (td->cur_depth)
cleanup_pending_aio(td);
- if (should_fsync(td) && td->fsync_blocks)
+ if (should_fsync(td) && td->end_fsync)
sync_td(td);
}
return fio_posixaio_init(td);
else if (td->io_engine == FIO_SGIO)
return fio_sgio_init(td);
+ else if (td->io_engine == FIO_SPLICEIO)
+ return fio_spliceio_init(td);
else {
fprintf(stderr, "bad io_engine %d\n", td->io_engine);
return 1;
struct thread_data *td;
unsigned long spent;
int i, todo, nr_running, m_rate, t_rate, nr_started;
+ void *mlocked_mem = NULL;
printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
- fflush(stdout);
signal(SIGINT, sig_handler);
signal(SIGALRM, sig_handler);
+	if (mlock_size) {
+		/*
+		 * Optionally pin mlock_size bytes of memory up front, so page
+		 * reclaim doesn't interfere with the job run.
+		 *
+		 * Note: mmap() reports failure with MAP_FAILED ((void *) -1),
+		 * never NULL, so that is what must be checked. Anonymous
+		 * mappings should pass fd == -1 for portability (some BSDs
+		 * reject a real fd with MAP_ANON).
+		 */
+		mlocked_mem = mmap(NULL, mlock_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | OS_MAP_ANON, -1, 0);
+		if (mlocked_mem == MAP_FAILED) {
+			perror("mmap locked mem");
+			mlocked_mem = NULL;
+			return;
+		}
+		if (mlock(mlocked_mem, mlock_size) < 0) {
+			perror("mlock");
+			munmap(mlocked_mem, mlock_size);
+			mlocked_mem = NULL;
+			return;
+		}
+	}
+
+ fflush(stdout);
+
todo = thread_number;
nr_running = 0;
nr_started = 0;
}
update_io_ticks();
+
+ if (mlocked_mem) {
+ if (munlock(mlocked_mem, mlock_size) < 0)
+ perror("munlock");
+ munmap(mlocked_mem, mlock_size);
+ }
}
static void show_group_stats(struct group_run_stats *rs, int id)