We need to get rid of per job options that fiddle with global
state. It's confusing, and it breaks remote option handling.
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Conflicts:
	backend.c
	fio.h
	init.c
	options.c
+ if (fio_pin_memory(td))
+ goto err;
+
/*
 * May alter parameters that init_io_u() will use, so we need to
 * do this first.
 */
td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
+ fio_unpin_memory(td);
+
fio_mutex_down(writeout_mutex);
if (td->bw_log) {
if (td->o.bw_log_file) {
unsigned long spent;
unsigned int i, todo, nr_running, m_rate, t_rate, nr_started;
- if (fio_pin_memory())
- return;
-
if (fio_gtod_offload && fio_start_gtod_thread())
return;
fio_idle_prof_stop();
update_io_ticks();
}
void wait_for_disk_thread_exit(void)
*/
struct prof_io_ops prof_io_ops;
void *prof_data;
extern int groupid;
extern int output_format;
extern int temp_stall_ts;
-extern unsigned long long mlock_size;
extern uintptr_t page_mask, page_size;
extern int read_only;
extern int eta_print;
-extern int __must_check fio_pin_memory(void);
-extern void fio_unpin_memory(void);
+extern int __must_check fio_pin_memory(struct thread_data *);
+extern void fio_unpin_memory(struct thread_data *);
extern int __must_check allocate_io_mem(struct thread_data *);
extern void free_io_mem(struct thread_data *);
int output_format = FIO_OUTPUT_NORMAL;
int eta_print = FIO_ETA_AUTO;
int eta_new_line = 0;
-unsigned long long mlock_size = 0;
FILE *f_out = NULL;
FILE *f_err = NULL;
char **job_sections = NULL;
#include <sys/shm.h>
#endif
-static void *pinned_mem;
-
-void fio_unpin_memory(void)
+void fio_unpin_memory(struct thread_data *td)
- if (pinned_mem) {
- dprint(FD_MEM, "unpinning %llu bytes\n", mlock_size);
- if (munlock(pinned_mem, mlock_size) < 0)
+ if (td->pinned_mem) {
+ dprint(FD_MEM, "unpinning %llu bytes\n", td->o.lockmem);
+ if (munlock(td->pinned_mem, td->o.lockmem) < 0)
- munmap(pinned_mem, mlock_size);
- pinned_mem = NULL;
+ munmap(td->pinned_mem, td->o.lockmem);
+ td->pinned_mem = NULL;
-int fio_pin_memory(void)
+int fio_pin_memory(struct thread_data *td)
{
unsigned long long phys_mem;
- dprint(FD_MEM, "pinning %llu bytes\n", mlock_size);
+ dprint(FD_MEM, "pinning %llu bytes\n", td->o.lockmem);
/*
 * Don't allow mlock of more than real_mem-128MB
 */
phys_mem = os_phys_mem();
if (phys_mem) {
- if ((mlock_size + 128 * 1024 * 1024) > phys_mem) {
- mlock_size = phys_mem - 128 * 1024 * 1024;
+ if ((td->o.lockmem + 128 * 1024 * 1024) > phys_mem) {
+ td->o.lockmem = phys_mem - 128 * 1024 * 1024;
log_info("fio: limiting mlocked memory to %lluMB\n",
- pinned_mem = mmap(NULL, mlock_size, PROT_READ | PROT_WRITE,
+ td->pinned_mem = mmap(NULL, td->o.lockmem, PROT_READ | PROT_WRITE,
MAP_PRIVATE | OS_MAP_ANON, -1, 0);
- if (pinned_mem == MAP_FAILED) {
+ if (td->pinned_mem == MAP_FAILED) {
perror("malloc locked mem");
- if (mlock(pinned_mem, mlock_size) < 0) {
+ if (mlock(td->pinned_mem, td->o.lockmem) < 0) {
- munmap(pinned_mem, mlock_size);
- pinned_mem = NULL;
+ munmap(td->pinned_mem, td->o.lockmem);
+ td->pinned_mem = NULL;