uint64_t size_prev;
};
+#define FIO_MAX_OPEN_ZBD_ZONES 128
+
/*
* This describes a single thread/process executing a fio job.
*/
* For IO replaying
*/
struct flist_head io_log_list;
+ FILE *io_log_rfile;
+ unsigned int io_log_current;
+ unsigned int io_log_checkmark;
+ unsigned int io_log_highmark;
+ struct timespec io_log_highmark_time;
/*
* For tracking/handling discards
extern int warnings_fatal;
extern int terse_version;
extern int is_backend;
+extern int is_local_backend;
extern int nr_clients;
extern int log_syslog;
extern int status_interval;
extern struct thread_data *threads;
+static inline bool is_running_backend(void)
+{
+ return is_backend || is_local_backend;
+}
+
extern bool eta_time_within_slack(unsigned int time);
static inline void fio_ro_check(const struct thread_data *td, struct io_u *io_u)
#define REAL_MAX_JOBS 4096
-static inline int should_fsync(struct thread_data *td)
+static inline bool should_fsync(struct thread_data *td)
{
if (td->last_was_sync)
- return 0;
+ return false;
if (td_write(td) || td->o.override_sync)
- return 1;
+ return true;
- return 0;
+ return false;
}
/*
return ddir_rw_sum(td->bytes_done) != 0;
}
-static inline unsigned int td_max_bs(struct thread_data *td)
+static inline unsigned long long td_max_bs(struct thread_data *td)
{
- unsigned int max_bs;
+ unsigned long long max_bs;
max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
return max(td->o.max_bs[DDIR_TRIM], max_bs);
}
-static inline unsigned int td_min_bs(struct thread_data *td)
+static inline unsigned long long td_min_bs(struct thread_data *td)
{
- unsigned int min_bs;
+ unsigned long long min_bs;
min_bs = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
return min(td->o.min_bs[DDIR_TRIM], min_bs);
* We currently only need to do locking if we have verifier threads
* accessing our internal structures too
*/
/*
 * Unconditionally acquire the per-thread io_u mutex.
 * NOTE(review): the td_async_processing() gate that previously guarded
 * this lock was removed here; presumably the conditional locking moved
 * to the (non-underscore) callers — confirm at the call sites.
 */
static inline void __td_io_u_lock(struct thread_data *td)
{
	pthread_mutex_lock(&td->io_u_lock);
}
/*
 * Unconditionally release the per-thread io_u mutex; pairs with
 * __td_io_u_lock(). Same review note applies: the async-processing
 * gate was dropped from this helper — confirm callers handle it.
 */
static inline void __td_io_u_unlock(struct thread_data *td)
{
	pthread_mutex_unlock(&td->io_u_lock);
}
static inline void td_io_u_free_notify(struct thread_data *td)