X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=fio.h;h=9e99da194f4f53850ec4299027f88e8bd8048e0d;hp=9727f6c65556c4f937adad573cc2429a02ce6aa2;hb=a0c84dd4354eaf0a84b8bf0a3126f65301f2206f;hpb=b4f5e72f1383499439c45acee627c022f06b6825

diff --git a/fio.h b/fio.h
index 9727f6c6..9e99da19 100644
--- a/fio.h
+++ b/fio.h
@@ -167,6 +167,8 @@ struct zone_split_index {
 	uint64_t size_prev;
 };
 
+#define FIO_MAX_OPEN_ZBD_ZONES 128
+
 /*
  * This describes a single thread/process executing a fio job.
  */
@@ -399,6 +401,11 @@ struct thread_data {
 	 * For IO replaying
 	 */
 	struct flist_head io_log_list;
+	FILE *io_log_rfile;
+	unsigned int io_log_current;
+	unsigned int io_log_checkmark;
+	unsigned int io_log_highmark;
+	struct timespec io_log_highmark_time;
 
 	/*
 	 * For tracking/handling discards
@@ -517,6 +524,7 @@ extern int fio_clock_source_set;
 extern int warnings_fatal;
 extern int terse_version;
 extern int is_backend;
+extern int is_local_backend;
 extern int nr_clients;
 extern int log_syslog;
 extern int status_interval;
@@ -529,23 +537,29 @@ extern char *aux_path;
 
 extern struct thread_data *threads;
 
+static inline bool is_running_backend(void)
+{
+	return is_backend || is_local_backend;
+}
+
 extern bool eta_time_within_slack(unsigned int time);
 
 static inline void fio_ro_check(const struct thread_data *td, struct io_u *io_u)
 {
-	assert(!(io_u->ddir == DDIR_WRITE && !td_write(td)));
+	assert(!(io_u->ddir == DDIR_WRITE && !td_write(td)) &&
+	       !(io_u->ddir == DDIR_TRIM && !td_trim(td)));
 }
 
 #define REAL_MAX_JOBS		4096
 
-static inline int should_fsync(struct thread_data *td)
+static inline bool should_fsync(struct thread_data *td)
 {
 	if (td->last_was_sync)
-		return 0;
+		return false;
 	if (td_write(td) || td->o.override_sync)
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 /*
@@ -735,17 +749,17 @@ static inline bool should_check_rate(struct thread_data *td)
 	return ddir_rw_sum(td->bytes_done) != 0;
 }
 
-static inline unsigned int td_max_bs(struct thread_data *td)
+static inline unsigned long long td_max_bs(struct thread_data *td)
 {
-	unsigned int max_bs;
+	unsigned long long max_bs;
 
 	max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
 	return max(td->o.max_bs[DDIR_TRIM], max_bs);
 }
 
-static inline unsigned int td_min_bs(struct thread_data *td)
+static inline unsigned long long td_min_bs(struct thread_data *td)
 {
-	unsigned int min_bs;
+	unsigned long long min_bs;
 
 	min_bs = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
 	return min(td->o.min_bs[DDIR_TRIM], min_bs);
@@ -760,16 +774,14 @@ static inline bool td_async_processing(struct thread_data *td)
  * We currently only need to do locking if we have verifier threads
  * accessing our internal structures too
  */
-static inline void td_io_u_lock(struct thread_data *td)
+static inline void __td_io_u_lock(struct thread_data *td)
 {
-	if (td_async_processing(td))
-		pthread_mutex_lock(&td->io_u_lock);
+	pthread_mutex_lock(&td->io_u_lock);
 }
 
-static inline void td_io_u_unlock(struct thread_data *td)
+static inline void __td_io_u_unlock(struct thread_data *td)
 {
-	if (td_async_processing(td))
-		pthread_mutex_unlock(&td->io_u_lock);
+	pthread_mutex_unlock(&td->io_u_lock);
 }
 
 static inline void td_io_u_free_notify(struct thread_data *td)