X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=stat.h;h=ba7e290d5d95c49117157852b64fdd3e91be2b02;hp=a06237e7a41e9f5f0e287786509df6fd5f578301;hb=HEAD;hpb=a7194b2d3d427e7e5678c55a128639df9caf4a48

diff --git a/stat.h b/stat.h
index a06237e7..0d57cceb 100644
--- a/stat.h
+++ b/stat.h
@@ -51,7 +51,7 @@ struct group_run_stats {
  *
  * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the memory
  * requirement of storing those aggregate counts. The memory used will
- * be (FIO_IO_U_PLAT_GROUP_NR * 2^FIO_IO_U_PLAT_BITS) * sizeof(int)
+ * be (FIO_IO_U_PLAT_GROUP_NR * 2^FIO_IO_U_PLAT_BITS) * sizeof(uint64_t)
  * bytes.
  *
  * FIO_IO_U_PLAT_NR is the total number of buckets.
@@ -68,7 +68,7 @@ struct group_run_stats {
  * than one. This method has low accuracy when the value is small. For
  * example, let the buckets be {[0,99],[100,199],...,[900,999]}, and
  * the represented value of each bucket be the mean of the range. Then
- * a value 0 has an round-off error of 49.5. To improve on this, we
+ * a value 0 has a round-off error of 49.5. To improve on this, we
  * use buckets with non-uniform ranges, while bounding the error of
  * each bucket within a ratio of the sample value. A simple example
  * would be when error_bound = 0.005, buckets are {
@@ -142,7 +142,6 @@ enum block_info_state {
 	BLOCK_STATE_COUNT,
 };
 
-#define MAX_PATTERN_SIZE	512
 #define FIO_JOBNAME_SIZE	128
 #define FIO_JOBDESC_SIZE	256
 #define FIO_VERROR_SIZE		128
@@ -158,16 +157,24 @@ enum fio_lat {
 	FIO_LAT_CNT = 3,
 };
 
+struct clat_prio_stat {
+	uint64_t io_u_plat[FIO_IO_U_PLAT_NR];
+	struct io_stat clat_stat;
+	uint32_t ioprio;
+};
+
 struct thread_stat {
 	char name[FIO_JOBNAME_SIZE];
 	char verror[FIO_VERROR_SIZE];
 	uint32_t error;
 	uint32_t thread_number;
 	uint32_t groupid;
+	uint64_t job_start; /* Time job was started, as clock_gettime(job_start_clock_id) */
 	uint32_t pid;
 	char description[FIO_JOBDESC_SIZE];
 	uint32_t members;
 	uint32_t unified_rw_rep;
+	uint32_t disable_prio_stat;
 
 	/*
 	 * bandwidth and latency stats
@@ -252,21 +259,40 @@ struct thread_stat {
 	fio_fp64_t ss_deviation;
 	fio_fp64_t ss_criterion;
 
-	uint64_t io_u_plat_high_prio[DDIR_RWDIR_CNT][FIO_IO_U_PLAT_NR] __attribute__((aligned(8)));;
-	uint64_t io_u_plat_low_prio[DDIR_RWDIR_CNT][FIO_IO_U_PLAT_NR];
-	struct io_stat clat_high_prio_stat[DDIR_RWDIR_CNT] __attribute__((aligned(8)));
-	struct io_stat clat_low_prio_stat[DDIR_RWDIR_CNT];
+	/* A mirror of td->ioprio. */
+	uint32_t ioprio;
 
 	union {
 		uint64_t *ss_iops_data;
+		/*
+		 * For FIO_NET_CMD_TS, the pointed to data will temporarily
+		 * be stored at this offset from the start of the payload.
+		 */
+		uint64_t ss_iops_data_offset;
 		uint64_t pad4;
 	};
 
 	union {
 		uint64_t *ss_bw_data;
+		/*
+		 * For FIO_NET_CMD_TS, the pointed to data will temporarily
+		 * be stored at this offset from the start of the payload.
+		 */
+		uint64_t ss_bw_data_offset;
 		uint64_t pad5;
 	};
 
+	union {
+		struct clat_prio_stat *clat_prio[DDIR_RWDIR_CNT];
+		/*
+		 * For FIO_NET_CMD_TS, the pointed to data will temporarily
+		 * be stored at this offset from the start of the payload.
+		 */
+		uint64_t clat_prio_offset[DDIR_RWDIR_CNT];
+		uint64_t pad6;
+	};
+	uint32_t nr_clat_prio[DDIR_RWDIR_CNT];
+
 	uint64_t cachehit;
 	uint64_t cachemiss;
 } __attribute__((packed));
@@ -319,14 +345,14 @@ extern void stat_exit(void);
 extern struct json_object * show_thread_status(struct thread_stat *ts, struct group_run_stats *rs, struct flist_head *, struct buf_output *);
 extern void show_group_stats(struct group_run_stats *rs, struct buf_output *);
-extern bool calc_thread_status(struct jobs_eta *je, int force);
 extern void display_thread_status(struct jobs_eta *je);
 extern void __show_run_stats(void);
 extern int __show_running_run_stats(void);
 extern void show_running_run_stats(void);
 extern void check_for_running_stats(void);
-extern void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, bool first);
+extern void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src);
 extern void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src);
+extern void init_thread_stat_min_vals(struct thread_stat *ts);
 extern void init_thread_stat(struct thread_stat *ts);
 extern void init_group_run_stat(struct group_run_stats *gs);
 extern void eta_to_str(char *str, unsigned long eta_sec);
@@ -341,9 +367,9 @@ extern void update_rusage_stat(struct thread_data *);
 extern void clear_rusage_stat(struct thread_data *);
 extern void add_lat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
-				unsigned long long, uint64_t, unsigned int, bool);
+				unsigned long long, uint64_t, unsigned int, unsigned short);
 extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
-				unsigned long long, uint64_t, unsigned int, bool);
+				unsigned long long, uint64_t, unsigned int, unsigned short);
 extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
 				unsigned long long, uint64_t, unsigned int);
 extern void add_agg_sample(union io_sample_data, enum fio_ddir, unsigned long long);
@@ -354,6 +380,8 @@ extern void add_bw_sample(struct thread_data *, struct io_u *,
 			  unsigned int, unsigned long long);
 extern void add_sync_clat_sample(struct thread_stat *ts, unsigned long long nsec);
 extern int calc_log_samples(void);
+extern void free_clat_prio_stats(struct thread_stat *);
+extern int alloc_clat_prio_stat_ddir(struct thread_stat *, enum fio_ddir, int);
 extern void print_disk_util(struct disk_util_stat *, struct disk_util_agg *,
 				int terse, struct buf_output *);
 extern void json_array_add_disk_util(struct disk_util_stat *dus,
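
For context on the non-uniform bucketing described in the comment hunks above: a latency
sample keeps its top FIO_IO_U_PLAT_BITS bits as the in-group offset, and the position of
the most significant bit selects the group, which bounds the relative round-off error.
The sketch below is modeled on plat_val_to_idx() in fio's stat.c; the constants mirror
the definitions in stat.h.

/*
 * Minimal, self-contained sketch of the value-to-bucket mapping,
 * modeled on plat_val_to_idx() in fio's stat.c.
 */
#include <stdio.h>

#define FIO_IO_U_PLAT_BITS	6
#define FIO_IO_U_PLAT_VAL	(1 << FIO_IO_U_PLAT_BITS)
#define FIO_IO_U_PLAT_GROUP_NR	29
#define FIO_IO_U_PLAT_NR	(FIO_IO_U_PLAT_GROUP_NR * FIO_IO_U_PLAT_VAL)

static unsigned int plat_val_to_idx(unsigned long long val)
{
	unsigned int msb, error_bits, base, offset, idx;

	/* Position of the most significant set bit, 0 for val == 0 */
	if (val == 0)
		msb = 0;
	else
		msb = (sizeof(val) * 8) - __builtin_clzll(val) - 1;

	/*
	 * Small values cannot be rounded off: use all bits of the
	 * sample directly as the bucket index.
	 */
	if (msb <= FIO_IO_U_PLAT_BITS)
		return val;

	/* Number of low-order bits discarded (the bounded error) */
	error_bits = msb - FIO_IO_U_PLAT_BITS;

	/* Number of buckets in all preceding groups */
	base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;

	/* Bucket within the group, taken from the bits below the MSB */
	offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);

	/* Clamp to the last bucket */
	idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1) ?
		(base + offset) : (FIO_IO_U_PLAT_NR - 1);
	return idx;
}

int main(void)
{
	unsigned long long samples[] = { 0, 63, 64, 1000, 123456789ULL };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%llu ns -> bucket %u\n", samples[i],
		       plat_val_to_idx(samples[i]));
	return 0;
}

With FIO_IO_U_PLAT_GROUP_NR = 29 groups of 2^6 buckets, the histogram is
FIO_IO_U_PLAT_NR = 1856 uint64_t counters per direction, matching the
sizeof(uint64_t) correction in the first hunk.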
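The union holding clat_prio, together with nr_clat_prio, replaces the fixed
high/low-priority arrays with a dynamically sized per-ioprio array, which is why
free_clat_prio_stats() and alloc_clat_prio_stat_ddir() join the API. The sketch below
illustrates one plausible shape of that bookkeeping under the declarations in this
diff; it is an assumption-laden illustration, not fio's actual implementation, and the
stand-in struct, the zeroing via calloc(), and the return conventions are assumptions.

/*
 * Hedged sketch of managing the per-priority completion-latency stats
 * introduced above. Simplified stand-ins are used for the fio types;
 * only the fields relevant to clat_prio bookkeeping are reproduced.
 */
#include <stdint.h>
#include <stdlib.h>

#define FIO_IO_U_PLAT_NR	1856	/* 29 groups * 64 buckets, per stat.h */
#define DDIR_RWDIR_CNT		3	/* read, write, trim */

struct io_stat {
	uint64_t max_val, min_val, samples;
	double mean, S;
};

struct clat_prio_stat {
	uint64_t io_u_plat[FIO_IO_U_PLAT_NR];
	struct io_stat clat_stat;
	uint32_t ioprio;
};

/* Stand-in for the clat_prio-related part of struct thread_stat. */
struct thread_stat_prio_part {
	struct clat_prio_stat *clat_prio[DDIR_RWDIR_CNT];
	uint32_t nr_clat_prio[DDIR_RWDIR_CNT];
};

/* Allocate nr_prios zeroed per-priority entries for one data direction. */
static int alloc_clat_prio_stat_ddir(struct thread_stat_prio_part *ts,
				     int ddir, int nr_prios)
{
	ts->clat_prio[ddir] = calloc(nr_prios, sizeof(struct clat_prio_stat));
	if (!ts->clat_prio[ddir])
		return 1;
	ts->nr_clat_prio[ddir] = nr_prios;
	return 0;
}

/* Release the per-priority stats for all data directions. */
static void free_clat_prio_stats(struct thread_stat_prio_part *ts)
{
	int ddir;

	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
		free(ts->clat_prio[ddir]);
		ts->clat_prio[ddir] = NULL;
		ts->nr_clat_prio[ddir] = 0;
	}
}

In fio itself these helpers operate on struct thread_stat directly; the stand-in
struct here only isolates the two fields added by this diff. The union overlay with
clat_prio_offset exists so FIO_NET_CMD_TS can ship the pointed-to data as an offset
into the network payload, as the in-diff comments note.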