uint32_t unit_base;
uint32_t groupid;
uint32_t unified_rw_rep;
-};
+} __attribute__((packed));
/*
* How many depth levels to log
uint32_t io_u_plat[DDIR_RWDIR_CNT][FIO_IO_U_PLAT_NR];
uint64_t total_io_u[3];
uint64_t short_io_u[3];
+ uint64_t drop_io_u[3];
uint64_t total_submit;
uint64_t total_complete;
uint64_t latency_target;
fio_fp64_t latency_percentile;
uint64_t latency_window;
-};
+} __attribute__((packed));
struct jobs_eta {
uint32_t nr_running;
uint32_t nr_ramp;
+
uint32_t nr_pending;
uint32_t nr_setting_up;
+
uint32_t files_open;
+
uint32_t m_rate[DDIR_RWDIR_CNT], t_rate[DDIR_RWDIR_CNT];
uint32_t m_iops[DDIR_RWDIR_CNT], t_iops[DDIR_RWDIR_CNT];
uint32_t rate[DDIR_RWDIR_CNT];
*/
uint32_t nr_threads;
uint8_t run_str[];
-};
+} __attribute__((packed));
+
+extern struct jobs_eta *get_jobs_eta(int force, size_t *size);
extern void stat_init(void);
extern void stat_exit(void);
extern int calc_thread_status(struct jobs_eta *je, int force);
extern void display_thread_status(struct jobs_eta *je);
extern void show_run_stats(void);
+extern void __show_run_stats(void);
extern void show_running_run_stats(void);
extern void check_for_running_stats(void);
extern void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, int nr);
return 1;
}
-
-#define __THREAD_RUNSTR_SZ(nr) (((nr) * 5) + 1)
+/*
+ * Worst level condensing would be 1:5, so allow enough room for that,
+ * plus one byte for the terminating NUL — run_str is consumed as a
+ * C string by the ETA display code, so dropping the +1 would leave
+ * the worst-case string unterminated.
+ */
+#define __THREAD_RUNSTR_SZ(nr) ((nr) * 5 + 1)
#define THREAD_RUNSTR_SZ __THREAD_RUNSTR_SZ(thread_number)
#endif