[fio.git] / rate-submit.c
/*
 * Rated submission helpers
 *
 * Copyright (C) 2015 Jens Axboe <axboe@kernel.dk>
 *
 */
#include <assert.h>
#include "fio.h"
#include "ioengines.h"
#include "lib/getrusage.h"
#include "rate-submit.h"

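/*
 * These helpers back io_submit_mode=offload: instead of submitting IO
 * inline, the job thread hands each io_u to a workqueue worker, so rated
 * submission is decoupled from submission and completion latency. An
 * illustrative job section that exercises this path (example only, not
 * taken from this file):
 *
 *	[rated]
 *	rw=randwrite
 *	rate_iops=1000
 *	io_submit_mode=offload
 */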
static void check_overlap(struct io_u *io_u)
{
	int i, res;
	struct thread_data *td;

	/*
	 * Allow only one thread to check for overlap at a time to prevent two
	 * threads from thinking the coast is clear and then submitting IOs
	 * that overlap with each other.
	 *
	 * If an overlap is found, release the lock and re-acquire it before
	 * checking again to give other threads a chance to make progress.
	 *
	 * If no overlap is found, release the lock when the io_u's
	 * IO_U_F_FLIGHT flag is set so that this io_u can be checked by other
	 * threads as they assess overlap.
	 */
	res = pthread_mutex_lock(&overlap_check);
	assert(res == 0);

retry:
	for_each_td(td, i) {
		if (td->runstate <= TD_SETTING_UP ||
		    td->runstate >= TD_FINISHING ||
		    !td->o.serialize_overlap ||
		    td->o.io_submit_mode != IO_MODE_OFFLOAD)
			continue;

		if (!in_flight_overlap(&td->io_u_all, io_u))
			continue;

		res = pthread_mutex_unlock(&overlap_check);
		assert(res == 0);
		res = pthread_mutex_lock(&overlap_check);
		assert(res == 0);
		goto retry;
	}
}

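/*
 * Worker-side submission: queue one io_u, reaping completions whenever the
 * engine reports FIO_Q_BUSY. If the io_u remains queued and the depth is 1,
 * wait for at least one completion so the worker never sleeps with its only
 * slot still busy.
 */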
static int io_workqueue_fn(struct submit_worker *sw,
			   struct workqueue_work *work)
{
	struct io_u *io_u = container_of(work, struct io_u, work);
	const enum fio_ddir ddir = io_u->ddir;
	struct thread_data *td = sw->priv;
	int ret, error;

	if (td->o.serialize_overlap)
		check_overlap(io_u);

	dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid());

	io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);

	td->cur_depth++;

	do {
		ret = td_io_queue(td, io_u);
		if (ret != FIO_Q_BUSY)
			break;
		ret = io_u_queued_complete(td, 1);
		if (ret > 0)
			td->cur_depth -= ret;
		else if (ret < 0)
			break;
		io_u_clear(td, io_u, IO_U_F_FLIGHT);
	} while (1);

	dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid());

	error = io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL);

	if (ret == FIO_Q_COMPLETED)
		td->cur_depth--;
	else if (ret == FIO_Q_QUEUED) {
		unsigned int min_evts;

		if (td->o.iodepth == 1)
			min_evts = 1;
		else
			min_evts = 0;

		ret = io_u_queued_complete(td, min_evts);
		if (ret > 0)
			td->cur_depth -= ret;
	}

	if (error || td->error) {
		pthread_mutex_lock(&td->io_u_lock);
		pthread_cond_signal(&td->parent->free_cond);
		pthread_mutex_unlock(&td->io_u_lock);
	}

	return 0;
}

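/*
 * Tell the workqueue whether this worker still has IO to flush before it is
 * allowed to sleep: anything queued, pending, or in flight keeps it awake,
 * unless the worker has already hit an error.
 */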
static bool io_workqueue_pre_sleep_flush_fn(struct submit_worker *sw)
{
	struct thread_data *td = sw->priv;

	if (td->error)
		return false;
	if (td->io_u_queued || td->cur_depth || td->io_u_in_flight)
		return true;

	return false;
}

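/*
 * Drain outstanding IO with io_u_quiesce() before the worker sleeps, and
 * credit the reaped completions against the current depth.
 */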
static void io_workqueue_pre_sleep_fn(struct submit_worker *sw)
{
	struct thread_data *td = sw->priv;
	int ret;

	ret = io_u_quiesce(td);
	if (ret > 0)
		td->cur_depth -= ret;
}

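/*
 * Each worker operates on a private, zeroed shadow thread_data hung off
 * sw->priv; io_workqueue_free_fn() below releases it again.
 */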
static int io_workqueue_alloc_fn(struct submit_worker *sw)
{
	struct thread_data *td;

	td = calloc(1, sizeof(*td));
	sw->priv = td;
	return 0;
}

static void io_workqueue_free_fn(struct submit_worker *sw)
{
	free(sw->priv);
	sw->priv = NULL;
}

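/*
 * Clone the parent job into the worker's shadow thread_data: copy the
 * options and stats, duplicate the file list, and load a private ioengine
 * instance. Each worker runs at an effective iodepth of 1; overall depth
 * comes from the number of workers in the workqueue.
 */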
static int io_workqueue_init_worker_fn(struct submit_worker *sw)
{
	struct thread_data *parent = sw->wq->td;
	struct thread_data *td = sw->priv;

	memcpy(&td->o, &parent->o, sizeof(td->o));
	memcpy(&td->ts, &parent->ts, sizeof(td->ts));
	td->o.uid = td->o.gid = -1U;
	dup_files(td, parent);
	td->eo = parent->eo;
	fio_options_mem_dupe(td);
	td->iolog_f = parent->iolog_f;

	if (ioengine_load(td))
		goto err;

	td->pid = gettid();

	INIT_FLIST_HEAD(&td->io_log_list);
	INIT_FLIST_HEAD(&td->io_hist_list);
	INIT_FLIST_HEAD(&td->verify_list);
	INIT_FLIST_HEAD(&td->trim_list);
	td->io_hist_tree = RB_ROOT;

	td->o.iodepth = 1;
	if (td_io_init(td))
		goto err_io_init;

	if (td->io_ops->post_init && td->io_ops->post_init(td))
		goto err_io_init;

	set_epoch_time(td, td->o.log_unix_epoch | td->o.log_alternate_epoch,
		       td->o.log_alternate_epoch_clock_id);
	fio_getrusage(&td->ru_start);
	clear_io_state(td, 1);

	td_set_runstate(td, TD_RUNNING);
	td->flags |= TD_F_CHILD | TD_F_NEED_LOCK;
	td->parent = parent;
	return 0;

err_io_init:
	close_ioengine(td);
err:
	return 1;
}

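/*
 * Fold the exiting worker's stats back into the parent, then tear down its
 * duplicated files and private ioengine instance.
 */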
static void io_workqueue_exit_worker_fn(struct submit_worker *sw,
					unsigned int *sum_cnt)
{
	struct thread_data *td = sw->priv;

	(*sum_cnt)++;

	/*
	 * io_workqueue_update_acct_fn() doesn't support per prio stats, and
	 * even if it did, offload can't be used with all async IO engines.
	 * If group reporting is set in the parent td, the group result
	 * generated by __show_run_stats() can still contain multiple prios
	 * from different offloaded jobs.
	 */
	sw->wq->td->ts.disable_prio_stat = 1;
	sum_thread_stats(&sw->wq->td->ts, &td->ts);

	fio_options_free(td);
	close_and_free_files(td);
	if (td->io_ops)
		close_ioengine(td);
	td_set_runstate(td, TD_EXITED);
}

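/*
 * Stat counters are folded from worker into parent either atomically, when
 * __sync_fetch_and_add() is available (CONFIG_SFAA), or under the stat
 * locks taken by the double-lock helpers below.
 */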
#ifdef CONFIG_SFAA
static void sum_val(uint64_t *dst, uint64_t *src)
{
	if (*src) {
		__sync_fetch_and_add(dst, *src);
		*src = 0;
	}
}
#else
static void sum_val(uint64_t *dst, uint64_t *src)
{
	if (*src) {
		*dst += *src;
		*src = 0;
	}
}
#endif

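/*
 * Without atomics, the two stat_lock mutexes guard the summation. They are
 * always acquired in address order, so two workers folding stats in
 * opposite directions cannot deadlock.
 */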
static void pthread_double_unlock(pthread_mutex_t *lock1,
				  pthread_mutex_t *lock2)
{
#ifndef CONFIG_SFAA
	pthread_mutex_unlock(lock1);
	pthread_mutex_unlock(lock2);
#endif
}

static void pthread_double_lock(pthread_mutex_t *lock1, pthread_mutex_t *lock2)
{
#ifndef CONFIG_SFAA
	if (lock1 < lock2) {
		pthread_mutex_lock(lock1);
		pthread_mutex_lock(lock2);
	} else {
		pthread_mutex_lock(lock2);
		pthread_mutex_lock(lock1);
	}
#endif
}

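/*
 * Move one data direction's byte and block counters from src to dst,
 * zeroing the source. bytes_verified is folded with reads, since verify IO
 * completes on the read side.
 */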
static void sum_ddir(struct thread_data *dst, struct thread_data *src,
		     enum fio_ddir ddir)
{
	pthread_double_lock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);

	sum_val(&dst->io_bytes[ddir], &src->io_bytes[ddir]);
	sum_val(&dst->io_blocks[ddir], &src->io_blocks[ddir]);
	sum_val(&dst->this_io_blocks[ddir], &src->this_io_blocks[ddir]);
	sum_val(&dst->this_io_bytes[ddir], &src->this_io_bytes[ddir]);
	sum_val(&dst->bytes_done[ddir], &src->bytes_done[ddir]);
	if (ddir == DDIR_READ)
		sum_val(&dst->bytes_verified, &src->bytes_verified);

	pthread_double_unlock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);
}

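/*
 * Periodic accounting hook: fold whichever data directions this worker
 * performs into the parent, so the parent's accounting reflects worker
 * progress.
 */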
static void io_workqueue_update_acct_fn(struct submit_worker *sw)
{
	struct thread_data *src = sw->priv;
	struct thread_data *dst = sw->wq->td;

	if (td_read(src))
		sum_ddir(dst, src, DDIR_READ);
	if (td_write(src))
		sum_ddir(dst, src, DDIR_WRITE);
	if (td_trim(src))
		sum_ddir(dst, src, DDIR_TRIM);
}

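/* Callback table wiring the helpers above into the generic workqueue. */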
static struct workqueue_ops rated_wq_ops = {
	.fn = io_workqueue_fn,
	.pre_sleep_flush_fn = io_workqueue_pre_sleep_flush_fn,
	.pre_sleep_fn = io_workqueue_pre_sleep_fn,
	.update_acct_fn = io_workqueue_update_acct_fn,
	.alloc_worker_fn = io_workqueue_alloc_fn,
	.free_worker_fn = io_workqueue_free_fn,
	.init_worker_fn = io_workqueue_init_worker_fn,
	.exit_worker_fn = io_workqueue_exit_worker_fn,
};

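/*
 * Entry points for the job thread: both are no-ops unless
 * io_submit_mode=offload is set. The workqueue is sized to the job's
 * iodepth, one worker per in-flight io_u. A rough sketch of the expected
 * call order from the job thread (the actual callers live in fio's
 * backend, not in this file):
 *
 *	if (rate_submit_init(td, sk_out))
 *		return 1;	// failed to start workers
 *	// ...run the job, handing io_us off via workqueue_enqueue()...
 *	rate_submit_exit(td);
 */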
int rate_submit_init(struct thread_data *td, struct sk_out *sk_out)
{
	if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
		return 0;

	return workqueue_init(td, &td->io_wq, &rated_wq_ops, td->o.iodepth, sk_out);
}

void rate_submit_exit(struct thread_data *td)
{
	if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
		return;

	workqueue_exit(&td->io_wq);
}