/*
 * Rated submission helpers
 *
 * Copyright (C) 2015 Jens Axboe <axboe@kernel.dk>
 *
 */
#include <assert.h>
#include "fio.h"
#include "ioengines.h"
#include "lib/getrusage.h"
#include "rate-submit.h"

static void check_overlap(struct io_u *io_u)
{
	int res;

	/*
	 * Allow only one thread to check for overlap at a time to prevent two
	 * threads from thinking the coast is clear and then submitting IOs
	 * that overlap with each other.
	 *
	 * If an overlap is found, release the lock and re-acquire it before
	 * checking again to give other threads a chance to make progress.
	 *
	 * If no overlap is found, release the lock when the io_u's
	 * IO_U_F_FLIGHT flag is set so that this io_u can be checked by other
	 * threads as they assess overlap.
	 */
	res = pthread_mutex_lock(&overlap_check);
	assert(res == 0);

retry:
	for_each_td(td) {
		if (td->runstate <= TD_SETTING_UP ||
		    td->runstate >= TD_FINISHING ||
		    !td->o.serialize_overlap ||
		    td->o.io_submit_mode != IO_MODE_OFFLOAD)
			continue;

		if (!in_flight_overlap(&td->io_u_all, io_u))
			continue;

		res = pthread_mutex_unlock(&overlap_check);
		assert(res == 0);
		res = pthread_mutex_lock(&overlap_check);
		assert(res == 0);
		goto retry;
	} end_for_each();
}

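/*
 * Offload worker: submit one io_u on behalf of the parent job. If the
 * engine returns FIO_Q_BUSY, reap a completion and retry until the io_u
 * is accepted or an error occurs.
 */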
static int io_workqueue_fn(struct submit_worker *sw,
			   struct workqueue_work *work)
{
	struct io_u *io_u = container_of(work, struct io_u, work);
	const enum fio_ddir ddir = io_u->ddir;
	struct thread_data *td = sw->priv;
	int ret, error;

	if (td->o.serialize_overlap)
		check_overlap(io_u);

	dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid());

	io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);

	td->cur_depth++;

	do {
		ret = td_io_queue(td, io_u);
		if (ret != FIO_Q_BUSY)
			break;
		ret = io_u_queued_complete(td, 1);
		if (ret > 0)
			td->cur_depth -= ret;
		else if (ret < 0)
			break;
		io_u_clear(td, io_u, IO_U_F_FLIGHT);
	} while (1);

	dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid());

	error = io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL);

	if (ret == FIO_Q_COMPLETED)
		td->cur_depth--;
	else if (ret == FIO_Q_QUEUED) {
		unsigned int min_evts;

		if (td->o.iodepth == 1)
			min_evts = 1;
		else
			min_evts = 0;

		ret = io_u_queued_complete(td, min_evts);
		if (ret > 0)
			td->cur_depth -= ret;
	}

	if (error || td->error) {
		pthread_mutex_lock(&td->io_u_lock);
		pthread_cond_signal(&td->parent->free_cond);
		pthread_mutex_unlock(&td->io_u_lock);
	}

	return 0;
}

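/*
 * Called before a worker goes to sleep: returns true if there is still
 * queued or in-flight IO that must be flushed first.
 */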
static bool io_workqueue_pre_sleep_flush_fn(struct submit_worker *sw)
{
	struct thread_data *td = sw->priv;

	if (td->error)
		return false;
	if (td->io_u_queued || td->cur_depth || td->io_u_in_flight)
		return true;

	return false;
}

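/*
 * Quiesce outstanding IO before the worker sleeps, crediting any reaped
 * completions against the current depth.
 */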
static void io_workqueue_pre_sleep_fn(struct submit_worker *sw)
{
	struct thread_data *td = sw->priv;
	int ret;

	ret = io_u_quiesce(td);
	if (ret > 0)
		td->cur_depth -= ret;
}

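/*
 * Each worker gets a private, zeroed thread_data that shadows the
 * parent job; it is released again in io_workqueue_free_fn().
 */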
static int io_workqueue_alloc_fn(struct submit_worker *sw)
{
	struct thread_data *td;

	td = calloc(1, sizeof(*td));
	sw->priv = td;
	return 0;
}

static void io_workqueue_free_fn(struct submit_worker *sw)
{
	free(sw->priv);
	sw->priv = NULL;
}

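/*
 * Set up the worker's private thread_data as a clone of the parent job:
 * copy options and stats, duplicate the file list, load the IO engine,
 * and force iodepth=1 since each worker handles a single io_u at a time.
 */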
static int io_workqueue_init_worker_fn(struct submit_worker *sw)
{
	struct thread_data *parent = sw->wq->td;
	struct thread_data *td = sw->priv;

	memcpy(&td->o, &parent->o, sizeof(td->o));
	memcpy(&td->ts, &parent->ts, sizeof(td->ts));
	td->o.uid = td->o.gid = -1U;
	dup_files(td, parent);
	td->eo = parent->eo;
	fio_options_mem_dupe(td);
	td->iolog_f = parent->iolog_f;

	if (ioengine_load(td))
		goto err;

	td->pid = gettid();

	INIT_FLIST_HEAD(&td->io_log_list);
	INIT_FLIST_HEAD(&td->io_hist_list);
	INIT_FLIST_HEAD(&td->verify_list);
	INIT_FLIST_HEAD(&td->trim_list);
	td->io_hist_tree = RB_ROOT;

	td->o.iodepth = 1;
	if (td_io_init(td))
		goto err_io_init;

	if (td->io_ops->post_init && td->io_ops->post_init(td))
		goto err_io_init;

	set_epoch_time(td, td->o.log_unix_epoch | td->o.log_alternate_epoch,
		       td->o.log_alternate_epoch_clock_id);
	fio_getrusage(&td->ru_start);
	clear_io_state(td, 1);

	td_set_runstate(td, TD_RUNNING);
	td->flags |= TD_F_CHILD | TD_F_NEED_LOCK;
	td->parent = parent;
	return 0;

err_io_init:
	close_ioengine(td);
err:
	return 1;
}

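/*
 * Tear down a worker: fold its thread stats into the parent, then free
 * options, files and the IO engine instance.
 */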
static void io_workqueue_exit_worker_fn(struct submit_worker *sw,
					unsigned int *sum_cnt)
{
	struct thread_data *td = sw->priv;

	(*sum_cnt)++;

	/*
	 * io_workqueue_update_acct_fn() doesn't support per prio stats, and
	 * even if it did, offload can't be used with all async IO engines.
	 * If group reporting is set in the parent td, the group result
	 * generated by __show_run_stats() can still contain multiple prios
	 * from different offloaded jobs.
	 */
	sw->wq->td->ts.disable_prio_stat = 1;
	sum_thread_stats(&sw->wq->td->ts, &td->ts);

	fio_options_free(td);
	close_and_free_files(td);
	if (td->io_ops)
		close_ioengine(td);
	td_set_runstate(td, TD_EXITED);
}

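/*
 * Accumulate a single counter into the parent. With CONFIG_SFAA the add
 * is done with an atomic fetch-and-add; otherwise the callers serialize
 * via pthread_double_lock() below.
 */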
#ifdef CONFIG_SFAA
static void sum_val(uint64_t *dst, uint64_t *src)
{
	if (*src) {
		__sync_fetch_and_add(dst, *src);
		*src = 0;
	}
}
#else
static void sum_val(uint64_t *dst, uint64_t *src)
{
	if (*src) {
		*dst += *src;
		*src = 0;
	}
}
#endif

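/*
 * Without atomic adds, the two stat locks must be held together. They
 * are always acquired in ascending address order so that two workers
 * summing in opposite directions cannot deadlock.
 */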
static void pthread_double_unlock(pthread_mutex_t *lock1,
				  pthread_mutex_t *lock2)
{
#ifndef CONFIG_SFAA
	pthread_mutex_unlock(lock1);
	pthread_mutex_unlock(lock2);
#endif
}

static void pthread_double_lock(pthread_mutex_t *lock1, pthread_mutex_t *lock2)
{
#ifndef CONFIG_SFAA
	if (lock1 < lock2) {
		pthread_mutex_lock(lock1);
		pthread_mutex_lock(lock2);
	} else {
		pthread_mutex_lock(lock2);
		pthread_mutex_lock(lock1);
	}
#endif
}

static void sum_ddir(struct thread_data *dst, struct thread_data *src,
		     enum fio_ddir ddir)
{
	pthread_double_lock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);

	sum_val(&dst->io_bytes[ddir], &src->io_bytes[ddir]);
	sum_val(&dst->io_blocks[ddir], &src->io_blocks[ddir]);
	sum_val(&dst->this_io_blocks[ddir], &src->this_io_blocks[ddir]);
	sum_val(&dst->this_io_bytes[ddir], &src->this_io_bytes[ddir]);
	sum_val(&dst->bytes_done[ddir], &src->bytes_done[ddir]);
	if (ddir == DDIR_READ)
		sum_val(&dst->bytes_verified, &src->bytes_verified);

	pthread_double_unlock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);
}

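/*
 * Periodically fold the worker's per-direction byte and block counters
 * back into the parent so rate tracking sees up-to-date totals.
 */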
static void io_workqueue_update_acct_fn(struct submit_worker *sw)
{
	struct thread_data *src = sw->priv;
	struct thread_data *dst = sw->wq->td;

	if (td_read(src))
		sum_ddir(dst, src, DDIR_READ);
	if (td_write(src))
		sum_ddir(dst, src, DDIR_WRITE);
	if (td_trim(src))
		sum_ddir(dst, src, DDIR_TRIM);
}

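/* Workqueue hooks used when IO submission is offloaded to worker threads */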
static struct workqueue_ops rated_wq_ops = {
	.fn			= io_workqueue_fn,
	.pre_sleep_flush_fn	= io_workqueue_pre_sleep_flush_fn,
	.pre_sleep_fn		= io_workqueue_pre_sleep_fn,
	.update_acct_fn		= io_workqueue_update_acct_fn,
	.alloc_worker_fn	= io_workqueue_alloc_fn,
	.free_worker_fn		= io_workqueue_free_fn,
	.init_worker_fn		= io_workqueue_init_worker_fn,
	.exit_worker_fn		= io_workqueue_exit_worker_fn,
};

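/*
 * Set up the offload workqueue for a job. This only applies when the
 * job uses io_submit_mode=offload; otherwise submission stays inline.
 *
 * Illustrative (hypothetical) job section that exercises this path:
 *
 *   [offload-job]
 *   ioengine=libaio
 *   iodepth=16
 *   io_submit_mode=offload
 *   rate_iops=1000
 */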
int rate_submit_init(struct thread_data *td, struct sk_out *sk_out)
{
	if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
		return 0;

	return workqueue_init(td, &td->io_wq, &rated_wq_ops, td->o.iodepth, sk_out);
}

void rate_submit_exit(struct thread_data *td)
{
	if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
		return;

	workqueue_exit(&td->io_wq);
}