Fix a potentially infinite loop in check_overlap()
[fio.git] / rate-submit.c
/*
 * Rated submission helpers
 *
 * Copyright (C) 2015 Jens Axboe <axboe@kernel.dk>
 *
 */
#include "fio.h"
#include "ioengines.h"
#include "lib/getrusage.h"
#include "rate-submit.h"

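/*
 * Note: this file backs io_submit_mode=offload, where I/O submission is
 * handed off to workqueue worker threads instead of being issued inline
 * by the job thread (see rate_submit_init() at the bottom). As a rough
 * illustration, a job section along these lines would exercise this path:
 *
 *	[job]
 *	io_submit_mode=offload
 *	rate=1m
 *	serialize_overlap=1
 */
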
static void check_overlap(struct io_u *io_u)
{
        int i;
        struct thread_data *td;

        /*
         * Allow only one thread to check for overlap at a time to prevent two
         * threads from thinking the coast is clear and then submitting IOs
         * that overlap with each other.
         *
         * If an overlap is found, release the lock and re-acquire it before
         * checking again to give other threads a chance to make progress.
         *
         * If no overlap is found, release the lock when the io_u's
         * IO_U_F_FLIGHT flag is set so that this io_u can be checked by other
         * threads as they assess overlap.
         */
        pthread_mutex_lock(&overlap_check);

retry:
        for_each_td(td, i) {
                if (td->runstate <= TD_SETTING_UP ||
                    td->runstate >= TD_FINISHING ||
                    !td->o.serialize_overlap ||
                    td->o.io_submit_mode != IO_MODE_OFFLOAD)
                        continue;

                if (!in_flight_overlap(&td->io_u_all, io_u))
                        continue;

                pthread_mutex_unlock(&overlap_check);
                pthread_mutex_lock(&overlap_check);
                goto retry;
        }
}

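/*
 * Workqueue handler: submit a single io_u on behalf of the parent job.
 * If the engine reports FIO_Q_BUSY, reap one completion and retry.
 * Workers run with iodepth forced to 1 (see io_workqueue_init_worker_fn()),
 * which is why a queued I/O waits for min_evts = 1 below.
 */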
static int io_workqueue_fn(struct submit_worker *sw,
                           struct workqueue_work *work)
{
        struct io_u *io_u = container_of(work, struct io_u, work);
        const enum fio_ddir ddir = io_u->ddir;
        struct thread_data *td = sw->priv;
        int ret, error;

        if (td->o.serialize_overlap)
                check_overlap(io_u);

        dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid());

        io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);

        td->cur_depth++;

        do {
                ret = td_io_queue(td, io_u);
                if (ret != FIO_Q_BUSY)
                        break;
                ret = io_u_queued_complete(td, 1);
                if (ret > 0)
                        td->cur_depth -= ret;
                else if (ret < 0)
                        break;
                io_u_clear(td, io_u, IO_U_F_FLIGHT);
        } while (1);

        dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid());

        error = io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL);

        if (ret == FIO_Q_COMPLETED)
                td->cur_depth--;
        else if (ret == FIO_Q_QUEUED) {
                unsigned int min_evts;

                if (td->o.iodepth == 1)
                        min_evts = 1;
                else
                        min_evts = 0;

                ret = io_u_queued_complete(td, min_evts);
                if (ret > 0)
                        td->cur_depth -= ret;
        }

        if (error || td->error)
                pthread_cond_signal(&td->parent->free_cond);

        return 0;
}

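/*
 * Pre-sleep check: keep flushing while the worker still has queued or
 * in-flight I/O and has not hit an error.
 */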
static bool io_workqueue_pre_sleep_flush_fn(struct submit_worker *sw)
{
        struct thread_data *td = sw->priv;

        if (td->error)
                return false;
        if (td->io_u_queued || td->cur_depth || td->io_u_in_flight)
                return true;

        return false;
}

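/*
 * Quiesce outstanding I/O before the worker sleeps, crediting any reaped
 * completions back against the current depth.
 */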
static void io_workqueue_pre_sleep_fn(struct submit_worker *sw)
{
        struct thread_data *td = sw->priv;
        int ret;

        ret = io_u_quiesce(td);
        if (ret > 0)
                td->cur_depth -= ret;
}

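/*
 * Each worker owns a private thread_data; it is allocated here and
 * populated from the parent in io_workqueue_init_worker_fn() below.
 */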
static int io_workqueue_alloc_fn(struct submit_worker *sw)
{
        struct thread_data *td;

        td = calloc(1, sizeof(*td));
        sw->priv = td;
        return 0;
}

static void io_workqueue_free_fn(struct submit_worker *sw)
{
        free(sw->priv);
        sw->priv = NULL;
}

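/*
 * Per-worker setup: clone the parent's options, stats and file table,
 * load a private ioengine instance, and force iodepth to 1. Parallelism
 * comes from the parent spawning up to o.iodepth workers instead
 * (see rate_submit_init()).
 */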
static int io_workqueue_init_worker_fn(struct submit_worker *sw)
{
        struct thread_data *parent = sw->wq->td;
        struct thread_data *td = sw->priv;

        memcpy(&td->o, &parent->o, sizeof(td->o));
        memcpy(&td->ts, &parent->ts, sizeof(td->ts));
        td->o.uid = td->o.gid = -1U;
        dup_files(td, parent);
        td->eo = parent->eo;
        fio_options_mem_dupe(td);

        if (ioengine_load(td))
                goto err;

        td->pid = gettid();

        INIT_FLIST_HEAD(&td->io_log_list);
        INIT_FLIST_HEAD(&td->io_hist_list);
        INIT_FLIST_HEAD(&td->verify_list);
        INIT_FLIST_HEAD(&td->trim_list);
        td->io_hist_tree = RB_ROOT;

        td->o.iodepth = 1;
        if (td_io_init(td))
                goto err_io_init;

        if (td->io_ops->post_init && td->io_ops->post_init(td))
                goto err_io_init;

        set_epoch_time(td, td->o.log_unix_epoch);
        fio_getrusage(&td->ru_start);
        clear_io_state(td, 1);

        td_set_runstate(td, TD_RUNNING);
        td->flags |= TD_F_CHILD | TD_F_NEED_LOCK;
        td->parent = parent;
        return 0;

err_io_init:
        close_ioengine(td);
err:
        return 1;
}

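/*
 * Per-worker teardown: fold the worker's stats into the parent's,
 * release its files and ioengine, and mark it exited.
 */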
static void io_workqueue_exit_worker_fn(struct submit_worker *sw,
                                        unsigned int *sum_cnt)
{
        struct thread_data *td = sw->priv;

        (*sum_cnt)++;
        sum_thread_stats(&sw->wq->td->ts, &td->ts, *sum_cnt == 1);

        fio_options_free(td);
        close_and_free_files(td);
        if (td->io_ops)
                close_ioengine(td);
        td_set_runstate(td, TD_EXITED);
}

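/*
 * Stats merging. With atomic fetch-and-add available (CONFIG_SFAA) the
 * counters are summed lock-free and the double lock/unlock helpers below
 * compile to no-ops; otherwise both stat locks are held around plain
 * additions.
 */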
#ifdef CONFIG_SFAA
static void sum_val(uint64_t *dst, uint64_t *src)
{
        if (*src) {
                __sync_fetch_and_add(dst, *src);
                *src = 0;
        }
}
#else
static void sum_val(uint64_t *dst, uint64_t *src)
{
        if (*src) {
                *dst += *src;
                *src = 0;
        }
}
#endif

static void pthread_double_unlock(pthread_mutex_t *lock1,
                                  pthread_mutex_t *lock2)
{
#ifndef CONFIG_SFAA
        pthread_mutex_unlock(lock1);
        pthread_mutex_unlock(lock2);
#endif
}

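/*
 * Acquire the two stat locks in address order so that two threads
 * locking the same pair from opposite ends cannot deadlock.
 */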
static void pthread_double_lock(pthread_mutex_t *lock1, pthread_mutex_t *lock2)
{
#ifndef CONFIG_SFAA
        if (lock1 < lock2) {
                pthread_mutex_lock(lock1);
                pthread_mutex_lock(lock2);
        } else {
                pthread_mutex_lock(lock2);
                pthread_mutex_lock(lock1);
        }
#endif
}

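/*
 * Merge one data direction's byte and block counters from src into dst,
 * zeroing the source so nothing is counted twice.
 */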
static void sum_ddir(struct thread_data *dst, struct thread_data *src,
                     enum fio_ddir ddir)
{
        pthread_double_lock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);

        sum_val(&dst->io_bytes[ddir], &src->io_bytes[ddir]);
        sum_val(&dst->io_blocks[ddir], &src->io_blocks[ddir]);
        sum_val(&dst->this_io_blocks[ddir], &src->this_io_blocks[ddir]);
        sum_val(&dst->this_io_bytes[ddir], &src->this_io_bytes[ddir]);
        sum_val(&dst->bytes_done[ddir], &src->bytes_done[ddir]);

        pthread_double_unlock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);
}

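/* Fold a worker's per-direction counters into the parent job. */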
static void io_workqueue_update_acct_fn(struct submit_worker *sw)
{
        struct thread_data *src = sw->priv;
        struct thread_data *dst = sw->wq->td;

        if (td_read(src))
                sum_ddir(dst, src, DDIR_READ);
        if (td_write(src))
                sum_ddir(dst, src, DDIR_WRITE);
        if (td_trim(src))
                sum_ddir(dst, src, DDIR_TRIM);
}

static struct workqueue_ops rated_wq_ops = {
        .fn = io_workqueue_fn,
        .pre_sleep_flush_fn = io_workqueue_pre_sleep_flush_fn,
        .pre_sleep_fn = io_workqueue_pre_sleep_fn,
        .update_acct_fn = io_workqueue_update_acct_fn,
        .alloc_worker_fn = io_workqueue_alloc_fn,
        .free_worker_fn = io_workqueue_free_fn,
        .init_worker_fn = io_workqueue_init_worker_fn,
        .exit_worker_fn = io_workqueue_exit_worker_fn,
};

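/*
 * Public entry points: the rated-submit workqueue only exists when the
 * job runs with io_submit_mode=offload; otherwise both calls are no-ops.
 */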
int rate_submit_init(struct thread_data *td, struct sk_out *sk_out)
{
        if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
                return 0;

        return workqueue_init(td, &td->io_wq, &rated_wq_ops, td->o.iodepth, sk_out);
}

void rate_submit_exit(struct thread_data *td)
{
        if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
                return;

        workqueue_exit(&td->io_wq);
}