/*
 * Rated submission helpers
 *
 * Copyright (C) 2015 Jens Axboe <axboe@kernel.dk>
 */
#include "fio.h"
#include "ioengines.h"
#include "lib/getrusage.h"
#include "rate-submit.h"
static void check_overlap(struct io_u *io_u)
{
	int res;
	struct thread_data *td;

	/*
	 * Allow only one thread to check for overlap at a time to prevent two
	 * threads from thinking the coast is clear and then submitting IOs
	 * that overlap with each other.
	 *
	 * If an overlap is found, release the lock and re-acquire it before
	 * checking again to give other threads a chance to make progress.
	 *
	 * If no overlap is found, release the lock when the io_u's
	 * IO_U_F_FLIGHT flag is set so that this io_u can be checked by other
	 * threads as they assess overlap.
	 */
	res = pthread_mutex_lock(&overlap_check);
	if (fio_unlikely(res != 0)) {
		log_err("failed to lock overlap check mutex, err: %i:%s\n", res, strerror(res));
		abort();
	}
retry:
	for_each_td(td) {
		if (td->runstate <= TD_SETTING_UP ||
		    td->runstate >= TD_FINISHING ||
		    !td->o.serialize_overlap ||
		    td->o.io_submit_mode != IO_MODE_OFFLOAD)
			continue;
		if (!in_flight_overlap(&td->io_u_all, io_u))
			continue;
		res = pthread_mutex_unlock(&overlap_check);
		if (fio_unlikely(res != 0)) {
			log_err("failed to unlock overlap check mutex, err: %i:%s\n", res, strerror(res));
			abort();
		}
		res = pthread_mutex_lock(&overlap_check);
		if (fio_unlikely(res != 0)) {
			log_err("failed to lock overlap check mutex, err: %i:%s\n", res, strerror(res));
			abort();
		}
		goto retry;
	} end_for_each();
}
static int io_workqueue_fn(struct submit_worker *sw,
			   struct workqueue_work *work)
{
	struct io_u *io_u = container_of(work, struct io_u, work);
	const enum fio_ddir ddir = io_u->ddir;
	struct thread_data *td = sw->priv;
	int ret, error;

	if (td->o.serialize_overlap)
		check_overlap(io_u);
	dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid());
	io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
	td->cur_depth++;

	do {
		ret = td_io_queue(td, io_u);
		if (ret != FIO_Q_BUSY)
			break;
		ret = io_u_queued_complete(td, 1);
		if (ret > 0)
			td->cur_depth -= ret;
		else if (ret < 0)
			break;
		io_u_clear(td, io_u, IO_U_F_FLIGHT);
	} while (1);

	dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid());
	error = io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL);
	if (ret == FIO_Q_COMPLETED)
		td->cur_depth--;
	else if (ret == FIO_Q_QUEUED) {
		unsigned int min_evts;

		if (td->o.iodepth == 1)
			min_evts = 1;
		else
			min_evts = 0;
		ret = io_u_queued_complete(td, min_evts);
		if (ret > 0)
			td->cur_depth -= ret;
	}

	if (error || td->error) {
		pthread_mutex_lock(&td->io_u_lock);
		pthread_cond_signal(&td->parent->free_cond);
		pthread_mutex_unlock(&td->io_u_lock);
	}

	return 0;
}
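
/*
 * Tell the workqueue whether this worker still has queued or in-flight
 * IO that should be flushed before it is allowed to sleep.
 */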
static bool io_workqueue_pre_sleep_flush_fn(struct submit_worker *sw)
{
	struct thread_data *td = sw->priv;

	if (td->io_u_queued || td->cur_depth || td->io_u_in_flight)
		return true;

	return false;
}
static void io_workqueue_pre_sleep_fn(struct submit_worker *sw)
{
	struct thread_data *td = sw->priv;
	int ret;

	/* Quiesce remaining in-flight IO before the worker sleeps */
	ret = io_u_quiesce(td);
	if (ret > 0)
		td->cur_depth -= ret;
}
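
/*
 * Allocate / free the shadow thread_data that backs each submit
 * worker; sw->priv points at it for the worker's lifetime.
 */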
static int io_workqueue_alloc_fn(struct submit_worker *sw)
{
	struct thread_data *td;

	td = calloc(1, sizeof(*td));
	if (!td)
		return -1;

	sw->priv = td;
	return 0;
}

static void io_workqueue_free_fn(struct submit_worker *sw)
{
	free(sw->priv);
	sw->priv = NULL;
}
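
/*
 * Initialize a worker: clone the parent job's options, files and stats
 * into the shadow thread_data and bring up a private IO engine
 * instance for this worker.
 */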
static int io_workqueue_init_worker_fn(struct submit_worker *sw)
{
	struct thread_data *parent = sw->wq->td;
	struct thread_data *td = sw->priv;

	memcpy(&td->o, &parent->o, sizeof(td->o));
	memcpy(&td->ts, &parent->ts, sizeof(td->ts));
	td->o.uid = td->o.gid = -1U;
	dup_files(td, parent);
	fio_options_mem_dupe(td);
	td->iolog_f = parent->iolog_f;

	if (ioengine_load(td))
		goto err;

	INIT_FLIST_HEAD(&td->io_log_list);
	INIT_FLIST_HEAD(&td->io_hist_list);
	INIT_FLIST_HEAD(&td->verify_list);
	INIT_FLIST_HEAD(&td->trim_list);
	td->io_hist_tree = RB_ROOT;

	if (td_io_init(td))
		goto err_io_init;

	if (td->io_ops->post_init && td->io_ops->post_init(td))
		goto err_io_init;

	set_epoch_time(td, td->o.log_unix_epoch | td->o.log_alternate_epoch,
		       td->o.log_alternate_epoch_clock_id);
	fio_getrusage(&td->ru_start);
	clear_io_state(td, 1);

	td_set_runstate(td, TD_RUNNING);
	td->flags |= TD_F_CHILD | TD_F_NEED_LOCK;
	td->parent = parent;
	return 0;

err_io_init:
	close_ioengine(td);
err:
	return 1;
}
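
/*
 * Worker teardown: fold this worker's stats into the parent and
 * release its files, options and IO engine.
 */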
static void io_workqueue_exit_worker_fn(struct submit_worker *sw,
					unsigned int *sum_cnt)
{
	struct thread_data *td = sw->priv;

	(*sum_cnt)++;

	/*
	 * io_workqueue_update_acct_fn() doesn't support per prio stats, and
	 * even if it did, offload can't be used with all async IO engines.
	 * If group reporting is set in the parent td, the group result
	 * generated by __show_run_stats() can still contain multiple prios
	 * from different offloaded jobs.
	 */
	sw->wq->td->ts.disable_prio_stat = 1;
	sum_thread_stats(&sw->wq->td->ts, &td->ts);

	fio_options_free(td);
	close_and_free_files(td);
	if (td->io_ops)
		close_ioengine(td);
	td_set_runstate(td, TD_EXITED);
}
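
/*
 * sum_val() moves a counter from a worker into the parent. With
 * CONFIG_SFAA it uses an atomic fetch-and-add; otherwise a plain add
 * is used and callers serialize via the stat locks below.
 */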
#ifdef CONFIG_SFAA
static void sum_val(uint64_t *dst, uint64_t *src)
{
	if (*src) {
		__sync_fetch_and_add(dst, *src);
		*src = 0;
	}
}
#else
static void sum_val(uint64_t *dst, uint64_t *src)
{
	if (*src) {
		*dst += *src;
		*src = 0;
	}
}
#endif
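
/*
 * Take/drop two stat locks in a consistent (address) order so that two
 * threads locking the same pair can never deadlock. These compile away
 * when CONFIG_SFAA provides atomic summing.
 */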
static void pthread_double_unlock(pthread_mutex_t *lock1,
				  pthread_mutex_t *lock2)
{
#ifndef CONFIG_SFAA
	pthread_mutex_unlock(lock1);
	pthread_mutex_unlock(lock2);
#endif
}

static void pthread_double_lock(pthread_mutex_t *lock1, pthread_mutex_t *lock2)
{
#ifndef CONFIG_SFAA
	if (lock1 < lock2) {
		pthread_mutex_lock(lock1);
		pthread_mutex_lock(lock2);
	} else {
		pthread_mutex_lock(lock2);
		pthread_mutex_lock(lock1);
	}
#endif
}
static void sum_ddir(struct thread_data *dst, struct thread_data *src,
		     enum fio_ddir ddir)
{
	pthread_double_lock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);

	sum_val(&dst->io_bytes[ddir], &src->io_bytes[ddir]);
	sum_val(&dst->io_blocks[ddir], &src->io_blocks[ddir]);
	sum_val(&dst->this_io_blocks[ddir], &src->this_io_blocks[ddir]);
	sum_val(&dst->this_io_bytes[ddir], &src->this_io_bytes[ddir]);
	sum_val(&dst->bytes_done[ddir], &src->bytes_done[ddir]);
	if (ddir == DDIR_READ)
		sum_val(&dst->bytes_verified, &src->bytes_verified);

	pthread_double_unlock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);
}
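
/*
 * Push this worker's per-ddir byte and block counters up into the
 * parent job so its accounting stays current while workers run.
 */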
static void io_workqueue_update_acct_fn(struct submit_worker *sw)
{
	struct thread_data *src = sw->priv;
	struct thread_data *dst = sw->wq->td;

	if (td_read(src))
		sum_ddir(dst, src, DDIR_READ);
	if (td_write(src))
		sum_ddir(dst, src, DDIR_WRITE);
	if (td_trim(src))
		sum_ddir(dst, src, DDIR_TRIM);
}
static struct workqueue_ops rated_wq_ops = {
	.fn = io_workqueue_fn,
	.pre_sleep_flush_fn = io_workqueue_pre_sleep_flush_fn,
	.pre_sleep_fn = io_workqueue_pre_sleep_fn,
	.update_acct_fn = io_workqueue_update_acct_fn,
	.alloc_worker_fn = io_workqueue_alloc_fn,
	.free_worker_fn = io_workqueue_free_fn,
	.init_worker_fn = io_workqueue_init_worker_fn,
	.exit_worker_fn = io_workqueue_exit_worker_fn,
};
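
/*
 * Entry points: with io_submit_mode=offload, IO submission is handed
 * off to a workqueue of submit workers sized by the job's iodepth; in
 * any other mode these do nothing.
 */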
int rate_submit_init(struct thread_data *td, struct sk_out *sk_out)
{
	if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
		return 0;

	return workqueue_init(td, &td->io_wq, &rated_wq_ops, td->o.iodepth, sk_out);
}

void rate_submit_exit(struct thread_data *td)
{
	if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
		return;

	workqueue_exit(&td->io_wq);
}