/*
 * Rated submission helpers
 *
 * Copyright (C) 2015 Jens Axboe <axboe@kernel.dk>
 *
 */
#include <assert.h>
#include "fio.h"
#include "ioengines.h"
#include "lib/getrusage.h"
#include "rate-submit.h"
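
/*
 * check_overlap() scans every job's in-flight io_u list under the
 * global overlap_check mutex (defined elsewhere in fio) so that only
 * one worker at a time can declare an I/O free of overlaps.
 */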
static void check_overlap(struct io_u *io_u)
{
	int res;
	struct thread_data *td;

	/*
	 * Allow only one thread to check for overlap at a time to prevent two
	 * threads from thinking the coast is clear and then submitting IOs
	 * that overlap with each other.
	 *
	 * If an overlap is found, release the lock and re-acquire it before
	 * checking again to give other threads a chance to make progress.
	 *
	 * If no overlap is found, release the lock when the io_u's
	 * IO_U_F_FLIGHT flag is set so that this io_u can be checked by other
	 * threads as they assess overlap.
	 */
	res = pthread_mutex_lock(&overlap_check);
	assert(res == 0);

retry:
	for_each_td(td) {
		if (td->runstate <= TD_SETTING_UP ||
		    td->runstate >= TD_FINISHING ||
		    !td->o.serialize_overlap ||
		    td->o.io_submit_mode != IO_MODE_OFFLOAD)
			continue;

		if (!in_flight_overlap(&td->io_u_all, io_u))
			continue;

		res = pthread_mutex_unlock(&overlap_check);
		assert(res == 0);
		res = pthread_mutex_lock(&overlap_check);
		assert(res == 0);
		goto retry;
	} end_for_each();
}
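
/*
 * Workqueue handler: runs in a worker thread and submits one io_u on
 * behalf of the parent job, retrying busy queues and reaping
 * completions so the worker stays within its depth.
 */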
static int io_workqueue_fn(struct submit_worker *sw,
			   struct workqueue_work *work)
{
	struct io_u *io_u = container_of(work, struct io_u, work);
	const enum fio_ddir ddir = io_u->ddir;
	struct thread_data *td = sw->priv;
	int ret, error;

	if (td->o.serialize_overlap)
		check_overlap(io_u);

	dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid());

	io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);

	td->cur_depth++;

	do {
		ret = td_io_queue(td, io_u);
		if (ret != FIO_Q_BUSY)
			break;
		ret = io_u_queued_complete(td, 1);
		if (ret > 0)
			td->cur_depth -= ret;
		else if (ret < 0)
			break;
		io_u_clear(td, io_u, IO_U_F_FLIGHT);
	} while (1);

	dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid());

	error = io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL);

	if (ret == FIO_Q_COMPLETED)
		td->cur_depth--;
	else if (ret == FIO_Q_QUEUED) {
		unsigned int min_evts;

		if (td->o.iodepth == 1)
			min_evts = 1;
		else
			min_evts = 0;

		ret = io_u_queued_complete(td, min_evts);
		if (ret > 0)
			td->cur_depth -= ret;
	}

	if (error || td->error) {
		pthread_mutex_lock(&td->io_u_lock);
		pthread_cond_signal(&td->parent->free_cond);
		pthread_mutex_unlock(&td->io_u_lock);
	}

	return 0;
}
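
/*
 * Before a worker is allowed to sleep, tell the workqueue whether it
 * still has queued or in-flight I/O that needs flushing.
 */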
static bool io_workqueue_pre_sleep_flush_fn(struct submit_worker *sw)
{
	struct thread_data *td = sw->priv;

	if (td->error)
		return false;
	if (td->io_u_queued || td->cur_depth || td->io_u_in_flight)
		return true;

	return false;
}
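
/*
 * Flush path taken right before the worker sleeps: quiesce pending
 * I/O and drop the completed requests from the tracked depth.
 */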
static void io_workqueue_pre_sleep_fn(struct submit_worker *sw)
{
	struct thread_data *td = sw->priv;
	int ret;

	ret = io_u_quiesce(td);
	if (ret > 0)
		td->cur_depth -= ret;
}
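
/*
 * Each worker gets a private thread_data that shadows the parent job.
 */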
static int io_workqueue_alloc_fn(struct submit_worker *sw)
{
	struct thread_data *td;

	td = calloc(1, sizeof(*td));
	sw->priv = td;
	return 0;
}
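
/*
 * Counterpart to io_workqueue_alloc_fn().
 */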
static void io_workqueue_free_fn(struct submit_worker *sw)
{
	free(sw->priv);
	sw->priv = NULL;
}
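
/*
 * Initialize a worker as a lightweight clone of the parent job: copy
 * options and stats, duplicate the file list, and load a private
 * instance of the IO engine with a queue depth of one.
 */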
static int io_workqueue_init_worker_fn(struct submit_worker *sw)
{
	struct thread_data *parent = sw->wq->td;
	struct thread_data *td = sw->priv;

	memcpy(&td->o, &parent->o, sizeof(td->o));
	memcpy(&td->ts, &parent->ts, sizeof(td->ts));
	td->o.uid = td->o.gid = -1U;
	dup_files(td, parent);
	td->eo = parent->eo;
	fio_options_mem_dupe(td);

	if (ioengine_load(td))
		goto err;

	td->pid = gettid();

	INIT_FLIST_HEAD(&td->io_log_list);
	INIT_FLIST_HEAD(&td->io_hist_list);
	INIT_FLIST_HEAD(&td->verify_list);
	INIT_FLIST_HEAD(&td->trim_list);
	td->io_hist_tree = RB_ROOT;

	td->o.iodepth = 1;
	if (td_io_init(td))
		goto err_io_init;

	if (td->io_ops->post_init && td->io_ops->post_init(td))
		goto err_io_init;

	set_epoch_time(td, td->o.log_unix_epoch);
	fio_getrusage(&td->ru_start);
	clear_io_state(td, 1);

	td_set_runstate(td, TD_RUNNING);
	td->flags |= TD_F_CHILD | TD_F_NEED_LOCK;
	td->parent = parent;

	return 0;

err_io_init:
	close_ioengine(td);
err:
	return 1;
}
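
/*
 * Fold a dying worker's stats into the parent and release everything
 * taken in io_workqueue_init_worker_fn().
 */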
static void io_workqueue_exit_worker_fn(struct submit_worker *sw,
					unsigned int *sum_cnt)
{
	struct thread_data *td = sw->priv;

	(*sum_cnt)++;

	/*
	 * io_workqueue_update_acct_fn() doesn't support per prio stats, and
	 * even if it did, offload can't be used with all async IO engines.
	 * If group reporting is set in the parent td, the group result
	 * generated by __show_run_stats() can still contain multiple prios
	 * from different offloaded jobs.
	 */
	sw->wq->td->ts.disable_prio_stat = 1;
	sum_thread_stats(&sw->wq->td->ts, &td->ts);

	fio_options_free(td);
	close_and_free_files(td);
	if (td->io_ops)
		close_ioengine(td);
	td_set_runstate(td, TD_EXITED);
}
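
/*
 * Drain a worker counter into the parent. With CONFIG_SFAA the add is
 * a single atomic fetch-and-add; otherwise callers must provide
 * exclusion, which sum_ddir() does via the stat locks below.
 */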
#ifdef CONFIG_SFAA
static void sum_val(uint64_t *dst, uint64_t *src)
{
	if (*src) {
		__sync_fetch_and_add(dst, *src);
		*src = 0;
	}
}
#else
static void sum_val(uint64_t *dst, uint64_t *src)
{
	if (*src) {
		*dst += *src;
		*src = 0;
	}
}
#endif
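
/*
 * Helpers for holding both stat locks on the non-atomic path. They
 * compile to no-ops under CONFIG_SFAA, where sum_val() is atomic.
 * Locks are taken in address order so two concurrent callers cannot
 * deadlock against each other.
 */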
static void pthread_double_unlock(pthread_mutex_t *lock1,
				  pthread_mutex_t *lock2)
{
#ifndef CONFIG_SFAA
	pthread_mutex_unlock(lock1);
	pthread_mutex_unlock(lock2);
#endif
}

static void pthread_double_lock(pthread_mutex_t *lock1, pthread_mutex_t *lock2)
{
#ifndef CONFIG_SFAA
	if (lock1 < lock2) {
		pthread_mutex_lock(lock1);
		pthread_mutex_lock(lock2);
	} else {
		pthread_mutex_lock(lock2);
		pthread_mutex_lock(lock1);
	}
#endif
}
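
/*
 * Move one data direction's byte and block counters from a worker
 * into the parent.
 */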
static void sum_ddir(struct thread_data *dst, struct thread_data *src,
		     enum fio_ddir ddir)
{
	pthread_double_lock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);

	sum_val(&dst->io_bytes[ddir], &src->io_bytes[ddir]);
	sum_val(&dst->io_blocks[ddir], &src->io_blocks[ddir]);
	sum_val(&dst->this_io_blocks[ddir], &src->this_io_blocks[ddir]);
	sum_val(&dst->this_io_bytes[ddir], &src->this_io_bytes[ddir]);
	sum_val(&dst->bytes_done[ddir], &src->bytes_done[ddir]);

	pthread_double_unlock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);
}
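
/*
 * Workqueue accounting hook: invoked so the parent job's totals
 * reflect what its workers have completed so far.
 */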
static void io_workqueue_update_acct_fn(struct submit_worker *sw)
{
	struct thread_data *src = sw->priv;
	struct thread_data *dst = sw->wq->td;

	if (td_read(src))
		sum_ddir(dst, src, DDIR_READ);
	if (td_write(src))
		sum_ddir(dst, src, DDIR_WRITE);
	if (td_trim(src))
		sum_ddir(dst, src, DDIR_TRIM);
}
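
/*
 * Glue between the generic workqueue and the submission workers above.
 */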
static struct workqueue_ops rated_wq_ops = {
	.fn			= io_workqueue_fn,
	.pre_sleep_flush_fn	= io_workqueue_pre_sleep_flush_fn,
	.pre_sleep_fn		= io_workqueue_pre_sleep_fn,
	.update_acct_fn		= io_workqueue_update_acct_fn,
	.alloc_worker_fn	= io_workqueue_alloc_fn,
	.free_worker_fn		= io_workqueue_free_fn,
	.init_worker_fn		= io_workqueue_init_worker_fn,
	.exit_worker_fn		= io_workqueue_exit_worker_fn,
};
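
/*
 * Set up the offload workqueue for jobs using io_submit_mode=offload;
 * the worker pool is sized by the job's iodepth.
 */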
int rate_submit_init(struct thread_data *td, struct sk_out *sk_out)
{
	if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
		return 0;

	return workqueue_init(td, &td->io_wq, &rated_wq_ops, td->o.iodepth, sk_out);
}
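
/*
 * Tear down the offload workqueue, if one was created.
 */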
void rate_submit_exit(struct thread_data *td)
{
	if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
		return;

	workqueue_exit(&td->io_wq);
}