/*
 * Rated submission helpers
 *
 * Copyright (C) 2015 Jens Axboe <axboe@kernel.dk>
 *
 */
#include "fio.h"
#include "ioengines.h"
#include "lib/getrusage.h"
#include "rate-submit.h"
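
/*
 * With serialize_overlap set in offload mode, a worker must not submit an
 * io_u that overlaps I/O already in flight for any job. check_overlap()
 * loops until the candidate io_u is clear of all in-flight I/O.
 */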
static void check_overlap(struct io_u *io_u)
{
	int i;
	struct thread_data *td;
	bool overlap = false;

	do {
		/*
		 * Allow only one thread to check for overlap at a
		 * time to prevent two threads from thinking the coast
		 * is clear and then submitting IOs that overlap with
		 * each other.
		 */
		pthread_mutex_lock(&overlap_check);
		for_each_td(td, i) {
			if (td->runstate <= TD_SETTING_UP ||
			    td->runstate >= TD_FINISHING ||
			    !td->o.serialize_overlap ||
			    td->o.io_submit_mode != IO_MODE_OFFLOAD)
				continue;

			overlap = in_flight_overlap(&td->io_u_all, io_u);
			if (overlap)
				break;
		}
		pthread_mutex_unlock(&overlap_check);
	} while (overlap);
}
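
/*
 * Work item handler: runs in a submit worker and pushes one io_u through
 * the worker's private thread_data, reaping completions as dictated by
 * the queueing status td_io_queue() returns.
 */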
static int io_workqueue_fn(struct submit_worker *sw,
			   struct workqueue_work *work)
{
	struct io_u *io_u = container_of(work, struct io_u, work);
	const enum fio_ddir ddir = io_u->ddir;
	struct thread_data *td = sw->priv;
	int ret;

	if (td->o.serialize_overlap)
		check_overlap(io_u);

	dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid());
	io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
	td->cur_depth++;

	do {
		ret = td_io_queue(td, io_u);
		if (ret != FIO_Q_BUSY)
			break;
		ret = io_u_queued_complete(td, 1);
		if (ret > 0)
			td->cur_depth -= ret;
		io_u_clear(td, io_u, IO_U_F_FLIGHT);
	} while (1);

	dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid());

	io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL);

	if (ret == FIO_Q_COMPLETED)
		td->cur_depth--;
	else if (ret == FIO_Q_QUEUED) {
		unsigned int min_evts;

		if (td->o.iodepth == 1)
			min_evts = 1;
		else
			min_evts = 0;

		ret = io_u_queued_complete(td, min_evts);
		if (ret > 0)
			td->cur_depth -= ret;
	} else if (ret == FIO_Q_BUSY) {
		ret = io_u_queued_complete(td, td->cur_depth);
		if (ret > 0)
			td->cur_depth -= ret;
	}

	return 0;
}
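
/*
 * Before a worker is allowed to sleep, the workqueue asks whether it still
 * has I/O outstanding (the flush check below), then lets it quiesce
 * whatever is left.
 */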
static bool io_workqueue_pre_sleep_flush_fn(struct submit_worker *sw)
{
	struct thread_data *td = sw->priv;

	if (td->io_u_queued || td->cur_depth || td->io_u_in_flight)
		return true;

	return false;
}

static void io_workqueue_pre_sleep_fn(struct submit_worker *sw)
{
	struct thread_data *td = sw->priv;
	int ret;

	ret = io_u_quiesce(td);
	if (ret > 0)
		td->cur_depth -= ret;
}
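
/*
 * Worker allocation: each submit worker runs on its own zeroed
 * thread_data, stashed in sw->priv and freed again when the worker dies.
 */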
static int io_workqueue_alloc_fn(struct submit_worker *sw)
{
	struct thread_data *td;

	td = calloc(1, sizeof(*td));
	sw->priv = td;
	return 0;
}

static void io_workqueue_free_fn(struct submit_worker *sw)
{
	free(sw->priv);
	sw->priv = NULL;
}
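
/*
 * Clone the parent job into the worker's shadow thread_data: options,
 * stats, and files are duplicated and a private ioengine instance loaded,
 * so the worker can submit I/O independently of its siblings.
 */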
static int io_workqueue_init_worker_fn(struct submit_worker *sw)
{
	struct thread_data *parent = sw->wq->td;
	struct thread_data *td = sw->priv;

	memcpy(&td->o, &parent->o, sizeof(td->o));
	memcpy(&td->ts, &parent->ts, sizeof(td->ts));
	td->o.uid = td->o.gid = -1U;
	dup_files(td, parent);
	td->eo = parent->eo;
	fio_options_mem_dupe(td);

	if (ioengine_load(td))
		goto err;

	td->pid = gettid();

	INIT_FLIST_HEAD(&td->io_log_list);
	INIT_FLIST_HEAD(&td->io_hist_list);
	INIT_FLIST_HEAD(&td->verify_list);
	INIT_FLIST_HEAD(&td->trim_list);
	td->io_hist_tree = RB_ROOT;

	td->o.iodepth = 1;
	if (td_io_init(td))
		goto err_io_init;

	set_epoch_time(td, td->o.log_unix_epoch);
	fio_getrusage(&td->ru_start);
	clear_io_state(td, 1);

	td_set_runstate(td, TD_RUNNING);
	td->flags |= TD_F_CHILD | TD_F_NEED_LOCK;
	td->parent = parent;
	return 0;

err_io_init:
	close_ioengine(td);
err:
	return 1;
}
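
/*
 * On worker exit, fold the shadow stats back into the parent; *sum_cnt
 * tracks how many workers have been summed so far.
 */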
static void io_workqueue_exit_worker_fn(struct submit_worker *sw,
					unsigned int *sum_cnt)
{
	struct thread_data *td = sw->priv;

	(*sum_cnt)++;
	sum_thread_stats(&sw->wq->td->ts, &td->ts, *sum_cnt == 1);

	fio_options_free(td);
	close_and_free_files(td);
	if (td->io_ops)
		close_ioengine(td);
	td_set_runstate(td, TD_EXITED);
}
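
/*
 * Move a worker-local counter into the parent's. With CONFIG_SFAA the add
 * is done with __sync_fetch_and_add(); otherwise a plain add suffices, as
 * callers serialize through the stat locks taken in sum_ddir().
 */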
#ifdef CONFIG_SFAA
static void sum_val(uint64_t *dst, uint64_t *src)
{
	if (*src) {
		__sync_fetch_and_add(dst, *src);
		*src = 0;
	}
}
#else
static void sum_val(uint64_t *dst, uint64_t *src)
{
	if (*src) {
		*dst += *src;
		*src = 0;
	}
}
#endif

static void pthread_double_unlock(pthread_mutex_t *lock1,
				  pthread_mutex_t *lock2)
{
	pthread_mutex_unlock(lock1);
	pthread_mutex_unlock(lock2);
}
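
/*
 * Take the two stat locks in a stable address order, so two threads
 * locking the same pair from opposite ends cannot deadlock.
 */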
static void pthread_double_lock(pthread_mutex_t *lock1, pthread_mutex_t *lock2)
{
	if (lock1 < lock2) {
		pthread_mutex_lock(lock1);
		pthread_mutex_lock(lock2);
	} else {
		pthread_mutex_lock(lock2);
		pthread_mutex_lock(lock1);
	}
}
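
/*
 * Fold one data direction's byte and block counters from a worker into
 * the parent, with both stat locks held.
 */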
static void sum_ddir(struct thread_data *dst, struct thread_data *src,
		     enum fio_ddir ddir)
{
	pthread_double_lock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);

	sum_val(&dst->io_bytes[ddir], &src->io_bytes[ddir]);
	sum_val(&dst->io_blocks[ddir], &src->io_blocks[ddir]);
	sum_val(&dst->this_io_blocks[ddir], &src->this_io_blocks[ddir]);
	sum_val(&dst->this_io_bytes[ddir], &src->this_io_bytes[ddir]);
	sum_val(&dst->bytes_done[ddir], &src->bytes_done[ddir]);

	pthread_double_unlock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);
}
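
/*
 * Called by the workqueue when the parent needs fresh accounting; only
 * the data directions the job actually runs are summed.
 */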
static void io_workqueue_update_acct_fn(struct submit_worker *sw)
{
	struct thread_data *src = sw->priv;
	struct thread_data *dst = sw->wq->td;

	if (td_read(src))
		sum_ddir(dst, src, DDIR_READ);
	if (td_write(src))
		sum_ddir(dst, src, DDIR_WRITE);
	if (td_trim(src))
		sum_ddir(dst, src, DDIR_TRIM);
}
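
/*
 * The callback table handed to the generic workqueue code; the parent
 * thread offloads each io_u to these hooks instead of submitting inline.
 */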
static struct workqueue_ops rated_wq_ops = {
	.fn			= io_workqueue_fn,
	.pre_sleep_flush_fn	= io_workqueue_pre_sleep_flush_fn,
	.pre_sleep_fn		= io_workqueue_pre_sleep_fn,
	.update_acct_fn		= io_workqueue_update_acct_fn,
	.alloc_worker_fn	= io_workqueue_alloc_fn,
	.free_worker_fn		= io_workqueue_free_fn,
	.init_worker_fn		= io_workqueue_init_worker_fn,
	.exit_worker_fn		= io_workqueue_exit_worker_fn,
};
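
/*
 * Only io_submit_mode=offload routes submission through the workqueue; in
 * the default inline mode these calls are no-ops. The worker pool is
 * bounded by the job's iodepth.
 */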
int rate_submit_init(struct thread_data *td, struct sk_out *sk_out)
{
	if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
		return 0;

	return workqueue_init(td, &td->io_wq, &rated_wq_ops, td->o.iodepth, sk_out);
}

void rate_submit_exit(struct thread_data *td)
{
	if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
		return;

	workqueue_exit(&td->io_wq);
}
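
/*
 * For illustration, a job that exercises this path could look like:
 *
 *   fio --name=rated --ioengine=libaio --rw=randwrite --iodepth=8 \
 *       --rate_iops=1000 --io_submit_mode=offload --serialize_overlap=1 \
 *       --filename=/dev/nvme0n1
 *
 * (device name is just an example). rate_submit_init() then builds the
 * worker pool and rate_submit_exit() tears it down when the job ends.
 */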