/*
 * Rated submission helpers
 *
 * Copyright (C) 2015 Jens Axboe <axboe@kernel.dk>
 *
 */
#include "fio.h"
#include "ioengines.h"
#include "lib/getrusage.h"
#include "rate-submit.h"

static void check_overlap(struct io_u *io_u)
{
        int i;
        struct thread_data *td;
        bool overlap = false;

        do {
                /*
                 * Allow only one thread to check for overlap at a
                 * time to prevent two threads from thinking the coast
                 * is clear and then submitting IOs that overlap with
                 * each other
                 *
                 * If an overlap is found, release the lock and
                 * re-acquire it before checking again to give other
                 * threads a chance to make progress
                 *
                 * If an overlap is not found, release the lock when the
                 * io_u's IO_U_F_FLIGHT flag is set so that this io_u
                 * can be checked by other threads as they assess overlap
                 */
                pthread_mutex_lock(&overlap_check);
                for_each_td(td, i) {
                        if (td->runstate <= TD_SETTING_UP ||
                            td->runstate >= TD_FINISHING ||
                            !td->o.serialize_overlap ||
                            td->o.io_submit_mode != IO_MODE_OFFLOAD)
                                continue;

                        overlap = in_flight_overlap(&td->io_u_all, io_u);
                        if (overlap) {
                                pthread_mutex_unlock(&overlap_check);
                                break;
                        }
                }
        } while (overlap);
}

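/*
 * Workqueue callback that submits one io_u on behalf of the parent
 * job and accounts its completion against the worker's depth.
 */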
static int io_workqueue_fn(struct submit_worker *sw,
                           struct workqueue_work *work)
{
        struct io_u *io_u = container_of(work, struct io_u, work);
        const enum fio_ddir ddir = io_u->ddir;
        struct thread_data *td = sw->priv;
        int ret, error;

        if (td->o.serialize_overlap)
                check_overlap(io_u);

        dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid());

        io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);

        td->cur_depth++;

        do {
                /*
                 * Offer the io_u to the engine; while it reports busy,
                 * reap one completion to make room and try again.
                 */
                ret = td_io_queue(td, io_u);
                if (ret != FIO_Q_BUSY)
                        break;
                ret = io_u_queued_complete(td, 1);
                if (ret > 0)
                        td->cur_depth -= ret;
                else if (ret < 0)
                        break;
                io_u_clear(td, io_u, IO_U_F_FLIGHT);
        } while (1);

        dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid());

        error = io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL);

        if (ret == FIO_Q_COMPLETED)
                td->cur_depth--;
        else if (ret == FIO_Q_QUEUED) {
                unsigned int min_evts;

                /*
                 * At iodepth=1 this IO must complete before the worker
                 * can take on more work; otherwise just poll.
                 */
                if (td->o.iodepth == 1)
                        min_evts = 1;
                else
                        min_evts = 0;

                ret = io_u_queued_complete(td, min_evts);
                if (ret > 0)
                        td->cur_depth -= ret;
        }

        /* on error, wake the parent so it can notice and wind down */
        if (error || td->error)
                pthread_cond_signal(&td->parent->free_cond);

        return 0;
}

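/*
 * Tell the workqueue whether this worker still has IO that must be
 * flushed before it may sleep: anything queued, pending at depth, or
 * in flight keeps it awake, unless the job has already errored.
 */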
static bool io_workqueue_pre_sleep_flush_fn(struct submit_worker *sw)
{
        struct thread_data *td = sw->priv;

        if (td->error)
                return false;
        if (td->io_u_queued || td->cur_depth || td->io_u_in_flight)
                return true;

        return false;
}

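/*
 * Drain outstanding IO before the worker sleeps, crediting reaped
 * completions against the current depth.
 */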
static void io_workqueue_pre_sleep_fn(struct submit_worker *sw)
{
        struct thread_data *td = sw->priv;
        int ret;

        ret = io_u_quiesce(td);
        if (ret > 0)
                td->cur_depth -= ret;
}

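/*
 * Each worker carries a private, zeroed thread_data that shadows the
 * parent job; it is populated by the init callback below and released
 * again in io_workqueue_free_fn().
 */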
static int io_workqueue_alloc_fn(struct submit_worker *sw)
{
        struct thread_data *td;

        td = calloc(1, sizeof(*td));
        sw->priv = td;
        return 0;
}

static void io_workqueue_free_fn(struct submit_worker *sw)
{
        free(sw->priv);
        sw->priv = NULL;
}

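/*
 * Clone the parent's options, stats and file list into the worker's
 * private thread_data, then bring up a worker-local ioengine instance.
 */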
static int io_workqueue_init_worker_fn(struct submit_worker *sw)
{
        struct thread_data *parent = sw->wq->td;
        struct thread_data *td = sw->priv;

        memcpy(&td->o, &parent->o, sizeof(td->o));
        memcpy(&td->ts, &parent->ts, sizeof(td->ts));
        td->o.uid = td->o.gid = -1U;
        dup_files(td, parent);
        td->eo = parent->eo;
        fio_options_mem_dupe(td);

        if (ioengine_load(td))
                goto err;

        td->pid = gettid();

        INIT_FLIST_HEAD(&td->io_log_list);
        INIT_FLIST_HEAD(&td->io_hist_list);
        INIT_FLIST_HEAD(&td->verify_list);
        INIT_FLIST_HEAD(&td->trim_list);
        td->io_hist_tree = RB_ROOT;

        /* offloaded workers each submit at a fixed depth of 1 */
        td->o.iodepth = 1;
        if (td_io_init(td))
                goto err_io_init;

        if (td->io_ops->post_init && td->io_ops->post_init(td))
                goto err_io_init;

        set_epoch_time(td, td->o.log_unix_epoch);
        fio_getrusage(&td->ru_start);
        clear_io_state(td, 1);

        td_set_runstate(td, TD_RUNNING);
        td->flags |= TD_F_CHILD | TD_F_NEED_LOCK;
        td->parent = parent;
        return 0;

err_io_init:
        close_ioengine(td);
err:
        return 1;
}

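/*
 * Fold this worker's stats back into the parent job and tear down its
 * files and ioengine; the *sum_cnt == 1 argument tells
 * sum_thread_stats() this is the first worker being merged.
 */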
static void io_workqueue_exit_worker_fn(struct submit_worker *sw,
                                        unsigned int *sum_cnt)
{
        struct thread_data *td = sw->priv;

        (*sum_cnt)++;
        sum_thread_stats(&sw->wq->td->ts, &td->ts, *sum_cnt == 1);

        fio_options_free(td);
        close_and_free_files(td);
        if (td->io_ops)
                close_ioengine(td);
        td_set_runstate(td, TD_EXITED);
}

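/*
 * Move *src into *dst and zero the source. With CONFIG_SFAA the
 * compiler's atomic fetch-and-add is used; otherwise callers are
 * expected to serialize via the stat locks taken in sum_ddir().
 */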
#ifdef CONFIG_SFAA
static void sum_val(uint64_t *dst, uint64_t *src)
{
        if (*src) {
                __sync_fetch_and_add(dst, *src);
                *src = 0;
        }
}
#else
static void sum_val(uint64_t *dst, uint64_t *src)
{
        if (*src) {
                *dst += *src;
                *src = 0;
        }
}
#endif

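/*
 * Take/release both stat locks when we cannot sum atomically. The
 * locks are always acquired in address order, so two threads merging
 * in opposite directions cannot deadlock.
 */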
static void pthread_double_unlock(pthread_mutex_t *lock1,
                                  pthread_mutex_t *lock2)
{
#ifndef CONFIG_SFAA
        pthread_mutex_unlock(lock1);
        pthread_mutex_unlock(lock2);
#endif
}

static void pthread_double_lock(pthread_mutex_t *lock1, pthread_mutex_t *lock2)
{
#ifndef CONFIG_SFAA
        if (lock1 < lock2) {
                pthread_mutex_lock(lock1);
                pthread_mutex_lock(lock2);
        } else {
                pthread_mutex_lock(lock2);
                pthread_mutex_lock(lock1);
        }
#endif
}

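/*
 * Merge one data direction's byte and block counters from a worker
 * (src) into the parent (dst).
 */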
static void sum_ddir(struct thread_data *dst, struct thread_data *src,
                     enum fio_ddir ddir)
{
        pthread_double_lock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);

        sum_val(&dst->io_bytes[ddir], &src->io_bytes[ddir]);
        sum_val(&dst->io_blocks[ddir], &src->io_blocks[ddir]);
        sum_val(&dst->this_io_blocks[ddir], &src->this_io_blocks[ddir]);
        sum_val(&dst->this_io_bytes[ddir], &src->this_io_bytes[ddir]);
        sum_val(&dst->bytes_done[ddir], &src->bytes_done[ddir]);

        pthread_double_unlock(&dst->io_wq.stat_lock, &src->io_wq.stat_lock);
}

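/*
 * Accounting hook: periodically push the worker's counters up to the
 * parent for each data direction the job uses.
 */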
static void io_workqueue_update_acct_fn(struct submit_worker *sw)
{
        struct thread_data *src = sw->priv;
        struct thread_data *dst = sw->wq->td;

        if (td_read(src))
                sum_ddir(dst, src, DDIR_READ);
        if (td_write(src))
                sum_ddir(dst, src, DDIR_WRITE);
        if (td_trim(src))
                sum_ddir(dst, src, DDIR_TRIM);
}

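/*
 * Callbacks wiring a rated-submission worker pool into the generic
 * workqueue code.
 */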
static struct workqueue_ops rated_wq_ops = {
        .fn                     = io_workqueue_fn,
        .pre_sleep_flush_fn     = io_workqueue_pre_sleep_flush_fn,
        .pre_sleep_fn           = io_workqueue_pre_sleep_fn,
        .update_acct_fn         = io_workqueue_update_acct_fn,
        .alloc_worker_fn        = io_workqueue_alloc_fn,
        .free_worker_fn         = io_workqueue_free_fn,
        .init_worker_fn         = io_workqueue_init_worker_fn,
        .exit_worker_fn         = io_workqueue_exit_worker_fn,
};

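/*
 * Set up the offload workqueue, sized by the job's iodepth. As a rough
 * sketch (not a complete job file), a job along the lines of:
 *
 *      [offload-job]
 *      io_submit_mode=offload
 *      rate=1m
 *      iodepth=4
 *
 * gets a pool of up to iodepth workers, each submitting at depth 1
 * (see io_workqueue_init_worker_fn()).
 */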
int rate_submit_init(struct thread_data *td, struct sk_out *sk_out)
{
        if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
                return 0;

        return workqueue_init(td, &td->io_wq, &rated_wq_ops, td->o.iodepth, sk_out);
}

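/*
 * Tear down the offload workqueue; a no-op for jobs not running in
 * offload mode.
 */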
void rate_submit_exit(struct thread_data *td)
{
        if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
                return;

        workqueue_exit(&td->io_wq);
}