/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006-2012 Jens Axboe <axboe@kernel.dk>
 *
 * The license below covers all files distributed with fio unless otherwise
 * noted in the file itself.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <limits.h>
#include <signal.h>
#include <time.h>
#include <locale.h>
#include <assert.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/mman.h>

#include "fio.h"
#ifndef FIO_NO_HAVE_SHM_H
#include <sys/shm.h>
#endif
#include "hash.h"
#include "smalloc.h"
#include "verify.h"
#include "trim.h"
#include "diskutil.h"
#include "cgroup.h"
#include "profile.h"
#include "lib/rand.h"
#include "memalign.h"
#include "server.h"
#include "lib/getrusage.h"
#include "idletime.h"

static pthread_t disk_util_thread;
static struct fio_mutex *disk_thread_mutex;
static struct fio_mutex *startup_mutex;
static struct fio_mutex *writeout_mutex;
static struct flist_head *cgroup_list;
static char *cgroup_mnt;
static int exit_value;
static volatile int fio_abort;

struct io_log *agg_io_log[DDIR_RWDIR_CNT];

int groupid = 0;
unsigned int thread_number = 0;
unsigned int stat_number = 0;
unsigned int nr_process = 0;
unsigned int nr_thread = 0;
int shm_id = 0;
int temp_stall_ts;
unsigned long done_secs = 0;
volatile int disk_util_exit = 0;

#define PAGE_ALIGN(buf) \
	(char *) (((uintptr_t) (buf) + page_mask) & ~page_mask)
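
/*
 * Example: assuming 4KB pages (page_mask == 4095), PAGE_ALIGN rounds a
 * buffer at 0x1234 up to the next page boundary, 0x2000.
 */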

#define JOB_START_TIMEOUT	(5 * 1000)

static void sig_int(int sig)
{
	if (threads) {
		if (is_backend)
			fio_server_got_signal(sig);
		else {
			log_info("\nfio: terminating on signal %d\n", sig);
			fflush(stdout);
			exit_value = 128;
		}

		fio_terminate_threads(TERMINATE_ALL);
	}
}

static void sig_show_status(int sig)
{
	show_running_run_stats();
}
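
/*
 * Usage note: sending SIGUSR1 to a running fio process (kill -USR1 <pid>)
 * invokes this handler and dumps the current run statistics without
 * stopping the jobs.
 */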

static void set_sig_handlers(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGINT, &act, NULL);

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGTERM, &act, NULL);

/* Windows uses SIGBREAK as a quit signal from other applications */
#ifdef WIN32
	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGBREAK, &act, NULL);
#endif

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_show_status;
	act.sa_flags = SA_RESTART;
	sigaction(SIGUSR1, &act, NULL);

	if (is_backend) {
		memset(&act, 0, sizeof(act));
		act.sa_handler = sig_int;
		act.sa_flags = SA_RESTART;
		sigaction(SIGPIPE, &act, NULL);
	}
}

/*
 * Check if we are above the minimum rate given.
 */
static int __check_min_rate(struct thread_data *td, struct timeval *now,
			    enum fio_ddir ddir)
{
	unsigned long long bytes = 0;
	unsigned long iops = 0;
	unsigned long spent;
	unsigned long rate;
	unsigned int ratemin = 0;
	unsigned int rate_iops = 0;
	unsigned int rate_iops_min = 0;

	assert(ddir_rw(ddir));

	if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir])
		return 0;

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)
		return 0;

	iops += td->this_io_blocks[ddir];
	bytes += td->this_io_bytes[ddir];
	ratemin += td->o.ratemin[ddir];
	rate_iops += td->o.rate_iops[ddir];
	rate_iops_min += td->o.rate_iops_min[ddir];

	/*
	 * if rate blocks is set, sample is running
	 */
	if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) {
		spent = mtime_since(&td->lastrate[ddir], now);
		if (spent < td->o.ratecycle)
			return 0;

		if (td->o.rate[ddir]) {
			/*
			 * check bandwidth specified rate
			 */
			if (bytes < td->rate_bytes[ddir]) {
				log_err("%s: min rate %u not met\n",
					td->o.name, ratemin);
				return 1;
			} else {
				rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
				if (rate < ratemin ||
				    bytes < td->rate_bytes[ddir]) {
					log_err("%s: min rate %u not met, got"
						" %luKB/sec\n", td->o.name,
						ratemin, rate);
					return 1;
				}
			}
		} else {
			/*
			 * check iops specified rate
			 */
			if (iops < rate_iops) {
				log_err("%s: min iops rate %u not met\n",
					td->o.name, rate_iops);
				return 1;
			} else {
				rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
				if (rate < rate_iops_min ||
				    iops < td->rate_blocks[ddir]) {
					log_err("%s: min iops rate %u not met,"
						" got %lu\n", td->o.name,
						rate_iops_min, rate);
					return 1;
				}
			}
		}
	}

	td->rate_bytes[ddir] = bytes;
	td->rate_blocks[ddir] = iops;
	memcpy(&td->lastrate[ddir], now, sizeof(*now));
	return 0;
}

static int check_min_rate(struct thread_data *td, struct timeval *now,
			  uint64_t *bytes_done)
{
	int ret = 0;

	if (bytes_done[DDIR_READ])
		ret |= __check_min_rate(td, now, DDIR_READ);
	if (bytes_done[DDIR_WRITE])
		ret |= __check_min_rate(td, now, DDIR_WRITE);
	if (bytes_done[DDIR_TRIM])
		ret |= __check_min_rate(td, now, DDIR_TRIM);

	return ret;
}
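
/*
 * Illustration: a job with ratemin=1m must sustain roughly 1MB/sec of
 * completed IO in each active data direction (after the 2 second settle
 * period); otherwise check_min_rate() returns nonzero and the caller
 * aborts the job with EIO.
 */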

/*
 * When the job exits, we can cancel the in-flight IO if we are using async
 * io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
	struct flist_head *entry, *n;
	struct io_u *io_u;
	int r;

	/*
	 * get immediately available events, if any
	 */
	r = io_u_queued_complete(td, 0, NULL);
	if (r < 0)
		return;

	/*
	 * now cancel remaining active events
	 */
	if (td->io_ops->cancel) {
		flist_for_each_safe(entry, n, &td->io_u_busylist) {
			io_u = flist_entry(entry, struct io_u, list);

			/*
			 * if the io_u isn't in flight, then that generally
			 * means someone leaked an io_u. complain but fix
			 * it up, so we don't stall here.
			 */
			if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
				log_err("fio: non-busy IO on busy list\n");
				put_io_u(td, io_u);
			} else {
				r = td->io_ops->cancel(td, io_u);
				if (!r)
					put_io_u(td, io_u);
			}
		}
	}

	if (td->cur_depth)
		r = io_u_queued_complete(td, td->cur_depth, NULL);
}

/*
 * Helper to handle the final sync of a file. Works just like the normal
 * io path, just does everything sync.
 */
static int fio_io_sync(struct thread_data *td, struct fio_file *f)
{
	struct io_u *io_u = __get_io_u(td);
	int ret;

	if (!io_u)
		return 1;

	io_u->ddir = DDIR_SYNC;
	io_u->file = f;

	if (td_io_prep(td, io_u)) {
		put_io_u(td, io_u);
		return 1;
	}

requeue:
	ret = td_io_queue(td, io_u);
	if (ret < 0) {
		td_verror(td, io_u->error, "td_io_queue");
		put_io_u(td, io_u);
		return 1;
	} else if (ret == FIO_Q_QUEUED) {
		if (io_u_queued_complete(td, 1, NULL) < 0)
			return 1;
	} else if (ret == FIO_Q_COMPLETED) {
		if (io_u->error) {
			td_verror(td, io_u->error, "td_io_queue");
			return 1;
		}

		if (io_u_sync_complete(td, io_u, NULL) < 0)
			return 1;
	} else if (ret == FIO_Q_BUSY) {
		if (td_io_commit(td))
			return 1;
		goto requeue;
	}

	return 0;
}

static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
{
	int ret;

	if (fio_file_open(f))
		return fio_io_sync(td, f);

	if (td_io_open_file(td, f))
		return 1;

	ret = fio_io_sync(td, f);
	td_io_close_file(td, f);
	return ret;
}

static inline void __update_tv_cache(struct thread_data *td)
{
	fio_gettime(&td->tv_cache, NULL);
}

static inline void update_tv_cache(struct thread_data *td)
{
	if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask)
		__update_tv_cache(td);
}
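
/*
 * Note: the cached time is only refreshed once every tv_cache_mask + 1
 * calls, so hot paths avoid paying for a clock read on every io_u while
 * td->tv_cache stays reasonably fresh.
 */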

static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
	if (in_ramp_time(td))
		return 0;
	if (!td->o.timeout)
		return 0;
	if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000)
		return 1;

	return 0;
}

static int break_on_this_error(struct thread_data *td, enum fio_ddir ddir,
			       int *retptr)
{
	int ret = *retptr;

	if (ret < 0 || td->error) {
		int err = td->error;
		enum error_type_bit eb;

		if (ret < 0)
			err = -ret;

		eb = td_error_type(ddir, err);
		if (!(td->o.continue_on_error & (1 << eb)))
			return 1;

		if (td_non_fatal_error(td, eb, err)) {
			/*
			 * Continue with the I/Os in case of
			 * a non fatal error.
			 */
			update_error_count(td, err);
			td_clear_error(td);
			*retptr = 0;
			return 0;
		} else if (td->o.fill_device && err == ENOSPC) {
			/*
			 * We expect to hit this error if
			 * fill_device option is set.
			 */
			td_clear_error(td);
			td->terminate = 1;
			return 1;
		} else {
			/*
			 * Stop the I/O in case of a fatal
			 * error.
			 */
			update_error_count(td, err);
			return 1;
		}
	}

	return 0;
}
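
/*
 * Illustration: with continue_on_error=read in the job file, the read
 * error bit is set in td->o.continue_on_error, so a failed read is counted
 * and cleared here while a failed write still terminates the job.
 */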

static void check_update_rusage(struct thread_data *td)
{
	if (td->update_rusage) {
		td->update_rusage = 0;
		update_rusage_stat(td);
		fio_mutex_up(td->rusage_sem);
	}
}
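
/*
 * The stat code cannot safely sample a job's rusage from another thread,
 * so it sets ->update_rusage and blocks on ->rusage_sem; the job notices
 * the flag at convenient points (like here), refreshes its own stats, and
 * signals completion.
 */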

/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
static void do_verify(struct thread_data *td, uint64_t verify_bytes)
{
	uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
	struct fio_file *f;
	struct io_u *io_u;
	int ret, min_events;
	unsigned int i;

	dprint(FD_VERIFY, "starting loop\n");

	/*
	 * sync io first and invalidate cache, to make sure we really
	 * read from disk.
	 */
	for_each_file(td, f, i) {
		if (!fio_file_open(f))
			continue;
		if (fio_io_sync(td, f))
			break;
		if (file_invalidate_cache(td, f))
			break;
	}

	check_update_rusage(td);

	if (td->error)
		return;

	td_set_runstate(td, TD_VERIFYING);

	io_u = NULL;
	while (!td->terminate) {
		enum fio_ddir ddir;
		int ret2, full;

		update_tv_cache(td);
		check_update_rusage(td);

		if (runtime_exceeded(td, &td->tv_cache)) {
			__update_tv_cache(td);
			if (runtime_exceeded(td, &td->tv_cache)) {
				td->terminate = 1;
				break;
			}
		}

		if (flow_threshold_exceeded(td))
			continue;

		if (!td->o.experimental_verify) {
			io_u = __get_io_u(td);
			if (!io_u)
				break;

			if (get_next_verify(td, io_u)) {
				put_io_u(td, io_u);
				break;
			}

			if (td_io_prep(td, io_u)) {
				put_io_u(td, io_u);
				break;
			}
		} else {
			if (ddir_rw_sum(bytes_done) + td->o.rw_min_bs > verify_bytes)
				break;

			while ((io_u = get_io_u(td)) != NULL) {
				/*
				 * We are only interested in the places where
				 * we wrote or trimmed IOs. Turn those into
				 * reads for verification purposes.
				 */
				if (io_u->ddir == DDIR_READ) {
					/*
					 * Pretend we issued it for rwmix
					 * accounting
					 */
					td->io_issues[DDIR_READ]++;
					put_io_u(td, io_u);
					continue;
				} else if (io_u->ddir == DDIR_TRIM) {
					io_u->ddir = DDIR_READ;
					io_u->flags |= IO_U_F_TRIMMED;
					break;
				} else if (io_u->ddir == DDIR_WRITE) {
					io_u->ddir = DDIR_READ;
					break;
				} else {
					put_io_u(td, io_u);
					continue;
				}
			}

			if (!io_u)
				break;
		}

		if (td->o.verify_async)
			io_u->end_io = verify_io_u_async;
		else
			io_u->end_io = verify_io_u;

		ddir = io_u->ddir;

		ret = td_io_queue(td, io_u);
		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error) {
				ret = -io_u->error;
				clear_io_u(td, io_u);
			} else if (io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;

				/*
				 * zero read, fail
				 */
				if (!bytes) {
					td_verror(td, EIO, "full resid");
					put_io_u(td, io_u);
					break;
				}

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				io_u->offset += bytes;

				if (ddir_rw(io_u->ddir))
					td->ts.short_io_u[io_u->ddir]++;

				f = io_u->file;
				if (io_u->offset == f->real_file_size)
					goto sync_done;

				requeue_io_u(td, &io_u);
			} else {
sync_done:
				ret = io_u_sync_complete(td, io_u, bytes_done);
				if (ret < 0)
					break;
			}
			continue;
		case FIO_Q_QUEUED:
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret2 = td_io_commit(td);
			if (ret2 < 0)
				ret = ret2;
			break;
		default:
			assert(ret < 0);
			td_verror(td, -ret, "td_io_queue");
			break;
		}

		if (break_on_this_error(td, ddir, &ret))
			break;

		/*
		 * if we can queue more, do so. but check if there are
		 * completed io_u's first. Note that we can get BUSY even
		 * without IO queued, if the system is resource starved.
		 */
		full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
		if (full || !td->o.iodepth_batch_complete) {
			min_events = min(td->o.iodepth_batch_complete,
					 td->cur_depth);
			/*
			 * if the queue is full, we MUST reap at least 1 event
			 */
			if (full && !min_events)
				min_events = 1;

			do {
				/*
				 * Reap required number of io units, if any,
				 * and do the verification on them through
				 * the callback handler
				 */
				if (io_u_queued_complete(td, min_events, bytes_done) < 0) {
					ret = -1;
					break;
				}
			} while (full && (td->cur_depth > td->o.iodepth_low));
		}
		if (ret < 0)
			break;
	}

	check_update_rusage(td);

	if (!td->error) {
		min_events = td->cur_depth;

		if (min_events)
			ret = io_u_queued_complete(td, min_events, NULL);
	} else
		cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);

	dprint(FD_VERIFY, "exiting loop\n");
}

static int io_bytes_exceeded(struct thread_data *td)
{
	unsigned long long bytes;

	if (td_rw(td))
		bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
	else if (td_write(td))
		bytes = td->this_io_bytes[DDIR_WRITE];
	else if (td_read(td))
		bytes = td->this_io_bytes[DDIR_READ];
	else
		bytes = td->this_io_bytes[DDIR_TRIM];

	return bytes >= td->o.size;
}
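
/*
 * Note that for a mixed read/write job, only read and write bytes count
 * toward the configured size here; trim bytes only count for trim-only
 * jobs.
 */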

/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 *
 * Returns number of bytes written and trimmed.
 */
static uint64_t do_io(struct thread_data *td)
{
	uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
	unsigned int i;
	int ret = 0;

	if (in_ramp_time(td))
		td_set_runstate(td, TD_RAMP);
	else
		td_set_runstate(td, TD_RUNNING);

	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
		(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
		td->o.time_based) {
		struct timeval comp_time;
		int min_evts = 0;
		struct io_u *io_u;
		int ret2, full;
		enum fio_ddir ddir;

		check_update_rusage(td);

		if (td->terminate || td->done)
			break;

		update_tv_cache(td);

		if (runtime_exceeded(td, &td->tv_cache)) {
			__update_tv_cache(td);
			if (runtime_exceeded(td, &td->tv_cache)) {
				td->terminate = 1;
				break;
			}
		}

		if (flow_threshold_exceeded(td))
			continue;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		ddir = io_u->ddir;

		/*
		 * Add verification end_io handler if:
		 * - Asked to verify (!td_rw(td))
		 * - Or the io_u is from our verify list (mixed write/ver)
		 */
		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
		    ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
			if (td->o.verify_async)
				io_u->end_io = verify_io_u_async;
			else
				io_u->end_io = verify_io_u;
			td_set_runstate(td, TD_VERIFYING);
		} else if (in_ramp_time(td))
			td_set_runstate(td, TD_RAMP);
		else
			td_set_runstate(td, TD_RUNNING);

		ret = td_io_queue(td, io_u);
		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error) {
				ret = -io_u->error;
				clear_io_u(td, io_u);
			} else if (io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;
				struct fio_file *f = io_u->file;

				/*
				 * zero read, fail
				 */
				if (!bytes) {
					td_verror(td, EIO, "full resid");
					put_io_u(td, io_u);
					break;
				}

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				io_u->offset += bytes;

				if (ddir_rw(io_u->ddir))
					td->ts.short_io_u[io_u->ddir]++;

				if (io_u->offset == f->real_file_size)
					goto sync_done;

				requeue_io_u(td, &io_u);
			} else {
sync_done:
				if (__should_check_rate(td, DDIR_READ) ||
				    __should_check_rate(td, DDIR_WRITE) ||
				    __should_check_rate(td, DDIR_TRIM))
					fio_gettime(&comp_time, NULL);

				ret = io_u_sync_complete(td, io_u, bytes_done);
				if (ret < 0)
					break;
			}
			break;
		case FIO_Q_QUEUED:
			/*
			 * if the engine doesn't have a commit hook,
			 * the io_u is really queued. if it does have such
			 * a hook, it has to call io_u_queued() itself.
			 */
			if (td->io_ops->commit == NULL)
				io_u_queued(td, io_u);
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret2 = td_io_commit(td);
			if (ret2 < 0)
				ret = ret2;
			break;
		default:
			assert(ret < 0);
			put_io_u(td, io_u);
			break;
		}

		if (break_on_this_error(td, ddir, &ret))
			break;

		/*
		 * See if we need to complete some commands. Note that we
		 * can get BUSY even without IO queued, if the system is
		 * resource starved.
		 */
		full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
		if (full || !td->o.iodepth_batch_complete) {
			min_evts = min(td->o.iodepth_batch_complete,
				       td->cur_depth);
			/*
			 * if the queue is full, we MUST reap at least 1 event
			 */
			if (full && !min_evts)
				min_evts = 1;

			if (__should_check_rate(td, DDIR_READ) ||
			    __should_check_rate(td, DDIR_WRITE) ||
			    __should_check_rate(td, DDIR_TRIM))
				fio_gettime(&comp_time, NULL);

			do {
				ret = io_u_queued_complete(td, min_evts, bytes_done);
				if (ret < 0)
					break;

			} while (full && (td->cur_depth > td->o.iodepth_low));
		}

		if (ret < 0)
			break;
		if (!ddir_rw_sum(bytes_done) && !(td->io_ops->flags & FIO_NOIO))
			continue;

		if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) {
			if (check_min_rate(td, &comp_time, bytes_done)) {
				if (exitall_on_terminate)
					fio_terminate_threads(td->groupid);
				td_verror(td, EIO, "check_min_rate");
				break;
			}
		}

		if (td->o.thinktime) {
			unsigned long long b;

			b = ddir_rw_sum(td->io_blocks);
			if (!(b % td->o.thinktime_blocks)) {
				int left;

				if (td->o.thinktime_spin)
					usec_spin(td->o.thinktime_spin);

				left = td->o.thinktime - td->o.thinktime_spin;
				if (left)
					usec_sleep(td, left);
			}
		}
	}

	check_update_rusage(td);

	if (td->trim_entries)
		log_err("fio: %d trim entries leaked?\n", td->trim_entries);

	if (td->o.fill_device && td->error == ENOSPC) {
		td->error = 0;
		td->terminate = 1;
	}
	if (!td->error) {
		struct fio_file *f;

		i = td->cur_depth;
		if (i) {
			ret = io_u_queued_complete(td, i, bytes_done);
			if (td->o.fill_device && td->error == ENOSPC)
				td->error = 0;
		}

		if (should_fsync(td) && td->o.end_fsync) {
			td_set_runstate(td, TD_FSYNCING);

			for_each_file(td, f, i) {
				if (!fio_file_fsync(td, f))
					continue;

				log_err("fio: end_fsync failed for file %s\n",
					f->file_name);
			}
		}
	} else
		cleanup_pending_aio(td);

	/*
	 * stop job if we failed doing any IO
	 */
	if (!ddir_rw_sum(td->this_io_bytes))
		td->done = 1;

	return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
}

static void cleanup_io_u(struct thread_data *td)
{
	struct flist_head *entry, *n;
	struct io_u *io_u;

	flist_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = flist_entry(entry, struct io_u, list);

		flist_del(&io_u->list);

		if (td->io_ops->io_u_free)
			td->io_ops->io_u_free(td, io_u);

		fio_memfree(io_u, sizeof(*io_u));
	}

	free_io_mem(td);
}

static int init_io_u(struct thread_data *td)
{
	struct io_u *io_u;
	unsigned int max_bs, min_write;
	int cl_align, i, max_units;
	int data_xfer = 1;
	char *p;

	max_units = td->o.iodepth;
	max_bs = td_max_bs(td);
	min_write = td->o.min_bs[DDIR_WRITE];
	td->orig_buffer_size = (unsigned long long) max_bs
					* (unsigned long long) max_units;

	if ((td->io_ops->flags & FIO_NOIO) || !(td_read(td) || td_write(td)))
		data_xfer = 0;

	if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
		unsigned long bs;

		bs = td->orig_buffer_size + td->o.hugepage_size - 1;
		td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
	}

	if (td->orig_buffer_size != (size_t) td->orig_buffer_size) {
		log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
		return 1;
	}

	if (data_xfer && allocate_io_mem(td))
		return 1;

	if (td->o.odirect || td->o.mem_align ||
	    (td->io_ops->flags & FIO_RAWIO))
		p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
	else
		p = td->orig_buffer;

	cl_align = os_cache_line_size();

	for (i = 0; i < max_units; i++) {
		void *ptr;

		if (td->terminate)
			return 1;

		ptr = fio_memalign(cl_align, sizeof(*io_u));
		if (!ptr) {
			log_err("fio: unable to allocate aligned memory\n");
			break;
		}

		io_u = ptr;
		memset(io_u, 0, sizeof(*io_u));
		INIT_FLIST_HEAD(&io_u->list);
		dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);

		if (data_xfer) {
			io_u->buf = p;
			dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);

			if (td_write(td))
				io_u_fill_buffer(td, io_u, min_write, max_bs);
			if (td_write(td) && td->o.verify_pattern_bytes) {
				/*
				 * Fill the buffer with the pattern if we are
				 * going to be doing writes.
				 */
				fill_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
			}
		}

		io_u->index = i;
		io_u->flags = IO_U_F_FREE;
		flist_add(&io_u->list, &td->io_u_freelist);

		if (td->io_ops->io_u_init) {
			int ret = td->io_ops->io_u_init(td, io_u);

			if (ret) {
				log_err("fio: failed to init engine data: %d\n", ret);
				return 1;
			}
		}

		p += max_bs;
	}

	return 0;
}
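
/*
 * Buffer layout: one contiguous region of max_bs * iodepth bytes is
 * allocated (page aligned for O_DIRECT/raw engines) and each io_u is
 * handed a fixed max_bs-sized slice of it. For example, iodepth=4 with
 * bs=4k yields a single 16k region carved into four 4k buffers.
 */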

static int switch_ioscheduler(struct thread_data *td)
{
	char tmp[256], tmp2[128];
	FILE *f;
	int ret;

	if (td->io_ops->flags & FIO_DISKLESSIO)
		return 0;

	sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

	f = fopen(tmp, "r+");
	if (!f) {
		if (errno == ENOENT) {
			log_err("fio: os or kernel doesn't support IO scheduler"
				" switching\n");
			return 0;
		}
		td_verror(td, errno, "fopen iosched");
		return 1;
	}

	/*
	 * Set io scheduler.
	 */
	ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
	if (ferror(f) || ret != 1) {
		td_verror(td, errno, "fwrite");
		fclose(f);
		return 1;
	}

	rewind(f);

	/*
	 * Read back and check that the selected scheduler is now the default.
	 */
	ret = fread(tmp, 1, sizeof(tmp), f);
	if (ferror(f) || ret < 0) {
		td_verror(td, errno, "fread");
		fclose(f);
		return 1;
	}

	sprintf(tmp2, "[%s]", td->o.ioscheduler);
	if (!strstr(tmp, tmp2)) {
		log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
		td_verror(td, EINVAL, "iosched_switch");
		fclose(f);
		return 1;
	}

	fclose(f);
	return 0;
}
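
/*
 * Example: with ioscheduler=deadline and a sysfs root of /sys/block/sda,
 * this writes "deadline" to /sys/block/sda/queue/scheduler and expects the
 * read-back to contain "[deadline]", the kernel's marker for the active
 * scheduler.
 */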

static int keep_running(struct thread_data *td)
{
	if (td->done)
		return 0;
	if (td->o.time_based)
		return 1;
	if (td->o.loops) {
		td->o.loops--;
		return 1;
	}

	if (td->o.size != -1ULL && ddir_rw_sum(td->io_bytes) < td->o.size) {
		uint64_t diff;

		/*
		 * If the difference is less than the minimum IO size, we
		 * are done.
		 */
		diff = td->o.size - ddir_rw_sum(td->io_bytes);
		if (diff < td_max_bs(td))
			return 0;

		return 1;
	}

	return 0;
}
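
/*
 * Illustration: with size=1m and bs=4k, the job keeps running until less
 * than one full 4k block remains to be transferred, at which point the
 * residual is treated as done.
 */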

static int exec_string(const char *string)
{
	int ret, newlen = strlen(string) + strlen("sh -c \"\"") + 1;
	char *str;

	str = malloc(newlen);

	/*
	 * Quote the command, so that multi-word strings are passed to the
	 * shell as a single -c argument.
	 */
	sprintf(str, "sh -c \"%s\"", string);

	ret = system(str);
	if (ret == -1)
		log_err("fio: exec of cmd <%s> failed\n", str);

	free(str);
	return ret;
}
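
/*
 * Illustration: exec_prerun="echo preparing disks" is executed here as
 * sh -c "echo preparing disks" before the job starts issuing IO.
 */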

/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
{
	unsigned long long elapsed;
	struct thread_data *td = data;
	pthread_condattr_t attr;
	int clear_state;

	if (!td->o.use_thread) {
		setsid();
		td->pid = getpid();
	} else
		td->pid = gettid();

	fio_local_clock_init(td->o.use_thread);

	dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);

	INIT_FLIST_HEAD(&td->io_u_freelist);
	INIT_FLIST_HEAD(&td->io_u_busylist);
	INIT_FLIST_HEAD(&td->io_u_requeues);
	INIT_FLIST_HEAD(&td->io_log_list);
	INIT_FLIST_HEAD(&td->io_hist_list);
	INIT_FLIST_HEAD(&td->verify_list);
	INIT_FLIST_HEAD(&td->trim_list);
	INIT_FLIST_HEAD(&td->next_rand_list);
	pthread_mutex_init(&td->io_u_lock, NULL);
	td->io_hist_tree = RB_ROOT;

	pthread_condattr_init(&attr);
	pthread_cond_init(&td->verify_cond, &attr);
	pthread_cond_init(&td->free_cond, &attr);

	td_set_runstate(td, TD_INITIALIZED);
	dprint(FD_MUTEX, "up startup_mutex\n");
	fio_mutex_up(startup_mutex);
	dprint(FD_MUTEX, "wait on td->mutex\n");
	fio_mutex_down(td->mutex);
	dprint(FD_MUTEX, "done waiting on td->mutex\n");

	/*
	 * the ->mutex mutex is now no longer used, close it to avoid
	 * eating a file descriptor
	 */
	fio_mutex_remove(td->mutex);

	/*
	 * A new gid requires privilege, so we need to do this before setting
	 * the uid.
	 */
	if (td->o.gid != -1U && setgid(td->o.gid)) {
		td_verror(td, errno, "setgid");
		goto err;
	}
	if (td->o.uid != -1U && setuid(td->o.uid)) {
		td_verror(td, errno, "setuid");
		goto err;
	}

	/*
	 * If we have a gettimeofday() thread, make sure we exclude that
	 * thread from this job
	 */
	if (td->o.gtod_cpu)
		fio_cpu_clear(&td->o.cpumask, td->o.gtod_cpu);

	/*
	 * Set affinity first, in case it has an impact on the memory
	 * allocations.
	 */
	if (td->o.cpumask_set && fio_setaffinity(td->pid, td->o.cpumask) == -1) {
		td_verror(td, errno, "cpu_set_affinity");
		goto err;
	}

#ifdef CONFIG_LIBNUMA
	/* numa node setup */
	if (td->o.numa_cpumask_set || td->o.numa_memmask_set) {
		int ret;

		if (numa_available() < 0) {
			td_verror(td, errno, "Does not support NUMA API\n");
			goto err;
		}

		if (td->o.numa_cpumask_set) {
			ret = numa_run_on_node_mask(td->o.numa_cpunodesmask);
			if (ret == -1) {
				td_verror(td, errno,
					  "numa_run_on_node_mask failed\n");
				goto err;
			}
		}

		if (td->o.numa_memmask_set) {

			switch (td->o.numa_mem_mode) {
			case MPOL_INTERLEAVE:
				numa_set_interleave_mask(td->o.numa_memnodesmask);
				break;
			case MPOL_BIND:
				numa_set_membind(td->o.numa_memnodesmask);
				break;
			case MPOL_LOCAL:
				numa_set_localalloc();
				break;
			case MPOL_PREFERRED:
				numa_set_preferred(td->o.numa_mem_prefer_node);
				break;
			case MPOL_DEFAULT:
			default:
				break;
			}

		}
	}
#endif

	/*
	 * May alter parameters that init_io_u() will use, so we need to
	 * do this first.
	 */
	if (init_iolog(td))
		goto err;

	if (init_io_u(td))
		goto err;

	if (td->o.verify_async && verify_async_init(td))
		goto err;

	if (td->ioprio_set) {
		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
			td_verror(td, errno, "ioprio_set");
			goto err;
		}
	}

	if (td->o.cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt))
		goto err;

	errno = 0;
	if (nice(td->o.nice) == -1 && errno != 0) {
		td_verror(td, errno, "nice");
		goto err;
	}

	if (td->o.ioscheduler && switch_ioscheduler(td))
		goto err;

	if (!td->o.create_serialize && setup_files(td))
		goto err;

	if (td_io_init(td))
		goto err;

	if (init_random_map(td))
		goto err;

	if (td->o.exec_prerun) {
		if (exec_string(td->o.exec_prerun))
			goto err;
	}

	if (td->o.pre_read) {
		if (pre_read_files(td) < 0)
			goto err;
	}

	fio_verify_init(td);

	fio_gettime(&td->epoch, NULL);
	fio_getrusage(&td->ru_start);
	clear_state = 0;
	while (keep_running(td)) {
		uint64_t verify_bytes;

		fio_gettime(&td->start, NULL);
		memcpy(&td->bw_sample_time, &td->start, sizeof(td->start));
		memcpy(&td->iops_sample_time, &td->start, sizeof(td->start));
		memcpy(&td->tv_cache, &td->start, sizeof(td->start));

		if (td->o.ratemin[DDIR_READ] || td->o.ratemin[DDIR_WRITE] ||
				td->o.ratemin[DDIR_TRIM]) {
			memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
						sizeof(td->bw_sample_time));
			memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
						sizeof(td->bw_sample_time));
			memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
						sizeof(td->bw_sample_time));
		}

		if (clear_state)
			clear_io_state(td);

		prune_io_piece_log(td);

		verify_bytes = do_io(td);

		clear_state = 1;

		if (td_read(td) && td->io_bytes[DDIR_READ]) {
			elapsed = utime_since_now(&td->start);
			td->ts.runtime[DDIR_READ] += elapsed;
		}
		if (td_write(td) && td->io_bytes[DDIR_WRITE]) {
			elapsed = utime_since_now(&td->start);
			td->ts.runtime[DDIR_WRITE] += elapsed;
		}
		if (td_trim(td) && td->io_bytes[DDIR_TRIM]) {
			elapsed = utime_since_now(&td->start);
			td->ts.runtime[DDIR_TRIM] += elapsed;
		}

		if (td->error || td->terminate)
			break;

		if (!td->o.do_verify ||
		    td->o.verify == VERIFY_NONE ||
		    (td->io_ops->flags & FIO_UNIDIR))
			continue;

		clear_io_state(td);

		fio_gettime(&td->start, NULL);

		do_verify(td, verify_bytes);

		td->ts.runtime[DDIR_READ] += utime_since_now(&td->start);

		if (td->error || td->terminate)
			break;
	}

	update_rusage_stat(td);
	td->ts.runtime[DDIR_READ] = (td->ts.runtime[DDIR_READ] + 999) / 1000;
	td->ts.runtime[DDIR_WRITE] = (td->ts.runtime[DDIR_WRITE] + 999) / 1000;
	td->ts.runtime[DDIR_TRIM] = (td->ts.runtime[DDIR_TRIM] + 999) / 1000;
	td->ts.total_run_time = mtime_since_now(&td->epoch);
	td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
	td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
	td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];

	fio_mutex_down(writeout_mutex);
	if (td->bw_log) {
		if (td->o.bw_log_file) {
			finish_log_named(td, td->bw_log,
						td->o.bw_log_file, "bw");
		} else
			finish_log(td, td->bw_log, "bw");
	}
	if (td->lat_log) {
		if (td->o.lat_log_file) {
			finish_log_named(td, td->lat_log,
						td->o.lat_log_file, "lat");
		} else
			finish_log(td, td->lat_log, "lat");
	}
	if (td->slat_log) {
		if (td->o.lat_log_file) {
			finish_log_named(td, td->slat_log,
						td->o.lat_log_file, "slat");
		} else
			finish_log(td, td->slat_log, "slat");
	}
	if (td->clat_log) {
		if (td->o.lat_log_file) {
			finish_log_named(td, td->clat_log,
						td->o.lat_log_file, "clat");
		} else
			finish_log(td, td->clat_log, "clat");
	}
	if (td->iops_log) {
		if (td->o.iops_log_file) {
			finish_log_named(td, td->iops_log,
						td->o.iops_log_file, "iops");
		} else
			finish_log(td, td->iops_log, "iops");
	}

	fio_mutex_up(writeout_mutex);
	if (td->o.exec_postrun)
		exec_string(td->o.exec_postrun);

	if (exitall_on_terminate)
		fio_terminate_threads(td->groupid);

err:
	if (td->error)
		log_info("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error,
							td->verror);

	if (td->o.verify_async)
		verify_async_exit(td);

	close_and_free_files(td);
	cleanup_io_u(td);
	close_ioengine(td);
	cgroup_shutdown(td, &cgroup_mnt);

	if (td->o.cpumask_set) {
		int ret = fio_cpuset_exit(&td->o.cpumask);

		td_verror(td, ret, "fio_cpuset_exit");
	}

	/*
	 * do this very late, it will log file closing as well
	 */
	if (td->o.write_iolog_file)
		write_iolog_close(td);

	fio_mutex_remove(td->rusage_sem);
	td->rusage_sem = NULL;

	td_set_runstate(td, TD_EXITED);
	return (void *) (uintptr_t) td->error;
}

/*
 * We cannot pass the td data into a forked process, so attach the td and
 * pass it to the thread worker.
 */
static int fork_main(int shmid, int offset)
{
	struct thread_data *td;
	void *data, *ret;

#ifndef __hpux
	data = shmat(shmid, NULL, 0);
	if (data == (void *) -1) {
		int __err = errno;

		perror("shmat");
		return __err;
	}
#else
	/*
	 * HP-UX inherits shm mappings?
	 */
	data = threads;
#endif

	td = data + offset * sizeof(struct thread_data);
	ret = thread_main(td);
	shmdt(data);
	return (int) (uintptr_t) ret;
}

/*
 * Run over the job map and reap the threads that have exited, if any.
 */
static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
			 unsigned int *m_rate)
{
	struct thread_data *td;
	unsigned int cputhreads, realthreads, pending;
	int i, status, ret;

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	realthreads = pending = cputhreads = 0;
	for_each_td(td, i) {
		int flags = 0;

		/*
		 * ->io_ops is NULL for a thread that has closed its
		 * io engine
		 */
		if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
			cputhreads++;
		else
			realthreads++;

		if (!td->pid) {
			pending++;
			continue;
		}
		if (td->runstate == TD_REAPED)
			continue;
		if (td->o.use_thread) {
			if (td->runstate == TD_EXITED) {
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			continue;
		}

		flags = WNOHANG;
		if (td->runstate == TD_EXITED)
			flags = 0;

		/*
		 * check if someone quit or got killed in an unusual way
		 */
		ret = waitpid(td->pid, &status, flags);
		if (ret < 0) {
			if (errno == ECHILD) {
				log_err("fio: pid=%d disappeared %d\n",
						(int) td->pid, td->runstate);
				td->sig = ECHILD;
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			perror("waitpid");
		} else if (ret == td->pid) {
			if (WIFSIGNALED(status)) {
				int sig = WTERMSIG(status);

				if (sig != SIGTERM && sig != SIGUSR2)
					log_err("fio: pid=%d, got signal=%d\n",
							(int) td->pid, sig);
				td->sig = sig;
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			if (WIFEXITED(status)) {
				if (WEXITSTATUS(status) && !td->error)
					td->error = WEXITSTATUS(status);

				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
		}

		/*
		 * thread is not dead, continue
		 */
		pending++;
		continue;
reaped:
		(*nr_running)--;
		(*m_rate) -= ddir_rw_sum(td->o.ratemin);
		(*t_rate) -= ddir_rw_sum(td->o.rate);
		if (!td->pid)
			pending--;

		if (td->error)
			exit_value++;

		done_secs += mtime_since_now(&td->epoch) / 1000;
	}

	if (*nr_running == cputhreads && !pending && realthreads)
		fio_terminate_threads(TERMINATE_ALL);
}
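
/*
 * Job lifecycle, as driven by run_threads() below and reap_threads()
 * above: TD_NOT_CREATED -> TD_CREATED -> TD_INITIALIZED -> TD_RAMP or
 * TD_RUNNING -> TD_EXITED -> TD_REAPED.
 */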

/*
 * Main function for kicking off and reaping jobs, as needed.
 */
static void run_threads(void)
{
	struct thread_data *td;
	unsigned long spent;
	unsigned int i, todo, nr_running, m_rate, t_rate, nr_started;

	if (fio_pin_memory())
		return;

	if (fio_gtod_offload && fio_start_gtod_thread())
		return;

	fio_idle_prof_init();

	set_sig_handlers();

	if (output_format == FIO_OUTPUT_NORMAL) {
		log_info("Starting ");
		if (nr_thread)
			log_info("%d thread%s", nr_thread,
						nr_thread > 1 ? "s" : "");
		if (nr_process) {
			if (nr_thread)
				log_info(" and ");
			log_info("%d process%s", nr_process,
						nr_process > 1 ? "es" : "");
		}
		log_info("\n");
		fflush(stdout);
	}

	todo = thread_number;
	nr_running = 0;
	nr_started = 0;
	m_rate = t_rate = 0;

	for_each_td(td, i) {
		print_status_init(td->thread_number - 1);

		if (!td->o.create_serialize)
			continue;

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_files(td)) {
			exit_value++;
			if (td->error)
				log_err("fio: pid=%d, err=%d/%s\n",
					(int) td->pid, td->error, td->verror);
			td_set_runstate(td, TD_REAPED);
			todo--;
		} else {
			struct fio_file *f;
			unsigned int j;

			/*
			 * for sharing to work, each job must always open
			 * its own files. so close them, if we opened them
			 * for creation
			 */
			for_each_file(td, f, j) {
				if (fio_file_open(f))
					td_io_close_file(td, f);
			}
		}
	}

	/* start idle threads before io threads start to run */
	fio_idle_prof_start();

	set_genesis_time();

	while (todo) {
		struct thread_data *map[REAL_MAX_JOBS];
		struct timeval this_start;
		int this_jobs = 0, left;

		/*
		 * create threads (TD_NOT_CREATED -> TD_CREATED)
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_NOT_CREATED)
				continue;

			/*
			 * never got a chance to start, killed by other
			 * thread for some reason
			 */
			if (td->terminate) {
				todo--;
				continue;
			}

			if (td->o.start_delay) {
				spent = mtime_since_genesis();

				if (td->o.start_delay * 1000 > spent)
					continue;
			}

			if (td->o.stonewall && (nr_started || nr_running)) {
				dprint(FD_PROCESS, "%s: stonewall wait\n",
							td->o.name);
				break;
			}

			init_disk_util(td);

			td->rusage_sem = fio_mutex_init(FIO_MUTEX_LOCKED);
			td->update_rusage = 0;

			/*
			 * Set state to created. Thread will transition
			 * to TD_INITIALIZED when it's done setting up.
			 */
			td_set_runstate(td, TD_CREATED);
			map[this_jobs++] = td;
			nr_started++;

			if (td->o.use_thread) {
				int ret;

				dprint(FD_PROCESS, "will pthread_create\n");
				ret = pthread_create(&td->thread, NULL,
							thread_main, td);
				if (ret) {
					log_err("pthread_create: %s\n",
							strerror(ret));
					nr_started--;
					break;
				}
				ret = pthread_detach(td->thread);
				if (ret)
					log_err("pthread_detach: %s",
							strerror(ret));
			} else {
				pid_t pid;
				dprint(FD_PROCESS, "will fork\n");
				pid = fork();
				if (!pid) {
					int ret = fork_main(shm_id, i);

					_exit(ret);
				} else if (i == fio_debug_jobno)
					*fio_debug_jobp = pid;
			}
			dprint(FD_MUTEX, "wait on startup_mutex\n");
			if (fio_mutex_down_timeout(startup_mutex, 10)) {
				log_err("fio: job startup hung? exiting.\n");
				fio_terminate_threads(TERMINATE_ALL);
				fio_abort = 1;
				nr_started--;
				break;
			}
			dprint(FD_MUTEX, "done waiting on startup_mutex\n");
		}

		/*
		 * Wait for the started threads to transition to
		 * TD_INITIALIZED.
		 */
		fio_gettime(&this_start, NULL);
		left = this_jobs;
		while (left && !fio_abort) {
			if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
				break;

			usleep(100000);

			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				if (td->runstate == TD_INITIALIZED) {
					map[i] = NULL;
					left--;
				} else if (td->runstate >= TD_EXITED) {
					map[i] = NULL;
					left--;
					todo--;
					nr_running++; /* work-around... */
				}
			}
		}

		if (left) {
			log_err("fio: %d job%s failed to start\n", left,
					left > 1 ? "s" : "");
			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				kill(td->pid, SIGTERM);
			}
			break;
		}

		/*
		 * start created threads (TD_INITIALIZED -> TD_RUNNING).
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_INITIALIZED)
				continue;

			if (in_ramp_time(td))
				td_set_runstate(td, TD_RAMP);
			else
				td_set_runstate(td, TD_RUNNING);
			nr_running++;
			nr_started--;
			m_rate += ddir_rw_sum(td->o.ratemin);
			t_rate += ddir_rw_sum(td->o.rate);
			todo--;
			fio_mutex_up(td->mutex);
		}

		reap_threads(&nr_running, &t_rate, &m_rate);

		if (todo) {
			if (is_backend)
				fio_server_idle_loop();
			else
				usleep(100000);
		}
	}

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);

		if (is_backend)
			fio_server_idle_loop();
		else
			usleep(10000);
	}

	fio_idle_prof_stop();

	update_io_ticks();
	fio_unpin_memory();
}

void wait_for_disk_thread_exit(void)
{
	fio_mutex_down(disk_thread_mutex);
}

static void free_disk_util(void)
{
	disk_util_start_exit();
	wait_for_disk_thread_exit();
	disk_util_prune_entries();
}

static void *disk_thread_main(void *data)
{
	int ret = 0;

	fio_mutex_up(startup_mutex);

	while (threads && !ret) {
		usleep(DISK_UTIL_MSEC * 1000);
		if (!threads)
			break;
		ret = update_io_ticks();

		if (!is_backend)
			print_thread_status();
	}

	fio_mutex_up(disk_thread_mutex);
	return NULL;
}

static int create_disk_util_thread(void)
{
	int ret;

	setup_disk_util();

	disk_thread_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);

	ret = pthread_create(&disk_util_thread, NULL, disk_thread_main, NULL);
	if (ret) {
		fio_mutex_remove(disk_thread_mutex);
		log_err("Can't create disk util thread: %s\n", strerror(ret));
		return 1;
	}

	ret = pthread_detach(disk_util_thread);
	if (ret) {
		fio_mutex_remove(disk_thread_mutex);
		log_err("Can't detach disk util thread: %s\n", strerror(ret));
		return 1;
	}

	dprint(FD_MUTEX, "wait on startup_mutex\n");
	fio_mutex_down(startup_mutex);
	dprint(FD_MUTEX, "done waiting on startup_mutex\n");
	return 0;
}

int fio_backend(void)
{
	struct thread_data *td;
	int i;

	if (exec_profile) {
		if (load_profile(exec_profile))
			return 1;
		free(exec_profile);
		exec_profile = NULL;
	}
	if (!thread_number)
		return 0;

	if (write_bw_log) {
		setup_log(&agg_io_log[DDIR_READ], 0);
		setup_log(&agg_io_log[DDIR_WRITE], 0);
		setup_log(&agg_io_log[DDIR_TRIM], 0);
	}

	startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
	if (startup_mutex == NULL)
		return 1;
	writeout_mutex = fio_mutex_init(FIO_MUTEX_UNLOCKED);
	if (writeout_mutex == NULL)
		return 1;

	set_genesis_time();
	create_disk_util_thread();

	cgroup_list = smalloc(sizeof(*cgroup_list));
	INIT_FLIST_HEAD(cgroup_list);

	run_threads();

	if (!fio_abort) {
		show_run_stats();
		if (write_bw_log) {
			__finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log");
			__finish_log(agg_io_log[DDIR_WRITE],
					"agg-write_bw.log");
			__finish_log(agg_io_log[DDIR_TRIM],
					"agg-trim_bw.log");
		}
	}

	for_each_td(td, i)
		fio_options_free(td);

	free_disk_util();
	cgroup_kill(cgroup_list);
	sfree(cgroup_list);
	sfree(cgroup_mnt);

	fio_mutex_remove(startup_mutex);
	fio_mutex_remove(writeout_mutex);
	fio_mutex_remove(disk_thread_mutex);
	return exit_value;
}