uint64_t bytes_done[DDIR_RWDIR_CNT];
int deadlock_loop_cnt;
bool clear_state;
- int res, ret;
+ int ret;
sk_out_assign(sk_out);
free(fd);
* another thread is checking its io_u's for overlap
*/
if (td_offload_overlap(td)) {
- int res = pthread_mutex_lock(&overlap_check);
- assert(res == 0);
+ int res;
+
+ res = pthread_mutex_lock(&overlap_check);
+	if (res) {
+		td->error = res;
+		goto err;
+	}
}
td_set_runstate(td, TD_FINISHING);
if (td_offload_overlap(td)) {
+ int res;
+
res = pthread_mutex_unlock(&overlap_check);
- assert(res == 0);
+	if (res) {
+		td->error = res;
+		goto err;
+	}
}
update_rusage_stat(td);
+#include <errno.h>
#include <signal.h>
+#include <stdio.h>
+#include <string.h>
#include <unistd.h>
#ifdef CONFIG_HAVE_TIMERFD_CREATE
#include <sys/timerfd.h>
return;
ret = write_to_pipe(helper_data->pipe[1], &data, sizeof(data));
- assert(ret == 1);
+	if (ret != 1) {
+		log_err("failed to write action into pipe, err %i:%s\n", errno, strerror(errno));
+		abort();
+	}
}
void helper_reset(void)
{
const bool needs_lock = td_async_processing(td);
struct io_u *io_u = NULL;
- int ret;
if (td->stop_io)
return NULL;
io_u_set(td, io_u, IO_U_F_IN_CUR_DEPTH);
io_u->ipo = NULL;
} else if (td_async_processing(td)) {
+ int ret;
/*
* We ran out, wait for async verify threads to finish and
* return one
*/
assert(!(td->flags & TD_F_CHILD));
ret = pthread_cond_wait(&td->free_cond, &td->io_u_lock);
- assert(ret == 0);
- if (!td->error)
+	if (fio_unlikely(ret != 0)) {
+		td->error = ret;
+	} else if (!td->error)
goto again;
}
#include <assert.h>
#include <sys/types.h>
#include <dirent.h>
+#include <errno.h>
#include "fio.h"
#include "diskutil.h"
* flag is now set
*/
if (td_offload_overlap(td)) {
- int res = pthread_mutex_unlock(&overlap_check);
- assert(res == 0);
+ int res;
+
+ res = pthread_mutex_unlock(&overlap_check);
+	if (fio_unlikely(res != 0)) {
+		log_err("failed to unlock overlap check mutex, err: %i:%s\n", res, strerror(res));
+		abort();
+	}
}
assert(fio_file_open(io_u->file));
*
*/
#include <assert.h>
+#include <errno.h>
+#include <pthread.h>
+
#include "fio.h"
#include "ioengines.h"
#include "lib/getrusage.h"
* threads as they assess overlap.
*/
res = pthread_mutex_lock(&overlap_check);
- assert(res == 0);
+	if (fio_unlikely(res != 0)) {
+		log_err("failed to lock overlap check mutex, err: %i:%s\n", res, strerror(res));
+		abort();
+	}
retry:
for_each_td(td, i) {
continue;
res = pthread_mutex_unlock(&overlap_check);
- assert(res == 0);
+	if (fio_unlikely(res != 0)) {
+		log_err("failed to unlock overlap check mutex, err: %i:%s\n", res, strerror(res));
+		abort();
+	}
res = pthread_mutex_lock(&overlap_check);
- assert(res == 0);
+	if (fio_unlikely(res != 0)) {
+		log_err("failed to lock overlap check mutex, err: %i:%s\n", res, strerror(res));
+		abort();
+	}
goto retry;
}
}
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <poll.h>
void fio_server_send_start(struct thread_data *td)
{
struct sk_out *sk_out = pthread_getspecific(sk_out_key);
-
- assert(sk_out->sk != -1);
+	if (!sk_out || sk_out->sk == -1) {
+		log_err("pthread getting specific for key failed, sk_out %p, sk %i\n",
+			sk_out, sk_out ? sk_out->sk : -1);
+		abort();
+	}
fio_net_queue_cmd(FIO_NET_CMD_SERVER_START, NULL, 0, NULL, SK_F_SIMPLE);
}
#include <sys/stat.h>
#include <unistd.h>
+#include "compiler/compiler.h"
#include "os/os.h"
#include "file.h"
#include "fio.h"
static void zone_lock(struct thread_data *td, const struct fio_file *f,
struct fio_zone_info *z)
{
+#ifndef NDEBUG
struct zoned_block_device_info *zbd = f->zbd_info;
- uint32_t nz = z - zbd->zone_info;
-
+ uint32_t const nz = z - zbd->zone_info;
/* A thread should never lock zones outside its working area. */
assert(f->min_zone <= nz && nz < f->max_zone);
-
assert(z->has_wp);
+#endif
/*
* Lock the io_u target zone. The zone will be unlocked if io_u offset
static inline void zone_unlock(struct fio_zone_info *z)
{
- int ret;
-
assert(z->has_wp);
-	ret = pthread_mutex_unlock(&z->mutex);
-	assert(!ret);
+	if (fio_unlikely(pthread_mutex_unlock(&z->mutex) != 0))
+		abort();
}
static inline struct fio_zone_info *zbd_get_zone(const struct fio_file *f,
const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];
int res = 0;
- assert(min_bs);
+ if (fio_unlikely(0 == min_bs))
+ return 1;
dprint(FD_ZBD, "%s: examining zones %u .. %u\n",
f->file_name, zbd_zone_idx(f, zb), zbd_zone_idx(f, ze));
static void zbd_put_io(struct thread_data *td, const struct io_u *io_u)
{
const struct fio_file *f = io_u->file;
- struct zoned_block_device_info *zbd_info = f->zbd_info;
struct fio_zone_info *z;
- assert(zbd_info);
+ assert(f->zbd_info);
z = zbd_offset_to_zone(f, io_u->offset);
assert(z->has_wp);