#define should_fsync(td) ((td_write(td) || td_rw(td)) && (!(td)->odirect || (td)->override_sync))
-static sem_t startup_sem;
+static volatile int startup_sem;
#define TERMINATE_ALL (-1)
#define JOB_START_TIMEOUT (5 * 1000)
int loops = 50;
do {
- lrand48_r(&td->random_state, &r);
+ r = os_random_long(&td->random_state);
b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX+1.0));
rb = b + (td->file_offset / td->min_bs);
loops--;
if (td->min_bs == td->max_bs)
buflen = td->min_bs;
else {
- lrand48_r(&td->bsrange_state, &r);
+ r = os_random_long(&td->bsrange_state);
buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
}
double r;
while (len) {
- drand48_r(&td->verify_state, &r);
+ r = os_random_double(&td->verify_state);
/*
 * lrand48_r seems to be broken and only fills the bottom
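The os_random_*() helpers themselves are not part of these hunks. A minimal sketch of what the glibc-backed versions could look like, assuming they are thin wrappers around the reentrant *rand48_r() calls they replace:

#include <stdlib.h>

/* Sketch only: assumes the state type is glibc's struct drand48_data. */
typedef struct drand48_data os_random_state_t;

static inline long os_random_long(os_random_state_t *rs)
{
        long val;

        lrand48_r(rs, &val);
        return val;
}

static inline double os_random_double(os_random_state_t *rs)
{
        double val;

        drand48_r(rs, &val);
        return val;
}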
{
unsigned char *p = (unsigned char *) io_u->buf;
unsigned long c;
- int ret;
p += sizeof(*hdr);
c = crc32(p, hdr->len - sizeof(*hdr));
- ret = c != hdr->crc32;
- if (ret) {
+ if (c != hdr->crc32) {
fprintf(stderr, "crc32: verify failed at %llu/%u\n", io_u->offset, io_u->buflen);
fprintf(stderr, "crc32: wanted %lx, got %lx\n", hdr->crc32, c);
+ return 1;
}
- return ret;
+ return 0;
}
static int verify_io_u_md5(struct verify_header *hdr, struct io_u *io_u)
{
unsigned char *p = (unsigned char *) io_u->buf;
struct md5_ctx md5_ctx;
- int ret;
memset(&md5_ctx, 0, sizeof(md5_ctx));
p += sizeof(*hdr);
md5_update(&md5_ctx, p, hdr->len - sizeof(*hdr));
- ret = memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
- if (ret) {
+ if (memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash))) {
fprintf(stderr, "md5: verify failed at %llu/%u\n", io_u->offset, io_u->buflen);
hexdump(hdr->md5_digest, sizeof(hdr->md5_digest));
hexdump(md5_ctx.hash, sizeof(md5_ctx.hash));
+ return 1;
}
- return ret;
+ return 0;
}
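Both verifiers read their expected values out of a verify_header placed at the front of io_u->buf. From the fields referenced above, the struct needs at least something like the following; the exact layout, the digest size, and any additional tag fields are assumptions, since the definition is not in these hunks:

struct verify_header {
        unsigned int len;               /* bytes covered, header included */
        unsigned long crc32;            /* expected CRC for crc32 verification */
        char md5_digest[16];            /* expected digest for md5 verification */
};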
static int verify_io_u(struct io_u *io_u)
* Check if it's time to seed a new data direction.
*/
if (elapsed >= td->rwmixcycle) {
- unsigned long v;
+ int v;
long r;
- lrand48_r(&td->random_state, &r);
- v = 100UL * r / (unsigned long) (RAND_MAX + 1.0);
+ r = os_random_long(&td->rwmix_state);
+ v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
if (v < td->rwmixread)
td->rwmix_ddir = DDIR_READ;
else
/*
* fill body of io_u->buf with random data and add a header with the
- * (eg) sha1sum of that data.
+ * crc32 or md5 sum of that data.
*/
static void populate_io_u(struct thread_data *td, struct io_u *io_u)
{
return 1;
}
-#define queue_full(td) (list_empty(&(td)->io_u_freelist))
+#define queue_full(td) list_empty(&(td)->io_u_freelist)
struct io_u *__get_io_u(struct thread_data *td)
{
- struct io_u *io_u;
+ struct io_u *io_u = NULL;
- if (queue_full(td))
- return NULL;
+ if (!queue_full(td)) {
+ io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
+
+ io_u->error = 0;
+ io_u->resid = 0;
+ list_del(&io_u->list);
+ list_add(&io_u->list, &td->io_u_busylist);
+ td->cur_depth++;
+ }
- io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
- io_u->error = 0;
- io_u->resid = 0;
- list_del(&io_u->list);
- list_add(&io_u->list, &td->io_u_busylist);
- td->cur_depth++;
return io_u;
}
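The freelist/busylist accounting above implies a matching release path once an io_u completes; presumably something along these lines (the actual helper is not shown in these hunks):

static void put_io_u(struct thread_data *td, struct io_u *io_u)
{
        /* return the io_u from the busylist to the freelist */
        list_del(&io_u->list);
        list_add(&io_u->list, &td->io_u_freelist);
        td->cur_depth--;
}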
{
struct io_piece *ipo;
- if (list_empty(&td->io_hist_list))
- return 1;
+ if (!list_empty(&td->io_hist_list)) {
+ ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
- ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
- list_del(&ipo->list);
+ list_del(&ipo->list);
- io_u->offset = ipo->offset;
- io_u->buflen = ipo->len;
- io_u->ddir = DDIR_READ;
- free(ipo);
- return 0;
+ io_u->offset = ipo->offset;
+ io_u->buflen = ipo->len;
+ io_u->ddir = DDIR_READ;
+ free(ipo);
+ return 0;
+ }
+
+ return 1;
}
static int sync_td(struct thread_data *td)
td_set_runstate(td, TD_RUNNING);
}
+/*
+ * Main IO worker function. It retrieves io_u's to process and queues
+ * and reaps them, checking for rate and errors along the way.
+ */
static void do_io(struct thread_data *td)
{
struct io_completion_data icd;
return 0;
}
-static void cleanup_allocs(struct thread_data *td)
-{
- if (td->directory)
- free(td->directory);
- if (td->iolog_file)
- free(td->iolog_file);
- if (td->exec_prerun)
- free(td->exec_prerun);
- if (td->exec_postrun)
- free(td->exec_postrun);
- if (td->ioscheduler)
- free(td->ioscheduler);
- if (td->sysfs_root)
- free(td->sysfs_root);
-}
-
static int create_file(struct thread_data *td, unsigned long long size,
int extend)
{
goto err;
td_set_runstate(td, TD_INITIALIZED);
- sem_post(&startup_sem);
- sem_wait(&td->mutex);
+ fio_sem_up(&startup_sem);
+ fio_sem_down(&td->mutex);
if (!td->create_serialize && setup_file(td))
goto err;
}
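Note that startup_sem is now a plain volatile int rather than a sem_t, so fio_sem_up()/fio_sem_down() cannot simply be the POSIX calls they replace here. One possible shape for the helpers, purely as a sketch, is a counting flag with a polling wait:

#include <unistd.h>

static inline void fio_sem_init(volatile int *sem, int val)
{
        *sem = val;
}

static inline void fio_sem_up(volatile int *sem)
{
        (*sem)++;
}

static inline void fio_sem_down(volatile int *sem)
{
        /* polling wait; fine for the rare startup handshake */
        while (*sem <= 0)
                usleep(10000);

        (*sem)--;
}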
if (td->mmap)
munmap(td->mmap, td->file_size);
- cleanup_allocs(td);
cleanup_io(td);
cleanup_io_u(td);
td_set_runstate(td, TD_EXITED);
static void *fio_pin_memory(void)
{
- long pagesize, pages;
+ unsigned long long phys_mem;
void *ptr;
if (!mlock_size)
/*
* Don't allow mlock of more than real_mem-128MB
*/
- pagesize = sysconf(_SC_PAGESIZE);
- pages = sysconf(_SC_PHYS_PAGES);
- if (pages != -1 && pagesize != -1) {
- unsigned long long real_mem = pages * pagesize;
-
- if ((mlock_size + 128 * 1024 * 1024) > real_mem) {
- mlock_size = real_mem - 128 * 1024 * 1024;
+ phys_mem = os_phys_mem();
+ if (phys_mem) {
+ if ((mlock_size + 128 * 1024 * 1024) > phys_mem) {
+ mlock_size = phys_mem - 128 * 1024 * 1024;
printf("fio: limiting mlocked memory to %lluMiB\n",
mlock_size >> 20);
}
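os_phys_mem() absorbs the sysconf() arithmetic removed above; on Linux it can be little more than that code, returning 0 when the amount cannot be determined so that the caller skips the clamp (a sketch):

#include <unistd.h>

static inline unsigned long long os_phys_mem(void)
{
        long pagesize = sysconf(_SC_PAGESIZE);
        long pages = sysconf(_SC_PHYS_PAGES);

        if (pagesize == -1 || pages == -1)
                return 0;

        return (unsigned long long) pages * (unsigned long long) pagesize;
}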
*/
td_set_runstate(td, TD_CREATED);
map[this_jobs++] = td;
- sem_init(&startup_sem, 0, 1);
+ fio_sem_init(&startup_sem, 1);
nr_started++;
if (td->use_thread) {
}
} else {
if (fork())
- sem_wait(&startup_sem);
+ fio_sem_down(&startup_sem);
else {
fork_main(shm_id, i);
exit(0);
m_rate += td->ratemin;
t_rate += td->rate;
todo--;
- sem_post(&td->mutex);
+ fio_sem_up(&td->mutex);
}
reap_threads(&nr_running, &t_rate, &m_rate);
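The startup ordering itself is unchanged by the semaphore rename: the forked parent's fio_sem_down(&startup_sem) pairs with the worker's fio_sem_up(&startup_sem) once it reaches TD_INITIALIZED, and each worker then blocks in fio_sem_down(&td->mutex) until the main loop has accounted for its rate settings and releases it with fio_sem_up(&td->mutex).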