int groupid = 0;
int thread_number = 0;
+int nr_process = 0;
+int nr_thread = 0;
int shm_id = 0;
int temp_stall_ts;
case FIO_Q_COMPLETED:
if (io_u->error)
ret = -io_u->error;
- else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+ else if (io_u->resid) {
int bytes = io_u->xfer_buflen - io_u->resid;
+ /*
+ * zero-byte transfer: requeueing would retry the identical request, so fail
+ */
+ if (!bytes) {
+ td_verror(td, ENODATA, "full resid");
+ put_io_u(td, io_u);
+ break;
+ }
io_u->xfer_buflen = io_u->resid;
io_u->xfer_buf += bytes;
requeue_io_u(td, &io_u);
td_set_runstate(td, TD_RUNNING);
}
-/*
- * Not really an io thread, all it does is burn CPU cycles in the specified
- * manner.
- */
-static void do_cpuio(struct thread_data *td)
-{
- struct timeval e;
- int split = 100 / td->cpuload;
- int i = 0;
-
- while (!td->terminate) {
- fio_gettime(&e, NULL);
-
- if (runtime_exceeded(td, &e))
- break;
-
- if (!(i % split))
- __usec_sleep(10000);
- else
- usec_sleep(td, 10000);
-
- i++;
- }
-}
-
/*
* Main IO worker function. It retrieves io_u's to process and queues
* and reaps them, checking for rate and errors along the way.
case FIO_Q_COMPLETED:
if (io_u->error)
ret = -io_u->error;
- else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+ else if (io_u->resid) {
int bytes = io_u->xfer_buflen - io_u->resid;
+ /*
+ * zero-byte transfer: requeueing would retry the identical request, so fail
+ */
+ if (!bytes) {
+ td_verror(td, ENODATA, "full resid");
+ put_io_u(td, io_u);
+ break;
+ }
+
io_u->xfer_buflen = io_u->resid;
io_u->xfer_buf += bytes;
requeue_io_u(td, &io_u);
int i, max_units;
char *p;
- if (td->io_ops->flags & FIO_CPUIO)
- return 0;
-
if (td->io_ops->flags & FIO_SYNCIO)
max_units = 1;
else
FILE *f;
int ret;
- if (td->io_ops->flags & FIO_CPUIO)
+ if (td->io_ops->flags & FIO_DISKLESSIO)
return 0;
sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);
td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
td->zone_bytes = 0;
+ td->rate_bytes = 0;
td->last_was_sync = 0;
INIT_LIST_HEAD(&td->io_log_list);
if (init_io_u(td))
- goto err;
+ goto err_sem;
if (fio_setaffinity(td) == -1) {
td_verror(td, errno, "cpu_set_affinity");
- goto err;
+ goto err_sem;
}
if (init_iolog(td))
- goto err;
+ goto err_sem;
if (td->ioprio) {
if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
td_verror(td, errno, "ioprio_set");
- goto err;
+ goto err_sem;
}
}
if (nice(td->nice) == -1) {
td_verror(td, errno, "nice");
- goto err;
+ goto err_sem;
}
if (init_random_state(td))
- goto err;
+ goto err_sem;
if (td->ioscheduler && switch_ioscheduler(td))
- goto err;
+ goto err_sem;
td_set_runstate(td, TD_INITIALIZED);
fio_sem_up(startup_sem);
fio_sem_down(td->mutex);
+ /*
+ * the ->mutex semaphore is now no longer used, close it to avoid
+ * eating a file descriptor
+ */
+ fio_sem_remove(td->mutex);
+
if (!td->create_serialize && setup_files(td))
goto err;
prune_io_piece_log(td);
- if (td->io_ops->flags & FIO_CPUIO)
- do_cpuio(td);
- else
- do_io(td);
+ do_io(td);
clear_state = 1;
cleanup_io_u(td);
td_set_runstate(td, TD_EXITED);
return (void *) (unsigned long) td->error;
+err_sem:
+ fio_sem_up(startup_sem);
+ goto err;
}
/*
* ->io_ops is NULL for a thread that has closed its
* io engine
*/
- if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
+ if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
cputhreads++;
if (!td->pid || td->runstate == TD_REAPED)
perror("pthread_join");
}
- fio_sem_remove(td->mutex);
-
(*nr_running)--;
(*m_rate) -= td->ratemin;
(*t_rate) -= td->rate;
return;
if (!terse_output) {
- printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
+ printf("Starting ");
+ if (nr_thread)
+ printf("%d thread%s", nr_thread, nr_thread > 1 ? "s" : "");
+ if (nr_process) {
+ if (nr_thread)
+ printf(" and ");
+ printf("%d process%s", nr_process, nr_process > 1 ? "es" : "");
+ }
+ printf("\n");
fflush(stdout);
}
if (pthread_create(&td->thread, NULL, thread_main, td)) {
perror("thread_create");
nr_started--;
+ break;
}
} else {
if (!fork()) {