static int fio_cpuio_queue(struct thread_data *td, struct io_u fio_unused *io_u)
{
-	__usec_sleep(td->o.cpucycle);
+	usec_spin(td->o.cpucycle);
	return FIO_Q_COMPLETED;
}
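
The cpu ioengine never sleeps: each queued io_u is "completed" by burning td->o.cpucycle microseconds of CPU, which is exactly why the old __usec_sleep() name was misleading. As a rough standalone illustration of the difference the rename captures (not fio code; cpu_seconds() and spin_usec() are made-up helpers), compare the thread CPU time consumed by a spin against a real nanosleep():

/* Illustrative only -- none of these names exist in fio. */
#include <stdio.h>
#include <time.h>

/* CPU time consumed by the calling thread, in seconds */
static double cpu_seconds(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

/* busy loop for roughly 'usec' microseconds of wall time */
static void spin_usec(unsigned int usec)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000000L +
		 (now.tv_nsec - start.tv_nsec) / 1000 < (long) usec);
}

int main(void)
{
	struct timespec req = { 0, 100000000 };	/* 100 ms */
	double c0;

	c0 = cpu_seconds();
	spin_usec(100000);			/* 100 ms spin */
	printf("spin : %.0f ms of CPU\n", (cpu_seconds() - c0) * 1e3);

	c0 = cpu_seconds();
	nanosleep(&req, NULL);			/* 100 ms sleep */
	printf("sleep: %.0f ms of CPU\n", (cpu_seconds() - c0) * 1e3);
	return 0;
}

On a typical box the spin reports roughly the full 100 ms of CPU consumed while the sleep reports close to zero; that is the distinction the new names encode.
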
	int left;

	if (td->o.thinktime_spin)
-		__usec_sleep(td->o.thinktime_spin);
+		usec_spin(td->o.thinktime_spin);

	left = td->o.thinktime - td->o.thinktime_spin;
	if (left)
		usec_sleep(td, left);
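
The thinktime path combines both primitives: the first thinktime_spin microseconds are busy-looped for accuracy, and whatever is left of thinktime is handed to usec_sleep() so the thread actually yields the CPU. A hedged standalone sketch of that split (think() is hypothetical, spin_usec() is the stub from the sketch above, and thinktime_spin <= thinktime is assumed here):

/* Hypothetical standalone version of the spin-then-sleep split */
static void think(unsigned int thinktime, unsigned int thinktime_spin)
{
	unsigned int left;

	if (thinktime_spin)
		spin_usec(thinktime_spin);	/* precise, burns CPU */

	left = thinktime - thinktime_spin;
	if (left) {
		struct timespec req = {
			.tv_sec = left / 1000000,
			.tv_nsec = (left % 1000000) * 1000L,
		};

		nanosleep(&req, NULL);		/* coarse, yields CPU */
	}
}
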
extern unsigned long mtime_since_now(struct timeval *);
extern unsigned long time_since_now(struct timeval *);
extern unsigned long mtime_since_genesis(void);
-extern void __usec_sleep(unsigned int);
+extern void usec_spin(unsigned int);
extern void usec_sleep(struct thread_data *, unsigned long);
extern void rate_throttle(struct thread_data *, unsigned long, unsigned int);
extern void fill_start_time(struct timeval *);
/*
* busy looping version for the last few usec
*/
-void __usec_sleep(unsigned int usec)
+void usec_spin(unsigned int usec)
{
	struct timeval start;

[...]

		unsigned long ts = usec;

		if (usec < ns_granularity) {
-			__usec_sleep(usec);
+			usec_spin(usec);
			break;
		}
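
These last two hunks are from time.c. The first renames the busy-wait helper itself; its body (elided above) simply loops until the requested number of microseconds has passed. The second, deeper-indented fragment sits inside usec_sleep(): a request shorter than the measured nanosleep() granularity (ns_granularity) cannot be slept accurately, so it is spun instead. A sketch of the complete spin loop follows, assuming fio's fio_gettime() and utime_since_now() time helpers; the shipped body may differ in detail:

void usec_spin(unsigned int usec)
{
	struct timeval start;

	fio_gettime(&start, NULL);
	while (utime_since_now(&start) < usec)
		;	/* burn cycles until the interval has elapsed */
}
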