 * Doesn't transfer any data, merely burns CPU cycles according to the settings.
9 #include "../optgroup.h"
/*
 * Number of 32 bit integers to sort: 256Ki elements, i.e. 1 MiB of data.
 * (This is an element count, not a byte size — the old "256KB" note was
 * misleading, as each element is 4 bytes.)
 */
size_t qsort_size = (256 * (1ULL << 10));
	unsigned int cpucycle;		/* length of one burn cycle, in usecs */
	enum stress_mode cpumode;	/* stress mode selected by the "cpumode" option */
	unsigned int exit_io_done;	/* when set, stop once all IO threads are finished */
33 static struct fio_option options[] = {
38 .off1 = offsetof(struct cpu_options, cpuload),
39 .help = "Use this percentage of CPU",
40 .category = FIO_OPT_C_ENGINE,
41 .group = FIO_OPT_G_INVALID,
47 .help = "Stress mode",
48 .off1 = offsetof(struct cpu_options, cpumode),
53 .help = "NOOP instructions",
56 .oval = FIO_CPU_QSORT,
57 .help = "QSORT computation",
60 .category = FIO_OPT_C_ENGINE,
61 .group = FIO_OPT_G_INVALID,
67 .off1 = offsetof(struct cpu_options, cpucycle),
68 .help = "Length of the CPU burn cycles (usecs)",
72 .category = FIO_OPT_C_ENGINE,
73 .group = FIO_OPT_G_INVALID,
76 .name = "exit_on_io_done",
77 .lname = "Exit when IO threads are done",
79 .off1 = offsetof(struct cpu_options, exit_io_done),
80 .help = "Exit when IO threads finish",
82 .category = FIO_OPT_C_ENGINE,
83 .group = FIO_OPT_G_INVALID,
92 * Multiply-with-carry random numbers
93 * fast pseudo random number generator, see
94 * http://www.cse.yorku.ca/~oz/marsaglia-rng.html
96 uint32_t mwc32(struct mwc_t *mwc)
98 mwc->z = 36969 * (mwc->z & 65535) + (mwc->z >> 16);
99 mwc->w = 18000 * (mwc->w & 65535) + (mwc->w >> 16);
100 return (mwc->z << 16) + mwc->w;
/*
 * stress_qsort_cmp_1()
 *	qsort comparison - sort on int32 values
 */
static int stress_qsort_cmp_1(const void *p1, const void *p2)
{
	const int32_t *i1 = (const int32_t *)p1;
	const int32_t *i2 = (const int32_t *)p2;

	/*
	 * Compare by sign rather than returning "*i1 - *i2": the
	 * subtraction can overflow (undefined behavior) when the operands
	 * have opposite signs.
	 */
	if (*i1 > *i2)
		return 1;
	if (*i1 < *i2)
		return -1;
	return 0;
}
/*
 * stress_qsort_cmp_2()
 *	qsort comparison - reverse sort on int32 values
 */
static int stress_qsort_cmp_2(const void *p1, const void *p2)
{
	/* Reverse the ordering by swapping the operands of the forward compare */
	return stress_qsort_cmp_1(p2, p1);
}
/*
 * stress_qsort_cmp_3()
 *	qsort comparison - sort on int8 values
 */
static int stress_qsort_cmp_3(const void *p1, const void *p2)
{
	const int8_t *i1 = (const int8_t *)p1;
	const int8_t *i2 = (const int8_t *)p2;

	/*
	 * Force re-ordering on 8 bit value. Both operands are promoted to
	 * int before the subtraction, so unlike the 32-bit case this
	 * difference cannot overflow.
	 */
	return *i1 - *i2;
}
142 static int do_qsort(struct thread_data *td)
144 struct thread_options *o = &td->o;
145 struct cpu_options *co = td->eo;
146 struct timespec start, now;
147 fio_get_mono_time(&start);
148 /* Sort "random" data */
149 qsort(co->qsort_data, qsort_size, sizeof(*(co->qsort_data)), stress_qsort_cmp_1);
152 qsort(co->qsort_data, qsort_size, sizeof(*(co->qsort_data)), stress_qsort_cmp_2);
154 /* And re-order by byte compare */
155 qsort((uint8_t *)co->qsort_data, qsort_size * 4, sizeof(uint8_t), stress_qsort_cmp_3);
157 /* Reverse sort this again */
158 qsort(co->qsort_data, qsort_size, sizeof(*(co->qsort_data)), stress_qsort_cmp_2);
159 fio_get_mono_time(&now);
161 /* Adjusting cpucycle automatically to be as close as possible to the expected cpuload
162 * The time to execute do_qsort() may change over time as per :
163 * - the job concurrency
164 * - the cpu clock adjusted by the power management
165 * After every do_qsort() call, the next thinktime is adjusted regarding the last run performance
167 co->cpucycle = utime_since(&start, &now);
168 o->thinktime = ((unsigned long long) co->cpucycle * (100 - co->cpuload)) / co->cpuload;
173 static enum fio_q_status fio_cpuio_queue(struct thread_data *td,
174 struct io_u fio_unused *io_u)
176 struct cpu_options *co = td->eo;
178 if (co->exit_io_done && !fio_running_or_pending_io_threads()) {
183 switch (co->cpumode) {
185 usec_spin(co->cpucycle);
192 return FIO_Q_COMPLETED;
195 static int noop_init(struct thread_data *td)
197 struct cpu_options *co = td->eo;
199 log_info("%s (noop): ioengine=%s, cpuload=%u, cpucycle=%u\n",
200 td->o.name, td->io_ops->name, co->cpuload, co->cpucycle);
204 static int qsort_cleanup(struct thread_data *td)
206 struct cpu_options *co = td->eo;
208 free(co->qsort_data);
212 static int qsort_init(struct thread_data *td)
214 struct cpu_options *co = td->eo;
216 // Setting up a default entropy
222 co->qsort_data = calloc(qsort_size, sizeof(*co->qsort_data));
223 if (co->qsort_data == NULL) {
224 td_verror(td, ENOMEM, "qsort_init");
228 /* This is expensive, init the memory once */
229 for (int32_t *ptr = co->qsort_data, i = 0; i < qsort_size; i++)
230 *ptr++ = mwc32(&mwc);
232 log_info("%s (qsort): ioengine=%s, cpuload=%u, cpucycle=%u\n",
233 td->o.name, td->io_ops->name, co->cpuload, co->cpucycle);
238 static int fio_cpuio_init(struct thread_data *td)
240 struct thread_options *o = &td->o;
241 struct cpu_options *co = td->eo;
242 int td_previous_state;
245 td_vmsg(td, EINVAL, "cpu thread needs rate (cpuload=)","cpuio");
249 if (co->cpuload > 100)
252 // Saving the current thread state
253 td_previous_state = td->runstate;
254 /* Reporting that we are preparing the engine
255 * This is useful as the qsort() calibration takes time
256 * This prevents the job from starting before init is completed
258 td_set_runstate(td, TD_SETTING_UP);
261 * set thinktime_sleep and thinktime_spin appropriately
263 o->thinktime_blocks = 1;
264 o->thinktime_spin = 0;
265 o->thinktime = ((unsigned long long) co->cpucycle * (100 - co->cpuload)) / co->cpuload;
267 o->nr_files = o->open_files = 1;
269 switch (co->cpumode) {
278 // Let's restore the previous state.
279 td_set_runstate(td, td_previous_state);
283 static void fio_cpuio_cleanup(struct thread_data *td)
285 struct cpu_options *co = td->eo;
287 switch (co->cpumode) {
296 static int fio_cpuio_open(struct thread_data fio_unused *td,
297 struct fio_file fio_unused *f)
302 static struct ioengine_ops ioengine = {
304 .version = FIO_IOOPS_VERSION,
305 .queue = fio_cpuio_queue,
306 .init = fio_cpuio_init,
307 .cleanup = fio_cpuio_cleanup,
308 .open_file = fio_cpuio_open,
309 .flags = FIO_SYNCIO | FIO_DISKLESSIO | FIO_NOIO,
311 .option_struct_size = sizeof(struct cpu_options),
314 static void fio_init fio_cpuio_register(void)
316 register_ioengine(&ioengine);
319 static void fio_exit fio_cpuio_unregister(void)
321 unregister_ioengine(&ioengine);