+/*
+ * mwc32()
+ * Multiply-with-carry random numbers
+ * fast pseudo random number generator, see
+ * http://www.cse.yorku.ca/~oz/marsaglia-rng.html
+ */
+uint32_t mwc32(struct mwc *mwc)
+{
+ mwc->z = 36969 * (mwc->z & 65535) + (mwc->z >> 16);
+ mwc->w = 18000 * (mwc->w & 65535) + (mwc->w >> 16);
+ return (mwc->z << 16) + mwc->w;
+}
+
/*
 * stress_qsort_cmp_1()
 *	qsort comparison - sort on int32 values
 */
static int stress_qsort_cmp_1(const void *p1, const void *p2)
{
	const int32_t a = *(const int32_t *)p1;
	const int32_t b = *(const int32_t *)p2;

	/*
	 * Branchless three-way compare: yields 1, -1 or 0 without the
	 * overflow risk a plain subtraction would have on int32 values.
	 */
	return (a > b) - (a < b);
}
+
/*
 * stress_qsort_cmp_2()
 *	qsort comparison - reverse sort on int32 values
 */
static int stress_qsort_cmp_2(const void *p1, const void *p2)
{
	/*
	 * Descending order: negate the ascending comparison. Equivalent to
	 * swapping the operands, since cmp_1 only returns -1, 0 or 1.
	 */
	return -stress_qsort_cmp_1(p1, p2);
}
+
/*
 * stress_qsort_cmp_3()
 *	qsort comparison - sort on int8 values
 */
static int stress_qsort_cmp_3(const void *p1, const void *p2)
{
	const int8_t a = *(const int8_t *)p1;
	const int8_t b = *(const int8_t *)p2;

	/*
	 * Force re-ordering on 8 bit value. Both operands are promoted to
	 * int, so the subtraction cannot overflow for 8-bit inputs.
	 */
	return a - b;
}
+
+static int do_qsort(struct thread_data *td)
+{
+ struct thread_options *o = &td->o;
+ struct cpu_options *co = td->eo;
+ struct timespec start, now;
+
+ fio_get_mono_time(&start);
+
+ /* Sort "random" data */
+ qsort(co->qsort_data, qsort_size, sizeof(*(co->qsort_data)), stress_qsort_cmp_1);
+
+ /* Reverse sort */
+ qsort(co->qsort_data, qsort_size, sizeof(*(co->qsort_data)), stress_qsort_cmp_2);
+
+ /* And re-order by byte compare */
+ qsort((uint8_t *)co->qsort_data, qsort_size * 4, sizeof(uint8_t), stress_qsort_cmp_3);
+
+ /* Reverse sort this again */
+ qsort(co->qsort_data, qsort_size, sizeof(*(co->qsort_data)), stress_qsort_cmp_2);
+ fio_get_mono_time(&now);
+
+ /* Adjusting cpucycle automatically to be as close as possible to the
+ * expected cpuload The time to execute do_qsort() may change over time
+ * as per : - the job concurrency - the cpu clock adjusted by the power
+ * management After every do_qsort() call, the next thinktime is
+ * adjusted regarding the last run performance
+ */
+ co->cpucycle = utime_since(&start, &now);
+ o->thinktime = ((unsigned long long) co->cpucycle *
+ (100 - co->cpuload)) / co->cpuload;
+
+ return 0;
+}