td->o.odirect = 1;
/*
- * we don't know if this option was set or not. it defaults to 1,
- * so we'll just guess that we should override it if it's still 1
+ * If depth wasn't manually set, use probed depth
*/
- if (td->o.iodepth == 1)
+ if (!fio_option_is_set(&td->o, iodepth))
td->o.iodepth = td->o.iodepth_low = depth;
return 0;
/*
 * Configure-time probe program (emitted into a shell heredoc — see the
 * EOF terminator just below): it only needs to COMPILE and LINK to prove
 * that <mtd/mtd-user.h>'s struct mtd_info_user, the MTD_MLCNANDFLASH
 * type constant, and the MEMGETINFO ioctl are all available on this
 * system. The runtime result of the ioctl is irrelevant to the probe.
 *
 * NOTE(review): the '+' prefix on the info.type line is a unified-diff
 * marker — this file is a patch, not plain C; the line is being ADDED so
 * the probe also checks that MTD_MLCNANDFLASH is defined. Kept verbatim.
 */
int main(int argc, char **argv)
{
	struct mtd_info_user info;
+	info.type = MTD_MLCNANDFLASH;
	/* fd 0 is a dummy target; only symbol/type resolution matters here */
	return ioctl(0, MEMGETINFO, &info);
}
EOF
size_t len1 = strlen(path);
size_t len2 = strlen(name);
- n = xmalloc(len1 + len2 + 2);
+ n = xmalloc(len1 + len2 + 6);
memcpy(n, path, len1);
if (n[len1 - 1] != '/')
for (i = 0; i < sizeof(int) * 8; i++) {
if ((1 << i) & *val) {
- if (i > max_cpu) {
+ if (i >= max_cpu) {
log_err("fio: CPU %d too large (max=%ld)\n", i,
- max_cpu);
+ max_cpu - 1);
return 1;
}
dprint(FD_PARSE, "set cpu allowed %d\n", i);
ret = 1;
break;
}
- if (icpu > max_cpu) {
+ if (icpu >= max_cpu) {
log_err("fio: CPU %d too large (max=%ld)\n",
- icpu, max_cpu);
+ icpu, max_cpu - 1);
ret = 1;
break;
}
struct group_run_stats *runstats, *rs;
struct thread_data *td;
struct thread_stat *threadstats, *ts;
- int i, j, nr_ts, last_ts, idx;
+ int i, j, k, nr_ts, last_ts, idx;
int kb_base_warned = 0;
int unit_base_warned = 0;
struct json_object *root = NULL;
ts->latency_window = td->o.latency_window;
ts->nr_block_infos = td->ts.nr_block_infos;
- for (i = 0; i < ts->nr_block_infos; i++)
- ts->block_infos[i] = td->ts.block_infos[i];
+ for (k = 0; k < ts->nr_block_infos; k++)
+ ts->block_infos[k] = td->ts.block_infos[k];
sum_thread_stats(ts, &td->ts, idx);
}
BLOCK_STATE_TRIM_FAILURE,
BLOCK_STATE_WRITE_FAILURE,
BLOCK_STATE_COUNT,
-} state;
+};
#define MAX_PATTERN_SIZE 512
#define FIO_JOBNAME_SIZE 128