author    Jens Axboe <axboe@fb.com>  2016-03-04 15:29:32 -0700
committer Jens Axboe <axboe@fb.com>  2016-03-04 15:29:32 -0700
commit    e0a04ac15f6164cfceb3e1090cc1055e3c823f7d (patch)
tree      e5192ef672326867995e032415d619df728a0299 /cconv.c
parent    8116fd24b737c9d878ccb6a4cc13cc4f974dc2dc (diff)
Add support for zones of random IO, with varying frequency of access
Let's say you want to ensure that 50% of the IO falls in the first 5% of the file, with the remaining 50% over the last 95%. That's now possible with random_distribution=zoned. For this particular case, you would use:

random_distribution=zoned:50/5:50/95

Up to 64 ranges can be specified, and multiple data directions can be given as well. The above applies to reads, writes, and trims alike. If you wanted 50% of the writes to fall in the first 10%, 25% in the next 10%, and the last 25% over the remaining 80%, you could extend the above like so:

random_distribution=zoned:50/5:50/95,50/10:25/10:25/80

Signed-off-by: Jens Axboe <axboe@fb.com>
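For illustration only (not part of the commit), a job file exercising the syntax above might look like the following; the job name and device path are placeholders:

; Reads: 50% of the IO over the first 5% of the file, 50% over the last 95%.
; Writes: 50% over the first 10%, 25% over the next 10%, 25% over the last 80%.
[zoned-example]
filename=/dev/sdX
rw=randrw
random_distribution=zoned:50/5:50/95,50/10:25/10:25/80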
Diffstat (limited to 'cconv.c')
-rw-r--r--  cconv.c  | 32
1 file changed, 32 insertions(+), 0 deletions(-)
diff --git a/cconv.c b/cconv.c
index 6f57d90..0c3a36c 100644
--- a/cconv.c
+++ b/cconv.c
@@ -23,6 +23,8 @@ static void __string_to_net(uint8_t *dst, const char *src, size_t dst_size)
static void free_thread_options_to_cpu(struct thread_options *o)
{
+ int i;
+
free(o->description);
free(o->name);
free(o->wait_for);
@@ -43,6 +45,11 @@ static void free_thread_options_to_cpu(struct thread_options *o)
free(o->ioscheduler);
free(o->profile);
free(o->cgroup);
+
+ for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+ free(o->bssplit[i]);
+ free(o->zone_split[i]);
+ }
}
void convert_thread_options_to_cpu(struct thread_options *o,
@@ -111,6 +118,16 @@ void convert_thread_options_to_cpu(struct thread_options *o,
}
}
+ o->zone_split_nr[i] = le32_to_cpu(top->zone_split_nr[i]);
+
+ if (o->zone_split_nr[i]) {
+ o->zone_split[i] = malloc(o->zone_split_nr[i] * sizeof(struct zone_split));
+ for (j = 0; j < o->zone_split_nr[i]; j++) {
+ o->zone_split[i][j].access_perc = top->zone_split[i][j].access_perc;
+ o->zone_split[i][j].size_perc = top->zone_split[i][j].size_perc;
+ }
+ }
+
o->rwmix[i] = le32_to_cpu(top->rwmix[i]);
o->rate[i] = le32_to_cpu(top->rate[i]);
o->ratemin[i] = le32_to_cpu(top->ratemin[i]);
@@ -453,6 +470,21 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
}
}
+ top->zone_split_nr[i] = cpu_to_le32(o->zone_split_nr[i]);
+
+ if (o->zone_split_nr[i]) {
+ unsigned int zone_split_nr = o->zone_split_nr[i];
+
+ if (zone_split_nr > ZONESPLIT_MAX) {
+ log_err("fio: ZONESPLIT_MAX is too small\n");
+ zone_split_nr = ZONESPLIT_MAX;
+ }
+ for (j = 0; j < zone_split_nr; j++) {
+ top->zone_split[i][j].access_perc = o->zone_split[i][j].access_perc;
+ top->zone_split[i][j].size_perc = o->zone_split[i][j].size_perc;
+ }
+ }
+
top->rwmix[i] = cpu_to_le32(o->rwmix[i]);
top->rate[i] = cpu_to_le32(o->rate[i]);
top->ratemin[i] = cpu_to_le32(o->ratemin[i]);
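
For readers following the conversion hunks above, here is a minimal standalone sketch of the clamp-and-copy pattern from convert_thread_options_to_net(). It assumes zone_split holds single-byte percentage fields (consistent with the patch copying them without le32_to_cpu()) and uses an illustrative ZONESPLIT_MAX; neither is taken verbatim from fio's headers.

#include <stdint.h>
#include <stdio.h>

#define ZONESPLIT_MAX	64	/* illustrative; the message says "up to 64 ranges" */

/*
 * Assumed layout: single-byte fields need no endianness conversion,
 * which matches the patch copying access_perc/size_perc verbatim.
 */
struct zone_split {
	uint8_t access_perc;	/* % of IO directed at this zone */
	uint8_t size_perc;	/* % of the file this zone covers */
};

/* Clamp-and-copy, mirroring the convert_thread_options_to_net() hunk. */
static uint32_t pack_zone_split(struct zone_split *dst,
				const struct zone_split *src, uint32_t nr)
{
	uint32_t j;

	if (nr > ZONESPLIT_MAX) {
		fprintf(stderr, "fio: ZONESPLIT_MAX is too small\n");
		nr = ZONESPLIT_MAX;
	}
	for (j = 0; j < nr; j++) {
		dst[j].access_perc = src[j].access_perc;
		dst[j].size_perc = src[j].size_perc;
	}
	return nr;
}

int main(void)
{
	/* 50% of IO over the first 5% of the file, 50% over the last 95%. */
	struct zone_split src[] = { { 50, 5 }, { 50, 95 } };
	struct zone_split dst[ZONESPLIT_MAX];
	uint32_t nr = pack_zone_split(dst, src, 2);

	printf("packed %u zones; zone 0: %u%% of IO over %u%% of the file\n",
	       (unsigned) nr, (unsigned) dst[0].access_perc,
	       (unsigned) dst[0].size_perc);
	return 0;
}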