/*
 * blktrace support code for fio
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

#include "flist.h"
#include "fio.h"
#include "blktrace_api.h"
#include "oslib/linux-dev-lookup.h"

#define TRACE_FIFO_SIZE	8192
/*
 * fifo refill frontend, to avoid reading data in trace sized bites
 */
static int refill_fifo(struct thread_data *td, struct fifo *fifo, int fd)
{
	char buf[TRACE_FIFO_SIZE];
	unsigned int total;
	int ret;

	total = sizeof(buf);
	if (total > fifo_room(fifo))
		total = fifo_room(fifo);

	ret = read(fd, buf, total);
	if (ret < 0) {
		td_verror(td, errno, "read blktrace file");
		return -1;
	}

	if (ret > 0)
		ret = fifo_put(fifo, buf, ret);

	dprint(FD_BLKTRACE, "refill: filled %d bytes\n", ret);
	return ret;
}
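/*
 * Sizing note: the on-disk blk_io_trace header is 48 bytes, so an 8k
 * fifo batches roughly 170 trace headers per read() instead of issuing
 * one syscall per trace.
 */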
/*
 * Retrieve 'len' bytes from the fifo, refilling if necessary.
 */
static int trace_fifo_get(struct thread_data *td, struct fifo *fifo, int fd,
			  void *buf, unsigned int len)
{
	if (fifo_len(fifo) < len) {
		int ret = refill_fifo(td, fifo, fd);

		if (ret < 0)
			return ret;
	}

	return fifo_get(fifo, buf, len);
}
/*
 * Just discard the pdu by seeking past it.
 */
static int discard_pdu(struct thread_data *td, struct fifo *fifo, int fd,
		       struct blk_io_trace *t)
{
	if (t->pdu_len == 0)
		return 0;

	dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
	return trace_fifo_get(td, fifo, fd, NULL, t->pdu_len);
}
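/*
 * Format note: the magic word in each trace stores the signature in the
 * top three bytes (BLK_IO_TRACE_MAGIC is 0x65617400) and the format
 * version in the low byte, hence the 0xffffff00 masking below.
 */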
/*
 * Check if this is a blktrace binary data file. We read a single trace
 * into memory and check for the magic signature.
 */
int is_blktrace(const char *filename, int *need_swap)
{
	struct blk_io_trace t;
	int fd, ret;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		return 0;

	ret = read(fd, &t, sizeof(t));
	close(fd);

	if (ret < 0) {
		perror("read blktrace");
		return 0;
	} else if (ret != sizeof(t)) {
		log_err("fio: short read on blktrace file\n");
		return 0;
	}

	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 0;
		return 1;
	}

	/*
	 * Maybe it needs to be endian swapped...
	 */
	t.magic = fio_swap32(t.magic);
	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 1;
		return 1;
	}

	return 0;
}
#define FMINORBITS	20
#define FMINORMASK	((1U << FMINORBITS) - 1)
#define FMAJOR(dev)	((unsigned int) ((dev) >> FMINORBITS))
#define FMINOR(dev)	((unsigned int) ((dev) & FMINORMASK))
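/*
 * Worked example: the kernel packs dev_t as (major << 20) | minor, so a
 * trace against device 253:2 carries device = (253 << 20) | 2 = 0x0fd00002,
 * which FMAJOR()/FMINOR() split back into 253 and 2.
 */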
static void trace_add_open_close_event(struct thread_data *td, int fileno,
				       enum file_log_act action)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	INIT_FLIST_HEAD(&ipo->list);

	ipo->ddir = DDIR_INVAL;
	ipo->fileno = fileno;
	ipo->file_action = action;
	flist_add_tail(&ipo->list, &td->io_log_list);
}
static int get_dev_blocksize(const char *dev, unsigned int *bs)
{
	int fd;

	fd = open(dev, O_RDONLY);
	if (fd < 0)
		return 1;

	if (ioctl(fd, BLKSSZGET, bs) < 0) {
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}
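/*
 * The probed logical block size is what store_ipo() later uses to turn
 * t->sector into a byte offset; if BLKSSZGET fails, trace_add_file()
 * below falls back to 512 bytes.
 */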
static int trace_add_file(struct thread_data *td, __u32 device,
			  unsigned int *bs)
{
	static unsigned int last_maj, last_min, last_fileno, last_bs;
	unsigned int maj = FMAJOR(device);
	unsigned int min = FMINOR(device);
	struct fio_file *f;
	unsigned int i;
	char dev[256];

	if (last_maj == maj && last_min == min) {
		*bs = last_bs;
		return last_fileno;
	}

	last_maj = maj;
	last_min = min;

	/*
	 * check for this file in our list
	 */
	for_each_file(td, f, i) {
		if (f->major == maj && f->minor == min) {
			last_fileno = f->fileno;
			last_bs = f->bs;
			goto out;
		}
	}

	dev[0] = '\0';
	if (blktrace_lookup_device(td->o.replay_redirect, dev, maj, min)) {
		unsigned int this_bs;
		int fileno;

		if (td->o.replay_redirect)
			dprint(FD_BLKTRACE, "device lookup: %d/%d overridden"
					" with: %s\n", maj, min,
					td->o.replay_redirect);
		else
			dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);

		dprint(FD_BLKTRACE, "add devices %s\n", dev);
		fileno = add_file_exclusive(td, dev);

		if (get_dev_blocksize(dev, &this_bs))
			this_bs = 512;

		td->o.open_files++;
		td->files[fileno]->major = maj;
		td->files[fileno]->minor = min;
		td->files[fileno]->bs = this_bs;
		trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);

		last_fileno = fileno;
		last_bs = this_bs;
	}

out:
	*bs = last_bs;
	return last_fileno;
}
static void t_bytes_align(struct thread_options *o, struct blk_io_trace *t)
{
	if (!o->replay_align)
		return;

	t->bytes = (t->bytes + o->replay_align - 1) & ~(o->replay_align - 1);
}
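/*
 * The round-up above assumes replay_align is a power of two: with
 * replay_align=4096, bytes=5000 becomes (5000 + 4095) & ~4095 = 8192.
 */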
/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
		      unsigned int bytes, int rw, unsigned long long ttime,
		      int fileno, unsigned int bs)
{
	struct io_piece *ipo = malloc(sizeof(*ipo));

	memset(ipo, 0, sizeof(*ipo));
	INIT_FLIST_HEAD(&ipo->list);

	ipo->offset = offset * bs;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(td->o.replay_align, ipo);
	ipo->len = bytes;
	ipo->delay = ttime / 1000;
	if (rw)
		ipo->ddir = DDIR_WRITE;
	else
		ipo->ddir = DDIR_READ;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
	       ipo->ddir, ipo->offset, ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}
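/*
 * Units note: blktrace timestamps are in nanoseconds, so the ttime delta
 * is divided by 1000 above to store ipo->delay in microseconds.
 */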
static void handle_trace_notify(struct blk_io_trace *t)
{
	switch (t->action) {
	case BLK_TN_PROCESS:
		dprint(FD_BLKTRACE, "got process notify: %x, %d\n",
				t->action, t->pid);
		break;
	case BLK_TN_TIMESTAMP:
		dprint(FD_BLKTRACE, "got timestamp notify: %x, %d\n",
				t->action, t->pid);
		break;
	case BLK_TN_MESSAGE:
		break;
	default:
		dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
		break;
	}
}
static void handle_trace_discard(struct thread_data *td,
				 struct blk_io_trace *t,
				 unsigned long long ttime,
				 unsigned long *ios, unsigned int *rw_bs)
{
	struct io_piece *ipo = malloc(sizeof(*ipo));
	unsigned int bs;
	int fileno;

	fileno = trace_add_file(td, t->device, &bs);

	ios[DDIR_TRIM]++;
	if (t->bytes > rw_bs[DDIR_TRIM])
		rw_bs[DDIR_TRIM] = t->bytes;

	td->o.size += t->bytes;

	memset(ipo, 0, sizeof(*ipo));
	INIT_FLIST_HEAD(&ipo->list);

	ipo->offset = t->sector * bs;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(td->o.replay_align, ipo);
	ipo->len = t->bytes;
	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_TRIM;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
	       ipo->offset, ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}
static void handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
			    unsigned long long ttime, unsigned long *ios,
			    unsigned int *rw_bs)
{
	unsigned int bs;
	int rw, fileno;

	fileno = trace_add_file(td, t->device, &bs);

	rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

	if (t->bytes > rw_bs[rw])
		rw_bs[rw] = t->bytes;

	ios[rw]++;
	td->o.size += t->bytes;
	store_ipo(td, t->sector, t->bytes, rw, ttime, fileno, bs);
}
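/*
 * Action field layout, for reference: the low 16 bits hold the event type
 * (__BLK_TA_QUEUE, __BLK_TA_COMPLETE, ...), while the upper bits hold
 * category flags (BLK_TC_READ, BLK_TC_WRITE, BLK_TC_NOTIFY, ...) shifted
 * up via BLK_TC_ACT(). That is why the code masks with 0xffff for the
 * former and tests BLK_TC_ACT() bits for the latter.
 */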
/*
 * We only care for queue traces, most of the others are side effects
 * due to internal workings of the block layer.
 */
static void handle_trace(struct thread_data *td, struct blk_io_trace *t,
			 unsigned long *ios, unsigned int *bs)
{
	static unsigned long long last_ttime;
	unsigned long long delay = 0;

	if ((t->action & 0xffff) != __BLK_TA_QUEUE)
		return;

	if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
		if (!last_ttime || td->o.no_stall) {
			last_ttime = t->time;
			delay = 0;
		} else {
			delay = t->time - last_ttime;
			last_ttime = t->time;
		}
	}

	t_bytes_align(&td->o, t);

	if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
		handle_trace_notify(t);
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		handle_trace_discard(td, t, delay, ios, bs);
	else
		handle_trace_fs(td, t, delay, ios, bs);
}
static void byteswap_trace(struct blk_io_trace *t)
{
	t->magic = fio_swap32(t->magic);
	t->sequence = fio_swap32(t->sequence);
	t->time = fio_swap64(t->time);
	t->sector = fio_swap64(t->sector);
	t->bytes = fio_swap32(t->bytes);
	t->action = fio_swap32(t->action);
	t->pid = fio_swap32(t->pid);
	t->device = fio_swap32(t->device);
	t->cpu = fio_swap32(t->cpu);
	t->error = fio_swap16(t->error);
	t->pdu_len = fio_swap16(t->pdu_len);
}
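/*
 * Only the fixed trace header is swapped; any pdu payload that follows
 * is skipped via discard_pdu() and never interpreted, so it needs no
 * byte swapping.
 */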
static int t_is_write(struct blk_io_trace *t)
{
	return (t->action & BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_DISCARD)) != 0;
}

static enum fio_ddir t_get_ddir(struct blk_io_trace *t)
{
	if (t->action & BLK_TC_ACT(BLK_TC_READ))
		return DDIR_READ;
	else if (t->action & BLK_TC_ACT(BLK_TC_WRITE))
		return DDIR_WRITE;
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		return DDIR_TRIM;

	return DDIR_INVAL;
}
static void depth_inc(struct blk_io_trace *t, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL)
		depth[ddir]++;
}

static void depth_dec(struct blk_io_trace *t, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL)
		depth[ddir]--;
}

static void depth_end(struct blk_io_trace *t, int *this_depth, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL) {
		depth[ddir] = max(depth[ddir], this_depth[ddir]);
		this_depth[ddir] = 0;
	}
}
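/*
 * Queue depth probing: QUEUE events raise the per-ddir in-flight count,
 * merges lower it (a merged request no longer queues on its own), and
 * COMPLETE snapshots the high-water mark. load_blktrace() below uses the
 * result as the replay iodepth when none was set in the job file.
 */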
/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
int load_blktrace(struct thread_data *td, const char *filename, int need_swap)
{
	struct blk_io_trace t;
	unsigned long ios[DDIR_RWDIR_CNT], skipped_writes;
	unsigned int rw_bs[DDIR_RWDIR_CNT];
	struct fifo *fifo;
	struct fio_file *f;
	int fd, i, old_state;
	int this_depth[DDIR_RWDIR_CNT], depth[DDIR_RWDIR_CNT], max_depth;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		td_verror(td, errno, "open blktrace file");
		return 1;
	}

	fifo = fifo_alloc(TRACE_FIFO_SIZE);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	td->o.size = 0;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		ios[i] = 0;
		rw_bs[i] = 0;
		this_depth[i] = 0;
		depth[i] = 0;
	}

	skipped_writes = 0;
	do {
		int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

		if (ret < 0)
			goto err;
		else if (!ret)
			break;
		else if (ret < (int) sizeof(t)) {
			log_err("fio: short fifo get\n");
			break;
		}

		if (need_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(td, fifo, fd, &t);
		if (ret < 0) {
			td_verror(td, ret, "blktrace lseek");
			goto err;
		} else if (t.pdu_len != ret) {
			log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				depth_inc(&t, this_depth);
			else if (((t.action & 0xffff) == __BLK_TA_BACKMERGE) ||
				 ((t.action & 0xffff) == __BLK_TA_FRONTMERGE))
				depth_dec(&t, this_depth);
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE)
				depth_end(&t, this_depth, depth);

			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		handle_trace(td, &t, ios, rw_bs);
	} while (1);
	for (i = 0; i < td->files_index; i++) {
		f = td->files[i];
		trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
	}

	fifo_free(fifo);
	close(fd);

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return 1;
	}
	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	max_depth = 0;
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (depth[i] > 1024)
			depth[i] = 1024;
		else if (!depth[i] && ios[i])
			depth[i] = 1;
		max_depth = max(depth[i], max_depth);
	}

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
			td->o.name, skipped_writes);
	if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		log_err("fio: found no ios in blktrace data\n");
		return 1;
	} else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	} else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	} else {
		td->o.td_ddir = TD_DDIR_RW;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}
	/*
	 * We need to do direct/raw ios to the device, to avoid getting
	 * read-ahead in our way. But only do so if the minimum block size
	 * is a multiple of 4k, otherwise we don't know if it's safe to do so.
	 */
	if (!fio_option_is_set(&td->o, odirect) && !(td_min_bs(td) & 4095))
		td->o.odirect = 1;

	/*
	 * If depth wasn't manually set, use probed depth
	 */
	if (!fio_option_is_set(&td->o, iodepth))
		td->o.iodepth = td->o.iodepth_low = max_depth;

	return 0;
err:
	close(fd);
	fifo_free(fifo);
	return 1;
}
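/*
 * Typical use, assuming a binary trace captured with blktrace: point
 * read_iolog at the trace file, optionally redirecting all replay IO
 * to a single device:
 *
 *   [replay]
 *   read_iolog=device.trace
 *   replay_redirect=/dev/sdb
 */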