/*
 * blktrace support code for fio
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

#include "flist.h"
#include "fio.h"
#include "blktrace_api.h"
#include "lib/linux-dev-lookup.h"

#define TRACE_FIFO_SIZE	8192

/*
 * fifo refill frontend, to avoid reading data in trace sized bites
 */
static int refill_fifo(struct thread_data *td, struct fifo *fifo, int fd)
{
	char buf[TRACE_FIFO_SIZE];
	unsigned int total;
	int ret;

	total = sizeof(buf);
	if (total > fifo_room(fifo))
		total = fifo_room(fifo);

	ret = read(fd, buf, total);
	if (ret < 0) {
		td_verror(td, errno, "read blktrace file");
		return -1;
	}

	if (ret > 0)
		ret = fifo_put(fifo, buf, ret);

	dprint(FD_BLKTRACE, "refill: filled %d bytes\n", ret);
	return ret;
}

/*
 * Retrieve 'len' bytes from the fifo, refilling if necessary.
 */
static int trace_fifo_get(struct thread_data *td, struct fifo *fifo, int fd,
			  void *buf, unsigned int len)
{
	if (fifo_len(fifo) < len) {
		int ret = refill_fifo(td, fifo, fd);

		if (ret < 0)
			return ret;
	}

	return fifo_get(fifo, buf, len);
}
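
/*
 * Rough sizing note (assuming the usual 48-byte on-disk struct
 * blk_io_trace): one TRACE_FIFO_SIZE refill of 8192 bytes buffers around
 * 170 trace headers, so load_blktrace() does not issue a read() per trace.
 */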

/*
 * Just discard the pdu by seeking past it.
 */
static int discard_pdu(struct thread_data *td, struct fifo *fifo, int fd,
		       struct blk_io_trace *t)
{
	if (t->pdu_len == 0)
		return 0;

	dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
	return trace_fifo_get(td, fifo, fd, NULL, t->pdu_len);
}

/*
 * Check if this is a blktrace binary data file. We read a single trace
 * into memory and check for the magic signature.
 */
int is_blktrace(const char *filename, int *need_swap)
{
	struct blk_io_trace t;
	int fd, ret;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		return 0;

	ret = read(fd, &t, sizeof(t));
	close(fd);

	if (ret < 0) {
		perror("read blktrace");
		return 0;
	} else if (ret != sizeof(t)) {
		log_err("fio: short read on blktrace file\n");
		return 0;
	}

	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 0;
		return 1;
	}

	/*
	 * Maybe it needs to be endian swapped...
	 */
	t.magic = fio_swap32(t.magic);
	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 1;
		return 1;
	}

	return 0;
}
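
/*
 * Illustrative usage (not part of this file): a caller would normally
 * probe the file first and only replay it if the probe succeeds, e.g.
 *
 *	int need_swap = 0;
 *
 *	if (is_blktrace(filename, &need_swap))
 *		ret = load_blktrace(td, filename, need_swap);
 */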

#define FMINORBITS	20
#define FMINORMASK	((1U << FMINORBITS) - 1)
#define FMAJOR(dev)	((unsigned int) ((dev) >> FMINORBITS))
#define FMINOR(dev)	((unsigned int) ((dev) & FMINORMASK))
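
/*
 * Example: a trace device field of 0x00800010 decodes to
 * FMAJOR() = 0x00800010 >> 20 = 8 and FMINOR() = 0x00800010 & 0xfffff = 16,
 * i.e. block device 8:16.
 */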

static void trace_add_open_close_event(struct thread_data *td, int fileno, enum file_log_act action)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	INIT_FLIST_HEAD(&ipo->list);

	ipo->ddir = DDIR_INVAL;
	ipo->fileno = fileno;
	ipo->file_action = action;
	flist_add_tail(&ipo->list, &td->io_log_list);
}

/*
 * Map a blktrace device number to an fio file, adding the file (and an
 * open event) the first time we see it.
 */
static int trace_add_file(struct thread_data *td, __u32 device)
{
	static unsigned int last_maj, last_min, last_fileno;
	unsigned int maj = FMAJOR(device);
	unsigned int min = FMINOR(device);
	struct fio_file *f;
	char dev[256];
	unsigned int i;

	if (last_maj == maj && last_min == min)
		return last_fileno;

	last_maj = maj;
	last_min = min;

	/*
	 * check for this file in our list
	 */
	for_each_file(td, f, i)
		if (f->major == maj && f->minor == min) {
			last_fileno = f->fileno;
			return last_fileno;
		}

	strcpy(dev, "/dev");
	if (blktrace_lookup_device(td->o.replay_redirect, dev, maj, min)) {
		int fileno;

		if (td->o.replay_redirect)
			dprint(FD_BLKTRACE, "device lookup: %d/%d overridden"
					" with: %s\n", maj, min,
					td->o.replay_redirect);
		else
			dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);

		dprint(FD_BLKTRACE, "add devices %s\n", dev);
		fileno = add_file_exclusive(td, dev);
		td->files[fileno]->major = maj;
		td->files[fileno]->minor = min;
		trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
		last_fileno = fileno;
	}

	return last_fileno;
}

/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
		      unsigned int bytes, int rw, unsigned long long ttime,
		      int fileno)
{
	struct io_piece *ipo = malloc(sizeof(*ipo));

	memset(ipo, 0, sizeof(*ipo));
	INIT_FLIST_HEAD(&ipo->list);

	/*
	 * the 512 is wrong here, it should be the hardware sector size...
	 */
	ipo->offset = offset * 512;
	ipo->len = bytes;
	ipo->delay = ttime / 1000;
	if (rw)
		ipo->ddir = DDIR_WRITE;
	else
		ipo->ddir = DDIR_READ;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
							ipo->ddir, ipo->offset,
							ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}
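
/*
 * Worked example of the conversions above (illustrative numbers): a trace
 * entry at sector 2048 that arrives 1,000,000 nsec after the previous one
 * is stored as offset 2048 * 512 = 1048576 bytes with a 1000 usec delay.
 */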

static void handle_trace_notify(struct blk_io_trace *t)
{
	switch (t->action) {
	case BLK_TN_PROCESS:
		dprint(FD_BLKTRACE, "got process notify: %x, %d\n",
				t->action, t->pid);
		break;
	case BLK_TN_TIMESTAMP:
		dprint(FD_BLKTRACE, "got timestamp notify: %x, %d\n",
				t->action, t->pid);
		break;
	case BLK_TN_MESSAGE:
		break;
	default:
		dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
		break;
	}
}

static void handle_trace_discard(struct thread_data *td,
				 struct blk_io_trace *t,
				 unsigned long long ttime,
				 unsigned long *ios, unsigned int *bs)
{
	struct io_piece *ipo = malloc(sizeof(*ipo));
	int fileno;

	fileno = trace_add_file(td, t->device);

	ios[DDIR_TRIM]++;
	if (t->bytes > bs[DDIR_TRIM])
		bs[DDIR_TRIM] = t->bytes;

	td->o.size += t->bytes;

	memset(ipo, 0, sizeof(*ipo));
	INIT_FLIST_HEAD(&ipo->list);

	/*
	 * the 512 is wrong here, it should be the hardware sector size...
	 */
	ipo->offset = t->sector * 512;
	ipo->len = t->bytes;
	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_TRIM;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
							ipo->offset, ipo->len,
							ipo->delay);
	queue_io_piece(td, ipo);
}

static void handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
			    unsigned long long ttime, unsigned long *ios,
			    unsigned int *bs)
{
	int rw;
	int fileno;

	fileno = trace_add_file(td, t->device);

	rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

	if (t->bytes > bs[rw])
		bs[rw] = t->bytes;

	ios[rw]++;
	td->o.size += t->bytes;
	store_ipo(td, t->sector, t->bytes, rw, ttime, fileno);
}

/*
 * We only care for queue traces, most of the others are side effects
 * due to internal workings of the block layer.
 */
static void handle_trace(struct thread_data *td, struct blk_io_trace *t,
			 unsigned long *ios, unsigned int *bs)
{
	static unsigned long long last_ttime;
	unsigned long long delay;

	if ((t->action & 0xffff) != __BLK_TA_QUEUE)
		return;

	if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
		if (!last_ttime || td->o.no_stall) {
			last_ttime = t->time;
			delay = 0;
		} else {
			delay = t->time - last_ttime;
			last_ttime = t->time;
		}
	}

	if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
		handle_trace_notify(t);
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		handle_trace_discard(td, t, delay, ios, bs);
	else
		handle_trace_fs(td, t, delay, ios, bs);
}
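
/*
 * Note on the action encoding used above (per the blktrace ABI): the
 * category mask lives in the upper 16 bits and the action code in the
 * lower 16, so a queued write carries
 * BLK_TC_ACT(BLK_TC_WRITE) | __BLK_TA_QUEUE, and the
 * (t->action & 0xffff) == __BLK_TA_QUEUE test matches queue events of
 * any category.
 */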

static void byteswap_trace(struct blk_io_trace *t)
{
	t->magic = fio_swap32(t->magic);
	t->sequence = fio_swap32(t->sequence);
	t->time = fio_swap64(t->time);
	t->sector = fio_swap64(t->sector);
	t->bytes = fio_swap32(t->bytes);
	t->action = fio_swap32(t->action);
	t->pid = fio_swap32(t->pid);
	t->device = fio_swap32(t->device);
	t->cpu = fio_swap32(t->cpu);
	t->error = fio_swap16(t->error);
	t->pdu_len = fio_swap16(t->pdu_len);
}

static int t_is_write(struct blk_io_trace *t)
{
	return (t->action & BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_DISCARD)) != 0;
}

/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
int load_blktrace(struct thread_data *td, const char *filename, int need_swap)
{
	struct blk_io_trace t;
	unsigned long ios[DDIR_RWDIR_CNT], skipped_writes;
	unsigned int rw_bs[DDIR_RWDIR_CNT];
	struct fifo *fifo;
	int fd, i, old_state;
	struct fio_file *f;
	int this_depth, depth;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		td_verror(td, errno, "open blktrace file");
		return 1;
	}

	fifo = fifo_alloc(TRACE_FIFO_SIZE);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	td->o.size = 0;

	ios[0] = ios[1] = ios[2] = 0;
	rw_bs[0] = rw_bs[1] = rw_bs[2] = 0;
	skipped_writes = 0;
	this_depth = depth = 0;
	do {
		int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

		if (ret < 0)
			goto err;
		else if (!ret)
			break;
		else if (ret < (int) sizeof(t)) {
			log_err("fio: short fifo get\n");
			break;
		}

		if (need_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(td, fifo, fd, &t);
		if (ret < 0) {
			td_verror(td, ret, "blktrace lseek");
			goto err;
		} else if (t.pdu_len != ret) {
			log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
			goto err;
		}

		/*
		 * Track queue depth: queued events raise the pending count,
		 * a completion records the burst and resets it. Skip writes
		 * if the job is running read-only.
		 */
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				this_depth++;
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE) {
				depth = max(depth, this_depth);
				this_depth = 0;
			}

			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		handle_trace(td, &t, ios, rw_bs);
	} while (1);

	for (i = 0; i < td->files_index; i++) {
		f = td->files[i];
		trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
	}

	fifo_free(fifo);
	close(fd);

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return 1;
	}

	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	if (!depth || depth > 1024)
		depth = 1024;

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
						td->o.name, skipped_writes);

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		log_err("fio: found no ios in blktrace data\n");
		return 1;
	} else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	} else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	} else {
		td->o.td_ddir = TD_DDIR_RW;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * We need to do direct/raw ios to the device, to avoid getting
	 * read-ahead in our way.
	 */
	td->o.odirect = 1;

	/*
	 * we don't know if this option was set or not. it defaults to 1,
	 * so we'll just guess that we should override it if it's still 1
	 */
	if (td->o.iodepth != 1)
		td->o.iodepth = depth;

	return 0;
err:
	close(fd);
	fifo_free(fifo);
	return 1;
}
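
/*
 * Illustrative job file usage (not part of this file): pointing read_iolog
 * at a blktrace binary makes fio probe it with is_blktrace() and replay it
 * via load_blktrace(), e.g.
 *
 *	[replay]
 *	read_iolog=sdb.trace.bin
 *	replay_redirect=/dev/sdc
 *
 * replay_redirect is optional and remaps every traced device to the given
 * one, which is what trace_add_file() honours above.
 */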