Add support for replaying blktrace trim/discard
[fio.git] / blktrace.c
/*
 * blktrace support code for fio
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <dirent.h>

#include "flist.h"
#include "fio.h"
#include "blktrace_api.h"

#define TRACE_FIFO_SIZE	8192

/*
 * fifo refill frontend, to avoid reading data in trace sized bites
 */
static int refill_fifo(struct thread_data *td, struct fifo *fifo, int fd)
{
	char buf[TRACE_FIFO_SIZE];
	unsigned int total;
	int ret;

	total = sizeof(buf);
	if (total > fifo_room(fifo))
		total = fifo_room(fifo);

	ret = read(fd, buf, total);
	if (ret < 0) {
		td_verror(td, errno, "read blktrace file");
		return -1;
	}

	if (ret > 0)
		ret = fifo_put(fifo, buf, ret);

	dprint(FD_BLKTRACE, "refill: filled %d bytes\n", ret);
	return ret;
}

/*
 * Retrieve 'len' bytes from the fifo, refilling if necessary.
 */
static int trace_fifo_get(struct thread_data *td, struct fifo *fifo, int fd,
			  void *buf, unsigned int len)
{
	if (fifo_len(fifo) < len) {
		int ret = refill_fifo(td, fifo, fd);

		if (ret < 0)
			return ret;
	}

	return fifo_get(fifo, buf, len);
}

/*
 * Just discard the pdu by seeking past it.
 */
static int discard_pdu(struct thread_data *td, struct fifo *fifo, int fd,
		       struct blk_io_trace *t)
{
	if (t->pdu_len == 0)
		return 0;

	dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
	return trace_fifo_get(td, fifo, fd, NULL, t->pdu_len);
}

/*
 * Check if this is a blktrace binary data file. We read a single trace
 * into memory and check for the magic signature.
 */
int is_blktrace(const char *filename)
{
	struct blk_io_trace t;
	int fd, ret;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		return 0;

	ret = read(fd, &t, sizeof(t));
	close(fd);

	if (ret < 0) {
		perror("read blktrace");
		return 0;
	} else if (ret != sizeof(t)) {
		log_err("fio: short read on blktrace file\n");
		return 0;
	}

	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
		return 1;

	return 0;
}

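/*
 * Recursively scan a directory (typically /dev) for a block device node
 * with the given major/minor. On success, the matching path is copied
 * back into 'path'.
 */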
static int lookup_device(char *path, unsigned int maj, unsigned int min)
{
	struct dirent *dir;
	struct stat st;
	int found = 0;
	DIR *D;

	D = opendir(path);
	if (!D)
		return 0;

	while ((dir = readdir(D)) != NULL) {
		char full_path[256];

		if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
			continue;

		sprintf(full_path, "%s/%s", path, dir->d_name);
		if (lstat(full_path, &st) == -1) {
			perror("lstat");
			break;
		}

		if (S_ISDIR(st.st_mode)) {
			found = lookup_device(full_path, maj, min);
			if (found) {
				strcpy(path, full_path);
				break;
			}
		}

		if (!S_ISBLK(st.st_mode))
			continue;

		if (maj == major(st.st_rdev) && min == minor(st.st_rdev)) {
			dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);
			strcpy(path, full_path);
			found = 1;
			break;
		}
	}

	closedir(D);
	return found;
}

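/*
 * The 32-bit device field in a blk_io_trace uses the kernel's 12:20
 * major:minor encoding, so decode it with these helpers rather than
 * the C library major()/minor() macros.
 */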
#define FMINORBITS	20
#define FMINORMASK	((1U << FMINORBITS) - 1)
#define FMAJOR(dev)	((unsigned int) ((dev) >> FMINORBITS))
#define FMINOR(dev)	((unsigned int) ((dev) & FMINORMASK))

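/*
 * Queue a file open action in the io log, so the replay opens the device
 * before issuing I/O against it.
 */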
static void trace_add_open_event(struct thread_data *td, int fileno)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));

	ipo->ddir = DDIR_INVAL;
	ipo->fileno = fileno;
	ipo->file_action = FIO_LOG_OPEN_FILE;
	flist_add_tail(&ipo->list, &td->io_log_list);
}

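/*
 * Map the device in a trace to a fio file. If this major/minor hasn't been
 * seen before, look up the device node under /dev and add it to the job's
 * file list.
 */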
static void trace_add_file(struct thread_data *td, __u32 device)
{
	static unsigned int last_maj, last_min;
	unsigned int maj = FMAJOR(device);
	unsigned int min = FMINOR(device);
	struct fio_file *f;
	char dev[256];
	unsigned int i;

	if (last_maj == maj && last_min == min)
		return;

	last_maj = maj;
	last_min = min;

	/*
	 * check for this file in our list
	 */
	for_each_file(td, f, i)
		if (f->major == maj && f->minor == min)
			return;

	strcpy(dev, "/dev");
	if (lookup_device(dev, maj, min)) {
		int fileno;

		dprint(FD_BLKTRACE, "add devices %s\n", dev);
		fileno = add_file_exclusive(td, dev);
		trace_add_open_event(td, fileno);
	}
}

/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
		      unsigned int bytes, int rw, unsigned long long ttime)
{
	struct io_piece *ipo = malloc(sizeof(*ipo));

	memset(ipo, 0, sizeof(*ipo));
	INIT_FLIST_HEAD(&ipo->list);
	/*
	 * the 512 is wrong here, it should be the hardware sector size...
	 */
	ipo->offset = offset * 512;
	ipo->len = bytes;
	ipo->delay = ttime / 1000;
	if (rw)
		ipo->ddir = DDIR_WRITE;
	else
		ipo->ddir = DDIR_READ;

	dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
						ipo->ddir, ipo->offset,
						ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}

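/*
 * Notify traces carry process/timestamp/message metadata rather than I/O,
 * so nothing is stored for replay.
 */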
static void handle_trace_notify(struct blk_io_trace *t)
{
	switch (t->action) {
	case BLK_TN_PROCESS:
		printf("got process notify: %x, %d\n", t->action, t->pid);
		break;
	case BLK_TN_TIMESTAMP:
		printf("got timestamp notify: %x, %d\n", t->action, t->pid);
		break;
	case BLK_TN_MESSAGE:
		break;
	default:
		dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
		break;
	}
}

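/*
 * Turn a queued discard trace into a DDIR_TRIM io_piece, so the replay
 * issues a trim for the same sector range.
 */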
static void handle_trace_discard(struct thread_data *td, struct blk_io_trace *t,
				 unsigned long long ttime, unsigned long *ios)
{
	struct io_piece *ipo = malloc(sizeof(*ipo));

	trace_add_file(td, t->device);

	ios[DDIR_WRITE]++;
	td->o.size += t->bytes;

	memset(ipo, 0, sizeof(*ipo));
	INIT_FLIST_HEAD(&ipo->list);

	/*
	 * the 512 is wrong here, it should be the hardware sector size...
	 */
	ipo->offset = t->sector * 512;
	ipo->len = t->bytes;
	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_TRIM;

	dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
						ipo->offset, ipo->len,
						ipo->delay);
	queue_io_piece(td, ipo);
}

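/*
 * Store a regular read or write queue trace as an io_piece, tracking the
 * largest block size seen per direction.
 */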
static void handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
			    unsigned long long ttime, unsigned long *ios,
			    unsigned int *bs)
{
	int rw;

	trace_add_file(td, t->device);

	rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

	if (t->bytes > bs[rw])
		bs[rw] = t->bytes;

	ios[rw]++;
	td->o.size += t->bytes;
	store_ipo(td, t->sector, t->bytes, rw, ttime);
}

/*
 * We only care for queue traces, most of the others are side effects
 * due to internal workings of the block layer.
 */
static void handle_trace(struct thread_data *td, struct blk_io_trace *t,
			 unsigned long long ttime, unsigned long *ios,
			 unsigned int *bs)
{
	if ((t->action & 0xffff) != __BLK_TA_QUEUE)
		return;
	if (t->action & BLK_TC_ACT(BLK_TC_PC))
		return;

	if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
		handle_trace_notify(t);
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		handle_trace_discard(td, t, ttime, ios);
	else
		handle_trace_fs(td, t, ttime, ios, bs);
}

/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
int load_blktrace(struct thread_data *td, const char *filename)
{
	unsigned long long ttime, delay;
	struct blk_io_trace t;
	unsigned long ios[2], skipped_writes;
	unsigned int cpu;
	unsigned int rw_bs[2];
	struct fifo *fifo;
	int fd;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		td_verror(td, errno, "open blktrace file");
		return 1;
	}

	fifo = fifo_alloc(TRACE_FIFO_SIZE);

	td->o.size = 0;

	cpu = 0;
	ttime = 0;
	ios[0] = ios[1] = 0;
	rw_bs[0] = rw_bs[1] = 0;
	skipped_writes = 0;
	do {
		int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

		if (ret < 0)
			goto err;
		else if (!ret)
			break;
		else if (ret < (int) sizeof(t)) {
			log_err("fio: short fifo get\n");
			break;
		}

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(td, fifo, fd, &t);
		if (ret < 0) {
			td_verror(td, ret, "blktrace lseek");
			goto err;
		} else if (t.pdu_len != ret) {
			log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
			goto err;
		}
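		/*
		 * For normal traces, derive the inter-I/O delay from the
		 * previous trace's timestamp. Only trust the delta when both
		 * traces came from the same CPU, since timestamps from
		 * different CPUs are not directly comparable.
		 */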
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			if (!ttime) {
				ttime = t.time;
				cpu = t.cpu;
			}

			delay = 0;
			if (cpu == t.cpu)
				delay = t.time - ttime;
			if ((t.action & BLK_TC_ACT(BLK_TC_WRITE)) && read_only)
				skipped_writes++;
			else {
				/*
				 * set delay to zero if no_stall enabled for
				 * fast replay
				 */
				if (td->o.no_stall)
					delay = 0;

				handle_trace(td, &t, delay, ios, rw_bs);
			}

			ttime = t.time;
			cpu = t.cpu;
		} else {
			delay = 0;
			handle_trace(td, &t, delay, ios, rw_bs);
		}
	} while (1);

	fifo_free(fifo);
	close(fd);

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
						td->o.name, skipped_writes);

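	/*
	 * Derive the job's data direction and per-direction maximum block
	 * sizes from the I/O actually seen in the trace.
	 */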
	if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		log_err("fio: found no ios in blktrace data\n");
		return 1;
	} else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	} else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	} else {
		td->o.td_ddir = TD_DDIR_RW;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	}

	/*
	 * We need to do direct/raw ios to the device, to avoid getting
	 * read-ahead in our way.
	 */
	td->o.odirect = 1;

	return 0;
err:
	close(fd);
	fifo_free(fifo);
	return 1;
}