Update io engine version
[fio.git] / blktrace.c
1 /*
2  * blktrace support code for fio
3  */
4 #include <stdio.h>
5 #include <stdlib.h>
6 #include <sys/stat.h>
7 #include <dirent.h>
8
9 #include "flist.h"
10 #include "fio.h"
11 #include "blktrace_api.h"
12
13 #define TRACE_FIFO_SIZE 8192
14
15 /*
16  * fifo refill frontend, to avoid reading data in trace sized bites
17  */
18 static int refill_fifo(struct thread_data *td, struct fifo *fifo, int fd)
19 {
20         char buf[TRACE_FIFO_SIZE];
21         unsigned int total;
22         int ret;
23
24         total = sizeof(buf);
25         if (total > fifo_room(fifo))
26                 total = fifo_room(fifo);
27
28         ret = read(fd, buf, total);
29         if (ret < 0) {
30                 td_verror(td, errno, "read blktrace file");
31                 return -1;
32         }
33
34         if (ret > 0)
35                 ret = fifo_put(fifo, buf, ret);
36
37         dprint(FD_BLKTRACE, "refill: filled %d bytes\n", ret);
38         return ret;
39 }
40
/*
 * Pull 'len' bytes out of the fifo into 'buf', refilling from 'fd'
 * first if the fifo doesn't currently hold enough data.
 */
static int trace_fifo_get(struct thread_data *td, struct fifo *fifo, int fd,
			  void *buf, unsigned int len)
{
	if (fifo_len(fifo) >= len)
		return fifo_get(fifo, buf, len);

	if (refill_fifo(td, fifo, fd) < 0)
		return -1;

	return fifo_get(fifo, buf, len);
}
56
57 /*
58  * Just discard the pdu by seeking past it.
59  */
60 static int discard_pdu(struct thread_data *td, struct fifo *fifo, int fd,
61                        struct blk_io_trace *t)
62 {
63         if (t->pdu_len == 0)
64                 return 0;
65
66         dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
67         return trace_fifo_get(td, fifo, fd, NULL, t->pdu_len);
68 }
69
70 /*
71  * Check if this is a blktrace binary data file. We read a single trace
72  * into memory and check for the magic signature.
73  */
74 int is_blktrace(const char *filename, int *need_swap)
75 {
76         struct blk_io_trace t;
77         int fd, ret;
78
79         fd = open(filename, O_RDONLY);
80         if (fd < 0)
81                 return 0;
82
83         ret = read(fd, &t, sizeof(t));
84         close(fd);
85
86         if (ret < 0) {
87                 perror("read blktrace");
88                 return 0;
89         } else if (ret != sizeof(t)) {
90                 log_err("fio: short read on blktrace file\n");
91                 return 0;
92         }
93
94         if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
95                 *need_swap = 0;
96                 return 1;
97         }
98
99         /*
100          * Maybe it needs to be endian swapped...
101          */
102         t.magic = fio_swap32(t.magic);
103         if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
104                 *need_swap = 1;
105                 return 1;
106         }
107
108         return 0;
109 }
110
111 static int lookup_device(struct thread_data *td, char *path, unsigned int maj,
112                          unsigned int min)
113 {
114         struct dirent *dir;
115         struct stat st;
116         int found = 0;
117         DIR *D;
118
119         D = opendir(path);
120         if (!D)
121                 return 0;
122
123         while ((dir = readdir(D)) != NULL) {
124                 char full_path[256];
125
126                 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
127                         continue;
128
129                 sprintf(full_path, "%s%s%s", path, FIO_OS_PATH_SEPARATOR, dir->d_name);
130                 if (lstat(full_path, &st) == -1) {
131                         perror("lstat");
132                         break;
133                 }
134
135                 if (S_ISDIR(st.st_mode)) {
136                         found = lookup_device(td, full_path, maj, min);
137                         if (found) {
138                                 strcpy(path, full_path);
139                                 break;
140                         }
141                 }
142
143                 if (!S_ISBLK(st.st_mode))
144                         continue;
145
146                 /*
147                  * If replay_redirect is set then always return this device
148                  * upon lookup which overrides the device lookup based on
149                  * major minor in the actual blktrace
150                  */
151                 if (td->o.replay_redirect) {
152                         dprint(FD_BLKTRACE, "device lookup: %d/%d\n overridden"
153                                         " with: %s\n", maj, min,
154                                         td->o.replay_redirect);
155                         strcpy(path, td->o.replay_redirect);
156                         found = 1;
157                         break;
158                 }
159
160                 if (maj == major(st.st_rdev) && min == minor(st.st_rdev)) {
161                         dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);
162                         strcpy(path, full_path);
163                         found = 1;
164                         break;
165                 }
166         }
167
168         closedir(D);
169         return found;
170 }
171
172 #define FMINORBITS      20
173 #define FMINORMASK      ((1U << FMINORBITS) - 1)
174 #define FMAJOR(dev)     ((unsigned int) ((dev) >> FMINORBITS))
175 #define FMINOR(dev)     ((unsigned int) ((dev) & FMINORMASK))
176
177 static void trace_add_open_close_event(struct thread_data *td, int fileno, enum file_log_act action)
178 {
179         struct io_piece *ipo;
180
181         ipo = calloc(1, sizeof(*ipo));
182         init_ipo(ipo);
183
184         ipo->ddir = DDIR_INVAL;
185         ipo->fileno = fileno;
186         ipo->file_action = action;
187         flist_add_tail(&ipo->list, &td->io_log_list);
188 }
189
/*
 * Map the device number from a trace record to an fio file index,
 * adding the device as a new fio_file if we haven't seen it before.
 *
 * NOTE(review): if the /dev lookup fails, the function falls through
 * and returns the previous last_fileno unchanged — presumably traces
 * for unresolvable devices are attributed to the last resolved file;
 * confirm against callers.
 */
static int trace_add_file(struct thread_data *td, __u32 device)
{
	/* one-entry cache: consecutive traces usually hit the same device */
	static unsigned int last_maj, last_min, last_fileno;
	unsigned int maj = FMAJOR(device);
	unsigned int min = FMINOR(device);
	struct fio_file *f;
	char dev[256];
	unsigned int i;

	if (last_maj == maj && last_min == min)
		return last_fileno;

	last_maj = maj;
	last_min = min;

	/*
	 * check for this file in our list
	 */
	for_each_file(td, f, i)
		if (f->major == maj && f->minor == min) {
			last_fileno = f->fileno;
			return last_fileno;
		}

	/* not known yet: resolve maj/min to a /dev node and add it */
	strcpy(dev, "/dev");
	if (lookup_device(td, dev, maj, min)) {
		int fileno;

		dprint(FD_BLKTRACE, "add devices %s\n", dev);
		fileno = add_file_exclusive(td, dev);
		/* count the new file towards the job's open-file total */
		td->o.open_files++;
		td->files[fileno]->major = maj;
		td->files[fileno]->minor = min;
		trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
		last_fileno = fileno;
	}

	return last_fileno;
}
229
230 /*
231  * Store blk_io_trace data in an ipo for later retrieval.
232  */
233 static void store_ipo(struct thread_data *td, unsigned long long offset,
234                       unsigned int bytes, int rw, unsigned long long ttime,
235                       int fileno)
236 {
237         struct io_piece *ipo = malloc(sizeof(*ipo));
238
239         init_ipo(ipo);
240
241         /*
242          * the 512 is wrong here, it should be the hardware sector size...
243          */
244         ipo->offset = offset * 512;
245         ipo->len = bytes;
246         ipo->delay = ttime / 1000;
247         if (rw)
248                 ipo->ddir = DDIR_WRITE;
249         else
250                 ipo->ddir = DDIR_READ;
251         ipo->fileno = fileno;
252
253         dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
254                                                         ipo->ddir, ipo->offset,
255                                                         ipo->len, ipo->delay);
256         queue_io_piece(td, ipo);
257 }
258
259 static void handle_trace_notify(struct blk_io_trace *t)
260 {
261         switch (t->action) {
262         case BLK_TN_PROCESS:
263                 dprint(FD_BLKTRACE, "got process notify: %x, %d\n",
264                                 t->action, t->pid);
265                 break;
266         case BLK_TN_TIMESTAMP:
267                 dprint(FD_BLKTRACE, "got timestamp notify: %x, %d\n",
268                                 t->action, t->pid);
269                 break;
270         case BLK_TN_MESSAGE:
271                 break;
272         default:
273                 dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
274                 break;
275         }
276 }
277
278 static void handle_trace_discard(struct thread_data *td,
279                                  struct blk_io_trace *t,
280                                  unsigned long long ttime,
281                                  unsigned long *ios, unsigned int *bs)
282 {
283         struct io_piece *ipo = malloc(sizeof(*ipo));
284         int fileno;
285
286         init_ipo(ipo);
287         fileno = trace_add_file(td, t->device);
288
289         ios[DDIR_TRIM]++;
290         if (t->bytes > bs[DDIR_TRIM])
291                 bs[DDIR_TRIM] = t->bytes;
292
293         td->o.size += t->bytes;
294
295         memset(ipo, 0, sizeof(*ipo));
296         INIT_FLIST_HEAD(&ipo->list);
297
298         /*
299          * the 512 is wrong here, it should be the hardware sector size...
300          */
301         ipo->offset = t->sector * 512;
302         ipo->len = t->bytes;
303         ipo->delay = ttime / 1000;
304         ipo->ddir = DDIR_TRIM;
305         ipo->fileno = fileno;
306
307         dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
308                                                         ipo->offset, ipo->len,
309                                                         ipo->delay);
310         queue_io_piece(td, ipo);
311 }
312
313 static void handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
314                             unsigned long long ttime, unsigned long *ios,
315                             unsigned int *bs)
316 {
317         int rw;
318         int fileno;
319
320         fileno = trace_add_file(td, t->device);
321
322         rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
323
324         if (t->bytes > bs[rw])
325                 bs[rw] = t->bytes;
326
327         ios[rw]++;
328         td->o.size += t->bytes;
329         store_ipo(td, t->sector, t->bytes, rw, ttime, fileno);
330 }
331
332 /*
333  * We only care for queue traces, most of the others are side effects
334  * due to internal workings of the block layer.
335  */
336 static void handle_trace(struct thread_data *td, struct blk_io_trace *t,
337                          unsigned long *ios, unsigned int *bs)
338 {
339         static unsigned long long last_ttime;
340         unsigned long long delay;
341
342         if ((t->action & 0xffff) != __BLK_TA_QUEUE)
343                 return;
344
345         if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
346                 if (!last_ttime || td->o.no_stall) {
347                         last_ttime = t->time;
348                         delay = 0;
349                 } else {
350                         delay = t->time - last_ttime;
351                         last_ttime = t->time;
352                 }
353         }
354
355         if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
356                 handle_trace_notify(t);
357         else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
358                 handle_trace_discard(td, t, delay, ios, bs);
359         else
360                 handle_trace_fs(td, t, delay, ios, bs);
361 }
362
363 static void byteswap_trace(struct blk_io_trace *t)
364 {
365         t->magic = fio_swap32(t->magic);
366         t->sequence = fio_swap32(t->sequence);
367         t->time = fio_swap64(t->time);
368         t->sector = fio_swap64(t->sector);
369         t->bytes = fio_swap32(t->bytes);
370         t->action = fio_swap32(t->action);
371         t->pid = fio_swap32(t->pid);
372         t->device = fio_swap32(t->device);
373         t->cpu = fio_swap32(t->cpu);
374         t->error = fio_swap16(t->error);
375         t->pdu_len = fio_swap16(t->pdu_len);
376 }
377
378 static int t_is_write(struct blk_io_trace *t)
379 {
380         return (t->action & BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_DISCARD)) != 0;
381 }
382
383 /*
384  * Load a blktrace file by reading all the blk_io_trace entries, and storing
385  * them as io_pieces like the fio text version would do.
386  */
387 int load_blktrace(struct thread_data *td, const char *filename, int need_swap)
388 {
389         struct blk_io_trace t;
390         unsigned long ios[DDIR_RWDIR_CNT], skipped_writes;
391         unsigned int rw_bs[DDIR_RWDIR_CNT];
392         struct fifo *fifo;
393         int fd, i, old_state;
394         struct fio_file *f;
395         int this_depth, depth;
396
397         fd = open(filename, O_RDONLY);
398         if (fd < 0) {
399                 td_verror(td, errno, "open blktrace file");
400                 return 1;
401         }
402
403         fifo = fifo_alloc(TRACE_FIFO_SIZE);
404
405         old_state = td_bump_runstate(td, TD_SETTING_UP);
406
407         td->o.size = 0;
408
409         ios[0] = ios[1] = 0;
410         rw_bs[0] = rw_bs[1] = 0;
411         skipped_writes = 0;
412         this_depth = depth = 0;
413         do {
414                 int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));
415
416                 if (ret < 0)
417                         goto err;
418                 else if (!ret)
419                         break;
420                 else if (ret < (int) sizeof(t)) {
421                         log_err("fio: short fifo get\n");
422                         break;
423                 }
424
425                 if (need_swap)
426                         byteswap_trace(&t);
427
428                 if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
429                         log_err("fio: bad magic in blktrace data: %x\n",
430                                                                 t.magic);
431                         goto err;
432                 }
433                 if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
434                         log_err("fio: bad blktrace version %d\n",
435                                                                 t.magic & 0xff);
436                         goto err;
437                 }
438                 ret = discard_pdu(td, fifo, fd, &t);
439                 if (ret < 0) {
440                         td_verror(td, ret, "blktrace lseek");
441                         goto err;
442                 } else if (t.pdu_len != ret) {
443                         log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
444                         goto err;
445                 }
446                 if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
447                         if ((t.action & 0xffff) == __BLK_TA_QUEUE)
448                                 this_depth++;
449                         else if ((t.action & 0xffff) == __BLK_TA_COMPLETE) {
450                                 depth = max(depth, this_depth);
451                                 this_depth = 0;
452                         }
453
454                         if (t_is_write(&t) && read_only) {
455                                 skipped_writes++;
456                                 continue;
457                         }
458                 }
459
460                 handle_trace(td, &t, ios, rw_bs);
461         } while (1);
462
463         for (i = 0; i < td->files_index; i++) {
464                 f = td->files[i];
465                 trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
466         }
467
468         fifo_free(fifo);
469         close(fd);
470
471         td_restore_runstate(td, old_state);
472
473         if (!td->files_index) {
474                 log_err("fio: did not find replay device(s)\n");
475                 return 1;
476         }
477
478         /*
479          * For stacked devices, we don't always get a COMPLETE event so
480          * the depth grows to insane values. Limit it to something sane(r).
481          */
482         if (!depth || depth > 1024)
483                 depth = 1024;
484
485         if (skipped_writes)
486                 log_err("fio: %s skips replay of %lu writes due to read-only\n",
487                                                 td->o.name, skipped_writes);
488
489         if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
490                 log_err("fio: found no ios in blktrace data\n");
491                 return 1;
492         } else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
493                 td->o.td_ddir = TD_DDIR_READ;
494                 td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
495         } else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
496                 td->o.td_ddir = TD_DDIR_WRITE;
497                 td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
498         } else {
499                 td->o.td_ddir = TD_DDIR_RW;
500                 td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
501                 td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
502                 td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
503         }
504
505         /*
506          * We need to do direct/raw ios to the device, to avoid getting
507          * read-ahead in our way.
508          */
509         td->o.odirect = 1;
510
511         /*
512          * we don't know if this option was set or not. it defaults to 1,
513          * so we'll just guess that we should override it if it's still 1
514          */
515         if (td->o.iodepth != 1)
516                 td->o.iodepth = depth;
517
518         return 0;
519 err:
520         close(fd);
521         fifo_free(fifo);
522         return 1;
523 }