[fio.git] / blktrace.c
/*
 * blktrace support code for fio
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <dirent.h>

#include "flist.h"
#include "fio.h"
#include "blktrace_api.h"

#define TRACE_FIFO_SIZE 8192

/*
 * fifo refill frontend, to avoid reading data in trace sized bites
 */
static int refill_fifo(struct thread_data *td, struct fifo *fifo, int fd)
{
        char buf[TRACE_FIFO_SIZE];
        unsigned int total;
        int ret;

        total = sizeof(buf);
        if (total > fifo_room(fifo))
                total = fifo_room(fifo);

        ret = read(fd, buf, total);
        if (ret < 0) {
                td_verror(td, errno, "read blktrace file");
                return -1;
        }

        if (ret > 0)
                ret = fifo_put(fifo, buf, ret);

        dprint(FD_BLKTRACE, "refill: filled %d bytes\n", ret);
        return ret;
}

/*
 * Retrieve 'len' bytes from the fifo, refilling if necessary.
 */
static int trace_fifo_get(struct thread_data *td, struct fifo *fifo, int fd,
                          void *buf, unsigned int len)
{
        if (fifo_len(fifo) < len) {
                int ret = refill_fifo(td, fifo, fd);

                if (ret < 0)
                        return ret;
        }

        return fifo_get(fifo, buf, len);
}

/*
 * Just discard the pdu by seeking past it.
 */
static int discard_pdu(struct thread_data *td, struct fifo *fifo, int fd,
                       struct blk_io_trace *t)
{
        if (t->pdu_len == 0)
                return 0;

        dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
        return trace_fifo_get(td, fifo, fd, NULL, t->pdu_len);
}

/*
 * Check if this is a blktrace binary data file. We read a single trace
 * into memory and check for the magic signature.
 */
int is_blktrace(const char *filename)
{
        struct blk_io_trace t;
        int fd, ret;

        fd = open(filename, O_RDONLY);
        if (fd < 0)
                return 0;

        ret = read(fd, &t, sizeof(t));
        close(fd);

        if (ret < 0) {
                perror("read blktrace");
                return 0;
        } else if (ret != sizeof(t)) {
                log_err("fio: short read on blktrace file\n");
                return 0;
        }

        if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
                return 1;

        return 0;
}

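/*
 * Walk 'path' recursively and look for a block device whose major/minor
 * numbers match 'maj' and 'min'. On success the matching path is copied
 * back into 'path'. If replay_redirect is set, that device is returned
 * for every lookup instead.
 */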
static int lookup_device(struct thread_data *td, char *path, unsigned int maj,
                         unsigned int min)
{
        struct dirent *dir;
        struct stat st;
        int found = 0;
        DIR *D;

        D = opendir(path);
        if (!D)
                return 0;

        while ((dir = readdir(D)) != NULL) {
                char full_path[256];

                if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
                        continue;

                sprintf(full_path, "%s%s%s", path, FIO_OS_PATH_SEPARATOR, dir->d_name);
                if (lstat(full_path, &st) == -1) {
                        perror("lstat");
                        break;
                }

                if (S_ISDIR(st.st_mode)) {
                        found = lookup_device(td, full_path, maj, min);
                        if (found) {
                                strcpy(path, full_path);
                                break;
                        }
                }

                if (!S_ISBLK(st.st_mode))
                        continue;

                /*
                 * If replay_redirect is set then always return this device
                 * upon lookup which overrides the device lookup based on
                 * major minor in the actual blktrace
                 */
                if (td->o.replay_redirect) {
                        dprint(FD_BLKTRACE, "device lookup: %d/%d overridden"
                                        " with: %s\n", maj, min,
                                        td->o.replay_redirect);
                        strcpy(path, td->o.replay_redirect);
                        found = 1;
                        break;
                }

                if (maj == major(st.st_rdev) && min == minor(st.st_rdev)) {
                        dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);
                        strcpy(path, full_path);
                        found = 1;
                        break;
                }
        }

        closedir(D);
        return found;
}

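/*
 * blktrace encodes the device as major:minor in a single 32-bit field,
 * with the minor number occupying the low 20 bits.
 */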
#define FMINORBITS      20
#define FMINORMASK      ((1U << FMINORBITS) - 1)
#define FMAJOR(dev)     ((unsigned int) ((dev) >> FMINORBITS))
#define FMINOR(dev)     ((unsigned int) ((dev) & FMINORMASK))

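/*
 * Queue a synthetic open/close event for 'fileno' in the io log, so that
 * replay opens and closes files at the right points.
 */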
static void trace_add_open_close_event(struct thread_data *td, int fileno, enum file_log_act action)
{
        struct io_piece *ipo;

        ipo = calloc(1, sizeof(*ipo));
        init_ipo(ipo);

        ipo->ddir = DDIR_INVAL;
        ipo->fileno = fileno;
        ipo->file_action = action;
        flist_add_tail(&ipo->list, &td->io_log_list);
}

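/*
 * Map the device in a trace to an fio file index, adding the device as a
 * new file (and logging an open event) if we haven't seen it yet. The
 * last lookup is cached, since consecutive traces tend to hit the same
 * device.
 */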
static int trace_add_file(struct thread_data *td, __u32 device)
{
        static unsigned int last_maj, last_min, last_fileno;
        unsigned int maj = FMAJOR(device);
        unsigned int min = FMINOR(device);
        struct fio_file *f;
        char dev[256];
        unsigned int i;

        if (last_maj == maj && last_min == min)
                return last_fileno;

        last_maj = maj;
        last_min = min;

        /*
         * check for this file in our list
         */
        for_each_file(td, f, i)
                if (f->major == maj && f->minor == min) {
                        last_fileno = f->fileno;
                        return last_fileno;
                }

        strcpy(dev, "/dev");
        if (lookup_device(td, dev, maj, min)) {
                int fileno;

                dprint(FD_BLKTRACE, "add devices %s\n", dev);
                fileno = add_file_exclusive(td, dev);
                trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
                last_fileno = fileno;
        }
        return last_fileno;
}

/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
                      unsigned int bytes, int rw, unsigned long long ttime,
                      int fileno)
{
        struct io_piece *ipo = malloc(sizeof(*ipo));

        init_ipo(ipo);

        /*
         * the 512 is wrong here, it should be the hardware sector size...
         */
        ipo->offset = offset * 512;
        ipo->len = bytes;
        ipo->delay = ttime / 1000;
        if (rw)
                ipo->ddir = DDIR_WRITE;
        else
                ipo->ddir = DDIR_READ;
        ipo->fileno = fileno;

        dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
                                                        ipo->ddir, ipo->offset,
                                                        ipo->len, ipo->delay);
        queue_io_piece(td, ipo);
}

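/*
 * Notify traces carry no IO; just report process/timestamp notifies and
 * ignore message notifies.
 */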
static void handle_trace_notify(struct blk_io_trace *t)
{
        switch (t->action) {
        case BLK_TN_PROCESS:
                printf("got process notify: %x, %d\n", t->action, t->pid);
                break;
        case BLK_TN_TIMESTAMP:
                printf("got timestamp notify: %x, %d\n", t->action, t->pid);
                break;
        case BLK_TN_MESSAGE:
                break;
        default:
                dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
                break;
        }
}

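/*
 * Turn a discard trace into a TRIM io_piece and queue it for replay.
 */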
static void handle_trace_discard(struct thread_data *td, struct blk_io_trace *t,
                                 unsigned long long ttime, unsigned long *ios)
{
        struct io_piece *ipo = malloc(sizeof(*ipo));
        int fileno;

        init_ipo(ipo);
        fileno = trace_add_file(td, t->device);

        ios[DDIR_WRITE]++;
        td->o.size += t->bytes;

        /*
         * the 512 is wrong here, it should be the hardware sector size...
         */
        ipo->offset = t->sector * 512;
        ipo->len = t->bytes;
        ipo->delay = ttime / 1000;
        ipo->ddir = DDIR_TRIM;
        ipo->fileno = fileno;

        dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
                                                        ipo->offset, ipo->len,
                                                        ipo->delay);
        queue_io_piece(td, ipo);
}

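/*
 * Turn a regular read/write trace into an io_piece, tracking the largest
 * block size seen per data direction.
 */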
static void handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
                            unsigned long long ttime, unsigned long *ios,
                            unsigned int *bs)
{
        int rw;
        int fileno;

        fileno = trace_add_file(td, t->device);

        rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

        if (t->bytes > bs[rw])
                bs[rw] = t->bytes;

        ios[rw]++;
        td->o.size += t->bytes;
        store_ipo(td, t->sector, t->bytes, rw, ttime, fileno);
}

/*
 * We only care for queue traces, most of the others are side effects
 * due to internal workings of the block layer.
 */
static void handle_trace(struct thread_data *td, struct blk_io_trace *t,
                         unsigned long long ttime, unsigned long *ios,
                         unsigned int *bs)
{
        if ((t->action & 0xffff) != __BLK_TA_QUEUE)
                return;
        if (t->action & BLK_TC_ACT(BLK_TC_PC))
                return;

        if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
                handle_trace_notify(t);
        else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
                handle_trace_discard(td, t, ttime, ios);
        else
                handle_trace_fs(td, t, ttime, ios, bs);
}

/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
int load_blktrace(struct thread_data *td, const char *filename)
{
        unsigned long long ttime, delay;
        struct blk_io_trace t;
        unsigned long ios[2], skipped_writes;
        unsigned int cpu;
        unsigned int rw_bs[2];
        struct fifo *fifo;
        int fd, i;
        struct fio_file *f;

        fd = open(filename, O_RDONLY);
        if (fd < 0) {
                td_verror(td, errno, "open blktrace file");
                return 1;
        }

        fifo = fifo_alloc(TRACE_FIFO_SIZE);

        td->o.size = 0;

        cpu = 0;
        ttime = 0;
        ios[0] = ios[1] = 0;
        rw_bs[0] = rw_bs[1] = 0;
        skipped_writes = 0;
        do {
                int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

                if (ret < 0)
                        goto err;
                else if (!ret)
                        break;
                else if (ret < (int) sizeof(t)) {
                        log_err("fio: short fifo get\n");
                        break;
                }

                if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
                        log_err("fio: bad magic in blktrace data: %x\n",
                                                                t.magic);
                        goto err;
                }
                if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
                        log_err("fio: bad blktrace version %d\n",
                                                                t.magic & 0xff);
                        goto err;
                }
                ret = discard_pdu(td, fifo, fd, &t);
                if (ret < 0) {
                        td_verror(td, ret, "blktrace lseek");
                        goto err;
                } else if (t.pdu_len != ret) {
                        log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
                        goto err;
                }
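                /*
                 * For non-notify traces, replay the inter-IO gap. The delay
                 * is only computed against the previous trace if it came
                 * from the same CPU, since timestamps taken on different
                 * CPUs may not be directly comparable.
                 */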
                if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
                        if (!ttime) {
                                ttime = t.time;
                                cpu = t.cpu;
                        }

                        delay = 0;
                        if (cpu == t.cpu)
                                delay = t.time - ttime;
                        if ((t.action & BLK_TC_ACT(BLK_TC_WRITE)) && read_only)
                                skipped_writes++;
                        else {
                                /*
                                 * set delay to zero if no_stall enabled for
                                 * fast replay
                                 */
                                if (td->o.no_stall)
                                        delay = 0;

                                handle_trace(td, &t, delay, ios, rw_bs);
                        }

                        ttime = t.time;
                        cpu = t.cpu;
                } else {
                        delay = 0;
                        handle_trace(td, &t, delay, ios, rw_bs);
                }
        } while (1);

        for (i = 0; i < td->files_index; i++) {
                f = td->files[i];
                trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
        }

        fifo_free(fifo);
        close(fd);

        if (skipped_writes)
                log_err("fio: %s skips replay of %lu writes due to read-only\n",
                                                td->o.name, skipped_writes);

        if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
                log_err("fio: found no ios in blktrace data\n");
                return 1;
        } else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
                td->o.td_ddir = TD_DDIR_READ;
                td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
        } else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
                td->o.td_ddir = TD_DDIR_WRITE;
                td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
        } else {
                td->o.td_ddir = TD_DDIR_RW;
                td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
                td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
        }

        /*
         * We need to do direct/raw ios to the device, to avoid getting
         * read-ahead in our way.
         */
        td->o.odirect = 1;

        return 0;
err:
        close(fd);
        fifo_free(fifo);
        return 1;
}