Style fixups
[fio.git] / blktrace.c
/*
 * blktrace support code for fio
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <dirent.h>

#include "list.h"
#include "fio.h"
#include "blktrace_api.h"

#define TRACE_FIFO_SIZE 65536

/*
 * fifo refill frontend, to avoid reading data in trace-sized bites
 */
static int refill_fifo(struct thread_data *td, struct fifo *fifo, int fd)
{
        char buf[TRACE_FIFO_SIZE];
        unsigned int total;
        int ret;

        total = sizeof(buf);
        if (total > fifo_room(fifo))
                total = fifo_room(fifo);

        ret = read(fd, buf, total);
        if (ret < 0) {
                td_verror(td, errno, "read blktrace file");
                return -1;
        }

        if (ret > 0)
                ret = fifo_put(fifo, buf, ret);

        dprint(FD_BLKTRACE, "refill: filled %d bytes\n", ret);
        return ret;
}

/*
 * Retrieve 'len' bytes from the fifo, refilling if necessary.
 */
static int trace_fifo_get(struct thread_data *td, struct fifo *fifo, int fd,
                          void *buf, unsigned int len)
{
        if (fifo_len(fifo) < len) {
                int ret = refill_fifo(td, fifo, fd);

                if (ret < 0)
                        return ret;
        }

        return fifo_get(fifo, buf, len);
}

/*
 * Just discard the pdu by pulling it off the fifo without storing it.
 */
static int discard_pdu(struct thread_data *td, struct fifo *fifo, int fd,
                       struct blk_io_trace *t)
{
        if (t->pdu_len == 0)
                return 0;

        dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
        return trace_fifo_get(td, fifo, fd, NULL, t->pdu_len);
}

/*
 * Check if this is a blktrace binary data file. We read a single trace
 * into memory and check for the magic signature.
 */
int is_blktrace(const char *filename)
{
        struct blk_io_trace t;
        int fd, ret;

        fd = open(filename, O_RDONLY);
        if (fd < 0)
                return 0;

        ret = read(fd, &t, sizeof(t));
        close(fd);

        if (ret < 0) {
                perror("read blktrace");
                return 0;
        } else if (ret != sizeof(t)) {
                log_err("fio: short read on blktrace file\n");
                return 0;
        }

        /*
         * The low byte of the magic carries the trace format version, so
         * mask it off here; the version is checked when the file is loaded.
         */
        if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
                return 1;

        return 0;
}
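
/*
 * Sketch of how the two public entry points in this file are meant to pair
 * up from a caller's point of view (the 'td' and 'fname' variables here are
 * assumed for illustration, they are not defined in this file):
 *
 *        if (is_blktrace(fname) && load_blktrace(td, fname))
 *                log_err("fio: failed to load blktrace file\n");
 */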

static int lookup_device(char *path, unsigned int maj, unsigned int min)
{
        struct dirent *dir;
        struct stat st;
        int found = 0;
        DIR *D;

        D = opendir(path);
        if (!D)
                return 0;

        while ((dir = readdir(D)) != NULL) {
                char full_path[256];

                if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
                        continue;

                sprintf(full_path, "%s/%s", path, dir->d_name);
                if (lstat(full_path, &st) == -1) {
                        perror("lstat");
                        break;
                }

                if (S_ISDIR(st.st_mode)) {
                        found = lookup_device(full_path, maj, min);
                        if (found) {
                                strcpy(path, full_path);
                                break;
                        }
                }

                if (!S_ISBLK(st.st_mode))
                        continue;

                if (maj == major(st.st_rdev) && min == minor(st.st_rdev)) {
                        dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);
                        strcpy(path, full_path);
                        found = 1;
                        break;
                }
        }

        closedir(D);
        return found;
}
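
/*
 * Note that 'path' is both the directory to start searching from and the
 * result buffer: on a match it is overwritten with the full path of the
 * block device. As a purely illustrative example, assuming major 8 and
 * minor 0 belong to the first SCSI disk on the system:
 *
 *        char dev[256] = "/dev";
 *
 *        if (lookup_device(dev, 8, 0))
 *                printf("%s\n", dev);    // would typically print "/dev/sda"
 */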

#define FMINORBITS      20
#define FMINORMASK      ((1U << FMINORBITS) - 1)
#define FMAJOR(dev)     ((unsigned int) ((dev) >> FMINORBITS))
#define FMINOR(dev)     ((unsigned int) ((dev) & FMINORMASK))
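/*
 * These assume the 32-bit trace device field packs the minor number into
 * the low 20 bits with the major above it. Worked example: device 8:16 is
 * stored as (8 << 20) | 16 = 0x00800010, so FMAJOR() yields 8 and FMINOR()
 * yields 16.
 */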

static void trace_add_file(struct thread_data *td, __u32 device)
{
        static unsigned int last_maj, last_min;
        unsigned int maj = FMAJOR(device);
        unsigned int min = FMINOR(device);
        struct fio_file *f;
        char dev[256];
        unsigned int i;

        if (last_maj == maj && last_min == min)
                return;

        last_maj = maj;
        last_min = min;

        /*
         * check for this file in our list
         */
        for_each_file(td, f, i)
                if (f->major == maj && f->minor == min)
                        return;

        strcpy(dev, "/dev");
        if (lookup_device(dev, maj, min)) {
                dprint(FD_BLKTRACE, "add device %s\n", dev);
                add_file(td, dev);
        }
}

/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
                      unsigned int bytes, int rw, unsigned long long ttime)
{
        struct io_piece *ipo = malloc(sizeof(*ipo));

        memset(ipo, 0, sizeof(*ipo));
        INIT_LIST_HEAD(&ipo->list);
        /*
         * the 512 is wrong here, it should be the hardware sector size...
         */
        ipo->offset = offset * 512;
        ipo->len = bytes;
        ipo->delay = ttime / 1000;
        if (rw)
                ipo->ddir = DDIR_WRITE;
        else
                ipo->ddir = DDIR_READ;

        dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
                ipo->ddir, ipo->offset, ipo->len, ipo->delay);
        list_add_tail(&ipo->list, &td->io_log_list);
}
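
/*
 * Rough worked example of the conversions above, assuming blktrace's usual
 * 512-byte sector units and nanosecond timestamps: a 4096-byte write queued
 * at sector 2048, 1000000ns after the previous trace, is stored as
 * offset=1048576, len=4096, ddir=DDIR_WRITE, delay=1000 (usecs).
 */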

/*
 * We only care about queue traces; most of the others are side effects of
 * the internal workings of the block layer.
 */
static void handle_trace(struct thread_data *td, struct blk_io_trace *t,
                         unsigned long long ttime, unsigned long *ios,
                         unsigned int *bs)
{
        int rw;

        /*
         * The low 16 bits of ->action hold the action type, the upper bits
         * the BLK_TC_ACT() category mask. Skip anything that is not a plain
         * queue trace for normal fs io.
         */
        if ((t->action & 0xffff) != __BLK_TA_QUEUE)
                return;
        if (t->action & BLK_TC_ACT(BLK_TC_PC))
                return;
        if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
                return;

        trace_add_file(td, t->device);

        rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

        if (t->bytes > bs[rw])
                bs[rw] = t->bytes;

        ios[rw]++;
        td->o.size += t->bytes;
        store_ipo(td, t->sector, t->bytes, rw, ttime);
}

/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
int load_blktrace(struct thread_data *td, const char *filename)
{
        unsigned long long ttime, delay;
        struct blk_io_trace t;
        unsigned long ios[2], skipped_writes;
        unsigned int cpu;
        unsigned int rw_bs[2];
        struct fifo *fifo;
        int fd;

        fd = open(filename, O_RDONLY);
        if (fd < 0) {
                td_verror(td, errno, "open blktrace file");
                return 1;
        }

        fifo = fifo_alloc(TRACE_FIFO_SIZE);

        td->o.size = 0;

        cpu = 0;
        ttime = 0;
        ios[0] = ios[1] = 0;
        rw_bs[0] = rw_bs[1] = 0;
        skipped_writes = 0;
        do {
                int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

                if (ret < 0)
                        goto err;
                else if (!ret)
                        break;
                else if (ret < (int) sizeof(t)) {
                        log_err("fio: short fifo get\n");
                        break;
                }

                if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
                        log_err("fio: bad magic in blktrace data: %x\n",
                                t.magic);
                        goto err;
                }
                if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
                        log_err("fio: bad blktrace version %d\n",
                                t.magic & 0xff);
                        goto err;
                }
                ret = discard_pdu(td, fifo, fd, &t);
                if (ret < 0) {
                        td_verror(td, ret, "blktrace lseek");
                        goto err;
                } else if (t.pdu_len != ret) {
                        log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
                        goto err;
                }
                if (t.action & BLK_TC_ACT(BLK_TC_NOTIFY))
                        continue;
                if (!ttime) {
                        ttime = t.time;
                        cpu = t.cpu;
                }
                delay = 0;
                /*
                 * Only derive a replay delay from the previous trace when it
                 * came from the same CPU, since timestamps from different
                 * CPUs may not be directly comparable.
                 */
                if (cpu == t.cpu)
                        delay = t.time - ttime;
                if ((t.action & BLK_TC_ACT(BLK_TC_WRITE)) && read_only)
                        skipped_writes++;
                else
                        handle_trace(td, &t, delay, ios, rw_bs);
                ttime = t.time;
                cpu = t.cpu;
        } while (1);

        fifo_free(fifo);
        close(fd);

        if (skipped_writes)
                log_err("fio: %s skips replay of %lu writes due to read-only\n",
                        td->o.name, skipped_writes);

        if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
                log_err("fio: found no ios in blktrace data\n");
                return 1;
        } else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
                td->o.td_ddir = TD_DDIR_READ;
                td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
        } else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
                td->o.td_ddir = TD_DDIR_WRITE;
                td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
        } else {
                td->o.td_ddir = TD_DDIR_RW;
                td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
                td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
        }

        /*
         * We need to do direct/raw ios to the device, to avoid getting
         * read-ahead in our way.
         */
        td->o.odirect = 1;

        return 0;
err:
        close(fd);
        fifo_free(fifo);
        return 1;
}