blktrace support: adjust largest io_u buffer size to match trace
[fio.git] / blktrace.c
/*
 * blktrace support code for fio
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>

#include "list.h"
#include "fio.h"
#include "blktrace_api.h"

/*
 * Just discard the pdu by seeking past it.
 */
static int discard_pdu(int fd, struct blk_io_trace *t)
{
        if (t->pdu_len == 0)
                return 0;

        if (lseek(fd, t->pdu_len, SEEK_CUR) < 0)
                return errno;

        return 0;
}

/*
 * Check if this is a blktrace binary data file. We read a single trace
 * into memory and check for the magic signature.
 */
int is_blktrace(const char *filename)
{
        struct blk_io_trace t;
        int fd, ret;

        fd = open(filename, O_RDONLY);
        if (fd < 0) {
                perror("open blktrace");
                return 0;
        }

        ret = read(fd, &t, sizeof(t));
        close(fd);

        if (ret < 0) {
                perror("read blktrace");
                return 0;
        } else if (ret != sizeof(t)) {
                log_err("fio: short read on blktrace file\n");
                return 0;
        }

        if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
                return 1;

        return 0;
}

/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
                      unsigned int bytes, int rw, unsigned long long ttime)
{
        struct io_piece *ipo = malloc(sizeof(*ipo));

        memset(ipo, 0, sizeof(*ipo));
        INIT_LIST_HEAD(&ipo->list);
        /*
         * the 512 is wrong here, it should be the hardware sector size...
         */
        ipo->offset = offset * 512;
        ipo->len = bytes;
        ipo->delay = ttime / 1000;
        if (rw)
                ipo->ddir = DDIR_WRITE;
        else
                ipo->ddir = DDIR_READ;

        list_add_tail(&ipo->list, &td->io_log_list);
}

/*
 * We only care for queue traces, most of the others are side effects
 * due to internal workings of the block layer.
 */
static void handle_trace(struct thread_data *td, struct blk_io_trace *t,
                         unsigned long long ttime, unsigned long *ios,
                         unsigned int *bs)
{
        int rw;

        if ((t->action & 0xffff) != __BLK_TA_QUEUE)
                return;
        if (t->action & BLK_TC_ACT(BLK_TC_PC))
                return;

        /*
         * should not happen, need to look into that...
         */
        if (!t->bytes)
                return;

        rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

        if (t->bytes > bs[rw])
                bs[rw] = t->bytes;

        ios[rw]++;
        td->o.size += t->bytes;
        store_ipo(td, t->sector, t->bytes, rw, ttime);
}

/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
int load_blktrace(struct thread_data *td, const char *filename)
{
        unsigned long long ttime, delay;
        struct blk_io_trace t;
        unsigned long ios[2];
        unsigned int cpu;
        unsigned int rw_bs[2];
        int fd;

        fd = open(filename, O_RDONLY);
        if (fd < 0) {
                td_verror(td, errno, "open blktrace file");
                return 1;
        }

        td->o.size = 0;

        cpu = 0;
        ttime = 0;
        ios[0] = ios[1] = 0;
        rw_bs[0] = rw_bs[1] = 0;
        do {
                /*
                 * Once this is working fully, I'll add a layer between
                 * here and read to cache trace data. Then we can avoid
                 * doing itsy bitsy reads, but instead pull in a larger
                 * chunk of data at the time.
                 */
                int ret = read(fd, &t, sizeof(t));

                if (ret < 0) {
                        td_verror(td, errno, "read blktrace file");
                        goto err;
                } else if (!ret) {
                        break;
                } else if (ret != sizeof(t)) {
                        log_err("fio: short read on blktrace file\n");
                        goto err;
                }

                if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
                        log_err("fio: bad magic in blktrace data\n");
                        goto err;
                }
                if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
                        log_err("fio: bad blktrace version %d\n", t.magic & 0xff);
                        goto err;
                }
                ret = discard_pdu(fd, &t);
                if (ret) {
                        td_verror(td, ret, "blktrace lseek");
                        goto err;
                }
                if (!ttime) {
                        ttime = t.time;
                        cpu = t.cpu;
                }
                delay = 0;
                if (cpu == t.cpu)
                        delay = t.time - ttime;
                handle_trace(td, &t, delay, ios, rw_bs);
                ttime = t.time;
                cpu = t.cpu;
        } while (1);

        close(fd);

        if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
                log_err("fio: found no ios in blktrace data\n");
                return 1;
        } else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
                td->o.td_ddir = TD_DDIR_READ;
                td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
        } else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
                td->o.td_ddir = TD_DDIR_WRITE;
                td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
        } else {
                td->o.td_ddir = TD_DDIR_RW;
                td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
                td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
        }

        /*
         * We need to do direct/raw ios to the device, to avoid getting
         * read-ahead in our way.
         */
        td->o.odirect = 1;

        return 0;
err:
        close(fd);
        return 1;
}
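
The comment inside load_blktrace()'s read loop mentions adding a caching layer so the loader no longer issues one small read() per trace record. A minimal sketch of that idea follows; it is not part of fio, and the trace_cache struct, the cache_read() name, and the fixed 64 KiB chunk size are assumptions made only for illustration. A real version would also need a cache-aware replacement for discard_pdu(), since lseek()ing past a pdu no longer works once its bytes may already sit in the buffer.

/*
 * Hypothetical read cache, not part of blktrace.c. Assumes <unistd.h>
 * and <string.h> are available.
 */
struct trace_cache {
        int fd;
        size_t head;            /* next unread byte in buf */
        size_t tail;            /* number of valid bytes in buf */
        char buf[65536];        /* arbitrary chunk size */
};

/*
 * Copy 'len' bytes into 'dst', refilling the buffer with large reads as
 * needed. Returns 1 on success, 0 on clean EOF before any byte was
 * copied, -1 on a read error or a truncated record.
 */
static int cache_read(struct trace_cache *tc, void *dst, size_t len)
{
        size_t done = 0, n;
        ssize_t r;

        while (done < len) {
                if (tc->head == tc->tail) {
                        r = read(tc->fd, tc->buf, sizeof(tc->buf));
                        if (r < 0)
                                return -1;
                        if (!r)
                                return done ? -1 : 0;
                        tc->head = 0;
                        tc->tail = r;
                }

                n = tc->tail - tc->head;
                if (n > len - done)
                        n = len - done;
                memcpy((char *) dst + done, tc->buf + tc->head, n);
                tc->head += n;
                done += n;
        }

        return 1;
}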
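
For context, a caller is expected to probe a file with is_blktrace() and only then hand it to load_blktrace(). fio's actual iolog setup lives outside this file, so the function below and its use of td->o.read_iolog_file are assumptions, sketched only to show how the two entry points fit together.

/* Hypothetical caller; follows the 0 = success, 1 = error convention above. */
static int setup_trace_replay(struct thread_data *td)
{
        const char *file = td->o.read_iolog_file;       /* assumed option field */

        if (is_blktrace(file))
                return load_blktrace(td, file);         /* binary blktrace data */

        /* not a blktrace file; the text iolog parser would handle it */
        return 0;
}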