/*
 * syslet engine
 *
 * IO engine that does regular pread(2)/pwrite(2) to transfer data, but
 * with syslets to make the execution async.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <asm/unistd.h>

#include "../fio.h"

#ifdef FIO_HAVE_SYSLET

#ifdef __NR_pread64
#define __NR_fio_pread	__NR_pread64
#define __NR_fio_pwrite	__NR_pwrite64
#else
#define __NR_fio_pread	__NR_pread
#define __NR_fio_pwrite	__NR_pwrite
#endif

a4f4fdd7
JA
/*
 * Per-thread engine state.
 */
struct syslet_data {
	struct io_u **events;		/* completed io_u's, sized to iodepth */
	unsigned int nr_events;		/* valid entries in ->events */

	struct async_head_user ahu;	/* user-side syslet control block */
	struct syslet_uatom **ring;	/* completion ring, sized to iodepth */

	/*
	 * Chain of queued-but-not-yet-committed atoms, linked through
	 * uatom->next; head/tail of the pending submission list.
	 */
	struct syslet_uatom *head, *tail;
};
36
9ff9de69
JA
37static void fio_syslet_complete_atom(struct thread_data *td,
38 struct syslet_uatom *atom)
39{
40 struct syslet_data *sd = td->io_ops->data;
5b38ee84 41 struct syslet_uatom *last;
9ff9de69 42 struct io_u *io_u;
9ff9de69
JA
43
44 /*
5b38ee84
JA
45 * complete from the beginning of the sequence up to (and
46 * including) this atom
9ff9de69 47 */
5b38ee84
JA
48 last = atom;
49 io_u = atom->private;
50 atom = io_u->req.head;
9ff9de69
JA
51
52 /*
53 * now complete in right order
54 */
5b38ee84 55 do {
9ff9de69
JA
56 long ret;
57
9ff9de69
JA
58 io_u = atom->private;
59 ret = *atom->ret_ptr;
e2e67912 60 if (ret >= 0)
9ff9de69
JA
61 io_u->resid = io_u->xfer_buflen - ret;
62 else if (ret < 0)
63 io_u->error = ret;
64
2dc1bbeb 65 assert(sd->nr_events < td->o.iodepth);
9ff9de69 66 sd->events[sd->nr_events++] = io_u;
9ff9de69 67
5b38ee84
JA
68 if (atom == last)
69 break;
9ff9de69 70
5b38ee84
JA
71 atom = atom->next;
72 } while (1);
73
74 assert(!last->next);
9ff9de69
JA
75}
76
a4f4fdd7
JA
77/*
78 * Inspect the ring to see if we have completed events
79 */
80static void fio_syslet_complete(struct thread_data *td)
81{
82 struct syslet_data *sd = td->io_ops->data;
83
84 do {
85 struct syslet_uatom *atom;
a4f4fdd7 86
bf0dc8fa 87 atom = sd->ring[sd->ahu.user_ring_idx];
a4f4fdd7
JA
88 if (!atom)
89 break;
90
bf0dc8fa 91 sd->ring[sd->ahu.user_ring_idx] = NULL;
2dc1bbeb 92 if (++sd->ahu.user_ring_idx == td->o.iodepth)
bf0dc8fa 93 sd->ahu.user_ring_idx = 0;
a4f4fdd7 94
9ff9de69 95 fio_syslet_complete_atom(td, atom);
a4f4fdd7
JA
96 } while (1);
97}
98
99static int fio_syslet_getevents(struct thread_data *td, int min,
100 int fio_unused max,
101 struct timespec fio_unused *t)
102{
103 struct syslet_data *sd = td->io_ops->data;
a4f4fdd7
JA
104 long ret;
105
106 do {
107 fio_syslet_complete(td);
108
109 /*
110 * do we have enough immediate completions?
111 */
112 if (sd->nr_events >= (unsigned int) min)
113 break;
114
115 /*
116 * OK, we need to wait for some events...
117 */
9ff9de69 118 ret = async_wait(1, sd->ahu.user_ring_idx, &sd->ahu);
a4f4fdd7 119 if (ret < 0)
e49499f8 120 return -errno;
a4f4fdd7
JA
121 } while (1);
122
123 ret = sd->nr_events;
124 sd->nr_events = 0;
125 return ret;
126}
127
128static struct io_u *fio_syslet_event(struct thread_data *td, int event)
129{
130 struct syslet_data *sd = td->io_ops->data;
131
132 return sd->events[event];
133}
134
135static void init_atom(struct syslet_uatom *atom, int nr, void *arg0,
a2e1b08a
JA
136 void *arg1, void *arg2, void *arg3, void *ret_ptr,
137 unsigned long flags, void *priv)
a4f4fdd7
JA
138{
139 atom->flags = flags;
140 atom->nr = nr;
141 atom->ret_ptr = ret_ptr;
a2e1b08a 142 atom->next = NULL;
a4f4fdd7
JA
143 atom->arg_ptr[0] = arg0;
144 atom->arg_ptr[1] = arg1;
145 atom->arg_ptr[2] = arg2;
a2e1b08a
JA
146 atom->arg_ptr[3] = arg3;
147 atom->arg_ptr[4] = atom->arg_ptr[5] = NULL;
a4f4fdd7
JA
148 atom->private = priv;
149}
150
151/*
152 * Use seek atom for sync
153 */
154static void fio_syslet_prep_sync(struct io_u *io_u, struct fio_file *f)
155{
a2e1b08a 156 init_atom(&io_u->req.atom, __NR_fsync, &f->fd, NULL, NULL, NULL,
7d44a745 157 &io_u->req.ret, 0, io_u);
a4f4fdd7
JA
158}
159
160static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f)
161{
162 int nr;
163
a4f4fdd7
JA
164 /*
165 * prepare rw
166 */
167 if (io_u->ddir == DDIR_READ)
1760e679 168 nr = __NR_fio_pread;
a4f4fdd7 169 else
1760e679 170 nr = __NR_fio_pwrite;
a4f4fdd7 171
a2e1b08a 172 init_atom(&io_u->req.atom, nr, &f->fd, &io_u->xfer_buf,
7d44a745 173 &io_u->xfer_buflen, &io_u->offset, &io_u->req.ret, 0, io_u);
a4f4fdd7
JA
174}
175
176static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u)
177{
178 struct fio_file *f = io_u->file;
179
180 if (io_u->ddir == DDIR_SYNC)
181 fio_syslet_prep_sync(io_u, f);
182 else
183 fio_syslet_prep_rw(io_u, f);
184
185 return 0;
186}
187
bf0dc8fa
IM
188static void cachemiss_thread_start(void)
189{
190 while (1)
7756b0d0 191 async_thread(NULL, NULL);
bf0dc8fa
IM
192}
193
#define THREAD_STACK_SIZE	(16384)

/*
 * Allocate a stack for a syslet helper thread and return its top
 * (presumably because the stack grows down on supported arches —
 * consistent with how the result is handed to the kernel as-is).
 * Returns 0 on allocation failure; the old code did not check malloc
 * and would have returned THREAD_STACK_SIZE as a bogus stack pointer.
 */
static unsigned long thread_stack_alloc(void)
{
	void *stack = malloc(THREAD_STACK_SIZE);

	if (!stack)
		return 0;

	return (unsigned long) stack + THREAD_STACK_SIZE;
}
200
a0a930ef
JA
201static void fio_syslet_queued(struct thread_data *td, struct syslet_data *sd)
202{
203 struct syslet_uatom *atom;
204 struct timeval now;
205
206 fio_gettime(&now, NULL);
207
208 atom = sd->head;
209 while (atom) {
210 struct io_u *io_u = atom->private;
211
212 memcpy(&io_u->issue_time, &now, sizeof(now));
213 io_u_queued(td, io_u);
214 atom = atom->next;
215 }
216}
217
9ff9de69 218static int fio_syslet_commit(struct thread_data *td)
a4f4fdd7
JA
219{
220 struct syslet_data *sd = td->io_ops->data;
bf0dc8fa 221 struct syslet_uatom *done;
9ff9de69
JA
222
223 if (!sd->head)
224 return 0;
a4f4fdd7 225
5b38ee84
JA
226 assert(!sd->tail->next);
227
bf0dc8fa
IM
228 if (!sd->ahu.new_thread_stack)
229 sd->ahu.new_thread_stack = thread_stack_alloc();
230
a0a930ef
JA
231 fio_syslet_queued(td, sd);
232
7d44a745
JA
233 /*
234 * On sync completion, the atom is returned. So on NULL return
235 * it's queued asynchronously.
236 */
9ff9de69 237 done = async_exec(sd->head, &sd->ahu);
bf0dc8fa 238
76f58b92
JA
239 if (done == (void *) -1) {
240 log_err("fio: syslets don't appear to work\n");
241 return -1;
242 }
243
9ff9de69 244 sd->head = sd->tail = NULL;
a4f4fdd7 245
9ff9de69
JA
246 if (done)
247 fio_syslet_complete_atom(td, done);
a4f4fdd7 248
9ff9de69
JA
249 return 0;
250}
251
252static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
253{
254 struct syslet_data *sd = td->io_ops->data;
bf0dc8fa 255
7101d9c2
JA
256 fio_ro_check(td, io_u);
257
9ff9de69
JA
258 if (sd->tail) {
259 sd->tail->next = &io_u->req.atom;
260 sd->tail = &io_u->req.atom;
261 } else
262 sd->head = sd->tail = &io_u->req.atom;
a4f4fdd7 263
5b38ee84 264 io_u->req.head = sd->head;
9ff9de69 265 return FIO_Q_QUEUED;
a4f4fdd7
JA
266}
267
db64e9bc 268static int async_head_init(struct syslet_data *sd, unsigned int depth)
a4f4fdd7 269{
a4f4fdd7
JA
270 unsigned long ring_size;
271
bf0dc8fa 272 memset(&sd->ahu, 0, sizeof(struct async_head_user));
2ca50be4 273
a4f4fdd7
JA
274 ring_size = sizeof(struct syslet_uatom *) * depth;
275 sd->ring = malloc(ring_size);
276 memset(sd->ring, 0, ring_size);
277
bf0dc8fa
IM
278 sd->ahu.user_ring_idx = 0;
279 sd->ahu.completion_ring = sd->ring;
280 sd->ahu.ring_size_bytes = ring_size;
281 sd->ahu.head_stack = thread_stack_alloc();
5b38ee84
JA
282 sd->ahu.head_eip = (unsigned long) cachemiss_thread_start;
283 sd->ahu.new_thread_eip = (unsigned long) cachemiss_thread_start;
db64e9bc
JA
284
285 return 0;
a4f4fdd7
JA
286}
287
2ca50be4 288static void async_head_exit(struct syslet_data *sd)
a4f4fdd7 289{
7f059a76 290 free(sd->ring);
a4f4fdd7
JA
291}
292
76f58b92
JA
293static int check_syslet_support(struct syslet_data *sd)
294{
295 struct syslet_uatom atom;
296 void *ret;
297
298 init_atom(&atom, __NR_getpid, NULL, NULL, NULL, NULL, NULL, 0, NULL);
299 ret = async_exec(sd->head, &sd->ahu);
300 if (ret == (void *) -1)
301 return 1;
302
303 return 0;
304}
305
a4f4fdd7
JA
306static void fio_syslet_cleanup(struct thread_data *td)
307{
308 struct syslet_data *sd = td->io_ops->data;
309
310 if (sd) {
2ca50be4 311 async_head_exit(sd);
a4f4fdd7
JA
312 free(sd->events);
313 free(sd);
314 td->io_ops->data = NULL;
315 }
316}
317
318static int fio_syslet_init(struct thread_data *td)
319{
320 struct syslet_data *sd;
321
322 sd = malloc(sizeof(*sd));
323 memset(sd, 0, sizeof(*sd));
2dc1bbeb
JA
324 sd->events = malloc(sizeof(struct io_u *) * td->o.iodepth);
325 memset(sd->events, 0, sizeof(struct io_u *) * td->o.iodepth);
db64e9bc
JA
326
327 /*
328 * This will handily fail for kernels where syslet isn't available
329 */
2dc1bbeb 330 if (async_head_init(sd, td->o.iodepth)) {
db64e9bc
JA
331 free(sd->events);
332 free(sd);
333 return 1;
334 }
335
76f58b92
JA
336 if (check_syslet_support(sd)) {
337 log_err("fio: syslets do not appear to work\n");
338 free(sd->events);
339 free(sd);
340 return 1;
341 }
342
a4f4fdd7 343 td->io_ops->data = sd;
a4f4fdd7
JA
344 return 0;
345}
346
/*
 * Engine ops for the real syslet build: standard prep/queue/commit/
 * getevents flow with the generic file open/close helpers.
 */
static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
	.prep		= fio_syslet_prep,
	.queue		= fio_syslet_queue,
	.commit		= fio_syslet_commit,
	.getevents	= fio_syslet_getevents,
	.event		= fio_syslet_event,
	.cleanup	= fio_syslet_cleanup,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
};
360
#else /* FIO_HAVE_SYSLET */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
 */
368static int fio_syslet_init(struct thread_data fio_unused *td)
369{
370 fprintf(stderr, "fio: syslet not available\n");
371 return 1;
372}
373
/* Crippled registration entry: only init, which always fails. */
static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
};
379
380#endif /* FIO_HAVE_SYSLET */
381
/* Constructor: register this engine with fio core at program load. */
static void fio_init fio_syslet_register(void)
{
	register_ioengine(&ioengine);
}
386
/* Destructor: remove this engine from the registry at program exit. */
static void fio_exit fio_syslet_unregister(void)
{
	unregister_ioengine(&ioengine);
}