Update io engine comments
[fio.git] / engines / syslet-rw.c
CommitLineData
/*
 * syslet engine
 *
 * IO engine that does regular pread(2)/pwrite(2) to transfer data, but
 * with syslets to make the execution async.
 *
 */
8#include <stdio.h>
9#include <stdlib.h>
10#include <unistd.h>
11#include <errno.h>
12#include <assert.h>
13
14#include "../fio.h"
15#include "../os.h"
16
17#ifdef FIO_HAVE_SYSLET
18
struct syslet_data {
	struct io_u **events;		/* completed io_u's, handed out via ->event() */
	unsigned int nr_events;		/* number of valid entries in events[] */

	struct async_head_user ahu;	/* shared head state for the async syscalls */
	struct syslet_uatom **ring;	/* completion ring, td->iodepth slots */

	/* atoms queued via ->queue() but not yet submitted, linked by ->next */
	struct syslet_uatom *head, *tail;
};
28
9ff9de69
JA
29static void fio_syslet_complete_atom(struct thread_data *td,
30 struct syslet_uatom *atom)
31{
32 struct syslet_data *sd = td->io_ops->data;
5b38ee84 33 struct syslet_uatom *last;
9ff9de69 34 struct io_u *io_u;
9ff9de69
JA
35
36 /*
5b38ee84
JA
37 * complete from the beginning of the sequence up to (and
38 * including) this atom
9ff9de69 39 */
5b38ee84
JA
40 last = atom;
41 io_u = atom->private;
42 atom = io_u->req.head;
9ff9de69
JA
43
44 /*
45 * now complete in right order
46 */
5b38ee84 47 do {
9ff9de69
JA
48 long ret;
49
9ff9de69
JA
50 io_u = atom->private;
51 ret = *atom->ret_ptr;
e2e67912 52 if (ret >= 0)
9ff9de69
JA
53 io_u->resid = io_u->xfer_buflen - ret;
54 else if (ret < 0)
55 io_u->error = ret;
56
57 assert(sd->nr_events < td->iodepth);
58 sd->events[sd->nr_events++] = io_u;
9ff9de69 59
5b38ee84
JA
60 if (atom == last)
61 break;
9ff9de69 62
5b38ee84
JA
63 atom = atom->next;
64 } while (1);
65
66 assert(!last->next);
9ff9de69
JA
67}
68
a4f4fdd7
JA
69/*
70 * Inspect the ring to see if we have completed events
71 */
72static void fio_syslet_complete(struct thread_data *td)
73{
74 struct syslet_data *sd = td->io_ops->data;
75
76 do {
77 struct syslet_uatom *atom;
a4f4fdd7 78
bf0dc8fa 79 atom = sd->ring[sd->ahu.user_ring_idx];
a4f4fdd7
JA
80 if (!atom)
81 break;
82
bf0dc8fa
IM
83 sd->ring[sd->ahu.user_ring_idx] = NULL;
84 if (++sd->ahu.user_ring_idx == td->iodepth)
85 sd->ahu.user_ring_idx = 0;
a4f4fdd7 86
9ff9de69 87 fio_syslet_complete_atom(td, atom);
a4f4fdd7
JA
88 } while (1);
89}
90
91static int fio_syslet_getevents(struct thread_data *td, int min,
92 int fio_unused max,
93 struct timespec fio_unused *t)
94{
95 struct syslet_data *sd = td->io_ops->data;
a4f4fdd7
JA
96 long ret;
97
98 do {
99 fio_syslet_complete(td);
100
101 /*
102 * do we have enough immediate completions?
103 */
104 if (sd->nr_events >= (unsigned int) min)
105 break;
106
107 /*
108 * OK, we need to wait for some events...
109 */
9ff9de69 110 ret = async_wait(1, sd->ahu.user_ring_idx, &sd->ahu);
a4f4fdd7 111 if (ret < 0)
e49499f8 112 return -errno;
a4f4fdd7
JA
113 } while (1);
114
115 ret = sd->nr_events;
116 sd->nr_events = 0;
117 return ret;
118}
119
120static struct io_u *fio_syslet_event(struct thread_data *td, int event)
121{
122 struct syslet_data *sd = td->io_ops->data;
123
124 return sd->events[event];
125}
126
127static void init_atom(struct syslet_uatom *atom, int nr, void *arg0,
a2e1b08a
JA
128 void *arg1, void *arg2, void *arg3, void *ret_ptr,
129 unsigned long flags, void *priv)
a4f4fdd7
JA
130{
131 atom->flags = flags;
132 atom->nr = nr;
133 atom->ret_ptr = ret_ptr;
a2e1b08a 134 atom->next = NULL;
a4f4fdd7
JA
135 atom->arg_ptr[0] = arg0;
136 atom->arg_ptr[1] = arg1;
137 atom->arg_ptr[2] = arg2;
a2e1b08a
JA
138 atom->arg_ptr[3] = arg3;
139 atom->arg_ptr[4] = atom->arg_ptr[5] = NULL;
a4f4fdd7
JA
140 atom->private = priv;
141}
142
/*
 * Use an fsync atom for sync (the atom invokes __NR_fsync on f->fd)
 */
static void fio_syslet_prep_sync(struct io_u *io_u, struct fio_file *f)
{
	init_atom(&io_u->req.atom, __NR_fsync, &f->fd, NULL, NULL, NULL,
		  &io_u->req.ret, 0, io_u);
}
151
152static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f)
153{
154 int nr;
155
a4f4fdd7
JA
156 /*
157 * prepare rw
158 */
159 if (io_u->ddir == DDIR_READ)
a2e1b08a 160 nr = __NR_pread64;
a4f4fdd7 161 else
a2e1b08a 162 nr = __NR_pwrite64;
a4f4fdd7 163
a2e1b08a 164 init_atom(&io_u->req.atom, nr, &f->fd, &io_u->xfer_buf,
7d44a745 165 &io_u->xfer_buflen, &io_u->offset, &io_u->req.ret, 0, io_u);
a4f4fdd7
JA
166}
167
168static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u)
169{
170 struct fio_file *f = io_u->file;
171
172 if (io_u->ddir == DDIR_SYNC)
173 fio_syslet_prep_sync(io_u, f);
174 else
175 fio_syslet_prep_rw(io_u, f);
176
177 return 0;
178}
179
bf0dc8fa
IM
/*
 * Entry point for async helper threads (installed as head_eip /
 * new_thread_eip in the async head): loop servicing requests forever.
 */
static void cachemiss_thread_start(void)
{
	while (1)
		async_thread(NULL, NULL);
}
185
#define THREAD_STACK_SIZE (16384)

/*
 * Allocate a stack for an async helper thread and return the address of
 * its top (the code assumes stacks grow downwards). Returns 0 if the
 * allocation fails — previously a failed malloc silently produced the
 * bogus "stack" address NULL + THREAD_STACK_SIZE.
 */
static unsigned long thread_stack_alloc(void)
{
	void *base = malloc(THREAD_STACK_SIZE);

	if (!base)
		return 0;

	return (unsigned long) base + THREAD_STACK_SIZE;
}
192
a0a930ef
JA
193static void fio_syslet_queued(struct thread_data *td, struct syslet_data *sd)
194{
195 struct syslet_uatom *atom;
196 struct timeval now;
197
198 fio_gettime(&now, NULL);
199
200 atom = sd->head;
201 while (atom) {
202 struct io_u *io_u = atom->private;
203
204 memcpy(&io_u->issue_time, &now, sizeof(now));
205 io_u_queued(td, io_u);
206 atom = atom->next;
207 }
208}
209
9ff9de69 210static int fio_syslet_commit(struct thread_data *td)
a4f4fdd7
JA
211{
212 struct syslet_data *sd = td->io_ops->data;
bf0dc8fa 213 struct syslet_uatom *done;
9ff9de69
JA
214
215 if (!sd->head)
216 return 0;
a4f4fdd7 217
5b38ee84
JA
218 assert(!sd->tail->next);
219
bf0dc8fa
IM
220 if (!sd->ahu.new_thread_stack)
221 sd->ahu.new_thread_stack = thread_stack_alloc();
222
a0a930ef
JA
223 fio_syslet_queued(td, sd);
224
7d44a745
JA
225 /*
226 * On sync completion, the atom is returned. So on NULL return
227 * it's queued asynchronously.
228 */
9ff9de69 229 done = async_exec(sd->head, &sd->ahu);
bf0dc8fa 230
9ff9de69 231 sd->head = sd->tail = NULL;
a4f4fdd7 232
9ff9de69
JA
233 if (done)
234 fio_syslet_complete_atom(td, done);
a4f4fdd7 235
9ff9de69
JA
236 return 0;
237}
238
239static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
240{
241 struct syslet_data *sd = td->io_ops->data;
bf0dc8fa 242
9ff9de69
JA
243 if (sd->tail) {
244 sd->tail->next = &io_u->req.atom;
245 sd->tail = &io_u->req.atom;
246 } else
247 sd->head = sd->tail = &io_u->req.atom;
a4f4fdd7 248
5b38ee84 249 io_u->req.head = sd->head;
9ff9de69 250 return FIO_Q_QUEUED;
a4f4fdd7
JA
251}
252
db64e9bc 253static int async_head_init(struct syslet_data *sd, unsigned int depth)
a4f4fdd7 254{
a4f4fdd7
JA
255 unsigned long ring_size;
256
bf0dc8fa 257 memset(&sd->ahu, 0, sizeof(struct async_head_user));
2ca50be4 258
a4f4fdd7
JA
259 ring_size = sizeof(struct syslet_uatom *) * depth;
260 sd->ring = malloc(ring_size);
261 memset(sd->ring, 0, ring_size);
262
bf0dc8fa
IM
263 sd->ahu.user_ring_idx = 0;
264 sd->ahu.completion_ring = sd->ring;
265 sd->ahu.ring_size_bytes = ring_size;
266 sd->ahu.head_stack = thread_stack_alloc();
5b38ee84
JA
267 sd->ahu.head_eip = (unsigned long) cachemiss_thread_start;
268 sd->ahu.new_thread_eip = (unsigned long) cachemiss_thread_start;
db64e9bc
JA
269
270 return 0;
a4f4fdd7
JA
271}
272
2ca50be4 273static void async_head_exit(struct syslet_data *sd)
a4f4fdd7 274{
7f059a76 275 free(sd->ring);
a4f4fdd7
JA
276}
277
278static void fio_syslet_cleanup(struct thread_data *td)
279{
280 struct syslet_data *sd = td->io_ops->data;
281
282 if (sd) {
2ca50be4 283 async_head_exit(sd);
a4f4fdd7
JA
284 free(sd->events);
285 free(sd);
286 td->io_ops->data = NULL;
287 }
288}
289
290static int fio_syslet_init(struct thread_data *td)
291{
292 struct syslet_data *sd;
293
db64e9bc 294
a4f4fdd7
JA
295 sd = malloc(sizeof(*sd));
296 memset(sd, 0, sizeof(*sd));
297 sd->events = malloc(sizeof(struct io_u *) * td->iodepth);
298 memset(sd->events, 0, sizeof(struct io_u *) * td->iodepth);
db64e9bc
JA
299
300 /*
301 * This will handily fail for kernels where syslet isn't available
302 */
303 if (async_head_init(sd, td->iodepth)) {
304 free(sd->events);
305 free(sd);
306 return 1;
307 }
308
a4f4fdd7 309 td->io_ops->data = sd;
a4f4fdd7
JA
310 return 0;
311}
312
/* Engine ops used when the kernel provides syslet support. */
static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
	.prep		= fio_syslet_prep,
	.queue		= fio_syslet_queue,
	.commit		= fio_syslet_commit,
	.getevents	= fio_syslet_getevents,
	.event		= fio_syslet_event,
	.cleanup	= fio_syslet_cleanup,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
};
326
327#else /* FIO_HAVE_SYSLET */
328
/*
 * When we have a proper configure system in place, we simply wont build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
 */
static int fio_syslet_init(struct thread_data fio_unused *td)
{
	fprintf(stderr, "fio: syslet not available\n");
	return 1;
}
339
/* Stub ops table: init always fails, so the engine refuses to load. */
static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
};
345
346#endif /* FIO_HAVE_SYSLET */
347
/* Runs at program startup (fio_init) to make the engine selectable. */
static void fio_init fio_syslet_register(void)
{
	register_ioengine(&ioengine);
}
352
/* Runs at program shutdown (fio_exit) to remove the engine again. */
static void fio_exit fio_syslet_unregister(void)
{
	unregister_ioengine(&ioengine);
}