Fio 1.14a
[fio.git] / engines / syslet-rw.c
/*
 * syslet engine
 *
 * IO engine that does regular pread(2)/pwrite(2) to transfer data, but
 * with syslets to make the execution async.
 *
 */
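
/*
 * A minimal job file that would drive this engine could look like the
 * following (hypothetical values; the usual fio job options apply):
 *
 *	[syslet-test]
 *	ioengine=syslet-rw
 *	rw=randread
 *	size=128m
 *	iodepth=32
 *	direct=1
 */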
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>

#include "../fio.h"
#include "../os.h"

#ifdef FIO_HAVE_SYSLET

struct syslet_data {
	struct io_u **events;
	unsigned int nr_events;

	struct async_head_user ahu;
	struct syslet_uatom **ring;

	struct syslet_uatom *head, *tail;
};
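
/*
 * Pending atoms are chained from ->head to ->tail until the next
 * ->commit() submits them with async_exec(); completions come back
 * through the ->ring array, which the kernel fills and
 * fio_syslet_complete() drains in submission order.
 */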

static void fio_syslet_complete_atom(struct thread_data *td,
				     struct syslet_uatom *atom)
{
	struct syslet_data *sd = td->io_ops->data;
	struct syslet_uatom *last;
	struct io_u *io_u;

	/*
	 * complete from the beginning of the sequence up to (and
	 * including) this atom
	 */
	last = atom;
	io_u = atom->private;
	atom = io_u->req.head;

	/*
	 * now complete in the right order
	 */
	do {
		long ret;

		io_u = atom->private;
		ret = *atom->ret_ptr;
		if (ret >= 0)
			io_u->resid = io_u->xfer_buflen - ret;
		else
			io_u->error = -ret;	/* store positive errno */

		assert(sd->nr_events < td->iodepth);
		sd->events[sd->nr_events++] = io_u;

		if (atom == last)
			break;

		atom = atom->next;
	} while (1);

	assert(!last->next);
}

/*
 * Inspect the ring to see if we have completed events
 */
static void fio_syslet_complete(struct thread_data *td)
{
	struct syslet_data *sd = td->io_ops->data;

	do {
		struct syslet_uatom *atom;

		atom = sd->ring[sd->ahu.user_ring_idx];
		if (!atom)
			break;

		sd->ring[sd->ahu.user_ring_idx] = NULL;
		if (++sd->ahu.user_ring_idx == td->iodepth)
			sd->ahu.user_ring_idx = 0;

		fio_syslet_complete_atom(td, atom);
	} while (1);
}

static int fio_syslet_getevents(struct thread_data *td, int min,
				int fio_unused max,
				struct timespec fio_unused *t)
{
	struct syslet_data *sd = td->io_ops->data;
	long ret;

	do {
		fio_syslet_complete(td);

		/*
		 * do we have enough immediate completions?
		 */
		if (sd->nr_events >= (unsigned int) min)
			break;

		/*
		 * OK, we need to wait for some events...
		 */
		ret = async_wait(1, sd->ahu.user_ring_idx, &sd->ahu);
		if (ret < 0)
			return -errno;
	} while (1);

	ret = sd->nr_events;
	sd->nr_events = 0;
	return ret;
}

static struct io_u *fio_syslet_event(struct thread_data *td, int event)
{
	struct syslet_data *sd = td->io_ops->data;

	return sd->events[event];
}

static void init_atom(struct syslet_uatom *atom, int nr, void *arg0,
		      void *arg1, void *arg2, void *arg3, void *ret_ptr,
		      unsigned long flags, void *priv)
{
	atom->flags = flags;
	atom->nr = nr;
	atom->ret_ptr = ret_ptr;
	atom->next = NULL;
	atom->arg_ptr[0] = arg0;
	atom->arg_ptr[1] = arg1;
	atom->arg_ptr[2] = arg2;
	atom->arg_ptr[3] = arg3;
	atom->arg_ptr[4] = atom->arg_ptr[5] = NULL;
	atom->private = priv;
}
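
/*
 * The arg_ptr slots hold pointers to the syscall arguments, not the
 * argument values themselves; the syslet code dereferences them when
 * the atom runs. For the pread64 atom built in fio_syslet_prep_rw()
 * below, that works out to roughly (a sketch):
 *
 *	arg_ptr[0] -> &f->fd, arg_ptr[1] -> &io_u->xfer_buf,
 *	arg_ptr[2] -> &io_u->xfer_buflen, arg_ptr[3] -> &io_u->offset
 */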

/*
 * Use an fsync atom for sync
 */
static void fio_syslet_prep_sync(struct io_u *io_u, struct fio_file *f)
{
	init_atom(&io_u->req.atom, __NR_fsync, &f->fd, NULL, NULL, NULL,
		  &io_u->req.ret, 0, io_u);
}

static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f)
{
	int nr;

	/*
	 * prepare rw
	 */
	if (io_u->ddir == DDIR_READ)
		nr = __NR_pread64;
	else
		nr = __NR_pwrite64;

	init_atom(&io_u->req.atom, nr, &f->fd, &io_u->xfer_buf,
		  &io_u->xfer_buflen, &io_u->offset, &io_u->req.ret, 0, io_u);
}

static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_SYNC)
		fio_syslet_prep_sync(io_u, f);
	else
		fio_syslet_prep_rw(io_u, f);

	return 0;
}

static void cachemiss_thread_start(void)
{
	while (1)
		async_thread(NULL, NULL);
}

#define THREAD_STACK_SIZE (16384)

static unsigned long thread_stack_alloc(void)
{
	/*
	 * Stacks grow down, so hand back the top of the allocation
	 */
	return (unsigned long) malloc(THREAD_STACK_SIZE) + THREAD_STACK_SIZE;
}

static void fio_syslet_queued(struct thread_data *td, struct syslet_data *sd)
{
	struct syslet_uatom *atom;
	struct timeval now;

	fio_gettime(&now, NULL);

	atom = sd->head;
	while (atom) {
		struct io_u *io_u = atom->private;

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
		atom = atom->next;
	}
}

static int fio_syslet_commit(struct thread_data *td)
{
	struct syslet_data *sd = td->io_ops->data;
	struct syslet_uatom *done;

	if (!sd->head)
		return 0;

	assert(!sd->tail->next);

	if (!sd->ahu.new_thread_stack)
		sd->ahu.new_thread_stack = thread_stack_alloc();

	fio_syslet_queued(td, sd);

	/*
	 * On sync completion, the atom is returned. So on NULL return
	 * it's queued asynchronously.
	 */
	done = async_exec(sd->head, &sd->ahu);

	sd->head = sd->tail = NULL;

	if (done)
		fio_syslet_complete_atom(td, done);

	return 0;
}
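
/*
 * How the pieces fit together, as fio's core would drive them (a
 * sketch, not the exact call sites):
 *
 *	td_io_queue(td, io_u)    -> fio_syslet_queue() links the atom
 *	td_io_commit(td)         -> fio_syslet_commit() runs async_exec()
 *	td_io_getevents(td, ...) -> fio_syslet_getevents() reaps the ring
 */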

static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syslet_data *sd = td->io_ops->data;

	if (sd->tail) {
		sd->tail->next = &io_u->req.atom;
		sd->tail = &io_u->req.atom;
	} else
		sd->head = sd->tail = &io_u->req.atom;

	io_u->req.head = sd->head;
	return FIO_Q_QUEUED;
}
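
/*
 * After queuing three io_us a, b and c, the pending chain looks like
 * (a sketch):
 *
 *	sd->head -> a.atom -> b.atom -> c.atom (== sd->tail)
 *
 * with a.req.head, b.req.head and c.req.head all pointing at sd->head,
 * which is what lets fio_syslet_complete_atom() walk the whole chain.
 */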

static int async_head_init(struct syslet_data *sd, unsigned int depth)
{
	unsigned long ring_size;

	memset(&sd->ahu, 0, sizeof(struct async_head_user));

	ring_size = sizeof(struct syslet_uatom *) * depth;
	sd->ring = malloc(ring_size);
	memset(sd->ring, 0, ring_size);

	sd->ahu.user_ring_idx = 0;
	sd->ahu.completion_ring = sd->ring;
	sd->ahu.ring_size_bytes = ring_size;
	sd->ahu.head_stack = thread_stack_alloc();
	sd->ahu.head_eip = (unsigned long) cachemiss_thread_start;
	sd->ahu.new_thread_eip = (unsigned long) cachemiss_thread_start;

	return 0;
}

static void async_head_exit(struct syslet_data *sd)
{
	free(sd->ring);
}

static void fio_syslet_cleanup(struct thread_data *td)
{
	struct syslet_data *sd = td->io_ops->data;

	if (sd) {
		async_head_exit(sd);
		free(sd->events);
		free(sd);
		td->io_ops->data = NULL;
	}
}

static int fio_syslet_init(struct thread_data *td)
{
	struct syslet_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->events = malloc(sizeof(struct io_u *) * td->iodepth);
	memset(sd->events, 0, sizeof(struct io_u *) * td->iodepth);

	/*
	 * This will handily fail for kernels where syslet isn't available
	 */
	if (async_head_init(sd, td->iodepth)) {
		free(sd->events);
		free(sd);
		return 1;
	}

	td->io_ops->data = sd;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
	.prep		= fio_syslet_prep,
	.queue		= fio_syslet_queue,
	.commit		= fio_syslet_commit,
	.getevents	= fio_syslet_getevents,
	.event		= fio_syslet_event,
	.cleanup	= fio_syslet_cleanup,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
};

#else /* FIO_HAVE_SYSLET */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now, install a crippled version that
 * just complains and fails to load.
 */
static int fio_syslet_init(struct thread_data fio_unused *td)
{
	fprintf(stderr, "fio: syslet not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
};

#endif /* FIO_HAVE_SYSLET */

static void fio_init fio_syslet_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_syslet_unregister(void)
{
	unregister_ioengine(&ioengine);
}