syslet engine: style fix and ->queue() error return value fix
engines/syslet-rw.c

/*
 * read/write() engine that uses syslet to be async
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>

#include "../fio.h"
#include "../os.h"

#ifdef FIO_HAVE_SYSLET

struct syslet_data {
	struct io_u **events;
	unsigned int nr_events;

	struct async_head_user ahu;
	struct syslet_uatom **ring;
};

/*
 * Inspect the ring to see if we have completed events
 */
static void fio_syslet_complete(struct thread_data *td)
{
	struct syslet_data *sd = td->io_ops->data;

	do {
		struct syslet_uatom *atom;
		struct io_u *io_u;
		long ret;

		atom = sd->ring[sd->ahu.user_ring_idx];
		if (!atom)
			break;

		sd->ring[sd->ahu.user_ring_idx] = NULL;
		if (++sd->ahu.user_ring_idx == td->iodepth)
			sd->ahu.user_ring_idx = 0;

		io_u = atom->private;
		ret = *atom->ret_ptr;
		if (ret > 0)
			io_u->resid = io_u->xfer_buflen - ret;
		else if (ret < 0)
			io_u->error = ret;

		sd->events[sd->nr_events++] = io_u;
	} while (1);
}
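
/*
 * A sketch of the ring protocol consumed above (helper names here are
 * illustrative, not part of the engine): the kernel acts as producer,
 * depositing one completed atom pointer per slot; user space consumes
 * at ahu.user_ring_idx and clears each slot so it can be reused:
 *
 *	while ((atom = ring[idx]) != NULL) {
 *		ring[idx] = NULL;
 *		idx = (idx + 1) % depth;
 *		handle_completion(atom);
 *	}
 *
 * fio_syslet_complete() is exactly this loop, with the modulo replaced
 * by an explicit wrap test.
 */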

static int fio_syslet_getevents(struct thread_data *td, int min,
				int fio_unused max,
				struct timespec fio_unused *t)
{
	struct syslet_data *sd = td->io_ops->data;
	int get_events;
	long ret;

	do {
		fio_syslet_complete(td);

		/*
		 * do we have enough immediate completions?
		 */
		if (sd->nr_events >= (unsigned int) min)
			break;

		/*
		 * OK, we need to wait for some events...
		 */
		get_events = min - sd->nr_events;
		ret = async_wait(get_events, sd->ahu.user_ring_idx, &sd->ahu);
		if (ret < 0)
			return -errno;
	} while (1);

	ret = sd->nr_events;
	sd->nr_events = 0;
	return ret;
}
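
/*
 * Roughly how the fio core consumes this pair of hooks (sketch only;
 * the real reap loop lives in fio's io_u handling):
 *
 *	n = td->io_ops->getevents(td, min, max, NULL);
 *	for (i = 0; i < n; i++) {
 *		struct io_u *io_u = td->io_ops->event(td, i);
 *		... account io_u->error / io_u->resid ...
 *	}
 *
 * Hence nr_events is zeroed on return: events[] is only meaningful
 * until the next reap cycle.
 */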

static struct io_u *fio_syslet_event(struct thread_data *td, int event)
{
	struct syslet_data *sd = td->io_ops->data;

	return sd->events[event];
}

static void init_atom(struct syslet_uatom *atom, int nr, void *arg0,
		      void *arg1, void *arg2, void *arg3, void *ret_ptr,
		      unsigned long flags, void *priv)
{
	atom->flags = flags;
	atom->nr = nr;
	atom->ret_ptr = ret_ptr;
	atom->next = NULL;
	atom->arg_ptr[0] = arg0;
	atom->arg_ptr[1] = arg1;
	atom->arg_ptr[2] = arg2;
	atom->arg_ptr[3] = arg3;
	atom->arg_ptr[4] = atom->arg_ptr[5] = NULL;
	atom->private = priv;
}
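
/*
 * Example of what an atom encodes (sketch with hypothetical locals):
 * a syslet atom is a syscall number plus *pointers* to its arguments,
 * so everything pointed at must stay valid until the atom completes:
 *
 *	long ret;
 *	struct syslet_uatom atom;
 *
 *	init_atom(&atom, __NR_pread64, &fd, &buf, &count, &offset,
 *		  &ret, 0, NULL);
 *
 * ->next is cleared because syslets can chain atoms into little
 * programs; this engine submits exactly one atom per io_u.
 */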

/*
 * Use an fsync atom for sync
 */
static void fio_syslet_prep_sync(struct io_u *io_u, struct fio_file *f)
{
	init_atom(&io_u->req.atom, __NR_fsync, &f->fd, NULL, NULL, NULL,
		  &io_u->req.ret, 0, io_u);
}

static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f)
{
	int nr;

	/*
	 * prepare rw
	 */
	if (io_u->ddir == DDIR_READ)
		nr = __NR_pread64;
	else
		nr = __NR_pwrite64;

	init_atom(&io_u->req.atom, nr, &f->fd, &io_u->xfer_buf,
		  &io_u->xfer_buflen, &io_u->offset, &io_u->req.ret, 0, io_u);
}

static int fio_syslet_prep(struct thread_data fio_unused *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_SYNC)
		fio_syslet_prep_sync(io_u, f);
	else
		fio_syslet_prep_rw(io_u, f);

	return 0;
}

static void cachemiss_thread_start(void)
{
	while (1)
		async_thread();
}

#define THREAD_STACK_SIZE	(16384)

static unsigned long thread_stack_alloc(void)
{
	return (unsigned long) malloc(THREAD_STACK_SIZE) + THREAD_STACK_SIZE;
}
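
/*
 * The returned value is the *top* of the allocation, since the stack
 * grows down on the architectures syslet targeted (x86). A matching
 * free routine would look like this hedged sketch (the engine never
 * frees these stacks; they live for the duration of the run):
 *
 *	static void thread_stack_free(unsigned long stack)
 *	{
 *		free((void *) (stack - THREAD_STACK_SIZE));
 *	}
 */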

static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syslet_data *sd = td->io_ops->data;
	struct syslet_uatom *done;
	long ret;

	if (!sd->ahu.new_thread_stack)
		sd->ahu.new_thread_stack = thread_stack_alloc();

	/*
	 * On sync completion, the atom is returned. So on NULL return
	 * it's queued asynchronously.
	 */
	done = async_exec(&io_u->req.atom, &sd->ahu);

	if (!done)
		return FIO_Q_QUEUED;

	/*
	 * completed sync
	 */
	ret = io_u->req.ret;
	if (ret != (long) io_u->xfer_buflen) {
		if (ret > 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	assert(sd->nr_events < td->iodepth);

	if (io_u->error)
		td_verror(td, io_u->error);

	return FIO_Q_COMPLETED;
}
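
/*
 * Sketch of the ->queue() contract implemented above: FIO_Q_QUEUED
 * means the atom went async and will surface through the completion
 * ring later; FIO_Q_COMPLETED means the io_u already carries the
 * result. A caller therefore looks roughly like:
 *
 *	switch (td->io_ops->queue(td, io_u)) {
 *	case FIO_Q_COMPLETED:
 *		... io_u->error and io_u->resid are valid now ...
 *		break;
 *	case FIO_Q_QUEUED:
 *		... reap later via ->getevents()/->event() ...
 *		break;
 *	}
 */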

static int async_head_init(struct syslet_data *sd, unsigned int depth)
{
	unsigned long ring_size;

	memset(&sd->ahu, 0, sizeof(struct async_head_user));

	ring_size = sizeof(struct syslet_uatom *) * depth;
	sd->ring = malloc(ring_size);
	memset(sd->ring, 0, ring_size);

	sd->ahu.user_ring_idx = 0;
	sd->ahu.completion_ring = sd->ring;
	sd->ahu.ring_size_bytes = ring_size;
	sd->ahu.head_stack = thread_stack_alloc();
	sd->ahu.head_eip = (unsigned long) cachemiss_thread_start;
	sd->ahu.new_thread_eip = (unsigned long) cachemiss_thread_start;

	return 0;
}
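
/*
 * Note that async_head_init() as written cannot fail, so the failure
 * check in fio_syslet_init() below never triggers. A minimal hardening
 * sketch (assuming returning 1 is the error convention, as in the
 * caller) would at least cover the ring allocation:
 *
 *	sd->ring = malloc(ring_size);
 *	if (!sd->ring)
 *		return 1;
 *	memset(sd->ring, 0, ring_size);
 */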

static void async_head_exit(struct syslet_data *sd)
{
	free(sd->ring);
}

static void fio_syslet_cleanup(struct thread_data *td)
{
	struct syslet_data *sd = td->io_ops->data;

	if (sd) {
		async_head_exit(sd);
		free(sd->events);
		free(sd);
		td->io_ops->data = NULL;
	}
}

static int fio_syslet_init(struct thread_data *td)
{
	struct syslet_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->events = malloc(sizeof(struct io_u *) * td->iodepth);
	memset(sd->events, 0, sizeof(struct io_u *) * td->iodepth);

	/*
	 * This will handily fail for kernels where syslet isn't available
	 */
	if (async_head_init(sd, td->iodepth)) {
		free(sd->events);
		free(sd);
		return 1;
	}

	td->io_ops->data = sd;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
	.prep		= fio_syslet_prep,
	.queue		= fio_syslet_queue,
	.getevents	= fio_syslet_getevents,
	.event		= fio_syslet_event,
	.cleanup	= fio_syslet_cleanup,
};

#else /* FIO_HAVE_SYSLET */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now, install a crippled version that
 * just complains and fails to load.
 */
static int fio_syslet_init(struct thread_data fio_unused *td)
{
	fprintf(stderr, "fio: syslet not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
};

#endif /* FIO_HAVE_SYSLET */

static void fio_init fio_syslet_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_syslet_unregister(void)
{
	unregister_ioengine(&ioengine);
}