syslet: add proper read barrier between user_tail and completion read
[fio.git] / engines / syslet-rw.c
... / ...
CommitLineData
1/*
2 * syslet engine
3 *
4 * IO engine that does regular pread(2)/pwrite(2) to transfer data, but
5 * with syslets to make the execution async.
6 *
7 */
8#include <stdio.h>
9#include <stdlib.h>
10#include <unistd.h>
11#include <errno.h>
12#include <assert.h>
13#include <malloc.h>
14#include <asm/unistd.h>
15
16#include "../fio.h"
17#include "../indirect.h"
18#include "../syslet.h"
19
20#ifdef FIO_HAVE_SYSLET
21
22#ifdef __NR_pread64
23#define __NR_fio_pread __NR_pread64
24#define __NR_fio_pwrite __NR_pwrite64
25#else
26#define __NR_fio_pread __NR_pread
27#define __NR_fio_pwrite __NR_pwrite
28#endif
29
/*
 * Per-thread engine state, hung off td->io_ops->data.
 */
struct syslet_data {
	struct io_u **events;		/* completed io_u's, reaped from the ring */
	unsigned int nr_events;		/* number of valid entries in ->events */

	struct syslet_ring *ring;	/* kernel/user shared completion ring */
	unsigned int ring_mask;		/* ring size - 1; ring size is a power of 2 */
	void *stack;			/* one page of stack for async execution */
};
38
39static void fio_syslet_add_event(struct thread_data *td, struct io_u *io_u)
40{
41 struct syslet_data *sd = td->io_ops->data;
42
43 assert(sd->nr_events < td->o.iodepth);
44 sd->events[sd->nr_events++] = io_u;
45}
46
/*
 * Reap 'nr' completions from the shared ring, starting at the current
 * user tail, and convert each into a completed io_u.
 */
static void fio_syslet_add_events(struct thread_data *td, unsigned int nr)
{
	struct syslet_data *sd = td->io_ops->data;
	unsigned int i, uidx;

	uidx = sd->ring->user_tail;
	/*
	 * Order the read of user_tail before the reads of the completion
	 * entries below: the kernel fills in the completion before it
	 * advances the head, so we must not speculate the entry reads
	 * ahead of establishing where we are in the ring.
	 */
	read_barrier();

	for (i = 0; i < nr; i++) {
		/* ring size is a power of 2, so masking wraps the index */
		unsigned int idx = (i + uidx) & sd->ring_mask;
		struct syslet_completion *comp = &sd->ring->comp[idx];
		/* caller_data carries the io_u pointer we queued */
		struct io_u *io_u = (struct io_u *) (long) comp->caller_data;
		long ret;

		ret = comp->status;
		if (ret <= 0) {
			/* error (or zero transfer): nothing moved, map to errno */
			io_u->resid = io_u->xfer_buflen;
			io_u->error = -ret;
		} else {
			/* success: account for a possible short transfer */
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
		}

		fio_syslet_add_event(td, io_u);
	}
}
73
/*
 * Block until at least one async completion has arrived in the ring,
 * reaping everything that is already there first.
 */
static void fio_syslet_wait_for_events(struct thread_data *td)
{
	struct syslet_data *sd = td->io_ops->data;
	struct syslet_ring *ring = sd->ring;
	unsigned int events;

	events = 0;
	do {
		/* snapshot the kernel head once per iteration */
		unsigned int kh = ring->kernel_head;
		int ret;

		/*
		 * first reap events that are already completed
		 */
		if (ring->user_tail != kh) {
			/*
			 * unsigned arithmetic handles head/tail wraparound;
			 * the distance is the number of pending completions
			 */
			unsigned int nr = kh - ring->user_tail;

			fio_syslet_add_events(td, nr);
			events += nr;
			ring->user_tail = kh;
			continue;
		}

		/*
		 * block waiting for at least one event
		 */
		ret = syscall(__NR_syslet_ring_wait, ring, ring->user_tail);
		assert(!ret);
	} while (!events);
}
104
105static int fio_syslet_getevents(struct thread_data *td, int min,
106 int fio_unused max,
107 struct timespec fio_unused *t)
108{
109 struct syslet_data *sd = td->io_ops->data;
110 long ret;
111
112 do {
113 /*
114 * do we have enough immediate completions?
115 */
116 if (sd->nr_events >= (unsigned int) min)
117 break;
118
119 fio_syslet_wait_for_events(td);
120 } while (1);
121
122 ret = sd->nr_events;
123 sd->nr_events = 0;
124 return ret;
125}
126
127static struct io_u *fio_syslet_event(struct thread_data *td, int event)
128{
129 struct syslet_data *sd = td->io_ops->data;
130
131 return sd->events[event];
132}
133
/*
 * Fill in the indirect registers for an fsync(2) on this file.
 */
static void fio_syslet_prep_sync(struct fio_file *f,
				 struct indirect_registers *regs)
{
	FILL_IN(*regs, __NR_fsync, (long) f->fd);
}
139
140static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f,
141 struct indirect_registers *regs)
142{
143 long nr;
144
145 /*
146 * prepare rw
147 */
148 if (io_u->ddir == DDIR_READ)
149 nr = __NR_fio_pread;
150 else
151 nr = __NR_fio_pwrite;
152
153 FILL_IN(*regs, nr, (long) f->fd, (long) io_u->xfer_buf,
154 (long) io_u->xfer_buflen, (long) io_u->offset);
155}
156
157static void fio_syslet_prep(struct io_u *io_u, struct indirect_registers *regs)
158{
159 struct fio_file *f = io_u->file;
160
161 if (io_u->ddir == DDIR_SYNC)
162 fio_syslet_prep_sync(f, regs);
163 else
164 fio_syslet_prep_rw(io_u, f, regs);
165}
166
/*
 * Return point for the async syslet thread: terminate it via exit(2).
 * NOTE(review): __NR_exit is invoked with no status argument, so the
 * exit code is whatever garbage is in the register — presumably the
 * kernel side never inspects it; confirm against the syslet patchset.
 */
static void ret_func(void)
{
	syscall(__NR_exit);
}
171
/*
 * ->queue() hook: issue the io_u through sys_indirect with syslet
 * parameters attached. Returns FIO_Q_COMPLETED if it finished inline
 * (or failed), FIO_Q_QUEUED if it went async.
 */
static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syslet_data *sd = td->io_ops->data;
	union indirect_params params;
	struct indirect_registers regs;
	int ret;

	fio_ro_check(td, io_u);

	memset(&params, 0, sizeof(params));
	/* io_u pointer rides along as caller_data for completion matching */
	fill_syslet_args(&params.syslet, sd->ring, (long)io_u, ret_func, sd->stack);

	fio_syslet_prep(io_u, &regs);

	ret = syscall(__NR_indirect, &regs, &params, sizeof(params), 0);
	if (ret == (int) io_u->xfer_buflen) {
		/*
		 * completed sync, account. this also catches fsync(),
		 * since a sync io_u has xfer_buflen == 0 and fsync
		 * returns 0 on success.
		 */
		return FIO_Q_COMPLETED;
	} else if (ret < 0) {
		/*
		 * queued for async execution
		 */
		if (errno == ESYSLETPENDING)
			return FIO_Q_QUEUED;
	}

	/*
	 * NOTE(review): a short but successful inline transfer (0 <= ret <
	 * xfer_buflen) also lands here and picks up whatever errno happens
	 * to hold — verify whether that path can occur with syslets.
	 */
	io_u->error = errno;
	td_verror(td, io_u->error, "xfer");
	return FIO_Q_COMPLETED;
}
204
205static int check_syslet_support(struct syslet_data *sd)
206{
207 union indirect_params params;
208 struct indirect_registers regs;
209 pid_t pid, my_pid = getpid();
210
211 memset(&params, 0, sizeof(params));
212 fill_syslet_args(&params.syslet, sd->ring, 0, ret_func, sd->stack);
213
214 FILL_IN(regs, __NR_getpid);
215
216 pid = syscall(__NR_indirect, &regs, &params, sizeof(params), 0);
217 if (pid == my_pid)
218 return 0;
219
220 return 1;
221}
222
223static void fio_syslet_cleanup(struct thread_data *td)
224{
225 struct syslet_data *sd = td->io_ops->data;
226
227 if (sd) {
228 free(sd->events);
229 free(sd->ring);
230 free(sd->stack);
231 free(sd);
232 td->io_ops->data = NULL;
233 }
234}
235
236static int fio_syslet_init(struct thread_data *td)
237{
238 struct syslet_data *sd;
239 void *ring = NULL, *stack = NULL;
240 unsigned int ring_size, ring_nr;
241
242 sd = malloc(sizeof(*sd));
243 memset(sd, 0, sizeof(*sd));
244
245 sd->events = malloc(sizeof(struct io_u *) * td->o.iodepth);
246 memset(sd->events, 0, sizeof(struct io_u *) * td->o.iodepth);
247
248 /*
249 * The ring needs to be a power-of-2, so round it up if we have to
250 */
251 ring_nr = td->o.iodepth;
252 if (ring_nr & (ring_nr - 1)) {
253 int bits = 1;
254
255 while (ring_nr >>= 1)
256 bits++;
257
258 ring_nr = 1 << bits;
259 }
260
261 ring_size = sizeof(struct syslet_ring) +
262 ring_nr * sizeof(struct syslet_completion);
263 if (posix_memalign(&ring, sizeof(uint64_t), ring_size))
264 goto err_mem;
265 if (posix_memalign(&stack, page_size, page_size))
266 goto err_mem;
267
268 sd->ring = ring;
269 sd->ring_mask = ring_nr - 1;
270 sd->stack = stack;
271
272 memset(sd->ring, 0, ring_size);
273 sd->ring->elements = ring_nr;
274
275 if (!check_syslet_support(sd)) {
276 td->io_ops->data = sd;
277 return 0;
278 }
279
280 log_err("fio: syslets do not appear to work\n");
281err_mem:
282 free(sd->events);
283 if (ring)
284 free(ring);
285 if (stack)
286 free(stack);
287 free(sd);
288 return 1;
289}
290
/*
 * Engine ops table for the real syslet-backed implementation.
 * No FIO_SYNCIO flag: this engine completes I/O asynchronously.
 */
static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
	.queue		= fio_syslet_queue,
	.getevents	= fio_syslet_getevents,
	.event		= fio_syslet_event,
	.cleanup	= fio_syslet_cleanup,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
};
302
303#else /* FIO_HAVE_SYSLET */
304
305/*
306 * When we have a proper configure system in place, we simply wont build
307 * and install this io engine. For now install a crippled version that
308 * just complains and fails to load.
309 */
310static int fio_syslet_init(struct thread_data fio_unused *td)
311{
312 fprintf(stderr, "fio: syslet not available\n");
313 return 1;
314}
315
/*
 * Crippled ops table for builds without syslet support; only ->init()
 * is wired up, and it always fails.
 */
static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
};
321
322#endif /* FIO_HAVE_SYSLET */
323
/* Constructor (fio_init): registers this engine at program load. */
static void fio_init fio_syslet_register(void)
{
	register_ioengine(&ioengine);
}
328
/* Destructor (fio_exit): unregisters this engine at program exit. */
static void fio_exit fio_syslet_unregister(void)
{
	unregister_ioengine(&ioengine);
}