Add strong madvise() hint for cache pruning
[fio.git] / engines / syslet-rw.c
... / ...
CommitLineData
1/*
2 * syslet engine
3 *
4 * IO engine that does regular pread(2)/pwrite(2) to transfer data, but
5 * with syslets to make the execution async.
6 *
7 */
8#include <stdio.h>
9#include <stdlib.h>
10#include <unistd.h>
11#include <errno.h>
12#include <assert.h>
13#include <malloc.h>
14#include <asm/unistd.h>
15
16#include "../fio.h"
17#include "../lib/fls.h"
18
19#ifdef FIO_HAVE_SYSLET
20
21#ifdef __NR_pread64
22#define __NR_fio_pread __NR_pread64
23#define __NR_fio_pwrite __NR_pwrite64
24#else
25#define __NR_fio_pread __NR_pread
26#define __NR_fio_pwrite __NR_pwrite
27#endif
28
/*
 * Per-thread private state for the syslet engine.
 */
struct syslet_data {
	struct io_u **events;		/* completed io_u's, stashed by fio_syslet_add_event() */
	unsigned int nr_events;		/* number of valid entries in ->events */

	struct syslet_ring *ring;	/* completion ring shared with the kernel */
	unsigned int ring_mask;		/* ring slots - 1; ring size is a power-of-2 */
	void *stack;			/* one page used as stack for async execution */
};
37
38static void fio_syslet_add_event(struct thread_data *td, struct io_u *io_u)
39{
40 struct syslet_data *sd = td->io_ops->data;
41
42 assert(sd->nr_events < td->o.iodepth);
43 sd->events[sd->nr_events++] = io_u;
44}
45
46static void fio_syslet_add_events(struct thread_data *td, unsigned int nr)
47{
48 struct syslet_data *sd = td->io_ops->data;
49 unsigned int i, uidx;
50
51 uidx = sd->ring->user_tail;
52 read_barrier();
53
54 for (i = 0; i < nr; i++) {
55 unsigned int idx = (i + uidx) & sd->ring_mask;
56 struct syslet_completion *comp = &sd->ring->comp[idx];
57 struct io_u *io_u = (struct io_u *) (long) comp->caller_data;
58 long ret;
59
60 ret = comp->status;
61 if (ret <= 0) {
62 io_u->resid = io_u->xfer_buflen;
63 io_u->error = -ret;
64 } else {
65 io_u->resid = io_u->xfer_buflen - ret;
66 io_u->error = 0;
67 }
68
69 fio_syslet_add_event(td, io_u);
70 }
71}
72
/*
 * Block until at least one completion is present in the shared ring,
 * then reap everything between user_tail and kernel_head in one go.
 *
 * NOTE(review): kernel_head is written by the kernel; the dependent
 * reads of the completion slots are ordered by the read_barrier() in
 * fio_syslet_add_events() — confirm against the syslet ring ABI.
 */
static void fio_syslet_wait_for_events(struct thread_data *td)
{
	struct syslet_data *sd = td->io_ops->data;
	struct syslet_ring *ring = sd->ring;

	do {
		/* snapshot the kernel's producer index once per iteration */
		unsigned int kh = ring->kernel_head;
		int ret;

		/*
		 * first reap events that are already completed
		 */
		if (ring->user_tail != kh) {
			/* unsigned subtraction handles index wrap correctly */
			unsigned int nr = kh - ring->user_tail;

			fio_syslet_add_events(td, nr);
			ring->user_tail = kh;
			break;
		}

		/*
		 * block waiting for at least one event
		 */
		ret = syscall(__NR_syslet_ring_wait, ring, ring->user_tail);
		assert(!ret);
	} while (1);
}
100
101static int fio_syslet_getevents(struct thread_data *td, unsigned int min,
102 unsigned int fio_unused max,
103 struct timespec fio_unused *t)
104{
105 struct syslet_data *sd = td->io_ops->data;
106 long ret;
107
108 /*
109 * While we have less events than requested, block waiting for them
110 * (if we have to, there may already be more completed events ready
111 * for us - see fio_syslet_wait_for_events()
112 */
113 while (sd->nr_events < min)
114 fio_syslet_wait_for_events(td);
115
116 ret = sd->nr_events;
117 sd->nr_events = 0;
118 return ret;
119}
120
121static struct io_u *fio_syslet_event(struct thread_data *td, int event)
122{
123 struct syslet_data *sd = td->io_ops->data;
124
125 return sd->events[event];
126}
127
/*
 * Prepare the indirect register set to execute fsync(2) on this file.
 */
static void fio_syslet_prep_sync(struct fio_file *f,
				 struct indirect_registers *regs)
{
	FILL_IN(*regs, __NR_fsync, (long) f->fd);
}
133
/*
 * Prepare the indirect register set to execute fdatasync(2) on this file.
 */
static void fio_syslet_prep_datasync(struct fio_file *f,
				     struct indirect_registers *regs)
{
	FILL_IN(*regs, __NR_fdatasync, (long) f->fd);
}
139
140static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f,
141 struct indirect_registers *regs)
142{
143 long nr;
144
145 /*
146 * prepare rw
147 */
148 if (io_u->ddir == DDIR_READ)
149 nr = __NR_fio_pread;
150 else
151 nr = __NR_fio_pwrite;
152
153 FILL_IN(*regs, nr, (long) f->fd, (long) io_u->xfer_buf,
154 (long) io_u->xfer_buflen, (long) io_u->offset);
155}
156
157static void fio_syslet_prep(struct io_u *io_u, struct indirect_registers *regs)
158{
159 struct fio_file *f = io_u->file;
160
161 if (io_u->ddir == DDIR_SYNC)
162 fio_syslet_prep_sync(f, regs);
163 else if (io_u->ddir == DDIR_DATASYNC)
164 fio_syslet_prep_datasync(f, regs);
165 else
166 fio_syslet_prep_rw(io_u, f, regs);
167}
168
/*
 * Completion trampoline for an async syslet thread: terminate it with a
 * raw exit(2) syscall.
 */
static void ret_func(void)
{
	syscall(__NR_exit);
}
173
174static int fio_syslet_queue(struct thread_data *td, struct io_u *io_u)
175{
176 struct syslet_data *sd = td->io_ops->data;
177 union indirect_params params;
178 struct indirect_registers regs;
179 int ret;
180
181 fio_ro_check(td, io_u);
182
183 memset(&params, 0, sizeof(params));
184 fill_syslet_args(&params.syslet, sd->ring, (long)io_u, ret_func, sd->stack);
185
186 fio_syslet_prep(io_u, &regs);
187
188 ret = syscall(__NR_indirect, &regs, &params, sizeof(params), 0);
189 if (ret == (int) io_u->xfer_buflen) {
190 /*
191 * completed sync, account. this also catches fsync().
192 */
193 return FIO_Q_COMPLETED;
194 } else if (ret < 0) {
195 /*
196 * queued for async execution
197 */
198 if (errno == ESYSLETPENDING)
199 return FIO_Q_QUEUED;
200 }
201
202 io_u->error = errno;
203 td_verror(td, io_u->error, "xfer");
204 return FIO_Q_COMPLETED;
205}
206
207static int check_syslet_support(struct syslet_data *sd)
208{
209 union indirect_params params;
210 struct indirect_registers regs;
211 pid_t pid, my_pid = getpid();
212
213 memset(&params, 0, sizeof(params));
214 fill_syslet_args(&params.syslet, sd->ring, 0, ret_func, sd->stack);
215
216 FILL_IN(regs, __NR_getpid);
217
218 pid = syscall(__NR_indirect, &regs, &params, sizeof(params), 0);
219 if (pid == my_pid)
220 return 0;
221
222 return 1;
223}
224
225static void fio_syslet_cleanup(struct thread_data *td)
226{
227 struct syslet_data *sd = td->io_ops->data;
228
229 if (sd) {
230 free(sd->events);
231 free(sd->ring);
232 free(sd);
233 }
234}
235
236static int fio_syslet_init(struct thread_data *td)
237{
238 struct syslet_data *sd;
239 void *ring = NULL, *stack = NULL;
240 unsigned int ring_size, ring_nr;
241
242 sd = malloc(sizeof(*sd));
243 memset(sd, 0, sizeof(*sd));
244
245 sd->events = malloc(sizeof(struct io_u *) * td->o.iodepth);
246 memset(sd->events, 0, sizeof(struct io_u *) * td->o.iodepth);
247
248 /*
249 * The ring needs to be a power-of-2, so round it up if we have to
250 */
251 ring_nr = td->o.iodepth;
252 if (ring_nr & (ring_nr - 1))
253 ring_nr = 1 << __fls(ring_nr);
254
255 ring_size = sizeof(struct syslet_ring) +
256 ring_nr * sizeof(struct syslet_completion);
257 if (posix_memalign(&ring, sizeof(uint64_t), ring_size))
258 goto err_mem;
259 if (posix_memalign(&stack, page_size, page_size))
260 goto err_mem;
261
262 sd->ring = ring;
263 sd->ring_mask = ring_nr - 1;
264 sd->stack = stack;
265
266 memset(sd->ring, 0, ring_size);
267 sd->ring->elements = ring_nr;
268
269 if (!check_syslet_support(sd)) {
270 td->io_ops->data = sd;
271 return 0;
272 }
273
274 log_err("fio: syslets do not appear to work\n");
275err_mem:
276 free(sd->events);
277 if (ring)
278 free(ring);
279 if (stack)
280 free(stack);
281 free(sd);
282 return 1;
283}
284
/*
 * Engine hooks: syslet-backed submission and reaping, with fio's
 * generic helpers for file open/close/size.
 */
static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
	.queue		= fio_syslet_queue,
	.getevents	= fio_syslet_getevents,
	.event		= fio_syslet_event,
	.cleanup	= fio_syslet_cleanup,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
};
297
298#else /* FIO_HAVE_SYSLET */
299
300/*
301 * When we have a proper configure system in place, we simply wont build
302 * and install this io engine. For now install a crippled version that
303 * just complains and fails to load.
304 */
305static int fio_syslet_init(struct thread_data fio_unused *td)
306{
307 fprintf(stderr, "fio: syslet not available\n");
308 return 1;
309}
310
/*
 * Crippled fallback engine: only .init is wired up and it always fails,
 * so loading "syslet-rw" on an unsupported build produces a clean error.
 */
static struct ioengine_ops ioengine = {
	.name		= "syslet-rw",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_syslet_init,
};
316
317#endif /* FIO_HAVE_SYSLET */
318
/*
 * Constructor: make this engine selectable by name at load time.
 */
static void fio_init fio_syslet_register(void)
{
	register_ioengine(&ioengine);
}
323
/*
 * Destructor: remove the engine again at unload/exit time.
 */
static void fio_exit fio_syslet_unregister(void)
{
	unregister_ioengine(&ioengine);
}