aioring: make sq/cqring_offsets a bit more future proof
[fio.git] / t / aio-ring.c
/*
 * gcc -D_GNU_SOURCE -Wall -O2 -o aio-ring aio-ring.c -lpthread -laio
 */
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <stddef.h>
#include <signal.h>
#include <inttypes.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <unistd.h>
#include <libaio.h>
#include <string.h>
#include <pthread.h>
#include <sched.h>

#include "../arch/arch.h"

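/* Context setup flags passed to io_uring_setup() in aio_uring_params.flags */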
#define IOCTX_FLAG_SCQRING	(1 << 0)	/* Use SQ/CQ rings */
#define IOCTX_FLAG_IOPOLL	(1 << 1)
#define IOCTX_FLAG_FIXEDBUFS	(1 << 2)
#define IOCTX_FLAG_SQTHREAD	(1 << 3)	/* Use SQ thread */
#define IOCTX_FLAG_SQWQ		(1 << 4)	/* Use SQ wq */
#define IOCTX_FLAG_SQPOLL	(1 << 5)

#define IOEV_RES2_CACHEHIT	(1 << 0)

#define barrier()	__asm__ __volatile__("": : :"memory")

#define min(a, b)	((a < b) ? (a) : (b))

typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;

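/*
 * Fixed mmap() offsets used to map the SQ ring, the CQ ring, and the iocb
 * array that SQ ring entries index into.
 */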
#define IORING_OFF_SQ_RING	0ULL
#define IORING_OFF_CQ_RING	0x8000000ULL
#define IORING_OFF_IOCB		0x10000000ULL

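/*
 * Field offsets within the mmap'ed SQ/CQ ring areas, reported by the kernel
 * in aio_uring_params at setup time. The resv[] padding is the "future proof"
 * spare room the commit title refers to.
 */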
struct aio_sqring_offsets {
	u32 head;
	u32 tail;
	u32 ring_mask;
	u32 ring_entries;
	u32 flags;
	u32 dropped;
	u32 array;
	u32 resv[3];
};

struct aio_cqring_offsets {
	u32 head;
	u32 tail;
	u32 ring_mask;
	u32 ring_entries;
	u32 overflow;
	u32 events;
	u32 resv[4];
};

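/*
 * Passed to io_uring_setup(): flags and the SQ thread CPU go in, and the
 * kernel fills in the ring entry counts plus the sq_off/cq_off offsets that
 * setup_ring() uses to locate the ring fields.
 */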
struct aio_uring_params {
	u32 sq_entries;
	u32 cq_entries;
	u32 flags;
	u16 sq_thread_cpu;
	u16 resv[9];
	struct aio_sqring_offsets sq_off;
	struct aio_cqring_offsets cq_off;
};

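/*
 * Application-side view of the rings: plain pointers into the shared,
 * mmap'ed ring memory, resolved from the offsets above in setup_ring().
 */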
struct aio_sq_ring {
	u32 *head;
	u32 *tail;
	u32 *ring_mask;
	u32 *ring_entries;
	u32 *array;
};

struct aio_cq_ring {
	u32 *head;
	u32 *tail;
	u32 *ring_mask;
	u32 *ring_entries;
	struct io_event *events;
};

#define IORING_ENTER_GETEVENTS	(1 << 0)

#define DEPTH			32

#define BATCH_SUBMIT		8
#define BATCH_COMPLETE		8

#define BS			4096

static unsigned sq_ring_mask, cq_ring_mask;

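/*
 * Per-submitter state: the rings, iocbs and data buffers, plus the counters
 * that the main thread samples once per second for the stats line.
 */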
struct submitter {
	pthread_t thread;
	unsigned long max_blocks;
	int fd;
	struct drand48_data rand;
	struct aio_sq_ring sq_ring;
	struct iocb *iocbs;
	struct iovec iovecs[DEPTH];
	struct aio_cq_ring cq_ring;
	int inflight;
	unsigned long reaps;
	unsigned long done;
	unsigned long calls;
	unsigned long cachehit, cachemiss;
	volatile int finish;
	char filename[128];
};

static struct submitter submitters[1];
static volatile int finish;

static int polled = 0;		/* use IO polling */
static int fixedbufs = 0;	/* use fixed user buffers */
static int buffered = 1;	/* use buffered IO, not O_DIRECT */
static int sq_thread = 0;	/* use kernel submission thread */
static int sq_thread_cpu = 0;	/* pin above thread to this CPU */

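/*
 * Thin syscall wrappers; the __NR_sys_io_uring_* numbers are expected to be
 * provided by ../arch/arch.h, since these syscalls predate libc support.
 */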
static int io_uring_setup(unsigned entries, struct iovec *iovecs,
			  struct aio_uring_params *p)
{
	return syscall(__NR_sys_io_uring_setup, entries, iovecs, p);
}

static int io_uring_enter(struct submitter *s, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
	return syscall(__NR_sys_io_uring_enter, s->fd, to_submit, min_complete,
			flags);
}

static int gettid(void)
{
	return syscall(__NR_gettid);
}

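/*
 * Prepare one random read: pick a BS-aligned offset within the file and fill
 * in the iocb at 'index', pointing it at the matching preallocated buffer.
 */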
static void init_io(struct submitter *s, int fd, unsigned index)
{
	struct iocb *iocb = &s->iocbs[index];
	unsigned long offset;
	long r;

	lrand48_r(&s->rand, &r);
	offset = (r % (s->max_blocks - 1)) * BS;

	iocb->aio_fildes = fd;
	iocb->aio_lio_opcode = IO_CMD_PREAD;
	iocb->u.c.buf = s->iovecs[index].iov_base;
	iocb->u.c.nbytes = BS;
	iocb->u.c.offset = offset;
}

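/*
 * Fill up to max_ios free SQ ring slots: init each iocb, store its index in
 * the ring array, then publish the new tail (with barriers so the iocb writes
 * are ordered before the tail store the kernel reads).
 */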
static int prep_more_ios(struct submitter *s, int fd, int max_ios)
{
	struct aio_sq_ring *ring = &s->sq_ring;
	u32 index, tail, next_tail, prepped = 0;

	next_tail = tail = *ring->tail;
	do {
		next_tail++;
		barrier();
		if (next_tail == *ring->head)
			break;

		index = tail & sq_ring_mask;
		init_io(s, fd, index);
		ring->array[index] = index;
		prepped++;
		tail = next_tail;
	} while (prepped < max_ios);

	if (*ring->tail != tail) {
		/* order tail store with writes to iocbs above */
		barrier();
		*ring->tail = tail;
		barrier();
	}
	return prepped;
}

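/* Size of the target in BS-sized blocks, for block devices or regular files */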
static int get_file_size(int fd, unsigned long *blocks)
{
	struct stat st;

	if (fstat(fd, &st) < 0)
		return -1;
	if (S_ISBLK(st.st_mode)) {
		unsigned long long bytes;

		if (ioctl(fd, BLKGETSIZE64, &bytes) != 0)
			return -1;

		*blocks = bytes / BS;
		return 0;
	} else if (S_ISREG(st.st_mode)) {
		*blocks = st.st_size / BS;
		return 0;
	}

	return -1;
}

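/*
 * Drain the CQ ring from head to tail: check each completion transferred a
 * full BS bytes, count cache hits vs misses from res2, then write back the
 * new head so the kernel can reuse those entries.
 */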
static int reap_events(struct submitter *s)
{
	struct aio_cq_ring *ring = &s->cq_ring;
	struct io_event *ev;
	u32 head, reaped = 0;

	head = *ring->head;
	do {
		barrier();
		if (head == *ring->tail)
			break;
		ev = &ring->events[head & cq_ring_mask];
		if (ev->res != BS) {
			struct iocb *iocb = ev->obj;

			printf("io: unexpected ret=%ld\n", ev->res);
			printf("offset=%lu, size=%lu\n", (unsigned long) iocb->u.c.offset, (unsigned long) iocb->u.c.nbytes);
			return -1;
		}
		if (ev->res2 & IOEV_RES2_CACHEHIT)
			s->cachehit++;
		else
			s->cachemiss++;
		reaped++;
		head++;
	} while (1);

	s->inflight -= reaped;
	*ring->head = head;
	barrier();
	return reaped;
}

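/*
 * Submission thread: open the target, then loop preparing up to BATCH_SUBMIT
 * iocbs, submitting and reaping via io_uring_enter(). to_wait stays 0 while
 * the queue has room, otherwise we wait for up to BATCH_COMPLETE completions.
 */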
static void *submitter_fn(void *data)
{
	struct submitter *s = data;
	int fd, ret, prepped, flags;

	printf("submitter=%d\n", gettid());

	flags = O_RDONLY;
	if (!buffered)
		flags |= O_DIRECT;
	fd = open(s->filename, flags);
	if (fd < 0) {
		perror("open");
		goto done;
	}

	if (get_file_size(fd, &s->max_blocks)) {
		printf("failed getting size of device/file\n");
		goto err;
	}
	if (!s->max_blocks) {
		printf("Zero file/device size?\n");
		goto err;
	}

	s->max_blocks--;

	srand48_r(pthread_self(), &s->rand);

	prepped = 0;
	do {
		int to_wait, to_submit, this_reap;

		if (!prepped && s->inflight < DEPTH)
			prepped = prep_more_ios(s, fd, min(DEPTH - s->inflight, BATCH_SUBMIT));
		s->inflight += prepped;
submit_more:
		to_submit = prepped;
submit:
		if (s->inflight + BATCH_SUBMIT < DEPTH)
			to_wait = 0;
		else
			to_wait = min(s->inflight + to_submit, BATCH_COMPLETE);

		ret = io_uring_enter(s, to_submit, to_wait, IORING_ENTER_GETEVENTS);
		s->calls++;

		this_reap = reap_events(s);
		if (this_reap == -1)
			break;
		s->reaps += this_reap;

		if (ret >= 0) {
			if (!ret) {
				to_submit = 0;
				if (s->inflight)
					goto submit;
				continue;
			} else if (ret < to_submit) {
				int diff = to_submit - ret;

				s->done += ret;
				prepped -= diff;
				goto submit_more;
			}
			s->done += ret;
			prepped = 0;
			continue;
		} else if (ret < 0) {
			if (errno == EAGAIN) {
				if (s->finish)
					break;
				if (this_reap)
					goto submit;
				to_submit = 0;
				goto submit;
			}
			printf("io_submit: %s\n", strerror(errno));
			break;
		}
	} while (!s->finish);
err:
	close(fd);
done:
	finish = 1;
	return NULL;
}

static void sig_int(int sig)
{
	printf("Exiting on signal %d\n", sig);
	submitters[0].finish = 1;
	finish = 1;
}

static void arm_sig_int(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGINT, &act, NULL);
}

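/*
 * Create the ring: io_uring_setup() with the chosen flags, then mmap() the SQ
 * ring, the iocb array and the CQ ring at their fixed offsets, resolving the
 * individual fields through the sq_off/cq_off offsets returned by the kernel.
 */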
static int setup_ring(struct submitter *s)
{
	struct aio_sq_ring *sring = &s->sq_ring;
	struct aio_cq_ring *cring = &s->cq_ring;
	struct aio_uring_params p;
	void *ptr;
	int fd;

	memset(&p, 0, sizeof(p));

	p.flags = IOCTX_FLAG_SCQRING;
	if (polled)
		p.flags |= IOCTX_FLAG_IOPOLL;
	if (fixedbufs)
		p.flags |= IOCTX_FLAG_FIXEDBUFS;
	if (buffered)
		p.flags |= IOCTX_FLAG_SQWQ;
	else if (sq_thread) {
		p.flags |= IOCTX_FLAG_SQTHREAD;
		p.sq_thread_cpu = sq_thread_cpu;
	}

	if (fixedbufs)
		fd = io_uring_setup(DEPTH, s->iovecs, &p);
	else
		fd = io_uring_setup(DEPTH, NULL, &p);
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}

	s->fd = fd;

	ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(u32),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			fd, IORING_OFF_SQ_RING);
	printf("sq_ring ptr = 0x%p\n", ptr);
	sring->head = ptr + p.sq_off.head;
	sring->tail = ptr + p.sq_off.tail;
	sring->ring_mask = ptr + p.sq_off.ring_mask;
	sring->ring_entries = ptr + p.sq_off.ring_entries;
	sring->array = ptr + p.sq_off.array;
	sq_ring_mask = *sring->ring_mask;

	s->iocbs = mmap(0, p.sq_entries * sizeof(struct iocb), PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_IOCB);
	printf("iocbs ptr = 0x%p\n", s->iocbs);

	ptr = mmap(0, p.cq_off.events + p.cq_entries * sizeof(struct io_event),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			fd, IORING_OFF_CQ_RING);
	printf("cq_ring ptr = 0x%p\n", ptr);
	cring->head = ptr + p.cq_off.head;
	cring->tail = ptr + p.cq_off.tail;
	cring->ring_mask = ptr + p.cq_off.ring_mask;
	cring->ring_entries = ptr + p.cq_off.ring_entries;
	cring->events = ptr + p.cq_off.events;
	cq_ring_mask = *cring->ring_mask;
	return 0;
}

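/*
 * Main thread: raise RLIMIT_MEMLOCK for the shared mappings, allocate
 * BS-aligned buffers, set up the ring, start the submitter, then print IOPS,
 * IOs per syscall, inflight count and cache hit rate once a second.
 */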
int main(int argc, char *argv[])
{
	struct submitter *s = &submitters[0];
	unsigned long done, calls, reap, cache_hit, cache_miss;
	int err, i;
	struct rlimit rlim;
	void *ret;

	if (argc < 2) {
		printf("%s: filename\n", argv[0]);
		return 1;
	}

	rlim.rlim_cur = RLIM_INFINITY;
	rlim.rlim_max = RLIM_INFINITY;
	if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {
		perror("setrlimit");
		return 1;
	}

	arm_sig_int();

	for (i = 0; i < DEPTH; i++) {
		void *buf;

		if (posix_memalign(&buf, BS, BS)) {
			printf("failed alloc\n");
			return 1;
		}
		s->iovecs[i].iov_base = buf;
		s->iovecs[i].iov_len = BS;
	}

	err = setup_ring(s);
	if (err) {
		printf("ring setup failed: %s, %d\n", strerror(errno), err);
		return 1;
	}
	printf("polled=%d, fixedbufs=%d, buffered=%d", polled, fixedbufs, buffered);
	printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", DEPTH, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);
	strcpy(s->filename, argv[1]);

	pthread_create(&s->thread, NULL, submitter_fn, s);

	cache_hit = cache_miss = reap = calls = done = 0;
	do {
		unsigned long this_done = 0;
		unsigned long this_reap = 0;
		unsigned long this_call = 0;
		unsigned long this_cache_hit = 0;
		unsigned long this_cache_miss = 0;
		unsigned long rpc = 0, ipc = 0;
		double hit = 0.0;

		sleep(1);
		this_done += s->done;
		this_call += s->calls;
		this_reap += s->reaps;
		this_cache_hit += s->cachehit;
		this_cache_miss += s->cachemiss;
		if (this_cache_hit && this_cache_miss) {
			unsigned long hits, total;

			hits = this_cache_hit - cache_hit;
			total = hits + this_cache_miss - cache_miss;
			hit = (double) hits / (double) total;
			hit *= 100.0;
		}
		if (this_call - calls) {
			rpc = (this_done - done) / (this_call - calls);
			ipc = (this_reap - reap) / (this_call - calls);
		}
		printf("IOPS=%lu, IOS/call=%lu/%lu, inflight=%u (head=%u tail=%u), Cachehit=%0.2f%%\n",
				this_done - done, rpc, ipc, s->inflight,
				*s->cq_ring.head, *s->cq_ring.tail, hit);
		done = this_done;
		calls = this_call;
		reap = this_reap;
		cache_hit = s->cachehit;
		cache_miss = s->cachemiss;
	} while (!finish);

	pthread_join(s->thread, &ret);
	return 0;
}