t/aio-ring: update to newer mmap() API
/*
 * gcc -D_GNU_SOURCE -Wall -O2 -o aio-ring aio-ring.c -lpthread -laio
 */
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <stddef.h>
#include <signal.h>
#include <inttypes.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <unistd.h>
#include <libaio.h>
#include <string.h>
#include <pthread.h>
#include <sched.h>

#define IOCTX_FLAG_SCQRING      (1 << 0)        /* Use SQ/CQ rings */
#define IOCTX_FLAG_IOPOLL       (1 << 1)
#define IOCTX_FLAG_FIXEDBUFS    (1 << 2)
#define IOCTX_FLAG_SQTHREAD     (1 << 3)        /* Use SQ thread */
#define IOCTX_FLAG_SQWQ         (1 << 4)        /* Use SQ wq */
#define IOCTX_FLAG_SQPOLL       (1 << 5)

#define IOEV_RES2_CACHEHIT      (1 << 0)

#define barrier()       __asm__ __volatile__("": : :"memory")

#define min(a, b)               (((a) < (b)) ? (a) : (b))

typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;

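/*
 * Fixed mmap() offsets for mapping the SQ ring, the CQ ring, and the iocb
 * array from the ring fd (see setup_ring()).
 */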
#define IORING_OFF_SQ_RING      0ULL
#define IORING_OFF_CQ_RING      0x8000000ULL
#define IORING_OFF_IOCB         0x10000000ULL

struct aio_uring_offsets {
        u32 head;
        u32 tail;
        u32 ring_mask;
        u32 ring_entries;
        u32 flags;
        u32 elems;
};

struct aio_uring_params {
        u32 sq_entries;
        u32 cq_entries;
        u32 flags;
        u16 sq_thread_cpu;
        u16 resv[9];
        struct aio_uring_offsets sq_off;
        struct aio_uring_offsets cq_off;
};

struct aio_sq_ring {
        u32 *head;
        u32 *tail;
        u32 *ring_mask;
        u32 *ring_entries;
        u32 *array;
};

struct aio_cq_ring {
        u32 *head;
        u32 *tail;
        u32 *ring_mask;
        u32 *ring_entries;
        struct io_event *events;
};

#define IORING_ENTER_GETEVENTS  (1 << 0)

#define DEPTH                   32

#define BATCH_SUBMIT            8
#define BATCH_COMPLETE          8

#define BS                      4096

static unsigned sq_ring_mask, cq_ring_mask;

struct submitter {
        pthread_t thread;
        unsigned long max_blocks;
        int fd;
        struct drand48_data rand;
        struct aio_sq_ring sq_ring;
        struct iocb *iocbs;
        struct iovec iovecs[DEPTH];
        struct aio_cq_ring cq_ring;
        int inflight;
        unsigned long reaps;
        unsigned long done;
        unsigned long calls;
        unsigned long cachehit, cachemiss;
        volatile int finish;
        char filename[128];
};

static struct submitter submitters[1];
static volatile int finish;

static int polled = 0;          /* use IO polling */
static int fixedbufs = 0;       /* use fixed user buffers */
static int buffered = 1;        /* use buffered IO, not O_DIRECT */
static int sq_thread = 0;       /* use kernel submission thread */
static int sq_thread_cpu = 0;   /* pin above thread to this CPU */

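/*
 * Raw syscall wrappers for the experimental ring setup/enter syscalls.
 * 335/336 are presumably the x86-64 numbers used by the development
 * patchset this test was written against; they are not the numbers the
 * interface eventually shipped with.
 */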
static int io_uring_setup(unsigned entries, struct iovec *iovecs,
                          struct aio_uring_params *p)
{
        return syscall(335, entries, iovecs, p);
}

static int io_uring_enter(struct submitter *s, unsigned int to_submit,
                          unsigned int min_complete, unsigned int flags)
{
        return syscall(336, s->fd, to_submit, min_complete, flags);
}

static int gettid(void)
{
        return syscall(__NR_gettid);
}

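/*
 * Prepare the iocb in slot 'index': a BS-sized read from a random
 * block-aligned offset into that slot's preallocated buffer.
 */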
static void init_io(struct submitter *s, int fd, unsigned index)
{
        struct iocb *iocb = &s->iocbs[index];
        unsigned long offset;
        long r;

        lrand48_r(&s->rand, &r);
        offset = (r % (s->max_blocks - 1)) * BS;

        iocb->aio_fildes = fd;
        iocb->aio_lio_opcode = IO_CMD_PREAD;
        iocb->u.c.buf = s->iovecs[index].iov_base;
        iocb->u.c.nbytes = BS;
        iocb->u.c.offset = offset;
}

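/*
 * Fill up to max_ios free SQ ring slots with newly prepared reads and
 * publish the updated tail so the kernel can see them. Returns the
 * number of entries prepared.
 */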
static int prep_more_ios(struct submitter *s, int fd, int max_ios)
{
        struct aio_sq_ring *ring = &s->sq_ring;
        u32 index, tail, next_tail, prepped = 0;

        next_tail = tail = *ring->tail;
        do {
                next_tail++;
                barrier();
                if (next_tail == *ring->head)
                        break;

                index = tail & sq_ring_mask;
                init_io(s, fd, index);
                ring->array[index] = index;
                prepped++;
                tail = next_tail;
        } while (prepped < max_ios);

        if (*ring->tail != tail) {
                /* order tail store with writes to iocbs above */
                barrier();
                *ring->tail = tail;
                barrier();
        }
        return prepped;
}

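/*
 * Return the size of a regular file or block device in BS-sized blocks.
 */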
static int get_file_size(int fd, unsigned long *blocks)
{
        struct stat st;

        if (fstat(fd, &st) < 0)
                return -1;
        if (S_ISBLK(st.st_mode)) {
                unsigned long long bytes;

                if (ioctl(fd, BLKGETSIZE64, &bytes) != 0)
                        return -1;

                *blocks = bytes / BS;
                return 0;
        } else if (S_ISREG(st.st_mode)) {
                *blocks = st.st_size / BS;
                return 0;
        }

        return -1;
}

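/*
 * Walk the CQ ring from head to tail, validate each completion and
 * account cache hits/misses, then advance the head. Returns the number
 * of events reaped, or -1 on an unexpected completion result.
 */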
static int reap_events(struct submitter *s)
{
        struct aio_cq_ring *ring = &s->cq_ring;
        struct io_event *ev;
        u32 head, reaped = 0;

        head = *ring->head;
        do {
                barrier();
                if (head == *ring->tail)
                        break;
                ev = &ring->events[head & cq_ring_mask];
                if (ev->res != BS) {
                        struct iocb *iocb = ev->obj;

                        printf("io: unexpected ret=%ld\n", ev->res);
                        printf("offset=%lu, size=%lu\n", (unsigned long) iocb->u.c.offset, (unsigned long) iocb->u.c.nbytes);
                        return -1;
                }
                if (ev->res2 & IOEV_RES2_CACHEHIT)
                        s->cachehit++;
                else
                        s->cachemiss++;
                reaped++;
                head++;
        } while (1);

        s->inflight -= reaped;
        *ring->head = head;
        barrier();
        return reaped;
}

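/*
 * Per-submitter thread: open the target file, then loop preparing
 * batches of reads, submitting them with io_uring_enter() and reaping
 * completions until asked to finish.
 */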
static void *submitter_fn(void *data)
{
        struct submitter *s = data;
        int fd, ret, prepped, flags;

        printf("submitter=%d\n", gettid());

        flags = O_RDONLY;
        if (!buffered)
                flags |= O_DIRECT;
        fd = open(s->filename, flags);
        if (fd < 0) {
                perror("open");
                goto done;
        }

        if (get_file_size(fd, &s->max_blocks)) {
                printf("failed getting size of device/file\n");
                goto err;
        }
        if (!s->max_blocks) {
                printf("Zero file/device size?\n");
                goto err;
        }

        s->max_blocks--;

        srand48_r(pthread_self(), &s->rand);

        prepped = 0;
        do {
                int to_wait, to_submit, this_reap;

                if (!prepped && s->inflight < DEPTH)
                        prepped = prep_more_ios(s, fd, min(DEPTH - s->inflight, BATCH_SUBMIT));
                s->inflight += prepped;
submit_more:
                to_submit = prepped;
submit:
                if (s->inflight + BATCH_SUBMIT < DEPTH)
                        to_wait = 0;
                else
                        to_wait = min(s->inflight + to_submit, BATCH_COMPLETE);

                ret = io_uring_enter(s, to_submit, to_wait, IORING_ENTER_GETEVENTS);
                s->calls++;

                this_reap = reap_events(s);
                if (this_reap == -1)
                        break;
                s->reaps += this_reap;

                if (ret >= 0) {
                        if (!ret) {
                                to_submit = 0;
                                if (s->inflight)
                                        goto submit;
                                continue;
                        } else if (ret < to_submit) {
                                int diff = to_submit - ret;

                                s->done += ret;
                                prepped -= diff;
                                goto submit_more;
                        }
                        s->done += ret;
                        prepped = 0;
                        continue;
                } else if (ret < 0) {
                        if ((ret == -1 && errno == EAGAIN) || ret == -EAGAIN) {
                                if (s->finish)
                                        break;
                                if (this_reap)
                                        goto submit;
                                to_submit = 0;
                                goto submit;
                        }
                        if (ret == -1)
                                printf("io_uring_enter: %s\n", strerror(errno));
                        else
                                printf("io_uring_enter: %s\n", strerror(-ret));
                        break;
                }
        } while (!s->finish);
err:
        close(fd);
done:
        finish = 1;
        return NULL;
}

static void sig_int(int sig)
{
        printf("Exiting on signal %d\n", sig);
        submitters[0].finish = 1;
        finish = 1;
}

static void arm_sig_int(void)
{
        struct sigaction act;

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGINT, &act, NULL);
}

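/*
 * Create the ring context, then mmap() the SQ ring, the iocb array and
 * the CQ ring at their fixed offsets, using the layout reported back in
 * struct aio_uring_params.
 */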
static int setup_ring(struct submitter *s)
{
        struct aio_sq_ring *sring = &s->sq_ring;
        struct aio_cq_ring *cring = &s->cq_ring;
        struct aio_uring_params p;
        void *ptr;
        int fd;

        memset(&p, 0, sizeof(p));

        p.flags = IOCTX_FLAG_SCQRING;
        if (polled)
                p.flags |= IOCTX_FLAG_IOPOLL;
        if (fixedbufs)
                p.flags |= IOCTX_FLAG_FIXEDBUFS;
        if (buffered)
                p.flags |= IOCTX_FLAG_SQWQ;
        else if (sq_thread) {
                p.flags |= IOCTX_FLAG_SQTHREAD;
                p.sq_thread_cpu = sq_thread_cpu;
        }

        if (fixedbufs)
                fd = io_uring_setup(DEPTH, s->iovecs, &p);
        else
                fd = io_uring_setup(DEPTH, NULL, &p);
        if (fd < 0) {
                perror("io_uring_setup");
                return 1;
        }

        s->fd = fd;

        ptr = mmap(0, p.sq_off.elems + p.sq_entries * sizeof(u32),
                        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                        fd, IORING_OFF_SQ_RING);
        if (ptr == MAP_FAILED) {
                perror("mmap sq_ring");
                return 1;
        }
        printf("sq_ring ptr = %p\n", ptr);
        sring->head = ptr + p.sq_off.head;
        sring->tail = ptr + p.sq_off.tail;
        sring->ring_mask = ptr + p.sq_off.ring_mask;
        sring->ring_entries = ptr + p.sq_off.ring_entries;
        sring->array = ptr + p.sq_off.elems;
        sq_ring_mask = *sring->ring_mask;

        s->iocbs = mmap(0, p.sq_entries * sizeof(struct iocb), PROT_READ | PROT_WRITE,
                        MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_IOCB);
        if (s->iocbs == MAP_FAILED) {
                perror("mmap iocbs");
                return 1;
        }
        printf("iocbs ptr   = %p\n", s->iocbs);

        ptr = mmap(0, p.cq_off.elems + p.cq_entries * sizeof(struct io_event),
                        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                        fd, IORING_OFF_CQ_RING);
        if (ptr == MAP_FAILED) {
                perror("mmap cq_ring");
                return 1;
        }
        printf("cq_ring ptr = %p\n", ptr);
        cring->head = ptr + p.cq_off.head;
        cring->tail = ptr + p.cq_off.tail;
        cring->ring_mask = ptr + p.cq_off.ring_mask;
        cring->ring_entries = ptr + p.cq_off.ring_entries;
        cring->events = ptr + p.cq_off.elems;
        cq_ring_mask = *cring->ring_mask;
        return 0;
}

int main(int argc, char *argv[])
{
        struct submitter *s = &submitters[0];
        unsigned long done, calls, reap, cache_hit, cache_miss;
        int err, i;
        struct rlimit rlim;
        void *ret;

        if (argc < 2) {
                printf("%s: filename\n", argv[0]);
                return 1;
        }

        rlim.rlim_cur = RLIM_INFINITY;
        rlim.rlim_max = RLIM_INFINITY;
        if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {
                perror("setrlimit");
                return 1;
        }

        arm_sig_int();

        for (i = 0; i < DEPTH; i++) {
                void *buf;

                if (posix_memalign(&buf, BS, BS)) {
                        printf("failed alloc\n");
                        return 1;
                }
                s->iovecs[i].iov_base = buf;
                s->iovecs[i].iov_len = BS;
        }

        err = setup_ring(s);
        if (err) {
                printf("ring setup failed: %s, %d\n", strerror(errno), err);
                return 1;
        }
        printf("polled=%d, fixedbufs=%d, buffered=%d", polled, fixedbufs, buffered);
        printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", DEPTH, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);
        strcpy(s->filename, argv[1]);

        pthread_create(&s->thread, NULL, submitter_fn, s);

        cache_hit = cache_miss = reap = calls = done = 0;
        do {
                unsigned long this_done = 0;
                unsigned long this_reap = 0;
                unsigned long this_call = 0;
                unsigned long this_cache_hit = 0;
                unsigned long this_cache_miss = 0;
                unsigned long rpc = 0, ipc = 0;
                double hit = 0.0;

                sleep(1);
                this_done += s->done;
                this_call += s->calls;
                this_reap += s->reaps;
                this_cache_hit += s->cachehit;
                this_cache_miss += s->cachemiss;
                if (this_cache_hit && this_cache_miss) {
                        unsigned long hits, total;

                        hits = this_cache_hit - cache_hit;
                        total = hits + this_cache_miss - cache_miss;
                        hit = (double) hits / (double) total;
                        hit *= 100.0;
                }
                if (this_call - calls) {
                        rpc = (this_done - done) / (this_call - calls);
                        ipc = (this_reap - reap) / (this_call - calls);
                }
                printf("IOPS=%lu, IOS/call=%lu/%lu, inflight=%u (head=%u tail=%u), Cachehit=%0.2f%%\n",
                                this_done - done, rpc, ipc, s->inflight,
                                *s->cq_ring.head, *s->cq_ring.tail, hit);
                done = this_done;
                calls = this_call;
                reap = this_reap;
                cache_hit = s->cachehit;
                cache_miss = s->cachemiss;
        } while (!finish);

        pthread_join(s->thread, &ret);
        return 0;
}