io_uring: use kernel header directly
[fio.git] / t / io_uring.c
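/*
 * Simple test/benchmark tool for the io_uring interface: it issues random
 * 4KB (BS) reads against the file or block device given on the command
 * line, keeping up to DEPTH requests in flight, and prints IOPS and page
 * cache hit statistics once per second.
 *
 * Example invocation (the device path is only an example):
 *
 *        t/io_uring /dev/nvme0n1
 */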
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <stddef.h>
#include <signal.h>
#include <inttypes.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <pthread.h>
#include <sched.h>

#include "../arch/arch.h"
#include "../os/io_uring.h"

#define barrier() __asm__ __volatile__("": : :"memory")

#define min(a, b) (((a) < (b)) ? (a) : (b))

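/*
 * Userspace-visible views of the two rings that the kernel exports via
 * mmap() in setup_ring(): the submission queue (SQ) ring, whose array[]
 * holds indices into the iocb array, and the completion queue (CQ) ring,
 * which carries the io_uring_event completions. The fields are pointers
 * into the shared memory, at the offsets returned in io_uring_params.
 */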
struct io_sq_ring {
        unsigned *head;
        unsigned *tail;
        unsigned *ring_mask;
        unsigned *ring_entries;
        unsigned *array;
};

struct io_cq_ring {
        unsigned *head;
        unsigned *tail;
        unsigned *ring_mask;
        unsigned *ring_entries;
        struct io_uring_event *events;
};

#define DEPTH 32

#define BATCH_SUBMIT 8
#define BATCH_COMPLETE 8

#define BS 4096

static unsigned sq_ring_mask, cq_ring_mask;

struct submitter {
        pthread_t thread;
        unsigned long max_blocks;
        int ring_fd;
        struct drand48_data rand;
        struct io_sq_ring sq_ring;
        struct io_uring_iocb *iocbs;
        struct iovec iovecs[DEPTH];
        struct io_cq_ring cq_ring;
        int inflight;
        unsigned long reaps;
        unsigned long done;
        unsigned long calls;
        unsigned long cachehit, cachemiss;
        volatile int finish;
        char filename[128];
};

static struct submitter submitters[1];
static volatile int finish;

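/*
 * Runtime knobs. Note that there is no command line parsing in this
 * version of the tool; these are set at compile time.
 */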
static int polled = 0;          /* use IO polling */
static int fixedbufs = 0;       /* use fixed user buffers */
static int buffered = 1;        /* use buffered IO, not O_DIRECT */
static int sq_thread = 0;       /* use kernel submission thread */
static int sq_thread_cpu = 0;   /* pin above thread to this CPU */

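/*
 * The io_uring system calls are invoked directly through syscall(2),
 * using the __NR_sys_io_uring_* numbers that the included headers
 * provide, since no libc wrappers are assumed to exist for them.
 */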
static int io_uring_setup(unsigned entries, struct iovec *iovecs,
                          struct io_uring_params *p)
{
        return syscall(__NR_sys_io_uring_setup, entries, iovecs, p);
}

static int io_uring_enter(struct submitter *s, unsigned int to_submit,
                          unsigned int min_complete, unsigned int flags)
{
        return syscall(__NR_sys_io_uring_enter, s->ring_fd, to_submit,
                       min_complete, flags);
}

static int gettid(void)
{
        return syscall(__NR_gettid);
}

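/*
 * Prepare one read: pick a random block-aligned offset within the file
 * and fill in the iocb at the given index with an IORING_OP_READ for the
 * matching pre-allocated buffer.
 */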
static void init_io(struct submitter *s, int fd, unsigned index)
{
        struct io_uring_iocb *iocb = &s->iocbs[index];
        unsigned long offset;
        long r;

        lrand48_r(&s->rand, &r);
        offset = (r % (s->max_blocks - 1)) * BS;

        iocb->opcode = IORING_OP_READ;
        iocb->flags = 0;
        iocb->ioprio = 0;
        iocb->fd = fd;
        iocb->off = offset;
        iocb->addr = s->iovecs[index].iov_base;
        iocb->len = BS;
}

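/*
 * SQ ring producer side: stage up to max_ios new iocbs behind the current
 * tail, store their indices in the SQ array, and then publish them to the
 * kernel by advancing the tail. The barriers order the iocb and array
 * stores against the tail update.
 */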
static int prep_more_ios(struct submitter *s, int fd, int max_ios)
{
        struct io_sq_ring *ring = &s->sq_ring;
        unsigned index, tail, next_tail, prepped = 0;

        next_tail = tail = *ring->tail;
        do {
                next_tail++;
                barrier();
                if (next_tail == *ring->head)
                        break;

                index = tail & sq_ring_mask;
                init_io(s, fd, index);
                ring->array[index] = index;
                prepped++;
                tail = next_tail;
        } while (prepped < max_ios);

        if (*ring->tail != tail) {
                /* order tail store with writes to iocbs above */
                barrier();
                *ring->tail = tail;
                barrier();
        }
        return prepped;
}

static int get_file_size(int fd, unsigned long *blocks)
{
        struct stat st;

        if (fstat(fd, &st) < 0)
                return -1;
        if (S_ISBLK(st.st_mode)) {
                unsigned long long bytes;

                if (ioctl(fd, BLKGETSIZE64, &bytes) != 0)
                        return -1;

                *blocks = bytes / BS;
                return 0;
        } else if (S_ISREG(st.st_mode)) {
                *blocks = st.st_size / BS;
                return 0;
        }

        return -1;
}

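/*
 * CQ ring consumer side: walk the events between the current head and the
 * kernel-updated tail, check each completion, and then hand the entries
 * back to the kernel by advancing the head.
 */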
static int reap_events(struct submitter *s)
{
        struct io_cq_ring *ring = &s->cq_ring;
        struct io_uring_event *ev;
        unsigned head, reaped = 0;

        head = *ring->head;
        do {
                barrier();
                if (head == *ring->tail)
                        break;
                ev = &ring->events[head & cq_ring_mask];
                if (ev->res != BS) {
                        struct io_uring_iocb *iocb = &s->iocbs[ev->index];

                        printf("io: unexpected ret=%d\n", ev->res);
                        printf("offset=%lu, size=%lu\n",
                               (unsigned long) iocb->off,
                               (unsigned long) iocb->len);
                        return -1;
                }
                if (ev->flags & IOEV_FLAG_CACHEHIT)
                        s->cachehit++;
                else
                        s->cachemiss++;
                reaped++;
                head++;
        } while (1);

        s->inflight -= reaped;
        *ring->head = head;
        barrier();
        return reaped;
}

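/*
 * Per-file submitter thread: keeps up to DEPTH reads in flight, preparing
 * and submitting them in batches of BATCH_SUBMIT via io_uring_enter(), and
 * reaping completions in batches of up to BATCH_COMPLETE, until told to
 * finish.
 */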
static void *submitter_fn(void *data)
{
        struct submitter *s = data;
        int fd, ret, prepped, flags;

        printf("submitter=%d\n", gettid());

        flags = O_RDONLY;
        if (!buffered)
                flags |= O_DIRECT;
        fd = open(s->filename, flags);
        if (fd < 0) {
                perror("open");
                goto done;
        }

        if (get_file_size(fd, &s->max_blocks)) {
                printf("failed getting size of device/file\n");
                goto err;
        }
        if (s->max_blocks <= 1) {
                printf("Zero file/device size?\n");
                goto err;
        }
        s->max_blocks--;

        srand48_r(pthread_self(), &s->rand);

        prepped = 0;
        do {
                int to_wait, to_submit, this_reap, to_prep;

                if (!prepped && s->inflight < DEPTH) {
                        to_prep = min(DEPTH - s->inflight, BATCH_SUBMIT);
                        prepped = prep_more_ios(s, fd, to_prep);
                }
                s->inflight += prepped;
submit_more:
                to_submit = prepped;
submit:
                if (s->inflight + BATCH_SUBMIT < DEPTH)
                        to_wait = 0;
                else
                        to_wait = min(s->inflight + to_submit, BATCH_COMPLETE);

                ret = io_uring_enter(s, to_submit, to_wait,
                                     IORING_ENTER_GETEVENTS);
                s->calls++;

                this_reap = reap_events(s);
                if (this_reap == -1)
                        break;
                s->reaps += this_reap;

                if (ret >= 0) {
                        if (!ret) {
                                to_submit = 0;
                                if (s->inflight)
                                        goto submit;
                                continue;
                        } else if (ret < to_submit) {
                                int diff = to_submit - ret;

                                s->done += ret;
                                prepped -= diff;
                                goto submit_more;
                        }
                        s->done += ret;
                        prepped = 0;
                        continue;
                } else if (ret < 0) {
                        if (errno == EAGAIN) {
                                if (s->finish)
                                        break;
                                if (this_reap)
                                        goto submit;
                                to_submit = 0;
                                goto submit;
                        }
                        printf("io_uring_enter: %s\n", strerror(errno));
                        break;
                }
        } while (!s->finish);
err:
        close(fd);
done:
        finish = 1;
        return NULL;
}

static void sig_int(int sig)
{
        printf("Exiting on signal %d\n", sig);
        submitters[0].finish = 1;
        finish = 1;
}

static void arm_sig_int(void)
{
        struct sigaction act;

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGINT, &act, NULL);
}

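/*
 * Create the ring with io_uring_setup() and mmap() the three shared
 * regions it describes: the SQ ring (at IORING_OFF_SQ_RING), the iocb
 * array (at IORING_OFF_IOCB), and the CQ ring (at IORING_OFF_CQ_RING),
 * using the offsets returned in io_uring_params.
 */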
static int setup_ring(struct submitter *s)
{
        struct io_sq_ring *sring = &s->sq_ring;
        struct io_cq_ring *cring = &s->cq_ring;
        struct io_uring_params p;
        void *ptr;
        int fd;

        memset(&p, 0, sizeof(p));

        if (polled)
                p.flags |= IORING_SETUP_IOPOLL;
        if (fixedbufs)
                p.flags |= IORING_SETUP_FIXEDBUFS;
        if (buffered)
                p.flags |= IORING_SETUP_SQWQ;
        else if (sq_thread) {
                p.flags |= IORING_SETUP_SQTHREAD;
                p.sq_thread_cpu = sq_thread_cpu;
        }

        if (fixedbufs)
                fd = io_uring_setup(DEPTH, s->iovecs, &p);
        else
                fd = io_uring_setup(DEPTH, NULL, &p);
        if (fd < 0) {
                perror("io_uring_setup");
                return 1;
        }

        s->ring_fd = fd;
        ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
                   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
                   IORING_OFF_SQ_RING);
        printf("sq_ring ptr = %p\n", ptr);
        sring->head = ptr + p.sq_off.head;
        sring->tail = ptr + p.sq_off.tail;
        sring->ring_mask = ptr + p.sq_off.ring_mask;
        sring->ring_entries = ptr + p.sq_off.ring_entries;
        sring->array = ptr + p.sq_off.array;
        sq_ring_mask = *sring->ring_mask;

        s->iocbs = mmap(0, p.sq_entries * sizeof(struct io_uring_iocb),
                        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
                        IORING_OFF_IOCB);
        printf("iocbs ptr = %p\n", s->iocbs);

        ptr = mmap(0, p.cq_off.events + p.cq_entries * sizeof(struct io_uring_event),
                   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
                   IORING_OFF_CQ_RING);
        printf("cq_ring ptr = %p\n", ptr);
        cring->head = ptr + p.cq_off.head;
        cring->tail = ptr + p.cq_off.tail;
        cring->ring_mask = ptr + p.cq_off.ring_mask;
        cring->ring_entries = ptr + p.cq_off.ring_entries;
        cring->events = ptr + p.cq_off.events;
        cq_ring_mask = *cring->ring_mask;
        return 0;
}

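/*
 * Main: raise RLIMIT_MEMLOCK, allocate DEPTH aligned 4KB buffers, set up
 * the ring, start the submitter thread, and then print IOPS, IOs per
 * call, and the page cache hit rate once a second until interrupted.
 */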
int main(int argc, char *argv[])
{
        struct submitter *s = &submitters[0];
        unsigned long done, calls, reap, cache_hit, cache_miss;
        int err, i;
        struct rlimit rlim;
        void *ret;

        if (argc < 2) {
                printf("Usage: %s <filename>\n", argv[0]);
                return 1;
        }

        rlim.rlim_cur = RLIM_INFINITY;
        rlim.rlim_max = RLIM_INFINITY;
        if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {
                perror("setrlimit");
                return 1;
        }

        arm_sig_int();

        for (i = 0; i < DEPTH; i++) {
                void *buf;

                if (posix_memalign(&buf, BS, BS)) {
                        printf("failed alloc\n");
                        return 1;
                }
                s->iovecs[i].iov_base = buf;
                s->iovecs[i].iov_len = BS;
        }

        err = setup_ring(s);
        if (err) {
                printf("ring setup failed: %s, %d\n", strerror(errno), err);
                return 1;
        }
        printf("polled=%d, fixedbufs=%d, buffered=%d", polled, fixedbufs, buffered);
        printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", DEPTH, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);
        strcpy(s->filename, argv[1]);

        pthread_create(&s->thread, NULL, submitter_fn, s);

        cache_hit = cache_miss = reap = calls = done = 0;
        do {
                unsigned long this_done = 0;
                unsigned long this_reap = 0;
                unsigned long this_call = 0;
                unsigned long this_cache_hit = 0;
                unsigned long this_cache_miss = 0;
                unsigned long rpc = 0, ipc = 0;
                double hit = 0.0;

                sleep(1);
                this_done += s->done;
                this_call += s->calls;
                this_reap += s->reaps;
                this_cache_hit += s->cachehit;
                this_cache_miss += s->cachemiss;
                if (this_cache_hit && this_cache_miss) {
                        unsigned long hits, total;

                        hits = this_cache_hit - cache_hit;
                        total = hits + this_cache_miss - cache_miss;
                        hit = (double) hits / (double) total;
                        hit *= 100.0;
                }
                if (this_call - calls) {
                        rpc = (this_done - done) / (this_call - calls);
                        ipc = (this_reap - reap) / (this_call - calls);
                }
                printf("IOPS=%lu, IOS/call=%lu/%lu, inflight=%u (head=%u tail=%u), Cachehit=%0.2f%%\n",
                       this_done - done, rpc, ipc, s->inflight,
                       *s->cq_ring.head, *s->cq_ring.tail, hit);
                done = this_done;
                calls = this_call;
                reap = this_reap;
                cache_hit = s->cachehit;
                cache_miss = s->cachemiss;
        } while (!finish);

        pthread_join(s->thread, &ret);
        close(s->ring_fd);
        return 0;
}