t/io_uring: use the right check for when to wait
[fio.git] / t / io_uring.c
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <stddef.h>
#include <signal.h>
#include <inttypes.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <pthread.h>
#include <sched.h>

#include "../arch/arch.h"
#include "../lib/types.h"
#include "../os/linux/io_uring.h"

#define barrier() __asm__ __volatile__("": : :"memory")

#define min(a, b) (((a) < (b)) ? (a) : (b))

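/*
 * Userspace views of the SQ and CQ rings: each member points into the
 * kernel-shared memory that setup_ring() mmaps, so loads and stores
 * through these pointers are visible to the kernel without a syscall.
 */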
struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};

struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};

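/*
 * Test parameters: ring depth, submit/complete batch sizes, and the
 * fixed 4k block size. MAX_FDS caps how many files can be passed in.
 */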
#define DEPTH		128

#define BATCH_SUBMIT	64
#define BATCH_COMPLETE	64

#define BS		4096

#define MAX_FDS		16

static unsigned sq_ring_mask, cq_ring_mask;

struct file {
	unsigned long max_blocks;
	unsigned pending_ios;
	int real_fd;
	int fixed_fd;
};

struct submitter {
	pthread_t thread;
	int ring_fd;
	struct drand48_data rand;
	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct iovec iovecs[DEPTH];
	struct io_cq_ring cq_ring;
	int inflight;
	unsigned long reaps;
	unsigned long done;
	unsigned long calls;
	unsigned long cachehit, cachemiss;
	volatile int finish;

	__s32 *fds;

	struct file files[MAX_FDS];
	unsigned nr_files;
	unsigned cur_file;
};

static struct submitter submitters[1];
static volatile int finish;

static int polled = 1;		/* use IO polling */
static int fixedbufs = 1;	/* use fixed user buffers */
static int register_files = 1;	/* use fixed files */
static int buffered = 0;	/* use buffered IO, not O_DIRECT */
static int sq_thread_poll = 0;	/* use kernel submission/poller thread */
static int sq_thread_cpu = -1;	/* pin above thread to this CPU */
static int do_nop = 0;		/* no-op SQ ring commands */

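/*
 * Registration helpers: pinning the IO buffers and file descriptors up
 * front lets the kernel skip the per-IO page pinning and fget() work.
 */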
static int io_uring_register_buffers(struct submitter *s)
{
	struct io_uring_register_buffers reg = {
		.iovecs = s->iovecs,
		.nr_iovecs = DEPTH
	};

	if (do_nop)
		return 0;

	return syscall(__NR_sys_io_uring_register, s->ring_fd,
			IORING_REGISTER_BUFFERS, &reg);
}

static int io_uring_register_files(struct submitter *s)
{
	struct io_uring_register_files reg;
	int i;

	if (do_nop)
		return 0;

	s->fds = calloc(s->nr_files, sizeof(__s32));
	for (i = 0; i < s->nr_files; i++) {
		s->fds[i] = s->files[i].real_fd;
		s->files[i].fixed_fd = i;
	}
	reg.fds = s->fds;
	reg.nr_fds = s->nr_files;

	return syscall(__NR_sys_io_uring_register, s->ring_fd,
			IORING_REGISTER_FILES, &reg);
}

static int io_uring_setup(unsigned entries, struct io_uring_params *p)
{
	return syscall(__NR_sys_io_uring_setup, entries, p);
}

static int io_uring_enter(struct submitter *s, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
	return syscall(__NR_sys_io_uring_enter, s->ring_fd, to_submit,
			min_complete, flags);
}

static int gettid(void)
{
	return syscall(__NR_gettid);
}

static unsigned file_depth(struct submitter *s)
{
	return (DEPTH + s->nr_files - 1) / s->nr_files;
}

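/*
 * Fill in one SQE: pick the next file (round-robining once a file has
 * its fair share of the queue depth in flight), choose a random
 * block-aligned offset, and set up either a fixed-buffer read or a
 * plain readv.
 */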
static void init_io(struct submitter *s, unsigned index)
{
	struct io_uring_sqe *sqe = &s->sqes[index];
	unsigned long offset;
	struct file *f;
	long r;

	if (do_nop) {
		sqe->opcode = IORING_OP_NOP;
		return;
	}

	if (s->nr_files == 1) {
		f = &s->files[0];
	} else {
		f = &s->files[s->cur_file];
		if (f->pending_ios >= file_depth(s)) {
			s->cur_file++;
			if (s->cur_file == s->nr_files)
				s->cur_file = 0;
			f = &s->files[s->cur_file];
		}
	}
	f->pending_ios++;

	lrand48_r(&s->rand, &r);
	offset = (r % (f->max_blocks - 1)) * BS;

	if (register_files) {
		sqe->flags = IOSQE_FIXED_FILE;
		sqe->fd = f->fixed_fd;
	} else {
		sqe->flags = 0;
		sqe->fd = f->real_fd;
	}
	if (fixedbufs) {
		sqe->opcode = IORING_OP_READ_FIXED;
		sqe->addr = s->iovecs[index].iov_base;
		sqe->len = BS;
		sqe->buf_index = index;
	} else {
		sqe->opcode = IORING_OP_READV;
		sqe->addr = &s->iovecs[index];
		sqe->len = 1;
		sqe->buf_index = 0;
	}
	sqe->ioprio = 0;
	sqe->off = offset;
	sqe->user_data = (unsigned long) f;
}

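/*
 * Queue up to max_ios new SQEs. Only the shared tail store needs to be
 * ordered against the SQE writes above it; the kernel consumes the
 * entries between head and tail.
 */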
static int prep_more_ios(struct submitter *s, int max_ios)
{
	struct io_sq_ring *ring = &s->sq_ring;
	unsigned index, tail, next_tail, prepped = 0;

	next_tail = tail = *ring->tail;
	do {
		next_tail++;
		barrier();
		if (next_tail == *ring->head)
			break;

		index = tail & sq_ring_mask;
		init_io(s, index);
		ring->array[index] = index;
		prepped++;
		tail = next_tail;
	} while (prepped < max_ios);

	if (*ring->tail != tail) {
		/* order tail store with writes to sqes above */
		barrier();
		*ring->tail = tail;
		barrier();
	}
	return prepped;
}

static int get_file_size(struct file *f)
{
	struct stat st;

	if (fstat(f->real_fd, &st) < 0)
		return -1;
	if (S_ISBLK(st.st_mode)) {
		unsigned long long bytes;

		if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
			return -1;

		f->max_blocks = bytes / BS;
		return 0;
	} else if (S_ISREG(st.st_mode)) {
		f->max_blocks = st.st_size / BS;
		return 0;
	}

	return -1;
}

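/*
 * Drain the CQ ring: walk from head to the current tail, account each
 * completion against its file, and publish the new head back to the
 * kernel in a single store.
 */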
static int reap_events(struct submitter *s)
{
	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		struct file *f;

		barrier();
		if (head == *ring->tail)
			break;
		cqe = &ring->cqes[head & cq_ring_mask];
		if (!do_nop) {
			f = (struct file *) (uintptr_t) cqe->user_data;
			f->pending_ios--;
			if (cqe->res != BS) {
				printf("io: unexpected ret=%d\n", cqe->res);
				return -1;
			}
		}
		if (cqe->flags & IOCQE_FLAG_CACHEHIT)
			s->cachehit++;
		else
			s->cachemiss++;
		reaped++;
		head++;
	} while (1);

	s->inflight -= reaped;
	*ring->head = head;
	barrier();
	return reaped;
}

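/*
 * Per-thread submit/reap loop. The key decision is to_wait: if the next
 * batch still fits under DEPTH we submit without blocking, otherwise we
 * ask io_uring_enter() to wait for a batch of completions first.
 */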
static void *submitter_fn(void *data)
{
	struct submitter *s = data;
	struct io_sq_ring *ring = &s->sq_ring;
	int ret, prepped;

	printf("submitter=%d\n", gettid());

	srand48_r(pthread_self(), &s->rand);

	prepped = 0;
	do {
		int to_wait, to_submit, this_reap, to_prep;

		if (!prepped && s->inflight < DEPTH) {
			to_prep = min(DEPTH - s->inflight, BATCH_SUBMIT);
			prepped = prep_more_ios(s, to_prep);
		}
		s->inflight += prepped;
submit_more:
		to_submit = prepped;
submit:
		if (to_submit && (s->inflight + to_submit < DEPTH))
			to_wait = 0;
		else
			to_wait = min(s->inflight + to_submit, BATCH_COMPLETE);

		/*
		 * Only need to call io_uring_enter if we're not using SQ thread
		 * poll, or if IORING_SQ_NEED_WAKEUP is set.
		 */
		if (!sq_thread_poll || (*ring->flags & IORING_SQ_NEED_WAKEUP)) {
			unsigned flags = 0;

			if (to_wait)
				flags = IORING_ENTER_GETEVENTS;
			ret = io_uring_enter(s, to_submit, to_wait, flags);
			s->calls++;
		} else {
			/*
			 * The SQ thread picks these up on its own, so
			 * consider the batch submitted. Without this,
			 * 'ret' would be used uninitialized below.
			 */
			ret = to_submit;
		}

		/*
		 * For non SQ thread poll, we already got the events we needed
		 * through the io_uring_enter() above. For SQ thread poll, we
		 * need to loop here until we find enough events.
		 */
		this_reap = 0;
		do {
			int r;
			r = reap_events(s);
			if (r == -1)
				break;
			else if (r > 0)
				this_reap += r;
		} while (sq_thread_poll && this_reap < to_wait);
		s->reaps += this_reap;

		if (ret >= 0) {
			if (!ret) {
				to_submit = 0;
				if (s->inflight)
					goto submit;
				continue;
			} else if (ret < to_submit) {
				int diff = to_submit - ret;

				s->done += ret;
				prepped -= diff;
				goto submit_more;
			}
			s->done += ret;
			prepped = 0;
			continue;
		} else if (ret < 0) {
			if (errno == EAGAIN) {
				if (s->finish)
					break;
				if (this_reap)
					goto submit;
				to_submit = 0;
				goto submit;
			}
			printf("io_uring_enter: %s\n", strerror(errno));
			break;
		}
	} while (!s->finish);

	finish = 1;
	return NULL;
}

static void sig_int(int sig)
{
	printf("Exiting on signal %d\n", sig);
	submitters[0].finish = 1;
	finish = 1;
}

static void arm_sig_int(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGINT, &act, NULL);
}

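/*
 * Create the ring, register resources, and mmap the three shared
 * regions: the SQ ring metadata (plus its index array), the SQE array,
 * and the CQ ring with its CQEs.
 */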
static int setup_ring(struct submitter *s)
{
	struct io_sq_ring *sring = &s->sq_ring;
	struct io_cq_ring *cring = &s->cq_ring;
	struct io_uring_params p;
	int ret, fd;
	void *ptr;

	memset(&p, 0, sizeof(p));

	if (polled)
		p.flags |= IORING_SETUP_IOPOLL;
	if (sq_thread_poll) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (sq_thread_cpu != -1) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = sq_thread_cpu;
		}
	}

	fd = io_uring_setup(DEPTH, &p);
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}
	s->ring_fd = fd;

	if (fixedbufs) {
		ret = io_uring_register_buffers(s);
		if (ret < 0) {
			perror("io_uring_register_buffers");
			return 1;
		}
	}

	if (register_files) {
		ret = io_uring_register_files(s);
		if (ret < 0) {
			perror("io_uring_register_files");
			return 1;
		}
	}

	ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQ_RING);
	printf("sq_ring ptr = %p\n", ptr);
	sring->head = ptr + p.sq_off.head;
	sring->tail = ptr + p.sq_off.tail;
	sring->ring_mask = ptr + p.sq_off.ring_mask;
	sring->ring_entries = ptr + p.sq_off.ring_entries;
	sring->flags = ptr + p.sq_off.flags;
	sring->array = ptr + p.sq_off.array;
	sq_ring_mask = *sring->ring_mask;

	s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQES);
	printf("sqes ptr = %p\n", s->sqes);

	ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_CQ_RING);
	printf("cq_ring ptr = %p\n", ptr);
	cring->head = ptr + p.cq_off.head;
	cring->tail = ptr + p.cq_off.tail;
	cring->ring_mask = ptr + p.cq_off.ring_mask;
	cring->ring_entries = ptr + p.cq_off.ring_entries;
	cring->cqes = ptr + p.cq_off.cqes;
	cq_ring_mask = *cring->ring_mask;
	return 0;
}

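/*
 * main: open the target files, bump RLIMIT_MEMLOCK for registered
 * buffers, allocate the aligned IO buffers, start the submitter thread,
 * and print per-second throughput stats until interrupted.
 */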
int main(int argc, char *argv[])
{
	struct submitter *s = &submitters[0];
	unsigned long done, calls, reap, cache_hit, cache_miss;
	int err, i, flags, fd;
	void *ret;

	if (!do_nop && argc < 2) {
		printf("Usage: %s <filename> [<filename>...]\n", argv[0]);
		return 1;
	}

	flags = O_RDONLY | O_NOATIME;
	if (!buffered)
		flags |= O_DIRECT;

	i = 1;
	while (!do_nop && i < argc) {
		struct file *f = &s->files[s->nr_files];

		fd = open(argv[i], flags);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		f->real_fd = fd;
		if (get_file_size(f)) {
			printf("failed getting size of device/file\n");
			return 1;
		}
		if (f->max_blocks <= 1) {
			printf("Zero file/device size?\n");
			return 1;
		}
		f->max_blocks--;

		printf("Added file %s\n", argv[i]);
		s->nr_files++;
		i++;
	}

	if (fixedbufs) {
		struct rlimit rlim;

		rlim.rlim_cur = RLIM_INFINITY;
		rlim.rlim_max = RLIM_INFINITY;
		if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {
			perror("setrlimit");
			return 1;
		}
	}

	arm_sig_int();

	for (i = 0; i < DEPTH; i++) {
		void *buf;

		if (posix_memalign(&buf, BS, BS)) {
			printf("failed alloc\n");
			return 1;
		}
		s->iovecs[i].iov_base = buf;
		s->iovecs[i].iov_len = BS;
	}

	err = setup_ring(s);
	if (err) {
		printf("ring setup failed: %s, %d\n", strerror(errno), err);
		return 1;
	}
	printf("polled=%d, fixedbufs=%d, buffered=%d", polled, fixedbufs, buffered);
	printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", DEPTH, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);

	pthread_create(&s->thread, NULL, submitter_fn, s);

	cache_hit = cache_miss = reap = calls = done = 0;
	do {
		unsigned long this_done = 0;
		unsigned long this_reap = 0;
		unsigned long this_call = 0;
		unsigned long this_cache_hit = 0;
		unsigned long this_cache_miss = 0;
		unsigned long rpc = 0, ipc = 0;
		double hit = 0.0;

		sleep(1);
		this_done += s->done;
		this_call += s->calls;
		this_reap += s->reaps;
		this_cache_hit += s->cachehit;
		this_cache_miss += s->cachemiss;
		if (this_cache_hit && this_cache_miss) {
			unsigned long hits, total;

			hits = this_cache_hit - cache_hit;
			total = hits + this_cache_miss - cache_miss;
			hit = (double) hits / (double) total;
			hit *= 100.0;
		}
		if (this_call - calls) {
			rpc = (this_done - done) / (this_call - calls);
			ipc = (this_reap - reap) / (this_call - calls);
		} else
			rpc = ipc = -1;
		printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (head=%u tail=%u), Cachehit=%0.2f%%\n",
				this_done - done, rpc, ipc, s->inflight,
				*s->cq_ring.head, *s->cq_ring.tail, hit);
		done = this_done;
		calls = this_call;
		reap = this_reap;
		cache_hit = s->cachehit;
		cache_miss = s->cachemiss;
	} while (!finish);

	pthread_join(s->thread, &ret);
	close(s->ring_fd);
	return 0;
}