#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>

#include "liburing.h"

static int io_uring_mmap(int fd, struct io_uring_params *p,
			 struct io_uring_sq *sq, struct io_uring_cq *cq)
{
	size_t size;
	void *ptr;
	int ret;

	/* Map the SQ ring: kernel head/tail/flags plus the sqe index array */
	sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
	ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
	if (ptr == MAP_FAILED)
		return -errno;
	sq->khead = ptr + p->sq_off.head;
	sq->ktail = ptr + p->sq_off.tail;
	sq->kring_mask = ptr + p->sq_off.ring_mask;
	sq->kring_entries = ptr + p->sq_off.ring_entries;
	sq->kflags = ptr + p->sq_off.flags;
	sq->kdropped = ptr + p->sq_off.dropped;
	sq->array = ptr + p->sq_off.array;

	/* Map the array of submission queue entries separately */
	size = p->sq_entries * sizeof(struct io_uring_sqe);
	sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, fd,
				IORING_OFF_SQES);
	if (sq->sqes == MAP_FAILED) {
		ret = -errno;
err:
		munmap(sq->khead, sq->ring_sz);
		return ret;
	}

	/* Map the CQ ring, which also contains the cqe array */
	cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
	ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
	if (ptr == MAP_FAILED) {
		ret = -errno;
		munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
		goto err;
	}
	cq->khead = ptr + p->cq_off.head;
	cq->ktail = ptr + p->cq_off.tail;
	cq->kring_mask = ptr + p->cq_off.ring_mask;
	cq->kring_entries = ptr + p->cq_off.ring_entries;
	cq->koverflow = ptr + p->cq_off.overflow;
	cq->cqes = ptr + p->cq_off.cqes;
	return 0;
}
57 | ||
58 | /* | |
59 | * For users that want to specify sq_thread_cpu or sq_thread_idle, this | |
60 | * interface is a convenient helper for mmap()ing the rings. | |
61 | * Returns -1 on error, or zero on success. On success, 'ring' | |
62 | * contains the necessary information to read/write to the rings. | |
63 | */ | |
64 | int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring) | |
65 | { | |
66 | int ret; | |
67 | ||
68 | memset(ring, 0, sizeof(*ring)); | |
69 | ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq); | |
70 | if (!ret) | |
71 | ring->ring_fd = fd; | |
72 | return ret; | |
73 | } | |
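
/*
 * Usage sketch (illustration only, not part of the library): a caller that
 * wants an SQPOLL ring pinned to a specific CPU can fill in io_uring_params
 * itself, call io_uring_setup() directly, and then hand the result to
 * io_uring_queue_mmap(). The flag and field names come from the io_uring
 * UAPI; the CPU, idle time, and entry count below are arbitrary.
 */
#if 0
static int setup_sqpoll_ring(struct io_uring *ring)
{
	struct io_uring_params p;
	int fd, ret;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
	p.sq_thread_cpu = 1;		/* bind the SQ poll thread to CPU 1 */
	p.sq_thread_idle = 1000;	/* idle after 1000 msec without work */

	fd = io_uring_setup(8, &p);
	if (fd < 0)
		return fd;

	ret = io_uring_queue_mmap(fd, &p, ring);
	if (ret)
		close(fd);
	return ret;
}
#endif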
74 | ||
75 | /* | |
76 | * Returns -1 on error, or zero on success. On success, 'ring' | |
77 | * contains the necessary information to read/write to the rings. | |
78 | */ | |
79 | int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags) | |
80 | { | |
81 | struct io_uring_params p; | |
004d564f | 82 | int fd, ret; |
21b4aa5d JA |
83 | |
84 | memset(&p, 0, sizeof(p)); | |
85 | p.flags = flags; | |
86 | ||
87 | fd = io_uring_setup(entries, &p); | |
88 | if (fd < 0) | |
89 | return fd; | |
90 | ||
004d564f JA |
91 | ret = io_uring_queue_mmap(fd, &p, ring); |
92 | if (ret) | |
93 | close(fd); | |
94 | ||
95 | return ret; | |
21b4aa5d JA |
96 | } |
97 | ||
98 | void io_uring_queue_exit(struct io_uring *ring) | |
99 | { | |
100 | struct io_uring_sq *sq = &ring->sq; | |
101 | struct io_uring_cq *cq = &ring->cq; | |
102 | ||
103 | munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe)); | |
104 | munmap(sq->khead, sq->ring_sz); | |
105 | munmap(cq->khead, cq->ring_sz); | |
106 | close(ring->ring_fd); | |
107 | } |
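
/*
 * Usage sketch (illustration only, not part of the library): the basic
 * lifecycle pairing io_uring_queue_init() with io_uring_queue_exit().
 * The entry count of 8 is arbitrary.
 */
#if 0
static int example_ring_lifecycle(void)
{
	struct io_uring ring;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret)
		return ret;

	/* ... get SQEs, submit them, and reap CQEs here ... */

	io_uring_queue_exit(&ring);
	return 0;
}
#endif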