summaryrefslogtreecommitdiff
path: root/src/setup.c
diff options
context:
space:
mode:
authorKornilios Kourtis <kornilios@gmail.com>2019-06-02 11:50:00 +0200
committerJens Axboe <axboe@kernel.dk>2019-06-03 14:52:01 -0600
commitfb34ae6d37f45543971b83208d436890fe95f643 (patch)
treeb58a05254c742d2b3eb0347e1946732f0f5b22a0 /src/setup.c
parentf8865bc65feced660a3075175200a60968187bba (diff)
downloadliburing-fb34ae6d37f45543971b83208d436890fe95f643.tar.gz
liburing-fb34ae6d37f45543971b83208d436890fe95f643.tar.bz2
Do not assume p->{cq,sq}_off.head is 0
Ring memory is released by unmapping {cq,sq}->khead, which assumes that p->{cq,sq}_off.head will always be 0. Add another field for the mmapped region and use that instead of ->khead when unmapping. Signed-off-by: Kornilios Kourtis <kkourt@kkourt.io> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'src/setup.c')
-rw-r--r--src/setup.c43
1 file changed, 21 insertions, 22 deletions
diff --git a/src/setup.c b/src/setup.c
index 9da3c19..343a317 100644
--- a/src/setup.c
+++ b/src/setup.c
@@ -13,21 +13,20 @@ static int io_uring_mmap(int fd, struct io_uring_params *p,
struct io_uring_sq *sq, struct io_uring_cq *cq)
{
size_t size;
- void *ptr;
int ret;
sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
- ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
+ sq->ring_ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
- if (ptr == MAP_FAILED)
+ if (sq->ring_ptr == MAP_FAILED)
return -errno;
- sq->khead = ptr + p->sq_off.head;
- sq->ktail = ptr + p->sq_off.tail;
- sq->kring_mask = ptr + p->sq_off.ring_mask;
- sq->kring_entries = ptr + p->sq_off.ring_entries;
- sq->kflags = ptr + p->sq_off.flags;
- sq->kdropped = ptr + p->sq_off.dropped;
- sq->array = ptr + p->sq_off.array;
+ sq->khead = sq->ring_ptr + p->sq_off.head;
+ sq->ktail = sq->ring_ptr + p->sq_off.tail;
+ sq->kring_mask = sq->ring_ptr + p->sq_off.ring_mask;
+ sq->kring_entries = sq->ring_ptr + p->sq_off.ring_entries;
+ sq->kflags = sq->ring_ptr + p->sq_off.flags;
+ sq->kdropped = sq->ring_ptr + p->sq_off.dropped;
+ sq->array = sq->ring_ptr + p->sq_off.array;
size = p->sq_entries * sizeof(struct io_uring_sqe);
sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
@@ -36,24 +35,24 @@ static int io_uring_mmap(int fd, struct io_uring_params *p,
if (sq->sqes == MAP_FAILED) {
ret = -errno;
err:
- munmap(sq->khead, sq->ring_sz);
+ munmap(sq->ring_ptr, sq->ring_sz);
return ret;
}
cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
- ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
+ cq->ring_ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
- if (ptr == MAP_FAILED) {
+ if (cq->ring_ptr == MAP_FAILED) {
ret = -errno;
- munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
+ munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
goto err;
}
- cq->khead = ptr + p->cq_off.head;
- cq->ktail = ptr + p->cq_off.tail;
- cq->kring_mask = ptr + p->cq_off.ring_mask;
- cq->kring_entries = ptr + p->cq_off.ring_entries;
- cq->koverflow = ptr + p->cq_off.overflow;
- cq->cqes = ptr + p->cq_off.cqes;
+ cq->khead = cq->ring_ptr + p->cq_off.head;
+ cq->ktail = cq->ring_ptr + p->cq_off.tail;
+ cq->kring_mask = cq->ring_ptr + p->cq_off.ring_mask;
+ cq->kring_entries = cq->ring_ptr + p->cq_off.ring_entries;
+ cq->koverflow = cq->ring_ptr + p->cq_off.overflow;
+ cq->cqes = cq->ring_ptr + p->cq_off.cqes;
return 0;
}
@@ -105,7 +104,7 @@ void io_uring_queue_exit(struct io_uring *ring)
struct io_uring_cq *cq = &ring->cq;
munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
- munmap(sq->khead, sq->ring_sz);
- munmap(cq->khead, cq->ring_sz);
+ munmap(sq->ring_ptr, sq->ring_sz);
+ munmap(cq->ring_ptr, cq->ring_sz);
close(ring->ring_fd);
}