diff options
author | Jens Axboe <axboe@kernel.dk> | 2019-12-04 20:53:01 -0700 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2019-12-04 20:53:01 -0700 |
commit | 84b89ca437add3d6c1658e87f6f6b9fc2fe3b2cc (patch) | |
tree | ede2a4936cd3697e9191ee4352a324ac95a67bef | |
parent | 790d285bc9a9f5809ae9200d4becd2e4bdd54815 (diff) | |
download | liburing-84b89ca437add3d6c1658e87f6f6b9fc2fe3b2cc.tar.gz liburing-84b89ca437add3d6c1658e87f6f6b9fc2fe3b2cc.tar.bz2 |
test/poll-cancel-ton: improve test case
We use 30K entries, but hard code 10K in the deletion case. We also
use incrementing integers as the index. Use a more realistic index
instead, and ensure we use the full range of requests for cancellation.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r-- | test/poll-cancel-ton.c | 14 |
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/test/poll-cancel-ton.c b/test/poll-cancel-ton.c
index 0cf5cc0..5bcf314 100644
--- a/test/poll-cancel-ton.c
+++ b/test/poll-cancel-ton.c
@@ -14,6 +14,10 @@
 
 #include "liburing.h"
 
+#define POLL_COUNT 30000
+
+static void *sqe_index[POLL_COUNT];
+
 static int reap_events(struct io_uring *ring, unsigned nr_events)
 {
 	struct io_uring_cqe *cqe;
@@ -46,10 +50,11 @@ static int del_polls(struct io_uring *ring, int fd, int nr)
 			batch = nr;
 
 		for (i = 0; i < batch; i++) {
-			unsigned data;
+			void *data;
+
 			sqe = io_uring_get_sqe(ring);
-			data = rand() % 10001;
-			io_uring_prep_poll_remove(sqe, (void *) (unsigned long) data);
+			data = sqe_index[lrand48() % nr];
+			io_uring_prep_poll_remove(sqe, data);
 		}
 
 		ret = io_uring_submit(ring);
@@ -77,7 +82,8 @@ static int add_polls(struct io_uring *ring, int fd, int nr)
 	for (i = 0; i < batch; i++) {
 		sqe = io_uring_get_sqe(ring);
 		io_uring_prep_poll_add(sqe, fd, POLLIN);
-		sqe->user_data = ++count;
+		sqe_index[count++] = sqe;
+		sqe->user_data = (unsigned long) sqe;
 	}
 
 	ret = io_uring_submit(ring);