Currently multishot recvzc requests have no read limit and will remain
active for as long as the socket remains open. But there is sometimes a
need to do a fixed-length read, e.g. peeking at some data in the socket.
Add a length limit `len` to recvzc requests. A value of 0 means no limit,
which is the previous behaviour. A positive value N specifies how many
bytes to read from the socket.
Data will still be posted in aux completions, as before. This could be
split across multiple frags. But the primary recvzc request will now
complete once N bytes have been read. The completion of the recvzc
request will have res and cflags both set to 0.
Signed-off-by: David Wei <dw@davidwei.uk>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/20250224041319.2389785-2-dw@davidwei.uk
[axboe: fixup io_zcrx_recv() for !CONFIG_NET]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
struct file *file;
unsigned msg_flags;
u16 flags;
+ u32 len;
struct io_zcrx_ifq *ifq;
};
unsigned ifq_idx;
if (unlikely(sqe->file_index || sqe->addr2 || sqe->addr ||
- sqe->len || sqe->addr3))
+ sqe->addr3))
return -EINVAL;
ifq_idx = READ_ONCE(sqe->zcrx_ifq_idx);
zc->ifq = req->ctx->ifq;
if (!zc->ifq)
return -EINVAL;
-
+ zc->len = READ_ONCE(sqe->len);
zc->flags = READ_ONCE(sqe->ioprio);
zc->msg_flags = READ_ONCE(sqe->msg_flags);
if (zc->msg_flags)
{
struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
struct socket *sock;
+ unsigned int len;
int ret;
if (!(req->flags & REQ_F_POLLED) &&
if (unlikely(!sock))
return -ENOTSOCK;
+ len = zc->len;
ret = io_zcrx_recv(req, zc->ifq, sock, zc->msg_flags | MSG_DONTWAIT,
- issue_flags);
+ issue_flags, &zc->len);
+ if (len && zc->len == 0) {
+ io_req_set_res(req, 0, 0);
+
+ if (issue_flags & IO_URING_F_MULTISHOT)
+ return IOU_STOP_MULTISHOT;
+ return IOU_OK;
+ }
if (unlikely(ret <= 0) && ret != -EAGAIN) {
if (ret == -ERESTARTSYS)
ret = -EINTR;
int i, copy, end, off;
int ret = 0;
+ len = min_t(size_t, len, desc->count);
if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
return -EAGAIN;
out:
if (offset == start_off)
return ret;
+ desc->count -= (offset - start_off);
return offset - start_off;
}
static int io_zcrx_tcp_recvmsg(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
struct sock *sk, int flags,
- unsigned issue_flags)
+ unsigned issue_flags, unsigned int *outlen)
{
+ unsigned int len = *outlen;
struct io_zcrx_args args = {
.req = req,
.ifq = ifq,
.sock = sk->sk_socket,
};
read_descriptor_t rd_desc = {
- .count = 1,
+ .count = len ? len : UINT_MAX,
.arg.data = &args,
};
int ret;
lock_sock(sk);
ret = tcp_read_sock(sk, &rd_desc, io_zcrx_recv_skb);
+ if (len && ret > 0)
+ *outlen = len - ret;
if (ret <= 0) {
if (ret < 0 || sock_flag(sk, SOCK_DONE))
goto out;
int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
struct socket *sock, unsigned int flags,
- unsigned issue_flags)
+ unsigned issue_flags, unsigned int *len)
{
struct sock *sk = sock->sk;
const struct proto *prot = READ_ONCE(sk->sk_prot);
return -EPROTONOSUPPORT;
sock_rps_record_flow(sk);
- return io_zcrx_tcp_recvmsg(req, ifq, sk, flags, issue_flags);
+ return io_zcrx_tcp_recvmsg(req, ifq, sk, flags, issue_flags, len);
}
void io_shutdown_zcrx_ifqs(struct io_ring_ctx *ctx);
int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
struct socket *sock, unsigned int flags,
- unsigned issue_flags);
+ unsigned issue_flags, unsigned int *len);
#else
static inline int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
struct io_uring_zcrx_ifq_reg __user *arg)
}
static inline int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
struct socket *sock, unsigned int flags,
- unsigned issue_flags)
+ unsigned issue_flags, unsigned int *len)
{
return -EOPNOTSUPP;
}