/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
+#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_tctx_node(unsigned long index);
* free_list cache.
*/
if (req_ref_put_and_test(req)) {
- if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
+ if (req->flags & IO_REQ_LINK_FLAGS) {
if (req->flags & IO_DISARM_MASK)
io_disarm_next(req);
if (req->link) {
&ctx->apoll_cache);
req->flags &= ~REQ_F_POLLED;
}
- if (req->flags & (REQ_F_LINK|REQ_F_HARDLINK))
+ if (req->flags & IO_REQ_LINK_FLAGS)
io_queue_next(req);
if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
io_clean_op(req);
struct io_kiocb *nxt = NULL;
if (req_ref_put_and_test(req)) {
- if (unlikely(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))
+ if (unlikely(req->flags & IO_REQ_LINK_FLAGS))
nxt = io_req_find_next(req);
io_free_req(req);
}
*/
if (!(link->head->flags & REQ_F_FAIL))
req_fail_link_node(link->head, -ECANCELED);
- } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
+ } else if (!(req->flags & IO_REQ_LINK_FLAGS)) {
/*
* the current req is a normal req, we should return
 * error and thus break the submission loop.
link->last->link = req;
link->last = req;
- if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
+ if (req->flags & IO_REQ_LINK_FLAGS)
return 0;
/* last request of a link, enqueue the link */
link->head = NULL;
req = head;
- } else if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
+ } else if (req->flags & IO_REQ_LINK_FLAGS) {
link->head = req;
link->last = req;
return 0;