#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"

int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
                                int (*init)(struct blk_mq_hw_ctx *),
                                void (*exit)(struct blk_mq_hw_ctx *));

void blk_mq_sched_free_hctx_data(struct request_queue *q,
                                 void (*exit)(struct blk_mq_hw_ctx *));

struct request *blk_mq_sched_get_request(struct request_queue *q,
                                         struct bio *bio, unsigned int op,
                                         struct blk_mq_alloc_data *data);
void blk_mq_sched_put_request(struct request *rq);

void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
                                struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async, bool can_block);
void blk_mq_sched_insert_requests(struct request_queue *q,
                                  struct blk_mq_ctx *ctx,
                                  struct list_head *list, bool run_queue_async);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
                        struct list_head *rq_list,
                        struct request *(*get_rq)(struct blk_mq_hw_ctx *));

int blk_mq_sched_setup(struct request_queue *q);
void blk_mq_sched_teardown(struct request_queue *q);

int blk_mq_sched_init(struct request_queue *q);

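/*
 * Try to merge @bio into a pending request via the I/O scheduler. Bail out
 * early (no merge) if there is no elevator, merging is disabled for the
 * queue, or the bio itself is not mergeable.
 */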
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
                return false;

        return __blk_mq_sched_bio_merge(q, bio);
}

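/*
 * Give the I/O scheduler a chance to set up per-request private data.
 * Returns 0 if there is no elevator or no get_rq_priv hook.
 */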
static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
                                           struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.get_rq_priv)
                return e->type->ops.mq.get_rq_priv(q, rq);

        return 0;
}

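/* Let the I/O scheduler release any private data it attached to @rq. */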
static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
                                            struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.put_rq_priv)
                e->type->ops.mq.put_rq_priv(q, rq);
}

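/*
 * Ask the I/O scheduler whether @bio may be merged into @rq. Merging is
 * allowed by default when no elevator or allow_merge hook is present.
 */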
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
                         struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.allow_merge)
                return e->type->ops.mq.allow_merge(q, rq, bio);

        return true;
}

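/*
 * Notify the I/O scheduler that @rq has completed, then return its
 * scheduler tag to the sched_tags pool of the hardware queue.
 */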
static inline void
blk_mq_sched_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.completed_request)
                e->type->ops.mq.completed_request(hctx, rq);

        BUG_ON(rq->internal_tag == -1);

        blk_mq_put_tag(hctx, hctx->sched_tags, rq->mq_ctx, rq->internal_tag);
}

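/* Notify the I/O scheduler that @rq has been started. */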
static inline void blk_mq_sched_started_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.started_request)
                e->type->ops.mq.started_request(rq);
}

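/* Notify the I/O scheduler that @rq is being requeued. */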
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.requeue_request)
                e->type->ops.mq.requeue_request(rq);
}

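/*
 * Returns true if the I/O scheduler still has requests pending for this
 * hardware queue, false if there is no elevator or no work.
 */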
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.has_work)
                return e->type->ops.mq.has_work(hctx);

        return false;
}

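/*
 * Mark this hardware queue for a scheduler restart. With a shared tag set,
 * also flag the request queue so that the other queues sharing the tags can
 * be considered for a restart as well.
 */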
static inline void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
                set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
                if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
                        struct request_queue *q = hctx->queue;

                        if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
                                set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
                }
        }
}

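/* Check whether a scheduler restart has been marked for this hardware queue. */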
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif