Commit | Line | Data |
---|---|---|
320ae51f JA | 1 | #ifndef INT_BLK_MQ_H |
320ae51f JA | 2 | #define INT_BLK_MQ_H |
320ae51f JA | 3 | |
24d2f903 CH | 4 | struct blk_mq_tag_set; |
24d2f903 CH | 5 | |
320ae51f JA | 6 | struct blk_mq_ctx { |
320ae51f JA | 7 | struct { |
320ae51f JA | 8 | spinlock_t lock; |
320ae51f JA | 9 | struct list_head rq_list; |
320ae51f JA | 10 | } ____cacheline_aligned_in_smp; |
320ae51f JA | 11 | |
320ae51f JA | 12 | unsigned int cpu; |
320ae51f JA | 13 | unsigned int index_hw; |
320ae51f JA | 14 | |
320ae51f JA | 15 | /* incremented at dispatch time */ |
320ae51f JA | 16 | unsigned long rq_dispatched[2]; |
320ae51f JA | 17 | unsigned long rq_merged; |
320ae51f JA | 18 | |
320ae51f JA | 19 | /* incremented at completion time */ |
320ae51f JA | 20 | unsigned long ____cacheline_aligned_in_smp rq_completed[2]; |
320ae51f JA | 21 | |
320ae51f JA | 22 | struct request_queue *queue; |
320ae51f JA | 23 | struct kobject kobj; |
4bb659b1 | 24 | } ____cacheline_aligned_in_smp; |
320ae51f | 25 | |
320ae51f | 26 | void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); |
780db207 | 27 | void blk_mq_freeze_queue(struct request_queue *q); |
3edcc0ce | 28 | void blk_mq_free_queue(struct request_queue *q); |
e3a2b3f9 | 29 | int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); |
aed3ea94 | 30 | void blk_mq_wake_waiters(struct request_queue *q); |
320ae51f JA | 31 | |
320ae51f JA | 32 | /* |
320ae51f JA | 33 | * CPU hotplug helpers |
320ae51f JA | 34 | */ |
676141e4 JA | 35 | void blk_mq_enable_hotplug(void); |
676141e4 JA | 36 | void blk_mq_disable_hotplug(void); |
320ae51f JA | 37 | |
320ae51f JA | 38 | /* |
320ae51f JA | 39 | * CPU -> queue mappings |
320ae51f JA | 40 | */ |
da695ba2 | 41 | int blk_mq_map_queues(struct blk_mq_tag_set *set); |
f14bbe77 | 42 | extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); |
320ae51f | 43 | |
7d7e0f90 CH | 44 | static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, |
7d7e0f90 CH | 45 | int cpu) |
7d7e0f90 CH | 46 | { |
7d7e0f90 CH | 47 | return q->queue_hw_ctx[q->mq_map[cpu]]; |
7d7e0f90 CH | 48 | } |
7d7e0f90 CH | 49 | |
67aec14c JA | 50 | /* |
67aec14c JA | 51 | * sysfs helpers |
67aec14c JA | 52 | */ |
67aec14c JA | 53 | extern int blk_mq_sysfs_register(struct request_queue *q); |
67aec14c JA | 54 | extern void blk_mq_sysfs_unregister(struct request_queue *q); |
868f2f0b | 55 | extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); |
67aec14c | 56 | |
90415837 CH | 57 | extern void blk_mq_rq_timed_out(struct request *req, bool reserved); |
90415837 CH | 58 | |
e09aae7e ML | 59 | void blk_mq_release(struct request_queue *q); |
e09aae7e ML | 60 | |
1aecfe48 ML | 61 | static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, |
1aecfe48 ML | 62 | unsigned int cpu) |
1aecfe48 ML | 63 | { |
1aecfe48 ML | 64 | return per_cpu_ptr(q->queue_ctx, cpu); |
1aecfe48 ML | 65 | } |
1aecfe48 ML | 66 | |
1aecfe48 ML | 67 | /* |
1aecfe48 ML | 68 | * This assumes per-cpu software queueing queues. They could be per-node |
1aecfe48 ML | 69 | * as well, for instance. For now this is hardcoded as-is. Note that we don't |
1aecfe48 ML | 70 | * care about preemption, since we know the ctx's are persistent. This does |
1aecfe48 ML | 71 | * mean that we can't rely on ctx always matching the currently running CPU. |
1aecfe48 ML | 72 | */ |
1aecfe48 ML | 73 | static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) |
1aecfe48 ML | 74 | { |
1aecfe48 ML | 75 | return __blk_mq_get_ctx(q, get_cpu()); |
1aecfe48 ML | 76 | } |
1aecfe48 ML | 77 | |
1aecfe48 ML | 78 | static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) |
1aecfe48 ML | 79 | { |
1aecfe48 ML | 80 | put_cpu(); |
1aecfe48 ML | 81 | } |
1aecfe48 ML | 82 | |
cb96a42c ML | 83 | struct blk_mq_alloc_data { |
cb96a42c ML | 84 | /* input parameter */ |
cb96a42c ML | 85 | struct request_queue *q; |
6f3b0e8b | 86 | unsigned int flags; |
cb96a42c ML | 87 | |
cb96a42c ML | 88 | /* input & output parameter */ |
cb96a42c ML | 89 | struct blk_mq_ctx *ctx; |
cb96a42c ML | 90 | struct blk_mq_hw_ctx *hctx; |
cb96a42c ML | 91 | }; |
cb96a42c ML | 92 | |
cb96a42c ML | 93 | static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data, |
6f3b0e8b CH | 94 | struct request_queue *q, unsigned int flags, |
6f3b0e8b CH | 95 | struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx) |
cb96a42c ML | 96 | { |
cb96a42c ML | 97 | data->q = q; |
6f3b0e8b | 98 | data->flags = flags; |
cb96a42c ML | 99 | data->ctx = ctx; |
cb96a42c ML | 100 | data->hctx = hctx; |
cb96a42c ML | 101 | } |
cb96a42c ML | 102 | |
19c66e59 ML | 103 | static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) |
19c66e59 ML | 104 | { |
19c66e59 ML | 105 | return hctx->nr_ctx && hctx->tags; |
19c66e59 ML | 106 | } |
19c66e59 ML | 107 | |
320ae51f | 108 | #endif |
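
The comment above `blk_mq_get_ctx()` (file lines 67-72) notes that the software queues are per-CPU and persistent: `get_cpu()` pins the caller only for the lookup, and the returned ctx stays valid even if the task later migrates, but every get must still be paired with `blk_mq_put_ctx()` to re-enable preemption. A minimal caller sketch, using only the declarations in this header; the function name `example_ctx_usage` is hypothetical:

```c
/*
 * Sketch only: shows the expected pairing of blk_mq_get_ctx() and
 * blk_mq_put_ctx(), plus the ctx -> hctx lookup via blk_mq_map_queue().
 * "example_ctx_usage" is a hypothetical caller, not part of the header.
 */
static void example_ctx_usage(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;

	ctx = blk_mq_get_ctx(q);		/* get_cpu(): preemption off */
	hctx = blk_mq_map_queue(q, ctx->cpu);	/* hw queue serving this ctx */

	/* ... work against ctx->rq_list / hctx would go here ... */

	blk_mq_put_ctx(ctx);			/* put_cpu(): preemption back on */
}
```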
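
`blk_mq_set_alloc_data()` just packs the queue, the allocation flags, and the ctx/hctx pair into a `struct blk_mq_alloc_data` before it is handed to the tag allocator; ctx and hctx are marked "input & output" because the allocator may update them. A sketch of that pattern, assuming `BLK_MQ_REQ_NOWAIT` (declared in include/linux/blk-mq.h in this era) is an acceptable flag; `example_prepare_alloc` is a hypothetical helper:

```c
/*
 * Sketch only: how the allocation context is typically assembled before a
 * request/tag allocation. BLK_MQ_REQ_NOWAIT is assumed to be a valid value
 * for the flags field added by commit 6f3b0e8b.
 */
static void example_prepare_alloc(struct request_queue *q)
{
	struct blk_mq_alloc_data data;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	/* ctx and hctx serve as input & output parameters for the allocator */
	blk_mq_set_alloc_data(&data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);

	/* ... the tag/request allocation would consume &data here ... */

	blk_mq_put_ctx(ctx);
}
```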