/* block/blk-mq-tag.c */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu_ida.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * Per tagged queue (tag address space) map
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;
	unsigned int nr_batch_move;
	unsigned int nr_max_cache;

	struct percpu_ida free_tags;
	struct percpu_ida reserved_tags;
};
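
/*
 * Tag numbering convention (derived from the alloc/free paths below):
 * reserved tags occupy the low ids [0, nr_reserved_tags) and regular tags
 * occupy [nr_reserved_tags, nr_tags). Each percpu_ida pool allocates from 0,
 * so regular tags are offset by nr_reserved_tags on alloc and free. E.g.
 * with nr_tags = 64 and nr_reserved_tags = 2, tags 0-1 come from
 * reserved_tags and tags 2-63 from free_tags.
 */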

/*
 * Wait until a regular tag is available by allocating one (sleeping if
 * necessary) and immediately releasing it.
 */
void blk_mq_wait_for_tags(struct blk_mq_tags *tags)
{
	int tag = blk_mq_get_tag(tags, __GFP_WAIT, false);
	blk_mq_put_tag(tags, tag);
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	return !tags ||
		percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0;
}

static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
{
	int tag;

	tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ?
			       TASK_UNINTERRUPTIBLE : TASK_RUNNING);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;
	return tag + tags->nr_reserved_tags;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
					      gfp_t gfp)
{
	int tag;

	if (unlikely(!tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ?
			       TASK_UNINTERRUPTIBLE : TASK_RUNNING);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;
	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved)
{
	if (!reserved)
		return __blk_mq_get_tag(tags, gfp);

	return __blk_mq_get_reserved_tag(tags, gfp);
}
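
/*
 * Illustrative caller-side sketch (not part of this file; the error handling
 * shown is hypothetical). A non-blocking allocation passes a gfp mask
 * without __GFP_WAIT and must be prepared for BLK_MQ_TAG_FAIL:
 *
 *	unsigned int tag = blk_mq_get_tag(tags, GFP_ATOMIC, false);
 *	if (tag == BLK_MQ_TAG_FAIL)
 *		return -EBUSY;
 *	... use the tag to index the queue's request map ...
 *	blk_mq_put_tag(tags, tag);
 */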

static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	percpu_ida_free(&tags->reserved_tags, tag);
}

void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag >= tags->nr_reserved_tags)
		__blk_mq_put_tag(tags, tag);
	else
		__blk_mq_put_reserved_tag(tags, tag);
}

static int __blk_mq_tag_iter(unsigned id, void *data)
{
	unsigned long *tag_map = data;
	__set_bit(id, tag_map);
	return 0;
}

/*
 * Note that the bitmap handed to @fn has the *free* tags set; callers scan
 * for clear bits to find tags that are still in flight.
 */
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
			  void (*fn)(void *, unsigned long *), void *data)
{
	unsigned long *tag_map;
	size_t map_size;

	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		return;

	percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map);
	if (tags->nr_reserved_tags)
		percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter,
			tag_map);

	fn(data, tag_map);
	kfree(tag_map);
}
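
/*
 * A minimal callback sketch for blk_mq_tag_busy_iter() (hypothetical, for
 * illustration only; "count_busy" and the tag count passed via @data are
 * not part of this file):
 *
 *	struct busy_count { unsigned int nr_tags, busy; };
 *
 *	static void count_busy(void *data, unsigned long *free_map)
 *	{
 *		struct busy_count *bc = data;
 *		unsigned int tag;
 *
 *		for (tag = 0; tag < bc->nr_tags; tag++)
 *			if (!test_bit(tag, free_map))
 *				bc->busy++;
 *	}
 */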

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	unsigned int nr_tags, nr_cache;
	struct blk_mq_tags *tags;
	int ret;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	nr_tags = total_tags - reserved_tags;
	nr_cache = nr_tags / num_possible_cpus();

	if (nr_cache < BLK_MQ_TAG_CACHE_MIN)
		nr_cache = BLK_MQ_TAG_CACHE_MIN;
	else if (nr_cache > BLK_MQ_TAG_CACHE_MAX)
		nr_cache = BLK_MQ_TAG_CACHE_MAX;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	tags->nr_max_cache = nr_cache;
	tags->nr_batch_move = max(1u, nr_cache / 2);

	ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags -
				tags->nr_reserved_tags,
				tags->nr_max_cache,
				tags->nr_batch_move);
	if (ret)
		goto err_free_tags;

	if (reserved_tags) {
		/*
		 * With max_cache and batch set to 1, the allocator falls back
		 * to no per-cpu caching. That's fine; reserved tag allocation
		 * is allowed to be slow.
		 */
		ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags,
					1, 1);
		if (ret)
			goto err_reserved_tags;
	}

	return tags;

err_reserved_tags:
	percpu_ida_destroy(&tags->free_tags);
err_free_tags:
	kfree(tags);
	return NULL;
}
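
/*
 * Illustrative setup/teardown pairing (the depths below are made-up example
 * values, not defaults):
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_init_tags(64, 2, NUMA_NO_NODE);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_tags(tags);
 */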

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	percpu_ida_destroy(&tags->free_tags);
	percpu_ida_destroy(&tags->reserved_tags);
	kfree(tags);
}

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int cpu;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u,"
			" max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags,
			tags->nr_batch_move, tags->nr_max_cache);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n",
			percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids),
			percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids));

	for_each_possible_cpu(cpu) {
		page += sprintf(page, " cpu%02u: nr_free=%u\n", cpu,
				percpu_ida_free_tags(&tags->free_tags, cpu));
	}

	return page - orig_page;
}
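
/*
 * Sample of the resulting sysfs output, reconstructed from the format
 * strings above (all numbers are illustrative only):
 *
 *	nr_tags=64, reserved_tags=2, batch_move=4, max_cache=8
 *	nr_free=60, nr_reserved=2
 *	 cpu00: nr_free=30
 *	 cpu01: nr_free=30
 */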