mellanox: Switch to bitmap_zalloc()
[linux-2.6-block.git] drivers/net/ethernet/mellanox/mlx5/core/uar.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

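/* Ask firmware for a UAR index via the ALLOC_UAR command; on success the
 * allocated index is returned through @uarn.
 */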
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
        u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0};
        int err;

        MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (!err)
                *uarn = MLX5_GET(alloc_uar_out, out, uar);
        return err;
}
EXPORT_SYMBOL(mlx5_cmd_alloc_uar);

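/* Return a UAR index to firmware via the DEALLOC_UAR command. */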
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
        u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0};

        MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
        MLX5_SET(dealloc_uar_in, in, uar, uarn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);

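/* Number of UARs sharing one host system page: several 4KB UARs fit in a
 * larger host page when the device supports the uar_4k capability;
 * otherwise it is one UAR per page.
 */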
static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
        if (MLX5_CAP_GEN(mdev, uar_4k))
                return MLX5_CAP_GEN(mdev, num_of_uars_per_page);

        return 1;
}

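/* Translate a UAR index into the PFN of the system page, within the
 * device's BAR 0, that contains it.
 */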
static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
{
        u32 system_page_index;

        if (MLX5_CAP_GEN(mdev, uar_4k))
                system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
        else
                system_page_index = index;

        return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
}

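/* kref release callback: unlink the page from its list (the caller holds
 * the list lock), unmap it, return the UAR to firmware and free the
 * bookkeeping.
 */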
static void up_rel_func(struct kref *kref)
{
        struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);

        list_del(&up->list);
        iounmap(up->map);
        if (mlx5_cmd_free_uar(up->mdev, up->index))
                mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
        bitmap_free(up->reg_bitmap);
        bitmap_free(up->fp_bitmap);
        kfree(up);
}

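/* Allocate one system page worth of UARs: get a UAR index from firmware,
 * map the page (write-combining when @map_wc) and seed the two bitmaps,
 * where a set bit marks a free slot. Within each UAR, the first
 * MLX5_NON_FP_BFREGS_PER_UAR slots are regular bfregs and the remainder
 * are fast-path bfregs.
 */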
static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
                                              bool map_wc)
{
        struct mlx5_uars_page *up;
        int err = -ENOMEM;
        phys_addr_t pfn;
        int bfregs;
        int i;

        bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
        up = kzalloc(sizeof(*up), GFP_KERNEL);
        if (!up)
                return ERR_PTR(err);

        up->mdev = mdev;
        up->reg_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
        if (!up->reg_bitmap)
                goto error1;

        up->fp_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
        if (!up->fp_bitmap)
                goto error1;

        for (i = 0; i < bfregs; i++)
                if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
                        set_bit(i, up->reg_bitmap);
                else
                        set_bit(i, up->fp_bitmap);

        up->bfregs = bfregs;
        up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
        up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;

        err = mlx5_cmd_alloc_uar(mdev, &up->index);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
                goto error1;
        }

        pfn = uar2pfn(mdev, up->index);
        if (map_wc) {
                up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
                if (!up->map) {
                        err = -EAGAIN;
                        goto error2;
                }
        } else {
                up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
                if (!up->map) {
                        err = -ENOMEM;
                        goto error2;
                }
        }
        kref_init(&up->ref_count);
        mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
                      up->index, up->bfregs);
        return up;

error2:
        if (mlx5_cmd_free_uar(mdev, up->index))
                mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
error1:
        bitmap_free(up->fp_bitmap);
        bitmap_free(up->reg_bitmap);
        kfree(up);
        return ERR_PTR(err);
}

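/* Take a reference on the first page of the regular (non-WC) list, or
 * allocate a fresh page, mapped without write-combining, if the list is
 * empty.
 */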
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
{
        struct mlx5_uars_page *ret;

        mutex_lock(&mdev->priv.bfregs.reg_head.lock);
        if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
                ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
                                       struct mlx5_uars_page, list);
                kref_get(&ret->ref_count);
                goto out;
        }
        ret = alloc_uars_page(mdev, false);
        if (IS_ERR(ret))
                goto out;
        list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
        mutex_unlock(&mdev->priv.bfregs.reg_head.lock);

        return ret;
}
EXPORT_SYMBOL(mlx5_get_uars_page);

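/* Drop a reference taken by mlx5_get_uars_page(); the page is torn down
 * by up_rel_func() when the last reference goes away.
 */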
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
{
        mutex_lock(&mdev->priv.bfregs.reg_head.lock);
        kref_put(&up->ref_count, up_rel_func);
        mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
}
EXPORT_SYMBOL(mlx5_put_uars_page);
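
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * consumers keep a reference on the UAR page for as long as they use its
 * doorbell mapping.
 *
 *	struct mlx5_uars_page *up = mlx5_get_uars_page(mdev);
 *
 *	if (IS_ERR(up))
 *		return PTR_ERR(up);
 *	... write doorbells through up->map ...
 *	mlx5_put_uars_page(mdev, up);
 */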

static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
{
        /* return the offset in bytes from the start of the page to the
         * blue flame area of the UAR
         */
        return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
               (dbi % MLX5_BFREGS_PER_UAR) *
               (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
}

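/* Carve one bfreg out of the first page on the matching (WC or regular)
 * list, allocating a new page if the list is empty. When the requested
 * kind of slot is exhausted the page is removed from the list, though it
 * stays alive through its refcount.
 */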
static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
                       bool map_wc, bool fast_path)
{
        struct mlx5_bfreg_data *bfregs;
        struct mlx5_uars_page *up;
        struct list_head *head;
        unsigned long *bitmap;
        unsigned int *avail;
        struct mutex *lock; /* pointer to right mutex */
        int dbi;

        bfregs = &mdev->priv.bfregs;
        if (map_wc) {
                head = &bfregs->wc_head.list;
                lock = &bfregs->wc_head.lock;
        } else {
                head = &bfregs->reg_head.list;
                lock = &bfregs->reg_head.lock;
        }
        mutex_lock(lock);
        if (list_empty(head)) {
                up = alloc_uars_page(mdev, map_wc);
                if (IS_ERR(up)) {
                        mutex_unlock(lock);
                        return PTR_ERR(up);
                }
                list_add(&up->list, head);
        } else {
                up = list_entry(head->next, struct mlx5_uars_page, list);
                kref_get(&up->ref_count);
        }
        if (fast_path) {
                bitmap = up->fp_bitmap;
                avail = &up->fp_avail;
        } else {
                bitmap = up->reg_bitmap;
                avail = &up->reg_avail;
        }
        dbi = find_first_bit(bitmap, up->bfregs);
        clear_bit(dbi, bitmap);
        (*avail)--;
        if (!(*avail))
                list_del(&up->list);

        bfreg->map = up->map + map_offset(mdev, dbi);
        bfreg->up = up;
        bfreg->wc = map_wc;
        bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
        mutex_unlock(lock);

        return 0;
}

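/* Allocate a bfreg, retrying without write-combining if the WC mapping is
 * not available (-EAGAIN from the WC ioremap path).
 */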
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
                     bool map_wc, bool fast_path)
{
        int err;

        err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
        if (!err)
                return 0;

        if (err == -EAGAIN && map_wc)
                return alloc_bfreg(mdev, bfreg, false, fast_path);

        return err;
}
EXPORT_SYMBOL(mlx5_alloc_bfreg);
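
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * drivers typically pair mlx5_alloc_bfreg() with mlx5_free_bfreg() around
 * the lifetime of a send queue.
 *
 *	struct mlx5_sq_bfreg bfreg;
 *	int err;
 *
 *	err = mlx5_alloc_bfreg(mdev, &bfreg, true, false);
 *	if (err)
 *		return err;
 *	... ring doorbells through bfreg.map ...
 *	mlx5_free_bfreg(mdev, &bfreg);
 */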

static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
                                           struct mlx5_uars_page *up,
                                           struct mlx5_sq_bfreg *bfreg)
{
        unsigned int uar_idx;
        unsigned int bfreg_idx;
        unsigned int bf_reg_size;

        bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);

        uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
        bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;

        return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
}

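/* Release a bfreg: addr_to_dbi_in_syspage() inverts map_offset() to
 * recover the slot index, the slot is returned to its bitmap, and the
 * page is put back on the proper list when the freed kind of slot goes
 * from none available back to one. Finally the page reference taken at
 * allocation time is dropped.
 */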
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{
        struct mlx5_bfreg_data *bfregs;
        struct mlx5_uars_page *up;
        struct mutex *lock; /* pointer to right mutex */
        unsigned int dbi;
        bool fp;
        unsigned int *avail;
        unsigned long *bitmap;
        struct list_head *head;

        bfregs = &mdev->priv.bfregs;
        if (bfreg->wc) {
                head = &bfregs->wc_head.list;
                lock = &bfregs->wc_head.lock;
        } else {
                head = &bfregs->reg_head.list;
                lock = &bfregs->reg_head.lock;
        }
        up = bfreg->up;
        dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
        fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
        if (fp) {
                avail = &up->fp_avail;
                bitmap = up->fp_bitmap;
        } else {
                avail = &up->reg_avail;
                bitmap = up->reg_bitmap;
        }
        mutex_lock(lock);
        (*avail)++;
        set_bit(dbi, bitmap);
        if (*avail == 1)
                list_add_tail(&up->list, head);

        kref_put(&up->ref_count, up_rel_func);
        mutex_unlock(lock);
}
EXPORT_SYMBOL(mlx5_free_bfreg);