/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include "mlx4.h"

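/*
 * Simple bitmap allocator for HCA object numbers (QPs, CQs, SRQs and
 * the like).  ->last remembers where the previous search ended so that
 * allocation proceeds round-robin through the table, and ->top (rotated
 * under ->mask each time the search wraps) is OR'ed into the returned
 * number, so a quickly recycled bitmap index still yields a different
 * object number.
 */
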
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);

	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		bitmap->last = (obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		--bitmap->avail;

	spin_unlock(&bitmap->lock);

	return obj;
}

void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
	mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}

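/*
 * Scan 'bitmap' from bit 'start' for a run of 'len' clear bits whose
 * first bit is aligned to 'align' and in which no bit position matches
 * 'skip_mask'.  Returns the start of the run, or -1 if nothing suitable
 * exists below 'nbits'.
 */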
static unsigned long find_aligned_range(unsigned long *bitmap,
					u32 start, u32 nbits,
					int len, int align, u32 skip_mask)
{
	unsigned long end, i;

again:
	start = ALIGN(start, align);

	while ((start < nbits) && (test_bit(start, bitmap) ||
				   (start & skip_mask)))
		start += align;

	if (start >= nbits)
		return -1;

	end = start + len;
	if (end > nbits)
		return -1;

	for (i = start + 1; i < end; i++) {
		if (test_bit(i, bitmap) || ((u32)i & skip_mask)) {
			start = i + 1;
			goto again;
		}
	}

	return start;
}

u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
			    int align, u32 skip_mask)
{
	u32 obj;

	if (likely(cnt == 1 && align == 1 && !skip_mask))
		return mlx4_bitmap_alloc(bitmap);

	spin_lock(&bitmap->lock);

	obj = find_aligned_range(bitmap->table, bitmap->last,
				 bitmap->max, cnt, align, skip_mask);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_aligned_range(bitmap->table, 0, bitmap->max,
					 cnt, align, skip_mask);
	}

	if (obj < bitmap->max) {
		bitmap_set(bitmap->table, obj, cnt);
		if (obj == bitmap->last) {
			bitmap->last = (obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		bitmap->avail -= cnt;

	spin_unlock(&bitmap->lock);

	return obj;
}

u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
{
	return bitmap->avail;
}

void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
			    int use_rr)
{
	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	if (!use_rr) {
		bitmap->last = min(bitmap->last, obj);
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
	}
	bitmap_clear(bitmap->table, obj, cnt);
	bitmap->avail += cnt;
	spin_unlock(&bitmap->lock);
}

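/*
 * Set up a bitmap covering 'num' objects (num must be a power of two).
 * The lowest 'reserved_bot' and highest 'reserved_top' objects are kept
 * permanently reserved, and 'mask' selects which bits of the returned
 * numbers take part in the round-robin rotation described above.
 */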
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top)
{
	/* num must be a power of 2 */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top  = 0;
	bitmap->max  = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	bitmap->avail = num - reserved_top - reserved_bot;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
				sizeof (long), GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	bitmap_set(bitmap->table, 0, reserved_bot);

	return 0;
}

void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
	kfree(bitmap->table);
}

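/*
 * A minimal usage sketch of the bitmap allocator above; the sizes,
 * mask and reservation counts here are purely illustrative:
 *
 *	struct mlx4_bitmap map;
 *	u32 obj;
 *
 *	if (mlx4_bitmap_init(&map, 256, 255, 8, 0))
 *		return -ENOMEM;
 *	obj = mlx4_bitmap_alloc(&map);
 *	if (obj != (u32) -1)
 *		mlx4_bitmap_free(&map, obj, 0);
 *	mlx4_bitmap_cleanup(&map);
 */
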
/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf, gfp_t gfp)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs      = 1;
		buf->npages     = 1;
		buf->page_shift = get_order(size) + PAGE_SHIFT;
		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
						     size, &t, gfp);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->direct.buf = NULL;
		buf->nbufs      = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages     = buf->nbufs;
		buf->page_shift = PAGE_SHIFT;
		buf->page_list  = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					  gfp);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, gfp);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;
			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;

			pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);

void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
	int i;

	if (buf->nbufs == 1)
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  buf->direct.map);
	else {
		if (BITS_PER_LONG == 64 && buf->direct.buf)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);

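/*
 * Doorbell records are 4-byte areas carved out of shared, coherently
 * mapped 4 KB pages ("pgdirs").  Each page holds MLX4_DB_PER_PAGE
 * records; the order0 bitmap tracks free single records and order1
 * tracks free aligned pairs, buddy-style.
 */
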
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
						 gfp_t gfp)
{
	struct mlx4_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof *pgdir, gfp);
	if (!pgdir)
		return NULL;

	bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					    &pgdir->db_dma, gfp);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
				    struct mlx4_db *db, int order)
{
	int o, i;

	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
		if (i < MLX4_DB_PER_PAGE >> o)
			goto found;
	}
	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);
	i <<= o;

	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index   = i;
	db->db      = pgdir->db_page + db->index;
	db->dma     = pgdir->db_dma  + db->index * 4;
	db->order   = order;

	return 0;
}

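/*
 * Allocate a doorbell record of the given order (0 for a single record,
 * 1 for an aligned pair).  Pages already on the pgdir list are tried
 * first; a fresh page is allocated only when they are all full.
 */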
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&priv->pgdir_mutex);

	list_for_each_entry(pgdir, &priv->pgdir_list, list)
		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev), gfp);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &priv->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&priv->pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);

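/*
 * Return a doorbell record to its pgdir, merging it back with its buddy
 * when possible.  Once every record in the page is free again, the page
 * is freed and removed from the pgdir list.
 */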
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int o;
	int i;

	mutex_lock(&priv->pgdir_mutex);

	o = db->order;
	i = db->index;

	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);

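/*
 * Allocate everything a hardware work queue needs in one go: a doorbell
 * record, the queue buffer itself and an MTT describing that buffer to
 * the HCA.  On failure the pieces already allocated are unwound in
 * reverse order.
 */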
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct)
{
	int err;

	err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL);
	if (err)
		return err;

	*wqres->db.db = 0;

	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
	if (err)
		goto err_db;

	err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
			    &wqres->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
	mlx4_buf_free(dev, size, &wqres->buf);
err_db:
	mlx4_db_free(dev, &wqres->db);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);

void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size)
{
	mlx4_mtt_cleanup(dev, &wqres->mtt);
	mlx4_buf_free(dev, size, &wqres->buf);
	mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);