net/mlx5e: Implement Fragmented Work Queue (WQ)
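
Introduce mlx5_frag_buf: a buffer built from page-sized, individually
DMA-mapped fragments, so that large work queues no longer depend on
high-order physically contiguous allocations.

The helpers below operate on a descriptor along these lines (a sketch
inferred from how the code uses the fields; exact declaration and field
order in include/linux/mlx5/driver.h may differ):

	struct mlx5_buf_list {
		void		*buf;	/* CPU address of one fragment */
		dma_addr_t	map;	/* DMA address of that fragment */
	};

	struct mlx5_frag_buf {
		struct mlx5_buf_list	*frags;	/* one entry per fragment */
		int			npages;	/* number of fragments */
		int			size;	/* total size in bytes */
		u8			page_shift;	/* log2(fragment size) */
	};
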
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 2c6e3c7b7417943b643f21cd3e7d25ebd0061d9a..44791de5afe6e191df22b1fb4940d82dcd5f0a58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -106,6 +106,63 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
 
+int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+                            struct mlx5_frag_buf *buf, int node)
+{
+       int i;
+
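+       /* Book-keeping first: one mlx5_buf_list entry (CPU + DMA
+        * address) is needed for every PAGE_SIZE fragment.
+        */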
+       buf->size = size;
+       buf->npages = 1 << get_order(size);
+       buf->page_shift = PAGE_SHIFT;
+       buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
+                            GFP_KERNEL);
+       if (!buf->frags)
+               goto err_out;
+
+       for (i = 0; i < buf->npages; i++) {
+               struct mlx5_buf_list *frag = &buf->frags[i];
+               int frag_sz = min_t(int, size, PAGE_SIZE);
+
+               frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
+                                                         &frag->map, node);
+               if (!frag->buf)
+                       goto err_free_buf;
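+               /* Each fragment is handed to HW as a (1 << page_shift)
+                * page, so its DMA address must be aligned accordingly;
+                * a partial last fragment is not guaranteed that by the
+                * DMA API, hence the explicit check.
+                */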
+               if (frag->map & ((1 << buf->page_shift) - 1)) {
+                       dma_free_coherent(&dev->pdev->dev, frag_sz,
+                                         buf->frags[i].buf, buf->frags[i].map);
+                       mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
+                                      &frag->map, buf->page_shift);
+                       goto err_free_buf;
+               }
+               size -= frag_sz;
+       }
+
+       return 0;
+
+err_free_buf:
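+       /* Fragments [0, i) are all full pages here: only the last
+        * fragment can be partial, and on failure it was either never
+        * allocated or already freed just above.
+        */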
+       while (i--)
+               dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
+                                 buf->frags[i].map);
+       kfree(buf->frags);
+err_out:
+       return -ENOMEM;
+}
+
+void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
+{
+       int size = buf->size;
+       int i;
+
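+       /* Recompute fragment sizes exactly as the allocator did, so
+        * that a partial last fragment is freed with its true size.
+        */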
+       for (i = 0; i < buf->npages; i++) {
+               int frag_sz = min_t(int, size, PAGE_SIZE);
+
+               dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
+                                 buf->frags[i].map);
+               size -= frag_sz;
+       }
+       kfree(buf->frags);
+}
+
 static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
                                                 int node)
 {
@@ -230,3 +287,12 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
        }
 }
 EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
+
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
+{
+       int i;
+
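+       /* With page_shift == PAGE_SHIFT every fragment is one HW page,
+        * so the page array is just the fragments' DMA addresses.
+        */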
+       for (i = 0; i < buf->npages; i++)
+               pas[i] = cpu_to_be64(buf->frags[i].map);
+}
+EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);
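
For context, a caller would pair these helpers roughly as follows
(hypothetical sketch; create_my_wq(), the fixed-size pas[] array and the
4-page size are illustrative and not part of this patch):

	static int create_my_wq(struct mlx5_core_dev *mdev, int node)
	{
		struct mlx5_frag_buf buf;
		__be64 pas[4];	/* illustrative: room for a 4-page WQ */
		int err;

		/* Allocate the WQ as individually mapped page fragments. */
		err = mlx5_frag_buf_alloc_node(mdev, 4 * PAGE_SIZE, &buf, node);
		if (err)
			return err;

		/* Collect every fragment's DMA address for the device. */
		mlx5_fill_page_frag_array(&buf, pas);

		/* ... create the HW object with pas[], use the WQ, then: */
		mlx5_frag_buf_free(mdev, &buf);
		return 0;
	}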