/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

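/*
 * Illustration (not driver code): with max_order = 2 the buddy manages
 * four order-0 segments.  A fresh buddy has one free order-2 block, so an
 * order-0 allocation splits it twice, and the matching free coalesces all
 * the way back up:
 *
 *	u32 seg = mlx4_buddy_alloc(&buddy, 0);	// seg == 0; the buddies at
 *						// order 0 and order 1 are
 *						// marked free
 *	mlx4_buddy_free(&buddy, seg, 0);	// test_bit(seg ^ 1) finds
 *						// each free buddy, so the
 *						// block coalesces to order 2
 */
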
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(s * sizeof(long));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

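/*
 * Minimal init/cleanup sketch (illustration only; the chosen order and
 * the error handling are up to the caller):
 *
 *	struct mlx4_buddy buddy;
 *
 *	if (mlx4_buddy_init(&buddy, 4))		// 16 order-0 segments
 *		return -ENOMEM;
 *	...
 *	mlx4_buddy_cleanup(&buddy);
 */
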
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param, out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		mtt->order      = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

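/*
 * Worked example of the order computation above (illustration only):
 * npages == 5 leaves the loop at i == 8, so mtt->order == 3 and eight
 * MTT entries are reserved; npages == 0 yields order == -1, which marks
 * a physically contiguous region that needs no MTT entries at all.
 */
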
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

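/*
 * Worked example (illustration only): with the usual mtt_entry_sz of 8
 * bytes, an MTT at offset 0x1000 lives at byte address 0x8000 within the
 * MTT table, i.e. mlx4_mtt_addr() returns 0x1000 * 8.
 */
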
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

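/*
 * The two helpers above are inverse 8-bit rotations: hw_index_to_key()
 * rotates the MPT index left past the low key-tag byte, and
 * key_to_hw_index() undoes it (illustration only):
 *
 *	u32 ind = 0x00123456;
 *	u32 key = hw_index_to_key(ind);		// 0x12345600
 *	WARN_ON(key_to_hw_index(key) != ind);	// round-trips exactly
 */
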
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

/* Must protect against concurrent access */
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
		       struct mlx4_mpt_entry ***mpt_entry)
{
	int err;
	int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
	struct mlx4_cmd_mailbox *mailbox = NULL;

	if (mmr->enabled != MLX4_MPT_EN_HW)
		return -EINVAL;

	err = mlx4_HW2SW_MPT(dev, NULL, key);
	if (err) {
		mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
		mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
		return err;
	}

	mmr->enabled = MLX4_MPT_EN_SW;

	if (!mlx4_is_mfunc(dev)) {
		**mpt_entry = mlx4_table_find(
				&mlx4_priv(dev)->mr_table.dmpt_table,
				key, NULL);
	} else {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR_OR_NULL(mailbox))
			return PTR_ERR(mailbox);

		err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
				   0, MLX4_CMD_QUERY_MPT,
				   MLX4_CMD_TIME_CLASS_B,
				   MLX4_CMD_WRAPPED);
		if (err)
			goto free_mailbox;

		*mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf;
	}

	if (!(*mpt_entry) || !(**mpt_entry)) {
		err = -ENOMEM;
		goto free_mailbox;
	}

	return 0;

free_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt);

int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
			 struct mlx4_mpt_entry **mpt_entry)
{
	int err;

	if (!mlx4_is_mfunc(dev)) {
		/* Make sure any changes to this entry are flushed */
		wmb();

		*(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW;

		/* Make sure the new status is written */
		wmb();

		err = mlx4_SYNC_TPT(dev);
	} else {
		int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);

		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
				     buf);

		err = mlx4_SW2HW_MPT(dev, mailbox, key);
	}

	if (!err) {
		mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
		mmr->enabled = MLX4_MPT_EN_HW;
	}
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);

void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
			struct mlx4_mpt_entry **mpt_entry)
{
	if (mlx4_is_mfunc(dev)) {
		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
				     buf);
		mlx4_free_cmd_mailbox(dev, mailbox);
	}
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);

int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
			 u32 pdn)
{
	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
	/* The wrapper function will put the slave's id here */
	if (mlx4_is_mfunc(dev))
		pd_flags &= ~MLX4_MPT_PD_VF_MASK;

	mpt_entry->pd_flags = cpu_to_be32(pd_flags |
					  (pdn & MLX4_MPT_PD_MASK)
					  | MLX4_MPT_PD_FLAG_EN_INV);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd);

int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
			     struct mlx4_mpt_entry *mpt_entry,
			     u32 access)
{
	u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) |
		    (access & MLX4_PERM_MASK);

	mpt_entry->flags = cpu_to_be32(flags);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access);

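/*
 * A minimal sketch of the MPT modification API above (illustration only;
 * assumes the caller serializes access to "mmr" and that new_pdn and
 * new_access are valid for this device):
 *
 *	struct mlx4_mpt_entry *mpt;
 *	struct mlx4_mpt_entry **mpt_ptr = &mpt;
 *	int err;
 *
 *	err = mlx4_mr_hw_get_mpt(dev, mmr, &mpt_ptr);	// HW2SW_MPT
 *	if (err)
 *		return err;
 *	err = mlx4_mr_hw_change_pd(dev, *mpt_ptr, new_pdn);
 *	if (!err)
 *		err = mlx4_mr_hw_change_access(dev, *mpt_ptr, new_access);
 *	if (!err)
 *		err = mlx4_mr_hw_write_mpt(dev, mmr, mpt_ptr);	// SW2HW_MPT
 *	mlx4_mr_hw_put_mpt(dev, mpt_ptr);
 *	return err;
 */
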
static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova       = iova;
	mr->size       = size;
	mr->pd	       = pd;
	mr->access     = access;
	mr->enabled    = MLX4_MPT_DISABLED;
	mr->key	       = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mpt_reserve(dev);
}

void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}

static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_release(dev, index);
}

int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp);
}

static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mpt_alloc_icm(dev, index, gfp);
}

void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	return __mlx4_mpt_free_icm(dev, index);
}

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mpt_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err) {
			mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
				  err);
			return err;
		}

		mr->enabled = MLX4_MPT_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);

	return 0;
}

int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int ret;

	ret = mlx4_mr_free_reserved(dev, mr);
	if (ret)
		return ret;
	if (mr->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mpt_release(dev, key_to_hw_index(mr->key));

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	mlx4_mtt_cleanup(dev, &mr->mtt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);

int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
			    u64 iova, u64 size, int npages,
			    int page_shift, struct mlx4_mpt_entry *mpt_entry)
{
	int err;

	mpt_entry->start       = cpu_to_be64(iova);
	mpt_entry->length      = cpu_to_be64(size);
	mpt_entry->entity_size = cpu_to_be32(page_shift);

	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
	if (err)
		return err;

	mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
					   MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
					   MLX4_MPT_FLAG_SW_OWNS);
	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
						  &mr->mtt));
		if (mr->mtt.page_shift == 0)
			mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
	}
	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
	} else {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}
	mr->enabled = MLX4_MPT_EN_SW;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write);

int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL);
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;
	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO	 |
				       MLX4_MPT_FLAG_REGION	 |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
						  &mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

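/*
 * Typical registration flow built from the helpers above (illustration
 * only; pd, iova, size, access, npages, page_shift and the DMA addresses
 * in pages[] come from the caller):
 *
 *	struct mlx4_mr mr;
 *	int err;
 *
 *	err = mlx4_mr_alloc(dev, pd, iova, size, access,
 *			    npages, page_shift, &mr);
 *	if (err)
 *		return err;
 *	err = mlx4_write_mtt(dev, &mr.mtt, 0, npages, pages);
 *	if (!err)
 *		err = mlx4_mr_enable(dev, &mr);		// SW2HW_MPT
 *	if (err)
 *		mlx4_mr_free(dev, &mr);
 *	return err;
 */
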
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}

int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages      -= chunk;
		start_index += chunk;
		page_list   += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}

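/*
 * Worked example of the first-page arithmetic above (illustration only):
 * with 4 KB pages there are 512 eight-byte MTTs per page.  If
 * mtt->offset + start_index == 500, then max_mtts_first_page is
 * 512 - 500 % 512 = 12, so the first chunk writes at most 12 entries and
 * every later chunk starts page aligned.
 */
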
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
					       MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages      -= chunk;
			start_index += chunk;
			page_list   += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

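/*
 * Usage sketch for mlx4_write_mtt() (illustration only; dma_addr is a
 * DMA address aligned to the MTT page size chosen in mlx4_mtt_init()):
 *
 *	u64 pages[2] = { dma_addr, dma_addr + 4096 };
 *
 *	err = mlx4_write_mtt(dev, &mr.mtt, 0, 2, pages);
 *
 * Each entry is or'ed with MLX4_MTT_FLAG_PRESENT before it reaches the
 * hardware, in both the wrapped (mailbox) and the native path above.
 */
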
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf, gfp_t gfp)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list,
			    gfp);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
		  struct mlx4_mw *mw)
{
	u32 index;

	if ((type == MLX4_MW_TYPE_1 &&
	     !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
	    (type == MLX4_MW_TYPE_2 &&
	     !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
		return -EOPNOTSUPP;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	mw->key	    = hw_index_to_key(index);
	mw->pd      = pd;
	mw->type    = type;
	mw->enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);

int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL);
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
	 * off, thus creating a memory window and not a memory region.
	 */
	mpt_entry->key	    = cpu_to_be32(key_to_hw_index(mw->key));
	mpt_entry->pd_flags = cpu_to_be32(mw->pd);
	if (mw->type == MLX4_MW_TYPE_2) {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->qpn       = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mw->key) &
			     (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mw->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mw_enable);

void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	int err;

	if (mw->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mw->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);

		mw->enabled = MLX4_MPT_EN_SW;
	}
	if (mw->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);

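/*
 * Memory-window lifecycle sketch (illustration only): unlike MRs, windows
 * carry no MTTs, so there is no mlx4_write_mtt() step:
 *
 *	struct mlx4_mw mw;
 *	int err;
 *
 *	err = mlx4_mw_alloc(dev, pdn, MLX4_MW_TYPE_2, &mw);
 *	if (err)
 *		return err;
 *	err = mlx4_mw_enable(dev, &mw);	// SW2HW_MPT, window in FREE state
 *	if (err)
 *		mlx4_mw_free(dev, &mw);	// safe: only releases the MPT index
 *	return err;
 */
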
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2((u32)dev->caps.num_mtts /
			      (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

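/*
 * Example of the reservation rounding above (illustration only): with
 * dev->caps.reserved_mtts == 24, fls(23) == 5, so the reserved range is
 * rounded up to a 32-entry (order-5) MTT allocation.
 */
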
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}

int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key    = cpu_to_be32(key);
	fmr->mpt->lkey   = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start  = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);

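/*
 * FMR fast-path sketch (illustration only; pages[] holds DMA addresses
 * and the FMR came from mlx4_fmr_alloc()/mlx4_fmr_enable() below):
 *
 *	u32 lkey, rkey;
 *
 *	err = mlx4_map_phys_fmr(dev, &fmr, pages, npages, iova,
 *				&lkey, &rkey);
 *	...				// post work requests with lkey/rkey
 *	mlx4_fmr_unmap(dev, &fmr, &lkey, &rkey);
 *
 * Each remap adds dev->caps.num_mpts to the hardware index, so the MPT
 * entry addressed stays the same while keys handed out for the previous
 * mapping go stale without a command to the device.
 */
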
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages  = max_pages;
	fmr->max_maps   = max_maps;
	fmr->maps       = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);

	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	(void) mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (!fmr->maps)
		return;

	fmr->maps = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
		return;
	}

	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(fmr->mr.key) &
			     (dev->caps.num_mpts - 1));
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err) {
		pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
		return;
	}
	fmr->mr.enabled = MLX4_MPT_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	int ret;

	if (fmr->maps)
		return -EBUSY;

	ret = mlx4_mr_free(dev, &fmr->mr);
	if (ret)
		return ret;
	fmr->mr.enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
			MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);