/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

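/*
 * Buddy allocator for MTT segments.  buddy->bits[o] holds one bit per
 * free block of 2^o segments (so the order-o bitmap is
 * 1 << (max_order - o) bits long) and buddy->num_free[o] counts the
 * free blocks of that order.  Allocation scans upward from the
 * requested order, splitting a larger block and returning the unused
 * halves to the lower orders; freeing merges a block with its buddy as
 * long as the buddy is free too.
 */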
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

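/*
 * A block's buddy is the neighbour whose (order-adjusted) segment
 * number differs in the lowest bit, hence the seg ^ 1 tests below:
 * order-0 blocks 6 and 7, for example, merge into order-1 block 3.
 */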
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(s * sizeof(long));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		if (is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

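/*
 * MTT entries are handed out in segments of (1 << log_mtts_per_seg)
 * entries.  The buddy allocator counts segments while callers count
 * MTT entries, hence the conversion between "order" (log2 of entries)
 * and "seg_order" (log2 of segments) here and in the free path.
 */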
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

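/*
 * mtt->order is the log2 of the number of MTT entries reserved: the
 * smallest power of two covering npages (npages == 5 gives order 3).
 * npages == 0 leaves order at -1, which the rest of the driver treats
 * as "no translation table", i.e. the MR maps physically contiguous
 * memory and gets MLX4_MPT_FLAG_PHYSICAL when enabled.
 */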
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		mtt->order      = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

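/*
 * Memory keys are the hardware MPT index rotated left by one byte;
 * key_to_hw_index() rotates right again.  Commands mask the recovered
 * index with (dev->caps.num_mpts - 1), so the bits above the index act
 * as a tag: mlx4_map_phys_fmr() bumps it by adding num_mpts, handing
 * out a fresh key for the same MPT entry on every remap.
 */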
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

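/*
 * The mlx4_mr_hw_{get,write,put}_mpt() helpers below let the MR
 * re-registration path edit an MPT entry that hardware already owns:
 * get moves the entry back to software ownership via HW2SW_MPT and
 * returns a pointer to it (the ICM copy natively, a QUERY_MPT mailbox
 * copy under SR-IOV), write hands the edited entry back to hardware,
 * and put releases the mailbox in the SR-IOV case.
 */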
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
		       struct mlx4_mpt_entry ***mpt_entry)
{
	int err;
	int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
	struct mlx4_cmd_mailbox *mailbox = NULL;

	/* Make sure that at this point we have single-threaded access only */

	if (mmr->enabled != MLX4_MPT_EN_HW)
		return -EINVAL;

	err = mlx4_HW2SW_MPT(dev, NULL, key);
	if (err) {
		mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
		mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
		return err;
	}

	mmr->enabled = MLX4_MPT_EN_SW;

	if (!mlx4_is_mfunc(dev)) {
		**mpt_entry = mlx4_table_find(
				&mlx4_priv(dev)->mr_table.dmpt_table,
				key, NULL);
	} else {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR_OR_NULL(mailbox))
			return PTR_ERR(mailbox);

		err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
				   0, MLX4_CMD_QUERY_MPT,
				   MLX4_CMD_TIME_CLASS_B,
				   MLX4_CMD_WRAPPED);
		if (err)
			goto free_mailbox;

		*mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf;
	}

	if (!(*mpt_entry) || !(**mpt_entry)) {
		err = -ENOMEM;
		goto free_mailbox;
	}

	return 0;

free_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt);

int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
			 struct mlx4_mpt_entry **mpt_entry)
{
	int err;

	if (!mlx4_is_mfunc(dev)) {
		/* Make sure any changes to this entry are flushed */
		wmb();

		*(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW;

		/* Make sure the new status is written */
		wmb();

		err = mlx4_SYNC_TPT(dev);
	} else {
		int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);

		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
				     buf);

		err = mlx4_SW2HW_MPT(dev, mailbox, key);
	}

	if (!err) {
		mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
		mmr->enabled = MLX4_MPT_EN_HW;
	}
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);

void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
			struct mlx4_mpt_entry **mpt_entry)
{
	if (mlx4_is_mfunc(dev)) {
		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
				     buf);
		mlx4_free_cmd_mailbox(dev, mailbox);
	}
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);

int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
			 u32 pdn)
{
	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags);
	/* The wrapper function will put the slave's id here */
	if (mlx4_is_mfunc(dev))
		pd_flags &= ~MLX4_MPT_PD_VF_MASK;
	mpt_entry->pd_flags = cpu_to_be32((pd_flags & ~MLX4_MPT_PD_MASK) |
					  (pdn & MLX4_MPT_PD_MASK)
					  | MLX4_MPT_PD_FLAG_EN_INV);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd);

int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
			     struct mlx4_mpt_entry *mpt_entry,
			     u32 access)
{
	u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) |
		    (access & MLX4_PERM_MASK);

	mpt_entry->flags = cpu_to_be32(flags);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access);

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova    = iova;
	mr->size    = size;
	mr->pd      = pd;
	mr->access  = access;
	mr->enabled = MLX4_MPT_DISABLED;
	mr->key     = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

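/*
 * MPT setup happens in three steps: reserve an index in mpt_bitmap,
 * back the dMPT entry with ICM pages (*_alloc_icm), and finally pass
 * ownership to hardware with SW2HW_MPT (see mlx4_mr_enable()).  Each
 * mlx4_mpt_* wrapper below forwards the step to the master through the
 * command interface when running as a multifunction (SR-IOV) slave.
 */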
int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mpt_reserve(dev);
}

void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}

static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_release(dev, index);
}

int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp);
}

static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mpt_alloc_icm(dev, index, gfp);
}

void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	return __mlx4_mpt_free_icm(dev, index);
}

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mpt_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err) {
			mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
				  err);
			return err;
		}

		mr->enabled = MLX4_MPT_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);

	return 0;
}

int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int ret;

	ret = mlx4_mr_free_reserved(dev, mr);
	if (ret)
		return ret;
	if (mr->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mpt_release(dev, key_to_hw_index(mr->key));

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	mlx4_mtt_cleanup(dev, &mr->mtt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);

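/*
 * Re-registration flow: mlx4_mr_rereg_mem_cleanup() above releases the
 * old MTT range, and mlx4_mr_rereg_mem_write() below allocates the new
 * one and rewrites the translation fields of an MPT entry obtained via
 * mlx4_mr_hw_get_mpt().  The entry only reaches hardware again once the
 * caller invokes mlx4_mr_hw_write_mpt().
 */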
int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
			    u64 iova, u64 size, int npages,
			    int page_shift, struct mlx4_mpt_entry *mpt_entry)
{
	int err;

	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
	if (err)
		return err;

	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
						  &mr->mtt));
		if (mr->mtt.page_shift == 0)
			mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
	}
	mr->enabled = MLX4_MPT_EN_SW;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write);

int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL);
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;
	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO    |
				       MLX4_MPT_FLAG_REGION |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
						  &mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

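/*
 * Write one run of MTT entries through the CPU mapping of the ICM
 * table.  The mapping returned by mlx4_table_find() is only assumed
 * contiguous within the page holding the first entry, so a chunk must
 * not cross a page boundary; __mlx4_write_mtt() below does the
 * page-aligned splitting.
 */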
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}

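/*
 * With 4 KB pages a page holds 512 MTT entries.  If, say,
 * (mtt->offset + start_index) % 512 == 500, only 12 entries fit in the
 * first page; every later chunk starts page aligned and may use the
 * full 512.  max_mtts_first_page below computes exactly that limit.
 */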
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many MTTs fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages      -= chunk;
		start_index += chunk;
		page_list   += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}

	return err;
}

int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
							   MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages      -= chunk;
			start_index += chunk;
			page_list   += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf, gfp_t gfp)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list,
			    gfp);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
		  struct mlx4_mw *mw)
{
	u32 index;

	if ((type == MLX4_MW_TYPE_1 &&
	     !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
	     (type == MLX4_MW_TYPE_2 &&
	     !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
		return -ENOTSUPP;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	mw->key	    = hw_index_to_key(index);
	mw->pd      = pd;
	mw->type    = type;
	mw->enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);

int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL);
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
	 * off, thus creating a memory window and not a memory region.
	 */
	mpt_entry->key	    = cpu_to_be32(key_to_hw_index(mw->key));
	mpt_entry->pd_flags = cpu_to_be32(mw->pd);
	if (mw->type == MLX4_MW_TYPE_2) {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->qpn       = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mw->key) &
			     (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mw->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mw_enable);

void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	int err;

	if (mw->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mw->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);

		mw->enabled = MLX4_MPT_EN_SW;
	}
	if (mw->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);

int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2((u32)dev->caps.num_mtts /
			      (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

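/*
 * FMR support: a fast memory region is remapped without any firmware
 * command.  mlx4_map_phys_fmr() flips the MPT status byte to
 * MLX4_MPT_STATUS_SW, rewrites the MTT entries and the key through the
 * CPU mappings cached at allocation time (fmr->mtts, fmr->mpt), then
 * flips the status back to MLX4_MPT_STATUS_HW, with memory barriers
 * ordering the three steps.
 */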
static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}

int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key    = cpu_to_be32(key);
	fmr->mpt->lkey   = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start  = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);

int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (max_maps > dev->caps.max_fmr_maps)
		return -ENOMEM;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages  = max_pages;
	fmr->max_maps   = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);

	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	(void) mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (!fmr->maps)
		return;

	fmr->maps = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
		return;
	}

	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(fmr->mr.key) &
			     (dev->caps.num_mpts - 1));
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err) {
		pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
		return;
	}
	fmr->mr.enabled = MLX4_MPT_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	int ret;

	if (fmr->maps)
		return -EBUSY;

	ret = mlx4_mr_free(dev, &fmr->mr);
	if (ret)
		return ret;
	fmr->mr.enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
			MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);