/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

/* gather buffers posted through UMR must be 2K-aligned */
#define MLX5_UMR_ALIGN 2048
static __be64 *mr_align(__be64 *ptr, int align)
{
	unsigned long mask = align - 1;

	return (__be64 *)(((unsigned long)ptr + mask) & ~mask);
}
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}
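/*
 * Completion handler for the asynchronous MKey creation requests posted by
 * add_keys().  On success the MR is added to its cache bucket and its MKey
 * is inserted into the device MR radix tree; on any failure the fill
 * machinery is throttled for a second via the delay timer.
 */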
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mr_table *table = &dev->mdev.priv.mr_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	if (mr->out.hdr.status) {
		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
			     mr->out.hdr.status,
			     be32_to_cpu(mr->out.hdr.syndrome));
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags);
	key = dev->mdev.priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags);
	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
				&mr->mmr);
	if (err)
		pr_err("Error inserting to mr tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);
}
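/*
 * Post up to @num asynchronous MKey creation commands for cache bucket @c.
 * Backs off with -EAGAIN once MAX_PENDING_REG_MR commands are in flight;
 * completions land in reg_mr_callback().
 */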
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;
	int err = 0;
	int i;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->umred = 1;
		mr->dev = dev;
		in->seg.status = 1 << 6;
		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
		in->seg.log2_page_size = 12;

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
					    sizeof(*in), reg_mr_callback,
					    mr, &mr->out);
		if (err) {
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}
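/*
 * Pop up to @num MRs from the head of cache bucket @c and destroy their
 * MKeys synchronously.
 */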
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}
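/*
 * The debugfs handlers below expose per-bucket "size" and "limit" knobs so
 * the cache can be resized at runtime; the files are created in
 * mlx5_mr_cache_debugfs_init().
 */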
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}
static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}
static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};
static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}
static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}
static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};
static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}
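/*
 * Cache maintenance policy: keep each bucket between limit and 2 * limit
 * entries.  Underfilled buckets are topped up, backing off while the
 * firmware pushes back; overfilled buckets are trimmed one entry at a
 * time, and only after the cache has been idle for 300 seconds with no
 * bucket below its limit.
 */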
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		if (!someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}
static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}
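/*
 * Take an MR from the smallest bucket that can satisfy @order, falling
 * back to larger buckets.  Refill work is queued for any bucket we leave
 * below its limit; a complete miss is counted on the requested bucket.
 */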
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c;
	int i;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);

		if (mr)
			break;
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}
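/*
 * Return an MR to its cache bucket, kicking the shrinker when the bucket
 * rises above 2 * limit.
 */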
static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}
	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}
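/*
 * Expose the cache under <dbg_root>/mr_cache/<order>/: "size" and "limit"
 * are writable and backed by the fops above; "cur" and "miss" are plain
 * u32 counters.
 */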
static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev.priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			return -ENOMEM;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			return -ENOMEM;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			return -ENOMEM;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			return -ENOMEM;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			return -ENOMEM;
	}

	return 0;
}
static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
}
static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}
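/*
 * Create the cache workqueue and initialize the buckets, one per page
 * order from 2 up, seeding each bucket's limit from the device profile
 * when MLX5_PROF_MASK_MR_CACHE is set.
 */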
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int limit;
	int err;
	int i;

	cache->wq = create_singlethread_workqueue("mkey_cache");
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;

		if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE)
			limit = dev->mdev.profile->mr_cache[i].limit;
		else
			limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		ent->limit = limit;
		queue_work(cache->wq, &ent->work);
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	return 0;
}
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}
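/*
 * Allocate a DMA MR in physical-address access mode covering all of
 * memory; no translation table is required.
 */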
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_core_dev *mdev = &dev->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	seg = &in->seg;
	seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	seg->start_addr = 0;

	err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
				    NULL);
	if (err)
		goto err_in;

	kfree(in);
	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}
static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}
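/*
 * Decide whether a registration of this page order can use the UMR path.
 * The threshold below is reconstructed from the cache layout: the buckets
 * cover orders 2..17 (see mlx5_mr_cache_init()), so anything larger must
 * go through reg_create().
 */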
static int use_umr(int order)
{
	/* largest cache bucket holds order-17 regions */
	return order <= 17;
}
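/*
 * Build the UMR send WR that maps a registration onto a cached MKey.
 * Note the pd is smuggled through the fast_reg.page_list pointer; it is
 * presumably recovered when the UMR WQE is built on the send path.
 */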
static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_mr *mr = dev->umrc.mr;

	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = mr->lkey;

	wr->next = NULL;
	wr->send_flags = 0;
	wr->sg_list = sg;
	if (n)
		wr->num_sge = 1;
	else
		wr->num_sge = 0;

	wr->opcode = MLX5_IB_WR_UMR;
	wr->wr.fast_reg.page_list_len = n;
	wr->wr.fast_reg.page_shift = page_shift;
	wr->wr.fast_reg.rkey = key;
	wr->wr.fast_reg.iova_start = virt_addr;
	wr->wr.fast_reg.length = len;
	wr->wr.fast_reg.access_flags = access_flags;
	wr->wr.fast_reg.page_list = (struct ib_fast_reg_page_list *)pd;
}
static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	wr->send_flags = MLX5_IB_SEND_UMR_UNREG;
	wr->opcode = MLX5_IB_WR_UMR;
	wr->wr.fast_reg.rkey = key;
}
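/*
 * UMR CQ completion handler: drain all available completions, waking the
 * mlx5_ib_umr_context sleeper recorded in each wr_id, then re-arm the CQ.
 */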
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
{
	struct mlx5_ib_umr_context *context;
	struct ib_wc wc;
	int err;

	while (1) {
		err = ib_poll_cq(cq, 1, &wc);
		if (err < 0) {
			pr_warn("poll cq error %d\n", err);
			return;
		}
		if (err == 0)
			break;

		context = (struct mlx5_ib_umr_context *)(unsigned long)wc.wr_id;
		context->status = wc.status;
		complete(&context->done);
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}
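/*
 * Register a memory region through the UMR path: take a cached MKey,
 * DMA-map the page array (aligned to MLX5_UMR_ALIGN) and post a UMR WR,
 * sleeping until mlx5_umr_cq_handler() reports completion.  Returns
 * -EAGAIN if the cache cannot supply an MR yet.
 */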
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr wr, *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size = sizeof(u64) * npages;
	int err = 0;
	int i;

	/* single attempt to pull from the cache, topping it up on a miss */
	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!mr->pas) {
		err = -ENOMEM;
		goto free_mr;
	}

	mlx5_ib_populate_pas(dev, umem, page_shift,
			     mr_align(mr->pas, MLX5_UMR_ALIGN), 1);

	mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, mr->dma)) {
		err = -ENOMEM;
		goto free_pas;
	}

	memset(&wr, 0, sizeof(wr));
	wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key,
			 page_shift, virt_addr, len, access_flags);

	mlx5_ib_init_umr_context(&umr_context);
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		goto unmap_dma;
	}
	wait_for_completion(&umr_context.done);
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "reg umr failed\n");
		err = -EFAULT;
	}

	mr->mmr.iova = virt_addr;
	mr->mmr.size = len;
	mr->mmr.pd = to_mpd(pd)->pdn;

unmap_dma:
	up(&umrc->sem);
	dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);

free_pas:
	kfree(mr->pas);

free_mr:
	if (err) {
		free_cached_mr(dev, mr);
		return ERR_PTR(err);
	}

	return mr;
}
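/*
 * Synchronous fallback registration for regions the UMR path cannot
 * handle: build a full CREATE_MKEY mailbox carrying the PAS inline.
 */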
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
				     u64 length, struct ib_umem *umem,
				     int npages, int page_shift,
				     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int inlen;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0);

	in->seg.flags = convert_access(access_flags) |
		MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->seg.start_addr = cpu_to_be64(virt_addr);
	in->seg.len = cpu_to_be64(length);
	in->seg.bsfs_octo_size = 0;
	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
	in->seg.log2_page_size = page_shift;
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
							 1 << page_shift));
	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL,
				    NULL, NULL);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->umem = umem;
	mlx5_vfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);

	return mr;

err_2:
	mlx5_vfree(in);

err_1:
	kfree(mr);

	return ERR_PTR(err);
}
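/*
 * ib_reg_user_mr entry point: pin the user pages, compute the best page
 * order, then try the cached UMR path before falling back to reg_create().
 */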
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n",
		    start, virt_addr, length);
	umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
			   0);
	if (IS_ERR(umem)) {
		mlx5_ib_dbg(dev, "umem get failed\n");
		return ERR_CAST(umem);
	}

	mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
	if (!npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		err = -EINVAL;
		goto error;
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    npages, ncont, order, page_shift);

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
	}

	if (!mr)
		mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
				access_flags);

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);

	mr->umem = umem;
	mr->npages = npages;
	spin_lock(&dev->mr_lock);
	dev->mdev.priv.reg_pages += npages;
	spin_unlock(&dev->mr_lock);
	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}
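/*
 * Release a UMR-registered MKey by posting an unreg UMR WR and waiting for
 * its completion.
 */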
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr wr, *bad;
	int err;

	memset(&wr, 0, sizeof(wr));
	wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);

	mlx5_ib_init_umr_context(&umr_context);
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	}
	wait_for_completion(&umr_context.done);
	up(&umrc->sem);
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
}
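/*
 * Deregistration mirrors the two registration paths: non-UMR MRs destroy
 * their MKey directly, while UMR MRs are unregistered and recycled back
 * into the cache.
 */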
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct ib_umem *umem = mr->umem;
	int npages = mr->npages;
	int umred = mr->umred;
	int err;

	if (!umred) {
		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmr.key, err);
			return err;
		}
	} else {
		err = unreg_umr(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed unregister\n");
			return err;
		}
		free_cached_mr(dev, mr);
	}

	if (umem) {
		ib_umem_release(umem);
		spin_lock(&dev->mr_lock);
		dev->mdev.priv.reg_pages -= npages;
		spin_unlock(&dev->mr_lock);
	}

	if (!umred)
		kfree(mr);

	return 0;
}
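/*
 * ib_create_mr entry point.  When IB_MR_SIGNATURE_EN is requested, the
 * MKey is created in KLM access mode with BSF enabled and a pair of PSVs
 * (memory and wire domains) for signature offload.
 */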
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
				struct ib_mr_init_attr *mr_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int access_mode, err;
	int ndescs = roundup(mr_init_attr->max_reg_descriptors, 4);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	in->seg.status = 1 << 6; /* free */
	in->seg.xlt_oct_size = cpu_to_be32(ndescs);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	access_mode = MLX5_ACCESS_MODE_MTT;

	if (mr_init_attr->flags & IB_MR_SIGNATURE_EN) {
		u32 psv_index[2];

		in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
							   MLX5_MKEY_BSF_EN);
		in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(&dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		access_mode = MLX5_ACCESS_MODE_KLM;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	}

	in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in),
				    NULL, NULL, NULL);
	if (err)
		goto err_destroy_psv;

	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(&dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(&dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(&dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(&dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
	}

	err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
	if (err) {
		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
			     mr->mmr.key, err);
		return err;
	}

	kfree(mr);

	return 0;
}
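/*
 * Allocate an MKey for fast registration work requests.  The MKey starts
 * in the free state and maps no pages until a fast_reg WQE populates it.
 */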
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	in->seg.status = 1 << 6; /* free */
	in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	/* TBD not needed - issue 197292 */
	in->seg.log2_page_size = PAGE_SHIFT;

	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
				    NULL, NULL);
	kfree(in);
	if (err)
		goto err_free;

	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl;
	int size = page_list_len * sizeof(u64);

	mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
	if (!mfrpl)
		return ERR_PTR(-ENOMEM);

	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
	if (!mfrpl->ibfrpl.page_list)
		goto err_free;

	mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
						     size, &mfrpl->map,
						     GFP_KERNEL);
	if (!mfrpl->mapped_page_list)
		goto err_free;

	WARN_ON(mfrpl->map & 0x3f);

	return &mfrpl->ibfrpl;

err_free:
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
	return ERR_PTR(-ENOMEM);
}
void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
	struct mlx5_ib_dev *dev = to_mdev(page_list->device);
	int size = page_list->max_page_list_len * sizeof(u64);

	dma_free_coherent(&dev->mdev.pdev->dev, size, mfrpl->mapped_page_list,
			  mfrpl->map);
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
}
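/*
 * Report signature status for an MR.  A latched signature error is copied
 * out to @mr_status (or synthesized from the stored key when it belongs to
 * a different lkey) and then cleared.
 */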
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}