/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static int clean_mr(struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	return order - cache->ent[0].order;
}
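
/*
 * Illustrative note (not part of the original source): order2idx() maps
 * an allocation order onto a cache-entry index relative to the smallest
 * cached order. For example, assuming the first cache entry holds
 * order-2 MRs (cache->ent[0].order == 2), a request of order 9 maps to
 * index 9 - 2 = 7, while any order below 2 maps to index 0.
 */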
static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}
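
/*
 * Illustrative note (not part of the original source): a cached MR of
 * order N covers (1 << N) pages of MLX5_ADAPTER_PAGE_SIZE (4KB), so the
 * check above permits an in-place UMR translation update only while the
 * new range still fits. E.g. for mr->order == 9 the MR covers
 * 512 * 4KB = 2MB; a rereg of length 2MB - 4KB that starts 4KB into a
 * page (2MB in total) is accepted, whereas anything larger forces the
 * mkey to be replaced.
 */
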
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before we start handling
		 * invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there could be a fault handled and an invalidation
		 * processed before umem->odp_data->private == mr is
		 * visible to the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif
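
/*
 * Illustrative note (not part of the original source): the pairing read
 * side lives in the ODP page-fault and invalidation paths, which look up
 * umem->odp_data->private to reach this MR, so both stores above must be
 * visible before the MR becomes reachable by those handlers.
 */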
110 static void reg_mr_callback(int status, void *context)
112 struct mlx5_ib_mr *mr = context;
113 struct mlx5_ib_dev *dev = mr->dev;
114 struct mlx5_mr_cache *cache = &dev->cache;
115 int c = order2idx(dev, mr->order);
116 struct mlx5_cache_ent *ent = &cache->ent[c];
119 struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
122 spin_lock_irqsave(&ent->lock, flags);
124 spin_unlock_irqrestore(&ent->lock, flags);
126 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
129 mod_timer(&dev->delay_timer, jiffies + HZ);
133 mr->mmkey.type = MLX5_MKEY_MR;
134 spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
135 key = dev->mdev->priv.mkey_key++;
136 spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
137 mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;
139 cache->last_add = jiffies;
141 spin_lock_irqsave(&ent->lock, flags);
142 list_add_tail(&mr->list, &ent->head);
145 spin_unlock_irqrestore(&ent->lock, flags);
147 write_lock_irqsave(&table->lock, flags);
148 err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
151 pr_err("Error inserting to mkey tree. 0x%x\n", -err);
152 write_unlock_irqrestore(&table->lock, flags);
154 if (!completion_done(&ent->compl))
155 complete(&ent->compl);
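
/*
 * Illustrative note (not part of the original source): the firmware
 * returns a 24-bit mkey index in mr->out; mlx5_idx_to_mkey() shifts it
 * into the upper bits of the mkey and the rotating 8-bit value taken
 * from priv.mkey_key fills the low "variant" byte, so a reused index
 * still yields a fresh key. E.g. index 0x000123 combined with key 0x5a
 * would give mkey 0x0001235a (assuming the usual index << 8 layout).
 */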
158 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
160 struct mlx5_mr_cache *cache = &dev->cache;
161 struct mlx5_cache_ent *ent = &cache->ent[c];
162 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
163 struct mlx5_ib_mr *mr;
169 in = kzalloc(inlen, GFP_KERNEL);
173 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
174 for (i = 0; i < num; i++) {
175 if (ent->pending >= MAX_PENDING_REG_MR) {
180 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
185 mr->order = ent->order;
186 mr->allocated_from_cache = 1;
189 MLX5_SET(mkc, mkc, free, 1);
190 MLX5_SET(mkc, mkc, umr_en, 1);
191 MLX5_SET(mkc, mkc, access_mode, ent->access_mode);
193 MLX5_SET(mkc, mkc, qpn, 0xffffff);
194 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
195 MLX5_SET(mkc, mkc, log_page_size, ent->page);
197 spin_lock_irq(&ent->lock);
199 spin_unlock_irq(&ent->lock);
200 err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
202 mr->out, sizeof(mr->out),
203 reg_mr_callback, mr);
205 spin_lock_irq(&ent->lock);
207 spin_unlock_irq(&ent->lock);
208 mlx5_ib_warn(dev, "create mkey failed %d\n", err);
218 static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
220 struct mlx5_mr_cache *cache = &dev->cache;
221 struct mlx5_cache_ent *ent = &cache->ent[c];
222 struct mlx5_ib_mr *mr;
226 for (i = 0; i < num; i++) {
227 spin_lock_irq(&ent->lock);
228 if (list_empty(&ent->head)) {
229 spin_unlock_irq(&ent->lock);
232 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
236 spin_unlock_irq(&ent->lock);
237 err = destroy_mkey(dev, mr);
239 mlx5_ib_warn(dev, "failed destroy mkey\n");
245 static ssize_t size_write(struct file *filp, const char __user *buf,
246 size_t count, loff_t *pos)
248 struct mlx5_cache_ent *ent = filp->private_data;
249 struct mlx5_ib_dev *dev = ent->dev;
255 if (copy_from_user(lbuf, buf, sizeof(lbuf)))
258 c = order2idx(dev, ent->order);
259 lbuf[sizeof(lbuf) - 1] = 0;
261 if (sscanf(lbuf, "%u", &var) != 1)
264 if (var < ent->limit)
267 if (var > ent->size) {
269 err = add_keys(dev, c, var - ent->size);
270 if (err && err != -EAGAIN)
273 usleep_range(3000, 5000);
275 } else if (var < ent->size) {
276 remove_keys(dev, c, ent->size - var);
282 static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
285 struct mlx5_cache_ent *ent = filp->private_data;
292 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
296 if (copy_to_user(buf, lbuf, err))
304 static const struct file_operations size_fops = {
305 .owner = THIS_MODULE,
311 static ssize_t limit_write(struct file *filp, const char __user *buf,
312 size_t count, loff_t *pos)
314 struct mlx5_cache_ent *ent = filp->private_data;
315 struct mlx5_ib_dev *dev = ent->dev;
321 if (copy_from_user(lbuf, buf, sizeof(lbuf)))
324 c = order2idx(dev, ent->order);
325 lbuf[sizeof(lbuf) - 1] = 0;
327 if (sscanf(lbuf, "%u", &var) != 1)
335 if (ent->cur < ent->limit) {
336 err = add_keys(dev, c, 2 * ent->limit - ent->cur);
344 static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
347 struct mlx5_cache_ent *ent = filp->private_data;
354 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
358 if (copy_to_user(buf, lbuf, err))
366 static const struct file_operations limit_fops = {
367 .owner = THIS_MODULE,
369 .write = limit_write,
373 static int someone_adding(struct mlx5_mr_cache *cache)
377 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
378 if (cache->ent[i].cur < cache->ent[i].limit)
385 static void __cache_work_func(struct mlx5_cache_ent *ent)
387 struct mlx5_ib_dev *dev = ent->dev;
388 struct mlx5_mr_cache *cache = &dev->cache;
389 int i = order2idx(dev, ent->order);
395 ent = &dev->cache.ent[i];
396 if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
397 err = add_keys(dev, i, 1);
398 if (ent->cur < 2 * ent->limit) {
399 if (err == -EAGAIN) {
400 mlx5_ib_dbg(dev, "returned eagain, order %d\n",
402 queue_delayed_work(cache->wq, &ent->dwork,
403 msecs_to_jiffies(3));
405 mlx5_ib_warn(dev, "command failed order %d, err %d\n",
407 queue_delayed_work(cache->wq, &ent->dwork,
408 msecs_to_jiffies(1000));
410 queue_work(cache->wq, &ent->work);
413 } else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task.  Such a task is intended to run only
		 * when no other active processes are running.
		 *
		 * need_resched() returns true when there are user tasks
		 * to be activated in the near future.
		 *
		 * In that case we do not execute remove_keys() and we
		 * postpone the garbage collection work, so that it runs
		 * in the next cycle and frees CPU resources for other
		 * tasks.
		 */
426 if (!need_resched() && !someone_adding(cache) &&
427 time_after(jiffies, cache->last_add + 300 * HZ)) {
428 remove_keys(dev, i, 1);
429 if (ent->cur > ent->limit)
430 queue_work(cache->wq, &ent->work);
432 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
437 static void delayed_cache_work_func(struct work_struct *work)
439 struct mlx5_cache_ent *ent;
441 ent = container_of(work, struct mlx5_cache_ent, dwork.work);
442 __cache_work_func(ent);
445 static void cache_work_func(struct work_struct *work)
447 struct mlx5_cache_ent *ent;
449 ent = container_of(work, struct mlx5_cache_ent, work);
450 __cache_work_func(ent);
453 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
455 struct mlx5_mr_cache *cache = &dev->cache;
456 struct mlx5_cache_ent *ent;
457 struct mlx5_ib_mr *mr;
460 if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
461 mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
465 ent = &cache->ent[entry];
467 spin_lock_irq(&ent->lock);
468 if (list_empty(&ent->head)) {
469 spin_unlock_irq(&ent->lock);
471 err = add_keys(dev, entry, 1);
472 if (err && err != -EAGAIN)
475 wait_for_completion(&ent->compl);
477 mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
481 spin_unlock_irq(&ent->lock);
482 if (ent->cur < ent->limit)
483 queue_work(cache->wq, &ent->work);
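
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * that know which cache bucket they need can pull an MR straight from
 * that entry and later hand it back, e.g.:
 *
 *	struct mlx5_ib_mr *mr = mlx5_mr_cache_alloc(dev, entry);
 *
 *	if (!IS_ERR(mr)) {
 *		... use the MR ...
 *		mlx5_mr_cache_free(dev, mr);
 *	}
 */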
489 static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
491 struct mlx5_mr_cache *cache = &dev->cache;
492 struct mlx5_ib_mr *mr = NULL;
493 struct mlx5_cache_ent *ent;
494 int last_umr_cache_entry;
498 c = order2idx(dev, order);
499 last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
500 if (c < 0 || c > last_umr_cache_entry) {
501 mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
505 for (i = c; i <= last_umr_cache_entry; i++) {
506 ent = &cache->ent[i];
508 mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
510 spin_lock_irq(&ent->lock);
511 if (!list_empty(&ent->head)) {
512 mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
516 spin_unlock_irq(&ent->lock);
517 if (ent->cur < ent->limit)
518 queue_work(cache->wq, &ent->work);
521 spin_unlock_irq(&ent->lock);
523 queue_work(cache->wq, &ent->work);
527 cache->ent[c].miss++;
532 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
534 struct mlx5_mr_cache *cache = &dev->cache;
535 struct mlx5_cache_ent *ent;
539 c = order2idx(dev, mr->order);
540 if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
541 mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
545 if (unreg_umr(dev, mr))
548 ent = &cache->ent[c];
549 spin_lock_irq(&ent->lock);
550 list_add_tail(&mr->list, &ent->head);
552 if (ent->cur > 2 * ent->limit)
554 spin_unlock_irq(&ent->lock);
557 queue_work(cache->wq, &ent->work);
560 static void clean_keys(struct mlx5_ib_dev *dev, int c)
562 struct mlx5_mr_cache *cache = &dev->cache;
563 struct mlx5_cache_ent *ent = &cache->ent[c];
564 struct mlx5_ib_mr *mr;
567 cancel_delayed_work(&ent->dwork);
569 spin_lock_irq(&ent->lock);
570 if (list_empty(&ent->head)) {
571 spin_unlock_irq(&ent->lock);
574 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
578 spin_unlock_irq(&ent->lock);
579 err = destroy_mkey(dev, mr);
581 mlx5_ib_warn(dev, "failed destroy mkey\n");
587 static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
589 if (!mlx5_debugfs_root)
592 debugfs_remove_recursive(dev->cache.root);
593 dev->cache.root = NULL;
596 static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
598 struct mlx5_mr_cache *cache = &dev->cache;
599 struct mlx5_cache_ent *ent;
602 if (!mlx5_debugfs_root)
605 cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
609 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
610 ent = &cache->ent[i];
611 sprintf(ent->name, "%d", ent->order);
612 ent->dir = debugfs_create_dir(ent->name, cache->root);
616 ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
621 ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
626 ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
631 ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
639 mlx5_mr_cache_debugfs_cleanup(dev);
644 static void delay_time_func(unsigned long ctx)
646 struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
651 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
653 struct mlx5_mr_cache *cache = &dev->cache;
654 struct mlx5_cache_ent *ent;
658 mutex_init(&dev->slow_path_mutex);
659 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
661 mlx5_ib_warn(dev, "failed to create work queue\n");
665 setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
666 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
667 ent = &cache->ent[i];
668 INIT_LIST_HEAD(&ent->head);
669 spin_lock_init(&ent->lock);
674 init_completion(&ent->compl);
675 INIT_WORK(&ent->work, cache_work_func);
676 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
677 queue_work(cache->wq, &ent->work);
679 if (i > MR_CACHE_LAST_STD_ENTRY) {
680 mlx5_odp_init_mr_cache_entry(ent);
684 if (ent->order > mr_cache_max_order(dev))
687 ent->page = PAGE_SHIFT;
688 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
689 MLX5_IB_UMR_OCTOWORD;
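		/*
		 * Illustrative example (not part of the original source):
		 * for ent->order == 9 this works out to 512 MTTs of
		 * 8 bytes each, i.e. 4096 / MLX5_IB_UMR_OCTOWORD (16)
		 * = 256 octowords of translation entries.
		 */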
690 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
691 if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
692 mlx5_core_is_pf(dev->mdev))
693 ent->limit = dev->mdev->profile->mr_cache[i].limit;
698 err = mlx5_mr_cache_debugfs_init(dev);
700 mlx5_ib_warn(dev, "cache debugfs failure\n");
	/*
	 * We don't want to fail the driver if debugfs failed to
	 * initialize, so we do not forward the error to the user.
	 */
710 static void wait_for_async_commands(struct mlx5_ib_dev *dev)
712 struct mlx5_mr_cache *cache = &dev->cache;
713 struct mlx5_cache_ent *ent;
718 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
719 ent = &cache->ent[i];
720 for (j = 0 ; j < 1000; j++) {
726 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
727 ent = &cache->ent[i];
728 total += ent->pending;
732 mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
734 mlx5_ib_warn(dev, "done with all pending requests\n");
737 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
741 dev->cache.stopped = 1;
742 flush_workqueue(dev->cache.wq);
744 mlx5_mr_cache_debugfs_cleanup(dev);
746 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
749 destroy_workqueue(dev->cache.wq);
750 wait_for_async_commands(dev);
751 del_timer_sync(&dev->delay_timer);
756 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
758 struct mlx5_ib_dev *dev = to_mdev(pd->device);
759 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
760 struct mlx5_core_dev *mdev = dev->mdev;
761 struct mlx5_ib_mr *mr;
766 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
768 return ERR_PTR(-ENOMEM);
770 in = kzalloc(inlen, GFP_KERNEL);
776 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
778 MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
779 MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
780 MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
781 MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
782 MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
783 MLX5_SET(mkc, mkc, lr, 1);
785 MLX5_SET(mkc, mkc, length64, 1);
786 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
787 MLX5_SET(mkc, mkc, qpn, 0xffffff);
788 MLX5_SET64(mkc, mkc, start_addr, 0);
790 err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
795 mr->mmkey.type = MLX5_MKEY_MR;
796 mr->ibmr.lkey = mr->mmkey.key;
797 mr->ibmr.rkey = mr->mmkey.key;
811 static int get_octo_len(u64 addr, u64 len, int page_size)
816 offset = addr & (page_size - 1);
817 npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
818 return (npages + 1) / 2;
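
/*
 * Illustrative example (not part of the original source): each MTT entry
 * is 8 bytes and one octoword holds two of them, hence (npages + 1) / 2.
 * For addr = 0x1800, len = 0x5000 and page_size = 0x1000 the offset is
 * 0x800, ALIGN(0x5800, 0x1000) >> 12 = 6 pages, and those pages need
 * (6 + 1) / 2 = 3 octowords of translation.
 */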
821 static int mr_cache_max_order(struct mlx5_ib_dev *dev)
823 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
824 return MR_CACHE_LAST_STD_ENTRY + 2;
825 return MLX5_MAX_UMR_SHIFT;
828 static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
829 int access_flags, struct ib_umem **umem,
830 int *npages, int *page_shift, int *ncont,
833 struct mlx5_ib_dev *dev = to_mdev(pd->device);
836 *umem = ib_umem_get(pd->uobject->context, start, length,
838 err = PTR_ERR_OR_ZERO(*umem);
840 mlx5_ib_err(dev, "umem get failed (%d)\n", err);
844 mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
845 page_shift, ncont, order);
847 mlx5_ib_warn(dev, "avoid zero region\n");
848 ib_umem_release(*umem);
852 mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
853 *npages, *ncont, *order, *page_shift);
858 static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
860 struct mlx5_ib_umr_context *context =
861 container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
863 context->status = wc->status;
864 complete(&context->done);
867 static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
869 context->cqe.done = mlx5_ib_umr_done;
870 context->status = -1;
871 init_completion(&context->done);
874 static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
875 struct mlx5_umr_wr *umrwr)
877 struct umr_common *umrc = &dev->umrc;
878 struct ib_send_wr *bad;
880 struct mlx5_ib_umr_context umr_context;
882 mlx5_ib_init_umr_context(&umr_context);
883 umrwr->wr.wr_cqe = &umr_context.cqe;
886 err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
888 mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
890 wait_for_completion(&umr_context.done);
891 if (umr_context.status != IB_WC_SUCCESS) {
892 mlx5_ib_warn(dev, "reg umr failed (%u)\n",
901 static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
902 u64 virt_addr, u64 len, int npages,
903 int page_shift, int order, int access_flags)
905 struct mlx5_ib_dev *dev = to_mdev(pd->device);
906 struct mlx5_ib_mr *mr;
910 for (i = 0; i < 1; i++) {
911 mr = alloc_cached_mr(dev, order);
915 err = add_keys(dev, order2idx(dev, order), 1);
916 if (err && err != -EAGAIN) {
917 mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
923 return ERR_PTR(-EAGAIN);
927 mr->access_flags = access_flags;
928 mr->desc_size = sizeof(struct mlx5_mtt);
929 mr->mmkey.iova = virt_addr;
930 mr->mmkey.size = len;
931 mr->mmkey.pd = to_mpd(pd)->pdn;
933 err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
934 MLX5_IB_UPD_XLT_ENABLE);
937 mlx5_mr_cache_free(dev, mr);
946 static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
947 void *xlt, int page_shift, size_t size,
950 struct mlx5_ib_dev *dev = mr->dev;
951 struct ib_umem *umem = mr->umem;
952 if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
953 mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
957 npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
959 if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
960 __mlx5_ib_populate_pas(dev, umem, page_shift,
962 MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
973 #define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
974 MLX5_UMR_MTT_ALIGNMENT)
975 #define MLX5_SPARE_UMR_CHUNK 0x10000
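
/*
 * Illustrative note (not part of the original source): with
 * MLX5_MAX_UMR_SHIFT assumed to be 16, MLX5_MAX_UMR_CHUNK is roughly 1MB
 * ((1 << 20) minus one MLX5_UMR_MTT_ALIGNMENT unit), i.e. on the order
 * of 128K 8-byte MTT entries per UMR post, while MLX5_SPARE_UMR_CHUNK
 * (64KB) is the smaller fallback buffer used when that allocation fails.
 */
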
977 int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
978 int page_shift, int flags)
980 struct mlx5_ib_dev *dev = mr->dev;
981 struct device *ddev = dev->ib_dev.dev.parent;
982 struct mlx5_ib_ucontext *uctx = NULL;
986 struct mlx5_umr_wr wr;
989 int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
990 ? sizeof(struct mlx5_klm)
991 : sizeof(struct mlx5_mtt);
992 const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
993 const int page_mask = page_align - 1;
994 size_t pages_mapped = 0;
995 size_t pages_to_map = 0;
996 size_t pages_iter = 0;
	/*
	 * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly.
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}
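	/*
	 * Illustrative example (not part of the original source): the XLT
	 * is written in MLX5_UMR_MTT_ALIGNMENT (64-byte) units, i.e. 8 MTT
	 * or 4 KLM descriptors at a time, so an update starting at MTT
	 * index 11 is widened to start at index 8 and covers three extra
	 * leading entries.
	 */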
1007 gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
1008 gfp |= __GFP_ZERO | __GFP_NOWARN;
1010 pages_to_map = ALIGN(npages, page_align);
1011 size = desc_size * pages_to_map;
1012 size = min_t(int, size, MLX5_MAX_UMR_CHUNK);
1014 xlt = (void *)__get_free_pages(gfp, get_order(size));
1015 if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);
1019 size = MLX5_SPARE_UMR_CHUNK;
1020 xlt = (void *)__get_free_pages(gfp, get_order(size));
1024 uctx = to_mucontext(mr->ibmr.pd->uobject->context);
1025 mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
1027 xlt = (void *)uctx->upd_xlt_page;
1028 mutex_lock(&uctx->upd_xlt_page_mutex);
1029 memset(xlt, 0, size);
1031 pages_iter = size / desc_size;
1032 dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
1033 if (dma_mapping_error(ddev, dma)) {
1034 mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
1040 sg.lkey = dev->umrc.pd->local_dma_lkey;
1042 memset(&wr, 0, sizeof(wr));
1043 wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
1044 if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
1045 wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1046 wr.wr.sg_list = &sg;
1048 wr.wr.opcode = MLX5_IB_WR_UMR;
1050 wr.pd = mr->ibmr.pd;
1051 wr.mkey = mr->mmkey.key;
1052 wr.length = mr->mmkey.size;
1053 wr.virt_addr = mr->mmkey.iova;
1054 wr.access_flags = mr->access_flags;
1055 wr.page_shift = page_shift;
1057 for (pages_mapped = 0;
1058 pages_mapped < pages_to_map && !err;
1059 pages_mapped += pages_iter, idx += pages_iter) {
1060 npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
1061 dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
1062 npages = populate_xlt(mr, idx, npages, xlt,
1063 page_shift, size, flags);
1065 dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
1067 sg.length = ALIGN(npages * desc_size,
1068 MLX5_UMR_MTT_ALIGNMENT);
1070 if (pages_mapped + pages_iter >= pages_to_map) {
1071 if (flags & MLX5_IB_UPD_XLT_ENABLE)
1073 MLX5_IB_SEND_UMR_ENABLE_MR |
1074 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
1075 MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
1076 if (flags & MLX5_IB_UPD_XLT_PD ||
1077 flags & MLX5_IB_UPD_XLT_ACCESS)
1079 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1080 if (flags & MLX5_IB_UPD_XLT_ADDR)
1082 MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
1085 wr.offset = idx * desc_size;
1086 wr.xlt_size = sg.length;
1088 err = mlx5_ib_post_send_wait(dev, &wr);
1090 dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
1094 mutex_unlock(&uctx->upd_xlt_page_mutex);
1096 free_pages((unsigned long)xlt, get_order(size));
/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
1105 static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
1106 u64 virt_addr, u64 length,
1107 struct ib_umem *umem, int npages,
1108 int page_shift, int access_flags)
1110 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1111 struct mlx5_ib_mr *mr;
1117 bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
1119 mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
1121 return ERR_PTR(-ENOMEM);
1123 inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
1124 sizeof(*pas) * ((npages + 1) / 2) * 2;
1125 in = kvzalloc(inlen, GFP_KERNEL);
1130 pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
1131 if (!(access_flags & IB_ACCESS_ON_DEMAND))
1132 mlx5_ib_populate_pas(dev, umem, page_shift, pas,
1133 pg_cap ? MLX5_IB_MTT_PRESENT : 0);
1135 /* The pg_access bit allows setting the access flags
1136 * in the page list submitted with the command. */
1137 MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
1139 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1140 MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
1141 MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
1142 MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
1143 MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
1144 MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
1145 MLX5_SET(mkc, mkc, lr, 1);
1146 MLX5_SET(mkc, mkc, umr_en, 1);
1148 MLX5_SET64(mkc, mkc, start_addr, virt_addr);
1149 MLX5_SET64(mkc, mkc, len, length);
1150 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1151 MLX5_SET(mkc, mkc, bsf_octword_size, 0);
1152 MLX5_SET(mkc, mkc, translations_octword_size,
1153 get_octo_len(virt_addr, length, 1 << page_shift));
1154 MLX5_SET(mkc, mkc, log_page_size, page_shift);
1155 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1156 MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
1157 get_octo_len(virt_addr, length, 1 << page_shift));
1159 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
1161 mlx5_ib_warn(dev, "create mkey failed\n");
1164 mr->mmkey.type = MLX5_MKEY_MR;
1165 mr->desc_size = sizeof(struct mlx5_mtt);
1171 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
1182 return ERR_PTR(err);
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
1188 mr->npages = npages;
1189 atomic_add(npages, &dev->mdev->priv.reg_pages);
1190 mr->ibmr.lkey = mr->mmkey.key;
1191 mr->ibmr.rkey = mr->mmkey.key;
1192 mr->ibmr.length = length;
1193 mr->access_flags = access_flags;
1196 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1197 u64 virt_addr, int access_flags,
1198 struct ib_udata *udata)
1200 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1201 struct mlx5_ib_mr *mr = NULL;
1202 struct ib_umem *umem;
1209 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1210 start, virt_addr, length, access_flags);
1212 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1213 if (!start && length == U64_MAX) {
1214 if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
1215 !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1216 return ERR_PTR(-EINVAL);
1218 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
1223 err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
1224 &page_shift, &ncont, &order);
1227 return ERR_PTR(err);
1229 if (order <= mr_cache_max_order(dev)) {
1230 mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
1231 order, access_flags);
1232 if (PTR_ERR(mr) == -EAGAIN) {
1233 mlx5_ib_dbg(dev, "cache empty for order %d", order);
1236 } else if (access_flags & IB_ACCESS_ON_DEMAND &&
1237 !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
1244 mutex_lock(&dev->slow_path_mutex);
1245 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
1246 page_shift, access_flags);
1247 mutex_unlock(&dev->slow_path_mutex);
1255 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
	set_mr_fields(dev, mr, npages, length, access_flags);
1260 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1267 ib_umem_release(umem);
1268 return ERR_PTR(err);
1271 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1273 struct mlx5_core_dev *mdev = dev->mdev;
1274 struct mlx5_umr_wr umrwr = {};
1276 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1279 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
1280 MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1281 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1282 umrwr.mkey = mr->mmkey.key;
1284 return mlx5_ib_post_send_wait(dev, &umrwr);
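
/*
 * Illustrative note (not part of the original source): the UMR work
 * request posted above does not destroy the mkey.  DISABLE_MR moves it
 * back to the "free" state (and FAIL_IF_FREE makes the post fail if it
 * already is free), so the mkey survives and the MR can later be put
 * back into the cache by mlx5_mr_cache_free().
 */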
1287 static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1288 int access_flags, int flags)
1290 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1291 struct mlx5_umr_wr umrwr = {};
1294 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1296 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1297 umrwr.mkey = mr->mmkey.key;
1299 if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
1301 umrwr.access_flags = access_flags;
1302 umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1305 err = mlx5_ib_post_send_wait(dev, &umrwr);
1310 int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1311 u64 length, u64 virt_addr, int new_access_flags,
1312 struct ib_pd *new_pd, struct ib_udata *udata)
1314 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
1315 struct mlx5_ib_mr *mr = to_mmr(ib_mr);
1316 struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
1317 int access_flags = flags & IB_MR_REREG_ACCESS ?
1320 u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
1321 u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
1329 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1330 start, virt_addr, length, access_flags);
1332 atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
1340 ib_umem_release(mr->umem);
1341 err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
1342 &npages, &page_shift, &ncont, &order);
	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
1353 if (mr->allocated_from_cache) {
1354 err = unreg_umr(dev, mr);
1356 mlx5_ib_warn(dev, "Failed to unregister MR\n");
1358 err = destroy_mkey(dev, mr);
1360 mlx5_ib_warn(dev, "Failed to destroy MKey\n");
1365 mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
1366 page_shift, access_flags);
1371 mr->allocated_from_cache = 0;
1377 mr->access_flags = access_flags;
1378 mr->mmkey.iova = addr;
1379 mr->mmkey.size = len;
1380 mr->mmkey.pd = to_mpd(pd)->pdn;
1382 if (flags & IB_MR_REREG_TRANS) {
1383 upd_flags = MLX5_IB_UPD_XLT_ADDR;
1384 if (flags & IB_MR_REREG_PD)
1385 upd_flags |= MLX5_IB_UPD_XLT_PD;
1386 if (flags & IB_MR_REREG_ACCESS)
1387 upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
1388 err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
1391 err = rereg_umr(pd, mr, access_flags, flags);
1395 mlx5_ib_warn(dev, "Failed to rereg UMR\n");
1396 ib_umem_release(mr->umem);
	set_mr_fields(dev, mr, npages, len, access_flags);
1404 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1411 mlx5_alloc_priv_descs(struct ib_device *device,
1412 struct mlx5_ib_mr *mr,
1416 int size = ndescs * desc_size;
1420 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1422 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1423 if (!mr->descs_alloc)
1426 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1428 mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
1429 size, DMA_TO_DEVICE);
1430 if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
1437 kfree(mr->descs_alloc);
1443 mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1446 struct ib_device *device = mr->ibmr.device;
1447 int size = mr->max_descs * mr->desc_size;
1449 dma_unmap_single(device->dev.parent, mr->desc_map,
1450 size, DMA_TO_DEVICE);
1451 kfree(mr->descs_alloc);
1456 static int clean_mr(struct mlx5_ib_mr *mr)
1458 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1459 int allocated_from_cache = mr->allocated_from_cache;
1463 if (mlx5_core_destroy_psv(dev->mdev,
1464 mr->sig->psv_memory.psv_idx))
1465 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1466 mr->sig->psv_memory.psv_idx);
1467 if (mlx5_core_destroy_psv(dev->mdev,
1468 mr->sig->psv_wire.psv_idx))
1469 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1470 mr->sig->psv_wire.psv_idx);
1475 mlx5_free_priv_descs(mr);
1477 if (!allocated_from_cache) {
1478 err = destroy_mkey(dev, mr);
1480 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
1481 mr->mmkey.key, err);
1485 mlx5_mr_cache_free(dev, mr);
1488 if (!allocated_from_cache)
1494 int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1496 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1497 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1498 int npages = mr->npages;
1499 struct ib_umem *umem = mr->umem;
1501 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1502 if (umem && umem->odp_data) {
1503 /* Prevent new page faults from succeeding */
1505 /* Wait for all running page-fault handlers to finish. */
1506 synchronize_srcu(&dev->mr_srcu);
1507 /* Destroy all page mappings */
1508 if (umem->odp_data->page_list)
1509 mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
1512 mlx5_ib_free_implicit_mr(mr);
		/*
		 * We kill the umem before the MR for ODP, so that
		 * there will not be any invalidations in flight that
		 * still look at the *mr struct.
		 */
1518 ib_umem_release(umem);
1519 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1521 /* Avoid double-freeing the umem. */
1529 ib_umem_release(umem);
1530 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1536 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
1537 enum ib_mr_type mr_type,
1540 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1541 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1542 int ndescs = ALIGN(max_num_sg, 4);
1543 struct mlx5_ib_mr *mr;
1548 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1550 return ERR_PTR(-ENOMEM);
1552 in = kzalloc(inlen, GFP_KERNEL);
1558 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1559 MLX5_SET(mkc, mkc, free, 1);
1560 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1561 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1562 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1564 if (mr_type == IB_MR_TYPE_MEM_REG) {
1565 mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
1566 MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
1567 err = mlx5_alloc_priv_descs(pd->device, mr,
1568 ndescs, sizeof(struct mlx5_mtt));
1572 mr->desc_size = sizeof(struct mlx5_mtt);
1573 mr->max_descs = ndescs;
1574 } else if (mr_type == IB_MR_TYPE_SG_GAPS) {
1575 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
1577 err = mlx5_alloc_priv_descs(pd->device, mr,
1578 ndescs, sizeof(struct mlx5_klm));
1581 mr->desc_size = sizeof(struct mlx5_klm);
1582 mr->max_descs = ndescs;
1583 } else if (mr_type == IB_MR_TYPE_SIGNATURE) {
1586 MLX5_SET(mkc, mkc, bsf_en, 1);
1587 MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
1588 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
1594 /* create mem & wire PSVs */
1595 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
1600 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
1601 mr->sig->psv_memory.psv_idx = psv_index[0];
1602 mr->sig->psv_wire.psv_idx = psv_index[1];
1604 mr->sig->sig_status_checked = true;
1605 mr->sig->sig_err_exists = false;
1606 /* Next UMR, Arm SIGERR */
1607 ++mr->sig->sigerr_count;
1609 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
1614 MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
1615 MLX5_SET(mkc, mkc, umr_en, 1);
1617 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
1619 goto err_destroy_psv;
1621 mr->mmkey.type = MLX5_MKEY_MR;
1622 mr->ibmr.lkey = mr->mmkey.key;
1623 mr->ibmr.rkey = mr->mmkey.key;
1631 if (mlx5_core_destroy_psv(dev->mdev,
1632 mr->sig->psv_memory.psv_idx))
1633 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1634 mr->sig->psv_memory.psv_idx);
1635 if (mlx5_core_destroy_psv(dev->mdev,
1636 mr->sig->psv_wire.psv_idx))
1637 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1638 mr->sig->psv_wire.psv_idx);
1640 mlx5_free_priv_descs(mr);
1647 return ERR_PTR(err);
1650 struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1651 struct ib_udata *udata)
1653 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1654 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1655 struct mlx5_ib_mw *mw = NULL;
1660 struct mlx5_ib_alloc_mw req = {};
1663 __u32 response_length;
1666 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1668 return ERR_PTR(err);
1670 if (req.comp_mask || req.reserved1 || req.reserved2)
1671 return ERR_PTR(-EOPNOTSUPP);
1673 if (udata->inlen > sizeof(req) &&
1674 !ib_is_udata_cleared(udata, sizeof(req),
1675 udata->inlen - sizeof(req)))
1676 return ERR_PTR(-EOPNOTSUPP);
1678 ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
1680 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
1681 in = kzalloc(inlen, GFP_KERNEL);
1687 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1689 MLX5_SET(mkc, mkc, free, 1);
1690 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1691 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1692 MLX5_SET(mkc, mkc, umr_en, 1);
1693 MLX5_SET(mkc, mkc, lr, 1);
1694 MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
1695 MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
1696 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1698 err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
1702 mw->mmkey.type = MLX5_MKEY_MW;
1703 mw->ibmw.rkey = mw->mmkey.key;
1704 mw->ndescs = ndescs;
1706 resp.response_length = min(offsetof(typeof(resp), response_length) +
1707 sizeof(resp.response_length), udata->outlen);
1708 if (resp.response_length) {
1709 err = ib_copy_to_udata(udata, &resp, resp.response_length);
1711 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
1722 return ERR_PTR(err);
1725 int mlx5_ib_dealloc_mw(struct ib_mw *mw)
1727 struct mlx5_ib_mw *mmw = to_mmw(mw);
1730 err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
1737 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
1738 struct ib_mr_status *mr_status)
1740 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1743 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
1744 pr_err("Invalid status check mask\n");
1749 mr_status->fail_status = 0;
1750 if (check_mask & IB_MR_CHECK_SIG_STATUS) {
1753 pr_err("signature status check requested on a non-signature enabled MR\n");
1757 mmr->sig->sig_status_checked = true;
1758 if (!mmr->sig->sig_err_exists)
1761 if (ibmr->lkey == mmr->sig->err_item.key)
1762 memcpy(&mr_status->sig_err, &mmr->sig->err_item,
1763 sizeof(mr_status->sig_err));
1765 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
1766 mr_status->sig_err.sig_err_offset = 0;
1767 mr_status->sig_err.key = mmr->sig->err_item.key;
1770 mmr->sig->sig_err_exists = false;
1771 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
1779 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1780 struct scatterlist *sgl,
1781 unsigned short sg_nents,
1782 unsigned int *sg_offset_p)
1784 struct scatterlist *sg = sgl;
1785 struct mlx5_klm *klms = mr->descs;
1786 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1787 u32 lkey = mr->ibmr.pd->local_dma_lkey;
1790 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
1791 mr->ibmr.length = 0;
1792 mr->ndescs = sg_nents;
1794 for_each_sg(sgl, sg, sg_nents, i) {
1795 if (unlikely(i >= mr->max_descs))
1797 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
1798 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
1799 klms[i].key = cpu_to_be32(lkey);
1800 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
1806 *sg_offset_p = sg_offset;
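
/*
 * Illustrative note (not part of the original source): unlike MTT
 * entries, which each map one fixed-size page, every KLM built above
 * carries an explicit byte count, address and lkey per scatterlist
 * element, which is why KLM-based MRs (e.g. IB_MR_TYPE_SG_GAPS with
 * MLX5_MKC_ACCESS_MODE_KLMS) can describe gappy, arbitrarily sized SG
 * lists without page alignment.
 */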
1811 static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
1813 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1816 if (unlikely(mr->ndescs == mr->max_descs))
1820 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
1825 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
1826 unsigned int *sg_offset)
1828 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1833 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
1834 mr->desc_size * mr->max_descs,
1837 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
1838 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
1840 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
1843 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
1844 mr->desc_size * mr->max_descs,