2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/kref.h>
35 #include <linux/random.h>
36 #include <linux/debugfs.h>
37 #include <linux/export.h>
38 #include <linux/delay.h>
39 #include <rdma/ib_umem.h>
40 #include <rdma/ib_umem_odp.h>
41 #include <rdma/ib_verbs.h>
45 MAX_PENDING_REG_MR = 8,
48 #define MLX5_UMR_ALIGN 2048
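/*
 * Descriptor buffers handed to UMR (see mlx5_alloc_priv_descs() further
 * below) are over-allocated and aligned to this boundary before being
 * DMA-mapped.
 */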
51 create_mkey_callback(int status, struct mlx5_async_work *context);
53 static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
56 struct mlx5_ib_dev *dev = to_mdev(pd->device);
58 MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
59 MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
60 MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
61 MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
62 MLX5_SET(mkc, mkc, lr, 1);
64 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
65 MLX5_SET(mkc, mkc, relaxed_ordering_write,
66 !!(acc & IB_ACCESS_RELAXED_ORDERING));
67 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
68 MLX5_SET(mkc, mkc, relaxed_ordering_read,
69 !!(acc & IB_ACCESS_RELAXED_ORDERING));
71 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
72 MLX5_SET(mkc, mkc, qpn, 0xffffff);
73 MLX5_SET64(mkc, mkc, start_addr, start_addr);
77 assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
80 u8 key = atomic_inc_return(&dev->mkey_var);
83 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
84 MLX5_SET(mkc, mkc, mkey_7_0, key);
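/*
 * The variant byte set above comes from a per-device counter, so an mkey
 * index that firmware recycles is handed back with a different full key
 * (index combined with this low byte). That helps distinguish a fresh
 * mkey from a stale one that used the same index.
 */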
89 mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
92 assign_mkey_variant(dev, mkey, in);
93 return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen);
97 mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
98 struct mlx5_core_mkey *mkey,
99 struct mlx5_async_ctx *async_ctx,
100 u32 *in, int inlen, u32 *out, int outlen,
101 struct mlx5_async_work *context)
103 MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
104 assign_mkey_variant(dev, mkey, in);
105 return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
106 create_mkey_callback, context);
109 static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
110 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
111 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
112 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
114 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
116 return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
119 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
121 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
123 return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
126 static inline bool mlx5_ib_pas_fits_in_mr(struct mlx5_ib_mr *mr, u64 start,
129 return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
130 length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
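/*
 * A cached MR of order n has room for (1 << n) translation entries of
 * MLX5_ADAPTER_PAGE_SIZE each; the check above also accounts for the
 * offset of @start within its first page. For example, assuming 4 KiB
 * adapter pages, an order-8 MR covers 1 MiB, so a 1 MiB request that
 * starts 512 bytes into a page (1 MiB + 512 bytes in total) does not fit.
 */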
133 static void create_mkey_callback(int status, struct mlx5_async_work *context)
135 struct mlx5_ib_mr *mr =
136 container_of(context, struct mlx5_ib_mr, cb_work);
137 struct mlx5_ib_dev *dev = mr->dev;
138 struct mlx5_cache_ent *ent = mr->cache_ent;
142 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
144 spin_lock_irqsave(&ent->lock, flags);
146 WRITE_ONCE(dev->fill_delay, 1);
147 spin_unlock_irqrestore(&ent->lock, flags);
148 mod_timer(&dev->delay_timer, jiffies + HZ);
152 mr->mmkey.type = MLX5_MKEY_MR;
153 mr->mmkey.key |= mlx5_idx_to_mkey(
154 MLX5_GET(create_mkey_out, mr->out, mkey_index));
156 WRITE_ONCE(dev->cache.last_add, jiffies);
158 spin_lock_irqsave(&ent->lock, flags);
159 list_add_tail(&mr->list, &ent->head);
160 ent->available_mrs++;
162 /* If we are doing fill_to_high_water then keep going. */
163 queue_adjust_cache_locked(ent);
165 spin_unlock_irqrestore(&ent->lock, flags);
168 static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
170 struct mlx5_ib_mr *mr;
172 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
175 mr->order = ent->order;
179 set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
180 MLX5_SET(mkc, mkc, free, 1);
181 MLX5_SET(mkc, mkc, umr_en, 1);
182 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
183 MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
185 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
186 MLX5_SET(mkc, mkc, log_page_size, ent->page);
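/*
 * Cache mkeys are created in the "free" state with umr_en set: they carry
 * no user mapping yet, and a later UMR WQE can attach a PD, IOVA and page
 * list to one of them without issuing another CREATE_MKEY command.
 */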
190 /* Asynchronously schedule new MRs to be populated in the cache. */
191 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
193 size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
194 struct mlx5_ib_mr *mr;
200 in = kzalloc(inlen, GFP_KERNEL);
204 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
205 for (i = 0; i < num; i++) {
206 mr = alloc_cache_mr(ent, mkc);
211 spin_lock_irq(&ent->lock);
212 if (ent->pending >= MAX_PENDING_REG_MR) {
214 spin_unlock_irq(&ent->lock);
219 spin_unlock_irq(&ent->lock);
220 err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
221 &ent->dev->async_ctx, in, inlen,
222 mr->out, sizeof(mr->out),
225 spin_lock_irq(&ent->lock);
227 spin_unlock_irq(&ent->lock);
228 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
238 /* Synchronously create an MR in the cache */
239 static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
241 size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
242 struct mlx5_ib_mr *mr;
247 in = kzalloc(inlen, GFP_KERNEL);
249 return ERR_PTR(-ENOMEM);
250 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
252 mr = alloc_cache_mr(ent, mkc);
258 err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
262 mr->mmkey.type = MLX5_MKEY_MR;
263 WRITE_ONCE(ent->dev->cache.last_add, jiffies);
264 spin_lock_irq(&ent->lock);
266 spin_unlock_irq(&ent->lock);
276 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
278 struct mlx5_ib_mr *mr;
280 lockdep_assert_held(&ent->lock);
281 if (list_empty(&ent->head))
283 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
285 ent->available_mrs--;
287 spin_unlock_irq(&ent->lock);
288 mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
290 spin_lock_irq(&ent->lock);
293 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
298 lockdep_assert_held(&ent->lock);
302 target = ent->limit * 2;
303 if (target == ent->available_mrs + ent->pending)
305 if (target > ent->available_mrs + ent->pending) {
306 u32 todo = target - (ent->available_mrs + ent->pending);
308 spin_unlock_irq(&ent->lock);
309 err = add_keys(ent, todo);
311 usleep_range(3000, 5000);
312 spin_lock_irq(&ent->lock);
319 remove_cache_mr_locked(ent);
324 static ssize_t size_write(struct file *filp, const char __user *buf,
325 size_t count, loff_t *pos)
327 struct mlx5_cache_ent *ent = filp->private_data;
331 err = kstrtou32_from_user(buf, count, 0, &target);
336 * Target is the new value of total_mrs the user requests; however, we
337 * cannot free MRs that are in use. Compute the target value for available_mrs.
340 spin_lock_irq(&ent->lock);
341 if (target < ent->total_mrs - ent->available_mrs) {
345 target = target - (ent->total_mrs - ent->available_mrs);
346 if (target < ent->limit || target > ent->limit*2) {
350 err = resize_available_mrs(ent, target, false);
353 spin_unlock_irq(&ent->lock);
358 spin_unlock_irq(&ent->lock);
362 static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
365 struct mlx5_cache_ent *ent = filp->private_data;
369 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
373 return simple_read_from_buffer(buf, count, pos, lbuf, err);
376 static const struct file_operations size_fops = {
377 .owner = THIS_MODULE,
383 static ssize_t limit_write(struct file *filp, const char __user *buf,
384 size_t count, loff_t *pos)
386 struct mlx5_cache_ent *ent = filp->private_data;
390 err = kstrtou32_from_user(buf, count, 0, &var);
395 * Upon set we immediately fill the cache to the high water mark implied by the limit.
398 spin_lock_irq(&ent->lock);
400 err = resize_available_mrs(ent, 0, true);
401 spin_unlock_irq(&ent->lock);
407 static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
410 struct mlx5_cache_ent *ent = filp->private_data;
414 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
418 return simple_read_from_buffer(buf, count, pos, lbuf, err);
421 static const struct file_operations limit_fops = {
422 .owner = THIS_MODULE,
424 .write = limit_write,
428 static bool someone_adding(struct mlx5_mr_cache *cache)
432 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
433 struct mlx5_cache_ent *ent = &cache->ent[i];
436 spin_lock_irq(&ent->lock);
437 ret = ent->available_mrs < ent->limit;
438 spin_unlock_irq(&ent->lock);
446 * Check if the bucket is outside the high/low water mark and schedule an async
447 * update. The cache refill has hysteresis; once the low water mark is hit, it is
448 * refilled up to the high mark.
450 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
452 lockdep_assert_held(&ent->lock);
454 if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
456 if (ent->available_mrs < ent->limit) {
457 ent->fill_to_high_water = true;
458 queue_work(ent->dev->cache.wq, &ent->work);
459 } else if (ent->fill_to_high_water &&
460 ent->available_mrs + ent->pending < 2 * ent->limit) {
462 * Once we start populating due to hitting a low water mark,
463 * continue until we pass the high water mark.
465 queue_work(ent->dev->cache.wq, &ent->work);
466 } else if (ent->available_mrs == 2 * ent->limit) {
467 ent->fill_to_high_water = false;
468 } else if (ent->available_mrs > 2 * ent->limit) {
469 /* Queue deletion of excess entries */
470 ent->fill_to_high_water = false;
472 queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
473 msecs_to_jiffies(1000));
475 queue_work(ent->dev->cache.wq, &ent->work);
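/*
 * In short: dropping below ent->limit starts an asynchronous refill that
 * runs until 2 * ent->limit is reached, while exceeding 2 * ent->limit
 * queues work to trim the excess (delayed by a second while creations are
 * still pending). The limit is thus the low water mark and twice the
 * limit the high water mark.
 */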
479 static void __cache_work_func(struct mlx5_cache_ent *ent)
481 struct mlx5_ib_dev *dev = ent->dev;
482 struct mlx5_mr_cache *cache = &dev->cache;
485 spin_lock_irq(&ent->lock);
489 if (ent->fill_to_high_water &&
490 ent->available_mrs + ent->pending < 2 * ent->limit &&
491 !READ_ONCE(dev->fill_delay)) {
492 spin_unlock_irq(&ent->lock);
493 err = add_keys(ent, 1);
494 spin_lock_irq(&ent->lock);
499 * EAGAIN only happens if pending is positive, so we
500 * will be rescheduled from create_mkey_callback(). The only
501 * failure path here is ENOMEM.
503 if (err != -EAGAIN) {
506 "command failed order %d, err %d\n",
508 queue_delayed_work(cache->wq, &ent->dwork,
509 msecs_to_jiffies(1000));
512 } else if (ent->available_mrs > 2 * ent->limit) {
516 * The remove_cache_mr() logic is performed as a garbage
517 * collection task. Such a task is intended to run when no
518 * other active processes are running.
520 * need_resched() returns true if there are user tasks waiting
521 * to be activated in the near future.
523 * In that case, we don't execute remove_cache_mr() and instead
524 * postpone the garbage collection work to the next cycle, in
525 * order to free CPU resources for other tasks.
527 spin_unlock_irq(&ent->lock);
528 need_delay = need_resched() || someone_adding(cache) ||
530 READ_ONCE(cache->last_add) + 300 * HZ);
531 spin_lock_irq(&ent->lock);
535 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
536 remove_cache_mr_locked(ent);
537 queue_adjust_cache_locked(ent);
540 spin_unlock_irq(&ent->lock);
543 static void delayed_cache_work_func(struct work_struct *work)
545 struct mlx5_cache_ent *ent;
547 ent = container_of(work, struct mlx5_cache_ent, dwork.work);
548 __cache_work_func(ent);
551 static void cache_work_func(struct work_struct *work)
553 struct mlx5_cache_ent *ent;
555 ent = container_of(work, struct mlx5_cache_ent, work);
556 __cache_work_func(ent);
559 /* Allocate a special entry from the cache */
560 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
561 unsigned int entry, int access_flags)
563 struct mlx5_mr_cache *cache = &dev->cache;
564 struct mlx5_cache_ent *ent;
565 struct mlx5_ib_mr *mr;
567 if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY ||
568 entry >= ARRAY_SIZE(cache->ent)))
569 return ERR_PTR(-EINVAL);
571 /* Matches access in alloc_cache_mr() */
572 if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
573 return ERR_PTR(-EOPNOTSUPP);
575 ent = &cache->ent[entry];
576 spin_lock_irq(&ent->lock);
577 if (list_empty(&ent->head)) {
578 spin_unlock_irq(&ent->lock);
579 mr = create_cache_mr(ent);
583 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
585 ent->available_mrs--;
586 queue_adjust_cache_locked(ent);
587 spin_unlock_irq(&ent->lock);
589 mr->access_flags = access_flags;
593 /* Return an MR already available in the cache */
594 static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
596 struct mlx5_ib_dev *dev = req_ent->dev;
597 struct mlx5_ib_mr *mr = NULL;
598 struct mlx5_cache_ent *ent = req_ent;
600 /* Try larger MR pools from the cache to satisfy the allocation */
601 for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) {
602 mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order,
603 ent - dev->cache.ent);
605 spin_lock_irq(&ent->lock);
606 if (!list_empty(&ent->head)) {
607 mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
610 ent->available_mrs--;
611 queue_adjust_cache_locked(ent);
612 spin_unlock_irq(&ent->lock);
615 queue_adjust_cache_locked(ent);
616 spin_unlock_irq(&ent->lock);
625 static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
627 struct mlx5_cache_ent *ent = mr->cache_ent;
629 mr->cache_ent = NULL;
630 spin_lock_irq(&ent->lock);
632 spin_unlock_irq(&ent->lock);
635 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
637 struct mlx5_cache_ent *ent = mr->cache_ent;
642 if (mlx5_mr_cache_invalidate(mr)) {
643 detach_mr_from_cache(mr);
644 destroy_mkey(dev, mr);
648 spin_lock_irq(&ent->lock);
649 list_add_tail(&mr->list, &ent->head);
650 ent->available_mrs++;
651 queue_adjust_cache_locked(ent);
652 spin_unlock_irq(&ent->lock);
655 static void clean_keys(struct mlx5_ib_dev *dev, int c)
657 struct mlx5_mr_cache *cache = &dev->cache;
658 struct mlx5_cache_ent *ent = &cache->ent[c];
659 struct mlx5_ib_mr *tmp_mr;
660 struct mlx5_ib_mr *mr;
663 cancel_delayed_work(&ent->dwork);
665 spin_lock_irq(&ent->lock);
666 if (list_empty(&ent->head)) {
667 spin_unlock_irq(&ent->lock);
670 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
671 list_move(&mr->list, &del_list);
672 ent->available_mrs--;
674 spin_unlock_irq(&ent->lock);
675 mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
678 list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
684 static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
686 if (!mlx5_debugfs_root || dev->is_rep)
689 debugfs_remove_recursive(dev->cache.root);
690 dev->cache.root = NULL;
693 static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
695 struct mlx5_mr_cache *cache = &dev->cache;
696 struct mlx5_cache_ent *ent;
700 if (!mlx5_debugfs_root || dev->is_rep)
703 cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
705 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
706 ent = &cache->ent[i];
707 sprintf(ent->name, "%d", ent->order);
708 dir = debugfs_create_dir(ent->name, cache->root);
709 debugfs_create_file("size", 0600, dir, ent, &size_fops);
710 debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
711 debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
712 debugfs_create_u32("miss", 0600, dir, &ent->miss);
716 static void delay_time_func(struct timer_list *t)
718 struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
720 WRITE_ONCE(dev->fill_delay, 0);
723 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
725 struct mlx5_mr_cache *cache = &dev->cache;
726 struct mlx5_cache_ent *ent;
729 mutex_init(&dev->slow_path_mutex);
730 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
732 mlx5_ib_warn(dev, "failed to create work queue\n");
736 mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
737 timer_setup(&dev->delay_timer, delay_time_func, 0);
738 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
739 ent = &cache->ent[i];
740 INIT_LIST_HEAD(&ent->head);
741 spin_lock_init(&ent->lock);
746 INIT_WORK(&ent->work, cache_work_func);
747 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
749 if (i > MR_CACHE_LAST_STD_ENTRY) {
750 mlx5_odp_init_mr_cache_entry(ent);
754 if (ent->order > mr_cache_max_order(dev))
757 ent->page = PAGE_SHIFT;
758 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
759 MLX5_IB_UMR_OCTOWORD;
760 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
761 if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
762 !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
763 mlx5_ib_can_load_pas_with_umr(dev, 0))
764 ent->limit = dev->mdev->profile->mr_cache[i].limit;
767 spin_lock_irq(&ent->lock);
768 queue_adjust_cache_locked(ent);
769 spin_unlock_irq(&ent->lock);
772 mlx5_mr_cache_debugfs_init(dev);
777 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
784 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
785 struct mlx5_cache_ent *ent = &dev->cache.ent[i];
787 spin_lock_irq(&ent->lock);
788 ent->disabled = true;
789 spin_unlock_irq(&ent->lock);
790 cancel_work_sync(&ent->work);
791 cancel_delayed_work_sync(&ent->dwork);
794 mlx5_mr_cache_debugfs_cleanup(dev);
795 mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
797 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
800 destroy_workqueue(dev->cache.wq);
801 del_timer_sync(&dev->delay_timer);
806 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
808 struct mlx5_ib_dev *dev = to_mdev(pd->device);
809 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
810 struct mlx5_ib_mr *mr;
815 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
817 return ERR_PTR(-ENOMEM);
819 in = kzalloc(inlen, GFP_KERNEL);
825 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
827 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
828 MLX5_SET(mkc, mkc, length64, 1);
829 set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);
831 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
836 mr->mmkey.type = MLX5_MKEY_MR;
837 mr->ibmr.lkey = mr->mmkey.key;
838 mr->ibmr.rkey = mr->mmkey.key;
852 static int get_octo_len(u64 addr, u64 len, int page_shift)
854 u64 page_size = 1ULL << page_shift;
858 offset = addr & (page_size - 1);
859 npages = ALIGN(len + offset, page_size) >> page_shift;
860 return (npages + 1) / 2;
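/*
 * Each octword (16 bytes) holds two 8-byte MTT entries, hence the
 * (npages + 1) / 2 above. For example, with page_shift = 12, a 1 MiB
 * region starting 0x200 bytes into a page spans 257 pages and therefore
 * needs 129 octwords of translation space.
 */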
863 static int mr_cache_max_order(struct mlx5_ib_dev *dev)
865 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
866 return MR_CACHE_LAST_STD_ENTRY + 2;
867 return MLX5_MAX_UMR_SHIFT;
870 static int mr_umem_get(struct mlx5_ib_dev *dev, u64 start, u64 length,
871 int access_flags, struct ib_umem **umem, int *npages,
872 int *page_shift, int *ncont, int *order)
878 if (access_flags & IB_ACCESS_ON_DEMAND) {
879 struct ib_umem_odp *odp;
881 odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
884 mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
891 *page_shift = odp->page_shift;
892 *ncont = ib_umem_odp_num_pages(odp);
893 *npages = *ncont << (*page_shift - PAGE_SHIFT);
895 *order = ilog2(roundup_pow_of_two(*ncont));
897 u = ib_umem_get(&dev->ib_dev, start, length, access_flags);
899 mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
903 mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
904 page_shift, ncont, order);
908 mlx5_ib_warn(dev, "avoid zero region\n");
915 mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
916 *npages, *ncont, *order, *page_shift);
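/*
 * On return, *npages counts system (PAGE_SIZE) pages, *ncont counts
 * contiguous blocks of the chosen device page size (*page_shift), and
 * *order is the log2 bucket later used to pick a cache entry for the MR.
 */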
921 static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
923 struct mlx5_ib_umr_context *context =
924 container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
926 context->status = wc->status;
927 complete(&context->done);
930 static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
932 context->cqe.done = mlx5_ib_umr_done;
933 context->status = -1;
934 init_completion(&context->done);
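/*
 * UMR work requests are issued synchronously: the caller embeds a
 * completion in an mlx5_ib_umr_context, posts the WR on the dedicated UMR
 * QP (dev->umrc.qp) and sleeps until mlx5_ib_umr_done() records the work
 * completion status and wakes it, as mlx5_ib_post_send_wait() below does.
 */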
937 static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
938 struct mlx5_umr_wr *umrwr)
940 struct umr_common *umrc = &dev->umrc;
941 const struct ib_send_wr *bad;
943 struct mlx5_ib_umr_context umr_context;
945 mlx5_ib_init_umr_context(&umr_context);
946 umrwr->wr.wr_cqe = &umr_context.cqe;
949 err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
951 mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
953 wait_for_completion(&umr_context.done);
954 if (umr_context.status != IB_WC_SUCCESS) {
955 mlx5_ib_warn(dev, "reg umr failed (%u)\n",
964 static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
967 struct mlx5_mr_cache *cache = &dev->cache;
969 if (order < cache->ent[0].order)
970 return &cache->ent[0];
971 order = order - cache->ent[0].order;
972 if (order > MR_CACHE_LAST_STD_ENTRY)
974 return &cache->ent[order];
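/*
 * Orders smaller than the first bucket are rounded up to it, while orders
 * beyond the last standard entry yield no cache entry and registration
 * then goes through reg_create() instead. If, as in the default init,
 * ent[0].order is 2, an order-9 request maps to cache->ent[7].
 */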
977 static struct mlx5_ib_mr *
978 alloc_mr_from_cache(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr,
979 u64 len, int npages, int page_shift, unsigned int order,
982 struct mlx5_ib_dev *dev = to_mdev(pd->device);
983 struct mlx5_cache_ent *ent = mr_cache_ent_from_order(dev, order);
984 struct mlx5_ib_mr *mr;
987 return ERR_PTR(-E2BIG);
989 /* Matches access in alloc_cache_mr() */
990 if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
991 return ERR_PTR(-EOPNOTSUPP);
993 mr = get_cache_mr(ent);
995 mr = create_cache_mr(ent);
1002 mr->access_flags = access_flags;
1003 mr->desc_size = sizeof(struct mlx5_mtt);
1004 mr->mmkey.iova = virt_addr;
1005 mr->mmkey.size = len;
1006 mr->mmkey.pd = to_mpd(pd)->pdn;
1011 #define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
1012 MLX5_UMR_MTT_ALIGNMENT)
1013 #define MLX5_SPARE_UMR_CHUNK 0x10000
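/*
 * Rough sizing, assuming MLX5_MAX_UMR_SHIFT is 16 and
 * MLX5_UMR_MTT_ALIGNMENT is 64 bytes: MLX5_MAX_UMR_CHUNK works out to
 * 1 MiB minus one 64-byte alignment unit, the largest XLT buffer a single
 * UMR can carry, while MLX5_SPARE_UMR_CHUNK (64 KiB) is the smaller
 * fallback allocation tried when the big one fails.
 */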
1015 int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
1016 int page_shift, int flags)
1018 struct mlx5_ib_dev *dev = mr->dev;
1019 struct device *ddev = dev->ib_dev.dev.parent;
1023 struct mlx5_umr_wr wr;
1026 int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
1027 ? sizeof(struct mlx5_klm)
1028 : sizeof(struct mlx5_mtt);
1029 const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
1030 const int page_mask = page_align - 1;
1031 size_t pages_mapped = 0;
1032 size_t pages_to_map = 0;
1033 size_t pages_iter = 0;
1034 size_t size_to_map = 0;
1036 bool use_emergency_page = false;
1038 if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
1039 !umr_can_use_indirect_mkey(dev))
1042 /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
1043 * so we need to align the offset and length accordingly
1045 if (idx & page_mask) {
1046 npages += idx & page_mask;
1050 gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
1051 gfp |= __GFP_ZERO | __GFP_NOWARN;
1053 pages_to_map = ALIGN(npages, page_align);
1054 size = desc_size * pages_to_map;
1055 size = min_t(int, size, MLX5_MAX_UMR_CHUNK);
1057 xlt = (void *)__get_free_pages(gfp, get_order(size));
1058 if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
1059 mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. Fallback to spare UMR allocation of %d bytes\n",
1060 size, get_order(size), MLX5_SPARE_UMR_CHUNK);
1062 size = MLX5_SPARE_UMR_CHUNK;
1063 xlt = (void *)__get_free_pages(gfp, get_order(size));
1067 mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
1068 xlt = (void *)mlx5_ib_get_xlt_emergency_page();
1070 memset(xlt, 0, size);
1071 use_emergency_page = true;
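/*
 * XLT buffer allocation thus degrades gracefully: a chunk of up to
 * MLX5_MAX_UMR_CHUNK, then a MLX5_SPARE_UMR_CHUNK-sized buffer, and
 * finally the single preallocated emergency page, at the cost of more UMR
 * iterations per registration as the buffer shrinks.
 */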
1073 pages_iter = size / desc_size;
1074 dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
1075 if (dma_mapping_error(ddev, dma)) {
1076 mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
1081 if (mr->umem->is_odp) {
1082 if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
1083 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
1084 size_t max_pages = ib_umem_odp_num_pages(odp) - idx;
1086 pages_to_map = min_t(size_t, pages_to_map, max_pages);
1091 sg.lkey = dev->umrc.pd->local_dma_lkey;
1093 memset(&wr, 0, sizeof(wr));
1094 wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
1095 if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
1096 wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1097 wr.wr.sg_list = &sg;
1099 wr.wr.opcode = MLX5_IB_WR_UMR;
1101 wr.pd = mr->ibmr.pd;
1102 wr.mkey = mr->mmkey.key;
1103 wr.length = mr->mmkey.size;
1104 wr.virt_addr = mr->mmkey.iova;
1105 wr.access_flags = mr->access_flags;
1106 wr.page_shift = page_shift;
1108 for (pages_mapped = 0;
1109 pages_mapped < pages_to_map && !err;
1110 pages_mapped += pages_iter, idx += pages_iter) {
1111 npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
1112 size_to_map = npages * desc_size;
1113 dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
1114 if (mr->umem->is_odp) {
1115 mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
1117 __mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx,
1119 MLX5_IB_MTT_PRESENT);
1120 /* Clear padding after the pages
1121 * brought from the umem.
1123 memset(xlt + size_to_map, 0, size - size_to_map);
1125 dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
1127 sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
1129 if (pages_mapped + pages_iter >= pages_to_map) {
1130 if (flags & MLX5_IB_UPD_XLT_ENABLE)
1132 MLX5_IB_SEND_UMR_ENABLE_MR |
1133 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
1134 MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
1135 if (flags & MLX5_IB_UPD_XLT_PD ||
1136 flags & MLX5_IB_UPD_XLT_ACCESS)
1138 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1139 if (flags & MLX5_IB_UPD_XLT_ADDR)
1141 MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
1144 wr.offset = idx * desc_size;
1145 wr.xlt_size = sg.length;
1147 err = mlx5_ib_post_send_wait(dev, &wr);
1149 dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
1152 if (use_emergency_page)
1153 mlx5_ib_put_xlt_emergency_page();
1155 free_pages((unsigned long)xlt, get_order(size));
1161 * If ibmr is NULL it will be allocated by reg_create.
1162 * Otherwise, the given ibmr will be used.
1164 static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
1165 u64 virt_addr, u64 length,
1166 struct ib_umem *umem, int npages,
1167 int page_shift, int access_flags,
1170 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1171 struct mlx5_ib_mr *mr;
1177 bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
1179 mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
1181 return ERR_PTR(-ENOMEM);
1184 mr->access_flags = access_flags;
1186 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1188 inlen += sizeof(*pas) * roundup(npages, 2);
1189 in = kvzalloc(inlen, GFP_KERNEL);
1194 pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
1196 if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) {
1200 mlx5_ib_populate_pas(dev, umem, page_shift, pas,
1201 pg_cap ? MLX5_IB_MTT_PRESENT : 0);
1204 /* The pg_access bit allows setting the access flags
1205 * in the page list submitted with the command. */
1206 MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
1208 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1209 set_mkc_access_pd_addr_fields(mkc, access_flags, virt_addr,
1210 populate ? pd : dev->umrc.pd);
1211 MLX5_SET(mkc, mkc, free, !populate);
1212 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
1213 MLX5_SET(mkc, mkc, umr_en, 1);
1215 MLX5_SET64(mkc, mkc, len, length);
1216 MLX5_SET(mkc, mkc, bsf_octword_size, 0);
1217 MLX5_SET(mkc, mkc, translations_octword_size,
1218 get_octo_len(virt_addr, length, page_shift));
1219 MLX5_SET(mkc, mkc, log_page_size, page_shift);
1221 MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
1222 get_octo_len(virt_addr, length, page_shift));
1225 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1227 mlx5_ib_warn(dev, "create mkey failed\n");
1230 mr->mmkey.type = MLX5_MKEY_MR;
1231 mr->desc_size = sizeof(struct mlx5_mtt);
1235 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
1246 return ERR_PTR(err);
1249 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
1250 int npages, u64 length, int access_flags)
1252 mr->npages = npages;
1253 atomic_add(npages, &dev->mdev->priv.reg_pages);
1254 mr->ibmr.lkey = mr->mmkey.key;
1255 mr->ibmr.rkey = mr->mmkey.key;
1256 mr->ibmr.length = length;
1257 mr->access_flags = access_flags;
1260 static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
1261 u64 length, int acc, int mode)
1263 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1264 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1265 struct mlx5_ib_mr *mr;
1270 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1272 return ERR_PTR(-ENOMEM);
1274 in = kzalloc(inlen, GFP_KERNEL);
1280 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1282 MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
1283 MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
1284 MLX5_SET64(mkc, mkc, len, length);
1285 set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);
1287 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1294 set_mr_fields(dev, mr, 0, length, acc);
1304 return ERR_PTR(err);
1307 int mlx5_ib_advise_mr(struct ib_pd *pd,
1308 enum ib_uverbs_advise_mr_advice advice,
1310 struct ib_sge *sg_list,
1312 struct uverbs_attr_bundle *attrs)
1314 if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
1315 advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
1316 advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
1319 return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
1323 struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
1324 struct ib_dm_mr_attr *attr,
1325 struct uverbs_attr_bundle *attrs)
1327 struct mlx5_ib_dm *mdm = to_mdm(dm);
1328 struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
1329 u64 start_addr = mdm->dev_addr + attr->offset;
1332 switch (mdm->type) {
1333 case MLX5_IB_UAPI_DM_TYPE_MEMIC:
1334 if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
1335 return ERR_PTR(-EINVAL);
1337 mode = MLX5_MKC_ACCESS_MODE_MEMIC;
1338 start_addr -= pci_resource_start(dev->pdev, 0);
1340 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
1341 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
1342 if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
1343 return ERR_PTR(-EINVAL);
1345 mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
1348 return ERR_PTR(-EINVAL);
1351 return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
1352 attr->access_flags, mode);
1355 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1356 u64 virt_addr, int access_flags,
1357 struct ib_udata *udata)
1359 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1360 struct mlx5_ib_mr *mr = NULL;
1362 struct ib_umem *umem;
1369 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
1370 return ERR_PTR(-EOPNOTSUPP);
1372 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1373 start, virt_addr, length, access_flags);
1375 xlt_with_umr = mlx5_ib_can_load_pas_with_umr(dev, length);
1376 /* ODP requires XLT update via UMR to work. */
1377 if (!xlt_with_umr && (access_flags & IB_ACCESS_ON_DEMAND))
1378 return ERR_PTR(-EINVAL);
1380 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
1381 length == U64_MAX) {
1382 if (virt_addr != start)
1383 return ERR_PTR(-EINVAL);
1384 if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
1385 !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1386 return ERR_PTR(-EINVAL);
1388 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
1390 return ERR_CAST(mr);
1394 err = mr_umem_get(dev, start, length, access_flags, &umem,
1395 &npages, &page_shift, &ncont, &order);
1398 return ERR_PTR(err);
1401 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
1402 page_shift, order, access_flags);
1408 mutex_lock(&dev->slow_path_mutex);
1409 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
1410 page_shift, access_flags, !xlt_with_umr);
1411 mutex_unlock(&dev->slow_path_mutex);
1419 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
1422 set_mr_fields(dev, mr, npages, length, access_flags);
1424 if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) {
1426 * If the MR was created with reg_create then it will be
1427 * configured properly but left disabled. It is safe to go ahead
1428 * and configure it again via UMR while enabling it.
1430 int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
1432 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
1436 return ERR_PTR(err);
1440 if (is_odp_mr(mr)) {
1441 to_ib_umem_odp(mr->umem)->private = mr;
1442 init_waitqueue_head(&mr->q_deferred_work);
1443 atomic_set(&mr->num_deferred_work, 0);
1444 err = xa_err(xa_store(&dev->odp_mkeys,
1445 mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
1449 return ERR_PTR(err);
1452 err = mlx5_ib_init_odp_mr(mr, xlt_with_umr);
1455 return ERR_PTR(err);
1461 ib_umem_release(umem);
1462 return ERR_PTR(err);
1466 * mlx5_mr_cache_invalidate - Fence all DMA on the MR
1467 * @mr: The MR to fence
1469 * Upon return the NIC will not be doing any DMA to the pages under the MR,
1470 * and any DMA in progress will be completed. Failure of this function
1471 * indicates the HW has failed catastrophically.
1473 int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
1475 struct mlx5_umr_wr umrwr = {};
1477 if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1480 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
1481 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1482 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1483 umrwr.pd = mr->dev->umrc.pd;
1484 umrwr.mkey = mr->mmkey.key;
1485 umrwr.ignore_free_state = 1;
1487 return mlx5_ib_post_send_wait(mr->dev, &umrwr);
1490 static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1491 int access_flags, int flags)
1493 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1494 struct mlx5_umr_wr umrwr = {};
1497 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1499 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1500 umrwr.mkey = mr->mmkey.key;
1502 if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
1504 umrwr.access_flags = access_flags;
1505 umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1508 err = mlx5_ib_post_send_wait(dev, &umrwr);
1513 int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1514 u64 length, u64 virt_addr, int new_access_flags,
1515 struct ib_pd *new_pd, struct ib_udata *udata)
1517 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
1518 struct mlx5_ib_mr *mr = to_mmr(ib_mr);
1519 struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
1520 int access_flags = flags & IB_MR_REREG_ACCESS ?
1531 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1532 start, virt_addr, length, access_flags);
1534 atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
1542 if (flags & IB_MR_REREG_TRANS) {
1546 addr = mr->umem->address;
1547 len = mr->umem->length;
1550 if (flags != IB_MR_REREG_PD) {
1552 * Replace umem. This needs to be done whether or not UMR is used.
1555 flags |= IB_MR_REREG_TRANS;
1556 ib_umem_release(mr->umem);
1558 err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
1559 &npages, &page_shift, &ncont, &order);
1564 if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags,
1566 !mlx5_ib_can_load_pas_with_umr(dev, len) ||
1567 (flags & IB_MR_REREG_TRANS &&
1568 !mlx5_ib_pas_fits_in_mr(mr, addr, len))) {
1570 * UMR can't be used - MKey needs to be replaced.
1573 detach_mr_from_cache(mr);
1574 err = destroy_mkey(dev, mr);
1578 mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
1579 page_shift, access_flags, true);
1591 mr->access_flags = access_flags;
1592 mr->mmkey.iova = addr;
1593 mr->mmkey.size = len;
1594 mr->mmkey.pd = to_mpd(pd)->pdn;
1596 if (flags & IB_MR_REREG_TRANS) {
1597 upd_flags = MLX5_IB_UPD_XLT_ADDR;
1598 if (flags & IB_MR_REREG_PD)
1599 upd_flags |= MLX5_IB_UPD_XLT_PD;
1600 if (flags & IB_MR_REREG_ACCESS)
1601 upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
1602 err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
1605 err = rereg_umr(pd, mr, access_flags, flags);
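/*
 * To summarize the rereg paths: when the mkey had to be replaced,
 * reg_create() above already programmed everything; when the translation
 * changed, mlx5_ib_update_xlt() rewrites the page list (plus PD/access if
 * those also changed); and when only the PD or access flags changed,
 * rereg_umr() issues a single UMR without touching the translation.
 */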
1612 set_mr_fields(dev, mr, npages, len, access_flags);
1617 ib_umem_release(mr->umem);
1625 mlx5_alloc_priv_descs(struct ib_device *device,
1626 struct mlx5_ib_mr *mr,
1630 int size = ndescs * desc_size;
1634 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1636 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1637 if (!mr->descs_alloc)
1640 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1642 mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
1643 size, DMA_TO_DEVICE);
1644 if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
1651 kfree(mr->descs_alloc);
1657 mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1660 struct ib_device *device = mr->ibmr.device;
1661 int size = mr->max_descs * mr->desc_size;
1663 dma_unmap_single(device->dev.parent, mr->desc_map,
1664 size, DMA_TO_DEVICE);
1665 kfree(mr->descs_alloc);
1670 static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1673 if (mlx5_core_destroy_psv(dev->mdev,
1674 mr->sig->psv_memory.psv_idx))
1675 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1676 mr->sig->psv_memory.psv_idx);
1677 if (mlx5_core_destroy_psv(dev->mdev,
1678 mr->sig->psv_wire.psv_idx))
1679 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1680 mr->sig->psv_wire.psv_idx);
1681 xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
1686 if (!mr->cache_ent) {
1687 destroy_mkey(dev, mr);
1688 mlx5_free_priv_descs(mr);
1692 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1694 int npages = mr->npages;
1695 struct ib_umem *umem = mr->umem;
1699 mlx5_ib_fence_odp_mr(mr);
1704 mlx5_mr_cache_free(dev, mr);
1708 ib_umem_release(umem);
1709 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1713 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1715 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1717 if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
1718 dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
1719 dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
1722 if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) {
1723 mlx5_ib_free_implicit_mr(mmr);
1727 dereg_mr(to_mdev(ibmr->device), mmr);
1732 static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
1733 int access_mode, int page_shift)
1737 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1739 /* This is only used from the kernel, so setting the PD is OK. */
1740 set_mkc_access_pd_addr_fields(mkc, 0, 0, pd);
1741 MLX5_SET(mkc, mkc, free, 1);
1742 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1743 MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
1744 MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
1745 MLX5_SET(mkc, mkc, umr_en, 1);
1746 MLX5_SET(mkc, mkc, log_page_size, page_shift);
1749 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1750 int ndescs, int desc_size, int page_shift,
1751 int access_mode, u32 *in, int inlen)
1753 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1756 mr->access_mode = access_mode;
1757 mr->desc_size = desc_size;
1758 mr->max_descs = ndescs;
1760 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
1764 mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);
1766 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1768 goto err_free_descs;
1770 mr->mmkey.type = MLX5_MKEY_MR;
1771 mr->ibmr.lkey = mr->mmkey.key;
1772 mr->ibmr.rkey = mr->mmkey.key;
1777 mlx5_free_priv_descs(mr);
1781 static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
1782 u32 max_num_sg, u32 max_num_meta_sg,
1783 int desc_size, int access_mode)
1785 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1786 int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
1788 struct mlx5_ib_mr *mr;
1792 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1794 return ERR_PTR(-ENOMEM);
1797 mr->ibmr.device = pd->device;
1799 in = kzalloc(inlen, GFP_KERNEL);
1805 if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
1806 page_shift = PAGE_SHIFT;
1808 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
1809 access_mode, in, inlen);
1822 return ERR_PTR(err);
1825 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1826 int ndescs, u32 *in, int inlen)
1828 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
1829 PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
1833 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1834 int ndescs, u32 *in, int inlen)
1836 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
1837 0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
1840 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1841 int max_num_sg, int max_num_meta_sg,
1844 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1849 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
1853 /* create mem & wire PSVs */
1854 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
1858 mr->sig->psv_memory.psv_idx = psv_index[0];
1859 mr->sig->psv_wire.psv_idx = psv_index[1];
1861 mr->sig->sig_status_checked = true;
1862 mr->sig->sig_err_exists = false;
1863 /* Next UMR, Arm SIGERR */
1864 ++mr->sig->sigerr_count;
1865 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
1866 sizeof(struct mlx5_klm),
1867 MLX5_MKC_ACCESS_MODE_KLMS);
1868 if (IS_ERR(mr->klm_mr)) {
1869 err = PTR_ERR(mr->klm_mr);
1870 goto err_destroy_psv;
1872 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
1873 sizeof(struct mlx5_mtt),
1874 MLX5_MKC_ACCESS_MODE_MTT);
1875 if (IS_ERR(mr->mtt_mr)) {
1876 err = PTR_ERR(mr->mtt_mr);
1877 goto err_free_klm_mr;
1880 /* Set bsf descriptors for mkey */
1881 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1882 MLX5_SET(mkc, mkc, bsf_en, 1);
1883 MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
1885 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
1886 MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
1888 goto err_free_mtt_mr;
1890 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
1891 mr->sig, GFP_KERNEL));
1893 goto err_free_descs;
1897 destroy_mkey(dev, mr);
1898 mlx5_free_priv_descs(mr);
1900 dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
1903 dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
1906 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
1907 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1908 mr->sig->psv_memory.psv_idx);
1909 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
1910 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1911 mr->sig->psv_wire.psv_idx);
1918 static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
1919 enum ib_mr_type mr_type, u32 max_num_sg,
1920 u32 max_num_meta_sg)
1922 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1923 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1924 int ndescs = ALIGN(max_num_sg, 4);
1925 struct mlx5_ib_mr *mr;
1929 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1931 return ERR_PTR(-ENOMEM);
1933 in = kzalloc(inlen, GFP_KERNEL);
1939 mr->ibmr.device = pd->device;
1943 case IB_MR_TYPE_MEM_REG:
1944 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
1946 case IB_MR_TYPE_SG_GAPS:
1947 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
1949 case IB_MR_TYPE_INTEGRITY:
1950 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
1951 max_num_meta_sg, in, inlen);
1954 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
1969 return ERR_PTR(err);
1972 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
1975 return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
1978 struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
1979 u32 max_num_sg, u32 max_num_meta_sg)
1981 return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
1985 int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
1987 struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
1988 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1989 struct mlx5_ib_mw *mw = to_mmw(ibmw);
1994 struct mlx5_ib_alloc_mw req = {};
1997 __u32 response_length;
2000 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
2004 if (req.comp_mask || req.reserved1 || req.reserved2)
2007 if (udata->inlen > sizeof(req) &&
2008 !ib_is_udata_cleared(udata, sizeof(req),
2009 udata->inlen - sizeof(req)))
2012 ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
2014 in = kzalloc(inlen, GFP_KERNEL);
2020 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
2022 MLX5_SET(mkc, mkc, free, 1);
2023 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
2024 MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
2025 MLX5_SET(mkc, mkc, umr_en, 1);
2026 MLX5_SET(mkc, mkc, lr, 1);
2027 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
2028 MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
2029 MLX5_SET(mkc, mkc, qpn, 0xffffff);
2031 err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
2035 mw->mmkey.type = MLX5_MKEY_MW;
2036 ibmw->rkey = mw->mmkey.key;
2037 mw->ndescs = ndescs;
2039 resp.response_length =
2040 min(offsetofend(typeof(resp), response_length), udata->outlen);
2041 if (resp.response_length) {
2042 err = ib_copy_to_udata(udata, &resp, resp.response_length);
2047 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
2048 err = xa_err(xa_store(&dev->odp_mkeys,
2049 mlx5_base_mkey(mw->mmkey.key), &mw->mmkey,
2059 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
2065 int mlx5_ib_dealloc_mw(struct ib_mw *mw)
2067 struct mlx5_ib_dev *dev = to_mdev(mw->device);
2068 struct mlx5_ib_mw *mmw = to_mmw(mw);
2070 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
2071 xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
2073 * pagefault_single_data_segment() may be accessing mmw under
2074 * SRCU if the user bound an ODP MR to this MW.
2076 synchronize_srcu(&dev->odp_srcu);
2079 return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
2082 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
2083 struct ib_mr_status *mr_status)
2085 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
2088 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
2089 pr_err("Invalid status check mask\n");
2094 mr_status->fail_status = 0;
2095 if (check_mask & IB_MR_CHECK_SIG_STATUS) {
2098 pr_err("signature status check requested on a non-signature enabled MR\n");
2102 mmr->sig->sig_status_checked = true;
2103 if (!mmr->sig->sig_err_exists)
2106 if (ibmr->lkey == mmr->sig->err_item.key)
2107 memcpy(&mr_status->sig_err, &mmr->sig->err_item,
2108 sizeof(mr_status->sig_err));
2110 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
2111 mr_status->sig_err.sig_err_offset = 0;
2112 mr_status->sig_err.key = mmr->sig->err_item.key;
2115 mmr->sig->sig_err_exists = false;
2116 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
2124 mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2125 int data_sg_nents, unsigned int *data_sg_offset,
2126 struct scatterlist *meta_sg, int meta_sg_nents,
2127 unsigned int *meta_sg_offset)
2129 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2130 unsigned int sg_offset = 0;
2133 mr->meta_length = 0;
2134 if (data_sg_nents == 1) {
2138 sg_offset = *data_sg_offset;
2139 mr->data_length = sg_dma_len(data_sg) - sg_offset;
2140 mr->data_iova = sg_dma_address(data_sg) + sg_offset;
2141 if (meta_sg_nents == 1) {
2143 mr->meta_ndescs = 1;
2145 sg_offset = *meta_sg_offset;
2148 mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
2149 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
2151 ibmr->length = mr->data_length + mr->meta_length;
2158 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
2159 struct scatterlist *sgl,
2160 unsigned short sg_nents,
2161 unsigned int *sg_offset_p,
2162 struct scatterlist *meta_sgl,
2163 unsigned short meta_sg_nents,
2164 unsigned int *meta_sg_offset_p)
2166 struct scatterlist *sg = sgl;
2167 struct mlx5_klm *klms = mr->descs;
2168 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2169 u32 lkey = mr->ibmr.pd->local_dma_lkey;
2172 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
2173 mr->ibmr.length = 0;
2175 for_each_sg(sgl, sg, sg_nents, i) {
2176 if (unlikely(i >= mr->max_descs))
2178 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
2179 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
2180 klms[i].key = cpu_to_be32(lkey);
2181 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2187 *sg_offset_p = sg_offset;
2190 mr->data_length = mr->ibmr.length;
2192 if (meta_sg_nents) {
2194 sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
2195 for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
2196 if (unlikely(i + j >= mr->max_descs))
2198 klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
2200 klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
2202 klms[i + j].key = cpu_to_be32(lkey);
2203 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2207 if (meta_sg_offset_p)
2208 *meta_sg_offset_p = sg_offset;
2210 mr->meta_ndescs = j;
2211 mr->meta_length = mr->ibmr.length - mr->data_length;
2217 static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
2219 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2222 if (unlikely(mr->ndescs == mr->max_descs))
2226 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2231 static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
2233 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2236 if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
2240 descs[mr->ndescs + mr->meta_ndescs++] =
2241 cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2247 mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2248 int data_sg_nents, unsigned int *data_sg_offset,
2249 struct scatterlist *meta_sg, int meta_sg_nents,
2250 unsigned int *meta_sg_offset)
2252 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2253 struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
2257 pi_mr->meta_ndescs = 0;
2258 pi_mr->meta_length = 0;
2260 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2261 pi_mr->desc_size * pi_mr->max_descs,
2264 pi_mr->ibmr.page_size = ibmr->page_size;
2265 n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
2267 if (n != data_sg_nents)
2270 pi_mr->data_iova = pi_mr->ibmr.iova;
2271 pi_mr->data_length = pi_mr->ibmr.length;
2272 pi_mr->ibmr.length = pi_mr->data_length;
2273 ibmr->length = pi_mr->data_length;
2275 if (meta_sg_nents) {
2276 u64 page_mask = ~((u64)ibmr->page_size - 1);
2277 u64 iova = pi_mr->data_iova;
2279 n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
2280 meta_sg_offset, mlx5_set_page_pi);
2282 pi_mr->meta_length = pi_mr->ibmr.length;
2284 * PI address for the HW is the offset of the metadata address
2285 * relative to the first data page address.
2286 * It equals the first data page address + the size of the data pages +
2287 * the metadata offset within the first metadata page
2289 pi_mr->pi_iova = (iova & page_mask) +
2290 pi_mr->ndescs * ibmr->page_size +
2291 (pi_mr->ibmr.iova & ~page_mask);
2293 * In order to use one MTT MR for data and metadata, we also
2294 * register the gaps between the end of the data and the start of
2295 * the metadata (the sig MR will verify that the HW accesses
2296 * the right addresses). This mapping is safe because we use an
2297 * internal mkey for the registration.
2299 pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
2300 pi_mr->ibmr.iova = iova;
2301 ibmr->length += pi_mr->meta_length;
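/*
 * Worked example of the pi_iova math above, assuming a 4 KiB page size: a
 * data buffer at iova 0x10200 spanning 3 MTT pages and metadata at iova
 * 0x30080 give pi_iova = 0x10000 + 3 * 0x1000 + 0x80 = 0x13080, i.e. the
 * metadata's in-page offset placed right after the data pages in the
 * gap-free MTT layout.
 */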
2304 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2305 pi_mr->desc_size * pi_mr->max_descs,
2312 mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2313 int data_sg_nents, unsigned int *data_sg_offset,
2314 struct scatterlist *meta_sg, int meta_sg_nents,
2315 unsigned int *meta_sg_offset)
2317 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2318 struct mlx5_ib_mr *pi_mr = mr->klm_mr;
2322 pi_mr->meta_ndescs = 0;
2323 pi_mr->meta_length = 0;
2325 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2326 pi_mr->desc_size * pi_mr->max_descs,
2329 n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
2330 meta_sg, meta_sg_nents, meta_sg_offset);
2332 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2333 pi_mr->desc_size * pi_mr->max_descs,
2336 /* This is a zero-based memory region */
2337 pi_mr->data_iova = 0;
2338 pi_mr->ibmr.iova = 0;
2339 pi_mr->pi_iova = pi_mr->data_length;
2340 ibmr->length = pi_mr->ibmr.length;
2345 int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2346 int data_sg_nents, unsigned int *data_sg_offset,
2347 struct scatterlist *meta_sg, int meta_sg_nents,
2348 unsigned int *meta_sg_offset)
2350 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2351 struct mlx5_ib_mr *pi_mr = NULL;
2354 WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
2357 mr->data_length = 0;
2359 mr->meta_ndescs = 0;
2362 * As a performance optimization, if possible, there is no need to
2363 * perform a UMR operation to register the data/metadata buffers.
2364 * First try to map the sg lists to PA descriptors with local_dma_lkey.
2365 * Fall back to UMR only in case of a failure.
2367 n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2368 data_sg_offset, meta_sg, meta_sg_nents,
2370 if (n == data_sg_nents + meta_sg_nents)
2373 * As a performance optimization, if possible, there is no need to map
2374 * the sg lists to KLM descriptors. First try to map the sg lists to MTT
2375 * descriptors and fall back to KLM only in case of a failure.
2376 * It's more efficient for the HW to work with MTT descriptors
2377 * (especially under high load).
2378 * Use KLM (indirect access) only if it's mandatory.
2381 n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2382 data_sg_offset, meta_sg, meta_sg_nents,
2384 if (n == data_sg_nents + meta_sg_nents)
2388 n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2389 data_sg_offset, meta_sg, meta_sg_nents,
2391 if (unlikely(n != data_sg_nents + meta_sg_nents))
2395 /* This is a zero-based memory region */
2399 ibmr->sig_attrs->meta_length = pi_mr->meta_length;
2401 ibmr->sig_attrs->meta_length = mr->meta_length;
2406 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
2407 unsigned int *sg_offset)
2409 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2414 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
2415 mr->desc_size * mr->max_descs,
2418 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
2419 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
2422 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
2425 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
2426 mr->desc_size * mr->max_descs,