/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
        MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static __be64 mlx5_ib_update_mtt_emergency_buffer[
                MLX5_UMR_MTT_MIN_CHUNK_SIZE / sizeof(__be64)]
        __aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif

static int clean_mr(struct mlx5_ib_mr *mr);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        /* Wait until all page fault handlers using the mr complete. */
        synchronize_srcu(&dev->mr_srcu);
#endif

        return err;
}

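/*
 * Map a registration order (log2 of the number of pages) to an index into
 * dev->cache.ent[], relative to the smallest cached order.  Orders below
 * the first entry fall back to index 0; callers such as alloc_cached_mr()
 * and free_cached_mr() range-check the result against MAX_MR_CACHE_ENTRIES.
 */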
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
        struct mlx5_mr_cache *cache = &dev->cache;

        if (order < cache->ent[0].order)
                return 0;
        else
                return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
        return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
                length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
        if (mr->umem->odp_data) {
                /*
                 * This barrier prevents the compiler from moving the
                 * setting of umem->odp_data->private to point to our
                 * MR, before reg_umr finished, to ensure that the MR
                 * initialization has finished before starting to
                 * handle invalidations.
                 */
                smp_wmb();
                mr->umem->odp_data->private = mr;
                /*
                 * Make sure we will see the new
                 * umem->odp_data->private value in the invalidation
                 * routines, before we can get page faults on the
                 * MR. Page faults can happen once we put the MR in
                 * the tree, below this line. Without the barrier,
                 * there can be a fault handling and an invalidation
                 * before umem->odp_data->private == mr is visible to
                 * the invalidation handler.
                 */
                smp_wmb();
        }
}
#endif

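/*
 * Completion handler for the asynchronous mkey creation commands posted by
 * add_keys().  On failure the entry's pending count is dropped, the mr is
 * discarded and further cache fills are delayed via dev->delay_timer.  On
 * success the new mkey is combined with the next per-device variant key,
 * appended to the cache entry's free list and inserted into the device's
 * mkey radix tree.
 */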
static void reg_mr_callback(int status, void *context)
{
        struct mlx5_ib_mr *mr = context;
        struct mlx5_ib_dev *dev = mr->dev;
        struct mlx5_mr_cache *cache = &dev->cache;
        int c = order2idx(dev, mr->order);
        struct mlx5_cache_ent *ent = &cache->ent[c];
        u8 key;
        unsigned long flags;
        struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
        int err;

        spin_lock_irqsave(&ent->lock, flags);
        ent->pending--;
        spin_unlock_irqrestore(&ent->lock, flags);
        if (status) {
                mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
                kfree(mr);
                dev->fill_delay = 1;
                mod_timer(&dev->delay_timer, jiffies + HZ);
                return;
        }

        spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
        key = dev->mdev->priv.mkey_key++;
        spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
        mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

        cache->last_add = jiffies;

        spin_lock_irqsave(&ent->lock, flags);
        list_add_tail(&mr->list, &ent->head);
        ent->cur++;
        ent->size++;
        spin_unlock_irqrestore(&ent->lock, flags);

        write_lock_irqsave(&table->lock, flags);
        err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
                                &mr->mmkey);
        if (err)
                pr_err("Error inserting to mkey tree. 0x%x\n", -err);
        write_unlock_irqrestore(&table->lock, flags);
}

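/*
 * Pre-populate cache entry c with num free mkeys.  Creation is issued
 * through the asynchronous mlx5_core_create_mkey_cb() interface, with at
 * most MAX_PENDING_REG_MR commands outstanding per entry;
 * reg_mr_callback() links each mkey into the free list on completion.
 */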
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        struct mlx5_ib_mr *mr;
        int npages = 1 << ent->order;
        void *mkc;
        u32 *in;
        int err = 0;
        int i;

        in = kzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
        for (i = 0; i < num; i++) {
                if (ent->pending >= MAX_PENDING_REG_MR) {
                        err = -EAGAIN;
                        break;
                }

                mr = kzalloc(sizeof(*mr), GFP_KERNEL);
                if (!mr) {
                        err = -ENOMEM;
                        break;
                }
                mr->order = ent->order;
                mr->umred = 1;
                mr->dev = dev;

                MLX5_SET(mkc, mkc, free, 1);
                MLX5_SET(mkc, mkc, umr_en, 1);
                MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

                MLX5_SET(mkc, mkc, qpn, 0xffffff);
                MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2);
                MLX5_SET(mkc, mkc, log_page_size, 12);

                spin_lock_irq(&ent->lock);
                ent->pending++;
                spin_unlock_irq(&ent->lock);
                err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
                                               in, inlen,
                                               mr->out, sizeof(mr->out),
                                               reg_mr_callback, mr);
                if (err) {
                        spin_lock_irq(&ent->lock);
                        ent->pending--;
                        spin_unlock_irq(&ent->lock);
                        mlx5_ib_warn(dev, "create mkey failed %d\n", err);
                        kfree(mr);
                        break;
                }
        }

        kfree(in);
        return err;
}

static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
        int err;
        int i;

        for (i = 0; i < num; i++) {
                spin_lock_irq(&ent->lock);
                if (list_empty(&ent->head)) {
                        spin_unlock_irq(&ent->lock);
                        return;
                }
                mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
                list_del(&mr->list);
                ent->cur--;
                ent->size--;
                spin_unlock_irq(&ent->lock);
                err = destroy_mkey(dev, mr);
                if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
                else
                        kfree(mr);
        }
}

static ssize_t size_write(struct file *filp, const char __user *buf,
                          size_t count, loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        struct mlx5_ib_dev *dev = ent->dev;

        if (copy_from_user(lbuf, buf, sizeof(lbuf)))

        c = order2idx(dev, ent->order);
        lbuf[sizeof(lbuf) - 1] = 0;

        if (sscanf(lbuf, "%u", &var) != 1)

        if (var < ent->limit)

        if (var > ent->size) {
                err = add_keys(dev, c, var - ent->size);
                if (err && err != -EAGAIN)

                usleep_range(3000, 5000);
        } else if (var < ent->size) {
                remove_keys(dev, c, ent->size - var);
        }

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
                         loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;

        err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);

        if (copy_to_user(buf, lbuf, err))

static const struct file_operations size_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = size_write,
        .read   = size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
                           size_t count, loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        struct mlx5_ib_dev *dev = ent->dev;

        if (copy_from_user(lbuf, buf, sizeof(lbuf)))

        c = order2idx(dev, ent->order);
        lbuf[sizeof(lbuf) - 1] = 0;

        if (sscanf(lbuf, "%u", &var) != 1)

        if (ent->cur < ent->limit) {
                err = add_keys(dev, c, 2 * ent->limit - ent->cur);

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;

        err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);

        if (copy_to_user(buf, lbuf, err))

static const struct file_operations limit_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = limit_write,
        .read   = limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
        int i;

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                if (cache->ent[i].cur < cache->ent[i].limit)
                        return 1;
        }

        return 0;
}

static void __cache_work_func(struct mlx5_cache_ent *ent)
{
        struct mlx5_ib_dev *dev = ent->dev;
        struct mlx5_mr_cache *cache = &dev->cache;
        int i = order2idx(dev, ent->order);

        ent = &dev->cache.ent[i];
        if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
                err = add_keys(dev, i, 1);
                if (ent->cur < 2 * ent->limit) {
                        if (err == -EAGAIN) {
                                mlx5_ib_dbg(dev, "returned eagain, order %d\n",
                                queue_delayed_work(cache->wq, &ent->dwork,
                                                   msecs_to_jiffies(3));
                                mlx5_ib_warn(dev, "command failed order %d, err %d\n",
                                queue_delayed_work(cache->wq, &ent->dwork,
                                                   msecs_to_jiffies(1000));
                                queue_work(cache->wq, &ent->work);
        } else if (ent->cur > 2 * ent->limit) {
                /*
                 * The remove_keys() logic is performed as a garbage
                 * collection task.  Such a task is intended to be run when
                 * no other active processes are running.
                 *
                 * need_resched() returns TRUE if there are user tasks to be
                 * activated in the near future.
                 *
                 * In that case we do not execute remove_keys() and postpone
                 * the garbage collection work to the next cycle, in order
                 * to free CPU resources to other tasks.
                 */
                if (!need_resched() && !someone_adding(cache) &&
                    time_after(jiffies, cache->last_add + 300 * HZ)) {
                        remove_keys(dev, i, 1);
                        if (ent->cur > ent->limit)
                                queue_work(cache->wq, &ent->work);
                        queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);

static void delayed_cache_work_func(struct work_struct *work)
{
        struct mlx5_cache_ent *ent;

        ent = container_of(work, struct mlx5_cache_ent, dwork.work);
        __cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
        struct mlx5_cache_ent *ent;

        ent = container_of(work, struct mlx5_cache_ent, work);
        __cache_work_func(ent);
}

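/*
 * Take a free MR from the cache entry matching the requested order or, if
 * that entry is empty, from the next larger one.  Whenever an entry drops
 * below its limit (or a miss occurs) its work item is queued so the cache
 * is refilled in the background.
 */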
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_ib_mr *mr = NULL;
        struct mlx5_cache_ent *ent;
        int c;
        int i;

        c = order2idx(dev, order);
        if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
                mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
                return NULL;
        }

        for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];

                mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

                spin_lock_irq(&ent->lock);
                if (!list_empty(&ent->head)) {
                        mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
                                              list);
                        list_del(&mr->list);
                        ent->cur--;
                        spin_unlock_irq(&ent->lock);
                        if (ent->cur < ent->limit)
                                queue_work(cache->wq, &ent->work);
                        break;
                }
                spin_unlock_irq(&ent->lock);

                queue_work(cache->wq, &ent->work);
        }

        if (!mr)
                cache->ent[c].miss++;

        return mr;
}

static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int shrink = 0;
        int c;

        c = order2idx(dev, mr->order);
        if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
                mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
                return;
        }
        ent = &cache->ent[c];
        spin_lock_irq(&ent->lock);
        list_add_tail(&mr->list, &ent->head);
        ent->cur++;
        if (ent->cur > 2 * ent->limit)
                shrink = 1;
        spin_unlock_irq(&ent->lock);

        if (shrink)
                queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
        int err;

        cancel_delayed_work(&ent->dwork);
        while (1) {
                spin_lock_irq(&ent->lock);
                if (list_empty(&ent->head)) {
                        spin_unlock_irq(&ent->lock);
                        return;
                }
                mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
                list_del(&mr->list);
                ent->cur--;
                ent->size--;
                spin_unlock_irq(&ent->lock);
                err = destroy_mkey(dev, mr);
                if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
                else
                        kfree(mr);
        }
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int i;

        if (!mlx5_debugfs_root)
                return 0;

        cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
        if (!cache->root)
                return -ENOMEM;

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];
                sprintf(ent->name, "%d", ent->order);
                ent->dir = debugfs_create_dir(ent->name, cache->root);
                ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
                                                 &size_fops);
                ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
                                                  &limit_fops);
                ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
                                               &ent->cur);
                ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
                                                &ent->miss);
        }

        return 0;
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
        if (!mlx5_debugfs_root)
                return;

        debugfs_remove_recursive(dev->cache.root);
}

static void delay_time_func(unsigned long ctx)
{
        struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

        dev->fill_delay = 0;
}

int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int limit;
        int err;
        int i;

        mutex_init(&dev->slow_path_mutex);
        cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
        if (!cache->wq) {
                mlx5_ib_warn(dev, "failed to create work queue\n");
                return -ENOMEM;
        }

        setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                INIT_LIST_HEAD(&cache->ent[i].head);
                spin_lock_init(&cache->ent[i].lock);

                ent = &cache->ent[i];
                INIT_LIST_HEAD(&ent->head);
                spin_lock_init(&ent->lock);
                ent->order = i + 2;
                ent->dev = dev;

                if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
                        limit = dev->mdev->profile->mr_cache[i].limit;
                else
                        limit = 0;

                INIT_WORK(&ent->work, cache_work_func);
                INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
                ent->limit = limit;
                queue_work(cache->wq, &ent->work);
        }

        err = mlx5_mr_cache_debugfs_init(dev);
        if (err)
                mlx5_ib_warn(dev, "cache debugfs failure\n");

        return 0;
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
        int i;

        dev->cache.stopped = 1;
        flush_workqueue(dev->cache.wq);

        mlx5_mr_cache_debugfs_cleanup(dev);

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
                clean_keys(dev, i);

        destroy_workqueue(dev->cache.wq);
        del_timer_sync(&dev->delay_timer);

        return 0;
}

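/*
 * Register a memory key that covers the whole of memory in PA (physical
 * address) mode: no translation table is used, length64 is set and the
 * access rights are taken directly from the acc argument.
 */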
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_ib_mr *mr;
        void *mkc;
        u32 *in;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        in = kzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_free;
        }

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

        MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
        MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
        MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
        MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
        MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
        MLX5_SET(mkc, mkc, lr, 1);

        MLX5_SET(mkc, mkc, length64, 1);
        MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        MLX5_SET64(mkc, mkc, start_addr, 0);

        err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
        if (err)
                goto err_in;

        kfree(in);
        mr->ibmr.lkey = mr->mmkey.key;
        mr->ibmr.rkey = mr->mmkey.key;

        return &mr->ibmr;

err_in:
        kfree(in);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

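/*
 * Translation entries are programmed in units of octowords (16 bytes),
 * i.e. two 8-byte MTT entries per octoword.  For example, a 16 KB region
 * starting 1 KB into a 4 KB page spans 5 pages and therefore needs
 * (5 + 1) / 2 = 3 octowords.
 */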
static int get_octo_len(u64 addr, u64 len, int page_size)
{
        u64 offset;
        int npages;

        offset = addr & (page_size - 1);
        npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
        return (npages + 1) / 2;
}

static int use_umr(int order)
{
        return order <= MLX5_MAX_UMR_SHIFT;
}

static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
                          int npages, int page_shift, int *size,
                          __be64 **mr_pas, dma_addr_t *dma)
{
        __be64 *pas;
        struct device *ddev = dev->ib_dev.dma_device;

        /*
         * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
         * To avoid copying garbage after the pas array, we allocate
         * a little more.
         */
        *size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
        *mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
        if (!(*mr_pas))
                return -ENOMEM;

        pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
        mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
        /* Clear padding after the actual pages. */
        memset(pas + npages, 0, *size - npages * sizeof(u64));

        *dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
        if (dma_mapping_error(ddev, *dma)) {
                kfree(*mr_pas);
                return -ENOMEM;
        }

        return 0;
}

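/*
 * Fill the fields shared by all UMR work requests: the single scatter
 * entry pointing at the DMA-mapped page list (length rounded up to a
 * 64-byte multiple), the MLX5_IB_WR_UMR opcode, the target mkey and the
 * page shift of the mapping.
 */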
static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
                                struct ib_sge *sg, u64 dma, int n, u32 key,
                                int page_shift)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_umr_wr *umrwr = umr_wr(wr);

        sg->addr = dma;
        sg->length = ALIGN(sizeof(u64) * n, 64);
        sg->lkey = dev->umrc.pd->local_dma_lkey;

        wr->sg_list = sg;
        wr->num_sge = 1;
        wr->opcode = MLX5_IB_WR_UMR;

        umrwr->npages = n;
        umrwr->page_shift = page_shift;
        umrwr->mkey = key;
}

static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
                             struct ib_sge *sg, u64 dma, int n, u32 key,
                             int page_shift, u64 virt_addr, u64 len,
                             int access_flags)
{
        struct mlx5_umr_wr *umrwr = umr_wr(wr);

        prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);

        umrwr->target.virt_addr = virt_addr;
        umrwr->length = len;
        umrwr->access_flags = access_flags;
        umrwr->pd = to_mpd(pd)->pdn;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
                               struct ib_send_wr *wr, u32 key)
{
        struct mlx5_umr_wr *umrwr = umr_wr(wr);

        wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
        wr->opcode = MLX5_IB_WR_UMR;
        umrwr->mkey = key;
}

static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
                                   int access_flags, int *npages,
                                   int *page_shift, int *ncont, int *order)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
                                           access_flags, 0);
        if (IS_ERR(umem)) {
                mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
                return (void *)umem;
        }

        mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
        if (!*npages) {
                mlx5_ib_warn(dev, "avoid zero region\n");
                ib_umem_release(umem);
                return ERR_PTR(-EINVAL);
        }

        mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
                    *npages, *ncont, *order, *page_shift);

        return umem;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct mlx5_ib_umr_context *context =
                container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

        context->status = wc->status;
        complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
        context->cqe.done = mlx5_ib_umr_done;
        context->status = -1;
        init_completion(&context->done);
}

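/*
 * Register a user buffer through the UMR QP: take a pre-created mkey from
 * the MR cache (topping the cache up with add_keys() if it is empty),
 * DMA-map the page list and post a UMR work request that binds the mkey to
 * the new translation, virtual address, length and access flags.  The
 * mlx5_ib_umr_context completion reports whether the WQE succeeded.
 */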
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
                                  u64 virt_addr, u64 len, int npages,
                                  int page_shift, int order, int access_flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct device *ddev = dev->ib_dev.dma_device;
        struct umr_common *umrc = &dev->umrc;
        struct mlx5_ib_umr_context umr_context;
        struct mlx5_umr_wr umrwr = {};
        struct ib_send_wr *bad;
        struct mlx5_ib_mr *mr;

        for (i = 0; i < 1; i++) {
                mr = alloc_cached_mr(dev, order);

                err = add_keys(dev, order2idx(dev, order), 1);
                if (err && err != -EAGAIN) {
                        mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);

        return ERR_PTR(-EAGAIN);

        err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,

        mlx5_ib_init_umr_context(&umr_context);

        umrwr.wr.wr_cqe = &umr_context.cqe;
        prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
                         page_shift, virt_addr, len, access_flags);

        err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
                mlx5_ib_warn(dev, "post send failed, err %d\n", err);

        wait_for_completion(&umr_context.done);
        if (umr_context.status != IB_WC_SUCCESS) {
                mlx5_ib_warn(dev, "reg umr failed\n");

        mr->mmkey.iova = virt_addr;
        mr->mmkey.size = len;
        mr->mmkey.pd = to_mpd(pd)->pdn;

        dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

        free_cached_mr(dev, mr);

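/*
 * ODP only: rewrite a window of an MR's MTT entries after pages were
 * faulted in or invalidated.  The page list is built in a DMA-mapped
 * scratch page (or, if the atomic allocation fails, in the static
 * 2k-aligned emergency buffer serialized by its mutex) and pushed to the
 * HCA in MLX5_UMR_MTT_ALIGNMENT-sized chunks via UMR WQEs carrying the
 * UPDATE_MTT flag.
 */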
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
                       int zap)
{
        struct mlx5_ib_dev *dev = mr->dev;
        struct device *ddev = dev->ib_dev.dma_device;
        struct umr_common *umrc = &dev->umrc;
        struct mlx5_ib_umr_context umr_context;
        struct ib_umem *umem = mr->umem;
        struct ib_send_wr *bad;
        struct mlx5_umr_wr wr;
        const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
        const int page_index_mask = page_index_alignment - 1;
        size_t pages_mapped = 0;
        size_t pages_to_map = 0;
        size_t pages_iter = 0;
        int use_emergency_buf = 0;

        /*
         * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
         * so we need to align the offset and length accordingly.
         */
        if (start_page_index & page_index_mask) {
                npages += start_page_index & page_index_mask;
                start_page_index &= ~page_index_mask;
        }

        pages_to_map = ALIGN(npages, page_index_alignment);

        if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
                return -EINVAL;

        size = sizeof(u64) * pages_to_map;
        size = min_t(int, PAGE_SIZE, size);
        /*
         * We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
         * code, when we are called from an invalidation. The pas buffer must
         * be 2k-aligned for Connect-IB.
         */
        pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
        if (!pas) {
                mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
                pas = mlx5_ib_update_mtt_emergency_buffer;
                size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
                use_emergency_buf = 1;
                mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
                memset(pas, 0, size);
        }
        pages_iter = size / sizeof(u64);
        dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
        if (dma_mapping_error(ddev, dma)) {
                mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");

        for (pages_mapped = 0;
             pages_mapped < pages_to_map && !err;
             pages_mapped += pages_iter, start_page_index += pages_iter) {
                dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

                npages = min_t(size_t,
                               pages_iter,
                               ib_umem_num_pages(umem) - start_page_index);

                __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
                                       start_page_index, npages, pas,
                                       MLX5_IB_MTT_PRESENT);
                /* Clear padding after the pages brought from the umem. */
                memset(pas + npages, 0, size - npages * sizeof(u64));

                dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

                mlx5_ib_init_umr_context(&umr_context);

                memset(&wr, 0, sizeof(wr));
                wr.wr.wr_cqe = &umr_context.cqe;

                sg.length = ALIGN(npages * sizeof(u64),
                                  MLX5_UMR_MTT_ALIGNMENT);
                sg.lkey = dev->umrc.pd->local_dma_lkey;

                wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
                                   MLX5_IB_SEND_UMR_UPDATE_MTT;
                wr.wr.sg_list = &sg;
                wr.wr.opcode = MLX5_IB_WR_UMR;
                wr.npages = sg.length / sizeof(u64);
                wr.page_shift = PAGE_SHIFT;
                wr.mkey = mr->mmkey.key;
                wr.target.offset = start_page_index;

                err = ib_post_send(umrc->qp, &wr.wr, &bad);
                if (err)
                        mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);

                wait_for_completion(&umr_context.done);
                if (umr_context.status != IB_WC_SUCCESS) {
                        mlx5_ib_err(dev, "UMR completion failed, code %d\n",
                                    umr_context.status);
                }
        }
        dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

        if (!use_emergency_buf)
                free_page((unsigned long)pas);
        else
                mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);

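/*
 * Slow-path registration: build a complete CREATE_MKEY command, including
 * the full page list, and execute it synchronously.  Used when the region
 * is too large for the UMR fast path or when the cache could not supply an
 * mkey.
 */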
/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
                                     u64 virt_addr, u64 length,
                                     struct ib_umem *umem, int npages,
                                     int page_shift, int access_flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_mr *mr;
        bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

        mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
                sizeof(*pas) * ((npages + 1) / 2) * 2;
        in = mlx5_vzalloc(inlen);

        pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
        mlx5_ib_populate_pas(dev, umem, page_shift, pas,
                             pg_cap ? MLX5_IB_MTT_PRESENT : 0);

        /*
         * The pg_access bit allows setting the access flags
         * in the page list submitted with the command.
         */
        MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
        MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
        MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
        MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
        MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
        MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
        MLX5_SET(mkc, mkc, lr, 1);

        MLX5_SET64(mkc, mkc, start_addr, virt_addr);
        MLX5_SET64(mkc, mkc, len, length);
        MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
        MLX5_SET(mkc, mkc, bsf_octword_size, 0);
        MLX5_SET(mkc, mkc, translations_octword_size,
                 get_octo_len(virt_addr, length, 1 << page_shift));
        MLX5_SET(mkc, mkc, log_page_size, page_shift);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
                 get_octo_len(virt_addr, length, 1 << page_shift));

        err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
                mlx5_ib_warn(dev, "create mkey failed\n");

        mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

        return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
                          int npages, u64 length, int access_flags)
{
        mr->npages = npages;
        atomic_add(npages, &dev->mdev->priv.reg_pages);
        mr->ibmr.lkey = mr->mmkey.key;
        mr->ibmr.rkey = mr->mmkey.key;
        mr->ibmr.length = length;
        mr->access_flags = access_flags;
}

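/*
 * ib_reg_user_mr entry point: pin the user pages, then register them via
 * the UMR fast path when the order is small enough (use_umr()), falling
 * back to the slow reg_create() path under dev->slow_path_mutex otherwise.
 * ODP registrations above the UMR limit are rejected on Connect-IB.
 */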
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_mr *mr = NULL;
        struct ib_umem *umem;

        mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
                    start, virt_addr, length, access_flags);
        umem = mr_umem_get(pd, start, length, access_flags, &npages,
                           &page_shift, &ncont, &order);
        if (IS_ERR(umem))
                return (void *)umem;

        if (use_umr(order)) {
                mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
                             order, access_flags);
                if (PTR_ERR(mr) == -EAGAIN) {
                        mlx5_ib_dbg(dev, "cache empty for order %d", order);
                        mr = NULL;
                }
        } else if (access_flags & IB_ACCESS_ON_DEMAND) {
                err = -EINVAL;
                pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
                goto error;
        }

        if (!mr) {
                mutex_lock(&dev->slow_path_mutex);
                mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
                                page_shift, access_flags);
                mutex_unlock(&dev->slow_path_mutex);
        }

        mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

        mr->umem = umem;
        set_mr_fields(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        update_odp_mr(mr);
#endif

        return &mr->ibmr;

error:
        ib_umem_release(umem);
        return ERR_PTR(err);
}

static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        struct mlx5_core_dev *mdev = dev->mdev;
        struct umr_common *umrc = &dev->umrc;
        struct mlx5_ib_umr_context umr_context;
        struct mlx5_umr_wr umrwr = {};
        struct ib_send_wr *bad;

        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
                return 0;

        mlx5_ib_init_umr_context(&umr_context);

        umrwr.wr.wr_cqe = &umr_context.cqe;
        prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);

        err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
                mlx5_ib_dbg(dev, "err %d\n", err);

        wait_for_completion(&umr_context.done);

        if (umr_context.status != IB_WC_SUCCESS) {
                mlx5_ib_warn(dev, "unreg umr failed\n");

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
                     u64 length, int npages, int page_shift, int order,
                     int access_flags, int flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct device *ddev = dev->ib_dev.dma_device;
        struct mlx5_ib_umr_context umr_context;
        struct ib_send_wr *bad;
        struct mlx5_umr_wr umrwr = {};
        struct umr_common *umrc = &dev->umrc;
        __be64 *mr_pas = NULL;

        mlx5_ib_init_umr_context(&umr_context);

        umrwr.wr.wr_cqe = &umr_context.cqe;
        umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

        if (flags & IB_MR_REREG_TRANS) {
                err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,

                umrwr.target.virt_addr = virt_addr;
                umrwr.length = length;
                umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;

        prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,

        if (flags & IB_MR_REREG_PD) {
                umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;

        if (flags & IB_MR_REREG_ACCESS) {
                umrwr.access_flags = access_flags;
                umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;

        /* post send request to UMR QP */
        err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
                mlx5_ib_warn(dev, "post send failed, err %d\n", err);

        wait_for_completion(&umr_context.done);
        if (umr_context.status != IB_WC_SUCCESS) {
                mlx5_ib_warn(dev, "reg umr failed (%u)\n",
                             umr_context.status);

        if (flags & IB_MR_REREG_TRANS) {
                dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

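/*
 * ib_rereg_user_mr entry point.  Anything other than a pure PD change
 * replaces the umem.  If the new mapping still fits the existing mkey
 * (use_umr_mtt_update()) the mkey is modified in place with rereg_umr();
 * otherwise the old mkey is torn down and reg_create() builds a new one on
 * top of the same ib_mr.
 */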
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                          u64 length, u64 virt_addr, int new_access_flags,
                          struct ib_pd *new_pd, struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
        struct mlx5_ib_mr *mr = to_mmr(ib_mr);
        struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
        int access_flags = flags & IB_MR_REREG_ACCESS ?
                            new_access_flags :
                            mr->access_flags;
        u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
        u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;

        mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
                    start, virt_addr, length, access_flags);

        if (flags != IB_MR_REREG_PD) {
                /*
                 * Replace umem. This needs to be done whether or not UMR is
                 * used.
                 */
                flags |= IB_MR_REREG_TRANS;
                ib_umem_release(mr->umem);
                mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
                                       &page_shift, &ncont, &order);
                if (IS_ERR(mr->umem)) {
                        err = PTR_ERR(mr->umem);

        if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
                /*
                 * UMR can't be used - MKey needs to be replaced.
                 */
                err = unreg_umr(dev, mr);
                        mlx5_ib_warn(dev, "Failed to unregister MR\n");

                err = destroy_mkey(dev, mr);
                        mlx5_ib_warn(dev, "Failed to destroy MKey\n");

                mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
                                page_shift, access_flags);

                err = rereg_umr(pd, mr, addr, len, npages, page_shift,
                                order, access_flags, flags);
                        mlx5_ib_warn(dev, "Failed to rereg UMR\n");

        if (flags & IB_MR_REREG_PD) {
                mr->mmkey.pd = to_mpd(pd)->pdn;
        }

        if (flags & IB_MR_REREG_ACCESS)
                mr->access_flags = access_flags;

        if (flags & IB_MR_REREG_TRANS) {
                atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
                set_mr_fields(dev, mr, npages, len, access_flags);
                mr->mmkey.iova = addr;
                mr->mmkey.size = len;
        }
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

static int
mlx5_alloc_priv_descs(struct ib_device *device,
                      struct mlx5_ib_mr *mr,
                      int ndescs,
                      int desc_size)
{
        int size = ndescs * desc_size;

        add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

        mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
        if (!mr->descs_alloc)
                return -ENOMEM;

        mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

        mr->desc_map = dma_map_single(device->dma_device, mr->descs,
                                      size, DMA_TO_DEVICE);
        if (dma_mapping_error(device->dma_device, mr->desc_map)) {

        kfree(mr->descs_alloc);

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
        if (mr->descs) {
                struct ib_device *device = mr->ibmr.device;
                int size = mr->max_descs * mr->desc_size;

                dma_unmap_single(device->dma_device, mr->desc_map,
                                 size, DMA_TO_DEVICE);
                kfree(mr->descs_alloc);
                mr->descs = NULL;
        }
}

static int clean_mr(struct mlx5_ib_mr *mr)
{
        struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
        int umred = mr->umred;

        if (mlx5_core_destroy_psv(dev->mdev,
                                  mr->sig->psv_memory.psv_idx))
                mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
                             mr->sig->psv_memory.psv_idx);
        if (mlx5_core_destroy_psv(dev->mdev,
                                  mr->sig->psv_wire.psv_idx))
                mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
                             mr->sig->psv_wire.psv_idx);

        mlx5_free_priv_descs(mr);

        err = destroy_mkey(dev, mr);
                mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
                             mr->mmkey.key, err);

        err = unreg_umr(dev, mr);
                mlx5_ib_warn(dev, "failed unregister\n");

        free_cached_mr(dev, mr);

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
        struct mlx5_ib_mr *mr = to_mmr(ibmr);
        int npages = mr->npages;
        struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (umem && umem->odp_data) {
                /* Prevent new page faults from succeeding */
                mr->live = 0;
                /* Wait for all running page-fault handlers to finish. */
                synchronize_srcu(&dev->mr_srcu);
                /* Destroy all page mappings */
                mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
                                         ib_umem_end(umem));
                /*
                 * We kill the umem before the MR for ODP,
                 * so that there will not be any invalidations in
                 * flight, looking at the *mr struct.
                 */
                ib_umem_release(umem);
                atomic_sub(npages, &dev->mdev->priv.reg_pages);

                /* Avoid double-freeing the umem. */
                umem = NULL;
        }
#endif

        clean_mr(mr);

        if (umem) {
                ib_umem_release(umem);
                atomic_sub(npages, &dev->mdev->priv.reg_pages);
        }

        return 0;
}

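/*
 * Allocate a WR-registerable memory key for the fast registration API.
 * IB_MR_TYPE_MEM_REG uses an MTT page list, IB_MR_TYPE_SG_GAPS uses KLMs
 * so that arbitrary SG lists can be described, and IB_MR_TYPE_SIGNATURE
 * additionally creates the memory/wire PSVs and enables the BSF needed for
 * signature offload.
 */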
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
                               enum ib_mr_type mr_type,
                               u32 max_num_sg)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        int ndescs = ALIGN(max_num_sg, 4);
        struct mlx5_ib_mr *mr;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        in = kzalloc(inlen, GFP_KERNEL);

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
        MLX5_SET(mkc, mkc, free, 1);
        MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

        if (mr_type == IB_MR_TYPE_MEM_REG) {
                mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
                MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
                err = mlx5_alloc_priv_descs(pd->device, mr,
                                            ndescs, sizeof(u64));

                mr->desc_size = sizeof(u64);
                mr->max_descs = ndescs;
        } else if (mr_type == IB_MR_TYPE_SG_GAPS) {
                mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

                err = mlx5_alloc_priv_descs(pd->device, mr,
                                            ndescs, sizeof(struct mlx5_klm));

                mr->desc_size = sizeof(struct mlx5_klm);
                mr->max_descs = ndescs;
        } else if (mr_type == IB_MR_TYPE_SIGNATURE) {
                MLX5_SET(mkc, mkc, bsf_en, 1);
                MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
                mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);

                /* create mem & wire PSVs */
                err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,

                mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
                mr->sig->psv_memory.psv_idx = psv_index[0];
                mr->sig->psv_wire.psv_idx = psv_index[1];

                mr->sig->sig_status_checked = true;
                mr->sig->sig_err_exists = false;
                /* Next UMR, Arm SIGERR */
                ++mr->sig->sigerr_count;
        } else {
                mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
        }

        MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
        MLX5_SET(mkc, mkc, umr_en, 1);

        err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
        if (err)
                goto err_destroy_psv;

        mr->ibmr.lkey = mr->mmkey.key;
        mr->ibmr.rkey = mr->mmkey.key;

err_destroy_psv:
        if (mlx5_core_destroy_psv(dev->mdev,
                                  mr->sig->psv_memory.psv_idx))
                mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
                             mr->sig->psv_memory.psv_idx);
        if (mlx5_core_destroy_psv(dev->mdev,
                                  mr->sig->psv_wire.psv_idx))
                mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
                             mr->sig->psv_wire.psv_idx);

        mlx5_free_priv_descs(mr);

        return ERR_PTR(err);
}

struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                               struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        struct mlx5_ib_mw *mw = NULL;
        struct mlx5_ib_alloc_mw req = {};
        struct {
                __u32   comp_mask;
                __u32   response_length;
        } resp = {};

        err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
        if (err)
                return ERR_PTR(err);

        if (req.comp_mask || req.reserved1 || req.reserved2)
                return ERR_PTR(-EOPNOTSUPP);

        if (udata->inlen > sizeof(req) &&
            !ib_is_udata_cleared(udata, sizeof(req),
                                 udata->inlen - sizeof(req)))
                return ERR_PTR(-EOPNOTSUPP);

        ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

        mw = kzalloc(sizeof(*mw), GFP_KERNEL);
        in = kzalloc(inlen, GFP_KERNEL);

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

        MLX5_SET(mkc, mkc, free, 1);
        MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
        MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
        MLX5_SET(mkc, mkc, umr_en, 1);
        MLX5_SET(mkc, mkc, lr, 1);
        MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
        MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
        MLX5_SET(mkc, mkc, qpn, 0xffffff);

        err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);

        mw->ibmw.rkey = mw->mmkey.key;

        resp.response_length = min(offsetof(typeof(resp), response_length) +
                                   sizeof(resp.response_length), udata->outlen);
        if (resp.response_length) {
                err = ib_copy_to_udata(udata, &resp, resp.response_length);
                if (err) {
                        mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);

        return ERR_PTR(err);

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
        struct mlx5_ib_mw *mmw = to_mmw(mw);
        int err;

        err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
                                     &mmw->mmkey);
        if (!err)
                kfree(mmw);
        return err;
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
                            struct ib_mr_status *mr_status)
{
        struct mlx5_ib_mr *mmr = to_mmr(ibmr);

        if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
                pr_err("Invalid status check mask\n");

        mr_status->fail_status = 0;
        if (check_mask & IB_MR_CHECK_SIG_STATUS) {
                pr_err("signature status check requested on a non-signature enabled MR\n");

                mmr->sig->sig_status_checked = true;
                if (!mmr->sig->sig_err_exists)

                if (ibmr->lkey == mmr->sig->err_item.key)
                        memcpy(&mr_status->sig_err, &mmr->sig->err_item,
                               sizeof(mr_status->sig_err));

                mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
                mr_status->sig_err.sig_err_offset = 0;
                mr_status->sig_err.key = mmr->sig->err_item.key;

                mmr->sig->sig_err_exists = false;
                mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;

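/*
 * Convert a scatterlist into the KLM descriptor format used when
 * mr->access_mode is MLX5_MKC_ACCESS_MODE_KLMS: each entry carries its own
 * address, byte count and lkey, so gaps between SG elements are allowed.
 * mlx5_ib_map_mr_sg() below picks between this and the page-based
 * ib_sg_to_pages()/mlx5_set_page() path.
 */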
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
                   struct scatterlist *sgl,
                   unsigned short sg_nents,
                   unsigned int *sg_offset_p)
{
        struct scatterlist *sg = sgl;
        struct mlx5_klm *klms = mr->descs;
        unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
        u32 lkey = mr->ibmr.pd->local_dma_lkey;
        int i;

        mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
        mr->ibmr.length = 0;
        mr->ndescs = sg_nents;

        for_each_sg(sgl, sg, sg_nents, i) {
                if (unlikely(i > mr->max_descs))
                        break;
                klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
                klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
                klms[i].key = cpu_to_be32(lkey);
                mr->ibmr.length += sg_dma_len(sg);

                sg_offset = 0;
        }

        if (sg_offset_p)
                *sg_offset_p = sg_offset;

        return i;
}

static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct mlx5_ib_mr *mr = to_mmr(ibmr);
        __be64 *descs;

        if (unlikely(mr->ndescs == mr->max_descs))
                return -ENOMEM;

        descs = mr->descs;
        descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

        return 0;
}

int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                      unsigned int *sg_offset)
{
        struct mlx5_ib_mr *mr = to_mmr(ibmr);
        int n;

        ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
                                   mr->desc_size * mr->max_descs,
                                   DMA_TO_DEVICE);

        if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
                n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
        else
                n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
                                   mlx5_set_page);

        ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
                                      mr->desc_size * mr->max_descs,
                                      DMA_TO_DEVICE);

        return n;
}