IB/mlx5: Use direct mkey destroy command upon UMR unreg failure
[linux-2.6-block.git] drivers/infiniband/hw/mlx5/mr.c
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33
34#include <linux/kref.h>
35#include <linux/random.h>
36#include <linux/debugfs.h>
37#include <linux/export.h>
38#include <linux/delay.h>
39#include <rdma/ib_umem.h>
40#include <rdma/ib_umem_odp.h>
41#include <rdma/ib_verbs.h>
42#include "mlx5_ib.h"
43
44enum {
45 MAX_PENDING_REG_MR = 8,
46};
47
48#define MLX5_UMR_ALIGN 2048
49
50static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
51static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
52static int mr_cache_max_order(struct mlx5_ib_dev *dev);
53static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
54static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
55{
56 return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
57}
58
59static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
60{
61 return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
62}
63
64static bool use_umr(struct mlx5_ib_dev *dev, int order)
65{
66 return order <= mr_cache_max_order(dev) &&
67 umr_can_modify_entity_size(dev);
68}
69
70static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
71{
72 int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
73
74 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
75 /* Wait until all page fault handlers using the mr complete. */
76 synchronize_srcu(&dev->mr_srcu);
77
78 return err;
79}
80
81static int order2idx(struct mlx5_ib_dev *dev, int order)
82{
83 struct mlx5_mr_cache *cache = &dev->cache;
84
85 if (order < cache->ent[0].order)
86 return 0;
87 else
88 return order - cache->ent[0].order;
89}
90
56e11d62
NO
91static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
92{
93 return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
94 length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
95}
96
395a8e4c
NO
97static void update_odp_mr(struct mlx5_ib_mr *mr)
98{
8b4d5bc5 99 if (is_odp_mr(mr)) {
 100 /*
 101 * This barrier prevents the compiler from moving the
 102 * setting of umem->odp_data->private to point to our
 103 * MR before reg_umr has finished, to ensure that the MR
 104 * initialization has finished before we start to
 105 * handle invalidations.
 106 */
 107 smp_wmb();
 108 to_ib_umem_odp(mr->umem)->private = mr;
109 /*
110 * Make sure we will see the new
111 * umem->odp_data->private value in the invalidation
112 * routines, before we can get page faults on the
113 * MR. Page faults can happen once we put the MR in
114 * the tree, below this line. Without the barrier,
115 * there can be a fault handling and an invalidation
116 * before umem->odp_data->private == mr is visible to
117 * the invalidation handler.
118 */
119 smp_wmb();
120 }
121}
395a8e4c 122
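/*
 * Completion handler for the asynchronous create_mkey commands issued by
 * add_keys(): on failure the MR is freed and cache refilling is throttled
 * via fill_delay; on success the new MKey is added to the cache entry's
 * free list and inserted into the device mkey table.
 */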
e355477e 123static void reg_mr_callback(int status, struct mlx5_async_work *context)
746b5583 124{
125 struct mlx5_ib_mr *mr =
126 container_of(context, struct mlx5_ib_mr, cb_work);
127 struct mlx5_ib_dev *dev = mr->dev;
128 struct mlx5_mr_cache *cache = &dev->cache;
129 int c = order2idx(dev, mr->order);
130 struct mlx5_cache_ent *ent = &cache->ent[c];
131 u8 key;
746b5583 132 unsigned long flags;
792c4e9d 133 struct xarray *mkeys = &dev->mdev->priv.mkey_table;
8605933a 134 int err;
746b5583 135
136 spin_lock_irqsave(&ent->lock, flags);
137 ent->pending--;
138 spin_unlock_irqrestore(&ent->lock, flags);
139 if (status) {
140 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
141 kfree(mr);
142 dev->fill_delay = 1;
143 mod_timer(&dev->delay_timer, jiffies + HZ);
144 return;
145 }
146
aa8e08d2 147 mr->mmkey.type = MLX5_MKEY_MR;
148 spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
149 key = dev->mdev->priv.mkey_key++;
150 spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
ec22eb53 151 mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;
152
153 cache->last_add = jiffies;
154
155 spin_lock_irqsave(&ent->lock, flags);
156 list_add_tail(&mr->list, &ent->head);
157 ent->cur++;
158 ent->size++;
159 spin_unlock_irqrestore(&ent->lock, flags);
8605933a 160
161 xa_lock_irqsave(mkeys, flags);
162 err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
163 &mr->mmkey, GFP_ATOMIC));
164 xa_unlock_irqrestore(mkeys, flags);
8605933a 165 if (err)
a606b0f6 166 pr_err("Error inserting to mkey tree. 0x%x\n", -err);
167
168 if (!completion_done(&ent->compl))
169 complete(&ent->compl);
170}
171
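/*
 * Asynchronously create 'num' MKeys for cache entry 'c'.  At most
 * MAX_PENDING_REG_MR creations may be outstanding per entry; once that
 * limit is reached -EAGAIN is returned and the caller retries later.
 */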
172static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
173{
e126ba97
EC
174 struct mlx5_mr_cache *cache = &dev->cache;
175 struct mlx5_cache_ent *ent = &cache->ent[c];
ec22eb53 176 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
e126ba97 177 struct mlx5_ib_mr *mr;
ec22eb53
SM
178 void *mkc;
179 u32 *in;
e126ba97
EC
180 int err = 0;
181 int i;
182
ec22eb53 183 in = kzalloc(inlen, GFP_KERNEL);
e126ba97
EC
184 if (!in)
185 return -ENOMEM;
186
ec22eb53 187 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
e126ba97 188 for (i = 0; i < num; i++) {
746b5583
EC
189 if (ent->pending >= MAX_PENDING_REG_MR) {
190 err = -EAGAIN;
191 break;
192 }
193
e126ba97
EC
194 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
195 if (!mr) {
196 err = -ENOMEM;
746b5583 197 break;
e126ba97
EC
198 }
199 mr->order = ent->order;
8b7ff7f3 200 mr->allocated_from_cache = 1;
746b5583 201 mr->dev = dev;
ec22eb53
SM
202
203 MLX5_SET(mkc, mkc, free, 1);
204 MLX5_SET(mkc, mkc, umr_en, 1);
cdbd0d2b
AL
205 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
206 MLX5_SET(mkc, mkc, access_mode_4_2,
207 (ent->access_mode >> 2) & 0x7);
ec22eb53
SM
208
209 MLX5_SET(mkc, mkc, qpn, 0xffffff);
49780d42
AK
210 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
211 MLX5_SET(mkc, mkc, log_page_size, ent->page);
e126ba97 212
746b5583
EC
213 spin_lock_irq(&ent->lock);
214 ent->pending++;
215 spin_unlock_irq(&ent->lock);
ec22eb53 216 err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
e355477e 217 &dev->async_ctx, in, inlen,
ec22eb53 218 mr->out, sizeof(mr->out),
e355477e 219 reg_mr_callback, &mr->cb_work);
e126ba97 220 if (err) {
d14e7110
EC
221 spin_lock_irq(&ent->lock);
222 ent->pending--;
223 spin_unlock_irq(&ent->lock);
e126ba97 224 mlx5_ib_warn(dev, "create mkey failed %d\n", err);
e126ba97 225 kfree(mr);
746b5583 226 break;
e126ba97 227 }
e126ba97
EC
228 }
229
e126ba97
EC
230 kfree(in);
231 return err;
232}
233
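/*
 * Remove up to 'num' MKeys from cache entry 'c' and destroy them in
 * hardware.  When ODP is enabled, wait (SRCU) for page-fault handlers
 * that may still reference them before freeing the MR structures.
 */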
234static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
235{
e126ba97
EC
236 struct mlx5_mr_cache *cache = &dev->cache;
237 struct mlx5_cache_ent *ent = &cache->ent[c];
65edd0e7 238 struct mlx5_ib_mr *tmp_mr;
e126ba97 239 struct mlx5_ib_mr *mr;
65edd0e7 240 LIST_HEAD(del_list);
e126ba97
EC
241 int i;
242
243 for (i = 0; i < num; i++) {
746b5583 244 spin_lock_irq(&ent->lock);
e126ba97 245 if (list_empty(&ent->head)) {
746b5583 246 spin_unlock_irq(&ent->lock);
65edd0e7 247 break;
e126ba97
EC
248 }
249 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
65edd0e7 250 list_move(&mr->list, &del_list);
e126ba97
EC
251 ent->cur--;
252 ent->size--;
746b5583 253 spin_unlock_irq(&ent->lock);
65edd0e7
DJ
254 mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
255 }
256
13859d5d
LR
257 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
258 synchronize_srcu(&dev->mr_srcu);
65edd0e7
DJ
259
260 list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
261 list_del(&mr->list);
262 kfree(mr);
e126ba97
EC
263 }
264}
265
266static ssize_t size_write(struct file *filp, const char __user *buf,
267 size_t count, loff_t *pos)
268{
269 struct mlx5_cache_ent *ent = filp->private_data;
270 struct mlx5_ib_dev *dev = ent->dev;
60e6627f 271 char lbuf[20] = {0};
e126ba97
EC
272 u32 var;
273 int err;
274 int c;
275
60e6627f
JH
276 count = min(count, sizeof(lbuf) - 1);
277 if (copy_from_user(lbuf, buf, count))
5e631a03 278 return -EFAULT;
e126ba97
EC
279
280 c = order2idx(dev, ent->order);
e126ba97
EC
281
282 if (sscanf(lbuf, "%u", &var) != 1)
283 return -EINVAL;
284
285 if (var < ent->limit)
286 return -EINVAL;
287
288 if (var > ent->size) {
746b5583
EC
289 do {
290 err = add_keys(dev, c, var - ent->size);
291 if (err && err != -EAGAIN)
292 return err;
293
294 usleep_range(3000, 5000);
295 } while (err);
e126ba97
EC
296 } else if (var < ent->size) {
297 remove_keys(dev, c, ent->size - var);
298 }
299
300 return count;
301}
302
303static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
304 loff_t *pos)
305{
306 struct mlx5_cache_ent *ent = filp->private_data;
307 char lbuf[20];
308 int err;
309
e126ba97
EC
310 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
311 if (err < 0)
312 return err;
313
60e6627f 314 return simple_read_from_buffer(buf, count, pos, lbuf, err);
e126ba97
EC
315}
316
317static const struct file_operations size_fops = {
318 .owner = THIS_MODULE,
319 .open = simple_open,
320 .write = size_write,
321 .read = size_read,
322};
323
324static ssize_t limit_write(struct file *filp, const char __user *buf,
325 size_t count, loff_t *pos)
326{
327 struct mlx5_cache_ent *ent = filp->private_data;
328 struct mlx5_ib_dev *dev = ent->dev;
60e6627f 329 char lbuf[20] = {0};
e126ba97
EC
330 u32 var;
331 int err;
332 int c;
333
60e6627f
JH
334 count = min(count, sizeof(lbuf) - 1);
335 if (copy_from_user(lbuf, buf, count))
5e631a03 336 return -EFAULT;
e126ba97
EC
337
338 c = order2idx(dev, ent->order);
e126ba97
EC
339
340 if (sscanf(lbuf, "%u", &var) != 1)
341 return -EINVAL;
342
343 if (var > ent->size)
344 return -EINVAL;
345
346 ent->limit = var;
347
348 if (ent->cur < ent->limit) {
349 err = add_keys(dev, c, 2 * ent->limit - ent->cur);
350 if (err)
351 return err;
352 }
353
354 return count;
355}
356
357static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
358 loff_t *pos)
359{
360 struct mlx5_cache_ent *ent = filp->private_data;
361 char lbuf[20];
362 int err;
363
e126ba97
EC
364 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
365 if (err < 0)
366 return err;
367
60e6627f 368 return simple_read_from_buffer(buf, count, pos, lbuf, err);
e126ba97
EC
369}
370
371static const struct file_operations limit_fops = {
372 .owner = THIS_MODULE,
373 .open = simple_open,
374 .write = limit_write,
375 .read = limit_read,
376};
377
378static int someone_adding(struct mlx5_mr_cache *cache)
379{
380 int i;
381
382 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
383 if (cache->ent[i].cur < cache->ent[i].limit)
384 return 1;
385 }
386
387 return 0;
388}
389
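/*
 * Background worker that keeps a cache entry populated: it adds keys while
 * the entry is below 2 * limit and lazily shrinks it (see the garbage
 * collection comment below) once it grows beyond that.
 */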
390static void __cache_work_func(struct mlx5_cache_ent *ent)
391{
392 struct mlx5_ib_dev *dev = ent->dev;
393 struct mlx5_mr_cache *cache = &dev->cache;
394 int i = order2idx(dev, ent->order);
746b5583 395 int err;
e126ba97
EC
396
397 if (cache->stopped)
398 return;
399
400 ent = &dev->cache.ent[i];
746b5583
EC
401 if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
402 err = add_keys(dev, i, 1);
403 if (ent->cur < 2 * ent->limit) {
404 if (err == -EAGAIN) {
405 mlx5_ib_dbg(dev, "returned eagain, order %d\n",
406 i + 2);
407 queue_delayed_work(cache->wq, &ent->dwork,
408 msecs_to_jiffies(3));
409 } else if (err) {
410 mlx5_ib_warn(dev, "command failed order %d, err %d\n",
411 i + 2, err);
412 queue_delayed_work(cache->wq, &ent->dwork,
413 msecs_to_jiffies(1000));
414 } else {
415 queue_work(cache->wq, &ent->work);
416 }
417 }
e126ba97 418 } else if (ent->cur > 2 * ent->limit) {
 419 /*
 420 * The remove_keys() logic is performed as a garbage collection
 421 * task. Such a task is intended to run when no other active
 422 * processes are running.
 423 *
 424 * need_resched() returns TRUE if there are user tasks to be
 425 * activated in the near future.
 426 *
 427 * In that case, we don't execute remove_keys() and postpone
 428 * the garbage collection work, trying to run it again in the
 429 * next cycle, in order to free CPU resources for other tasks.
 430 */
431 if (!need_resched() && !someone_adding(cache) &&
746b5583 432 time_after(jiffies, cache->last_add + 300 * HZ)) {
e126ba97
EC
433 remove_keys(dev, i, 1);
434 if (ent->cur > ent->limit)
435 queue_work(cache->wq, &ent->work);
436 } else {
746b5583 437 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
e126ba97
EC
438 }
439 }
440}
441
442static void delayed_cache_work_func(struct work_struct *work)
443{
444 struct mlx5_cache_ent *ent;
445
446 ent = container_of(work, struct mlx5_cache_ent, dwork.work);
447 __cache_work_func(ent);
448}
449
450static void cache_work_func(struct work_struct *work)
451{
452 struct mlx5_cache_ent *ent;
453
454 ent = container_of(work, struct mlx5_cache_ent, work);
455 __cache_work_func(ent);
456}
457
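/*
 * Allocate an MR from a specific cache entry, replenishing the entry and
 * waiting for the asynchronous MKey creation to complete if it is empty.
 */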
458struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
459{
460 struct mlx5_mr_cache *cache = &dev->cache;
461 struct mlx5_cache_ent *ent;
462 struct mlx5_ib_mr *mr;
463 int err;
464
465 if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
466 mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
467 return NULL;
468 }
469
470 ent = &cache->ent[entry];
471 while (1) {
472 spin_lock_irq(&ent->lock);
473 if (list_empty(&ent->head)) {
474 spin_unlock_irq(&ent->lock);
475
476 err = add_keys(dev, entry, 1);
81713d37 477 if (err && err != -EAGAIN)
478 return ERR_PTR(err);
479
480 wait_for_completion(&ent->compl);
481 } else {
482 mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
483 list);
484 list_del(&mr->list);
485 ent->cur--;
486 spin_unlock_irq(&ent->lock);
487 if (ent->cur < ent->limit)
488 queue_work(cache->wq, &ent->work);
489 return mr;
490 }
491 }
492}
493
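/*
 * Allocate an MR of at least 'order' from the cache, falling back to the
 * next larger entries when the exact one is empty.  A miss is counted and
 * NULL is returned if nothing suitable is available.
 */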
494static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
495{
496 struct mlx5_mr_cache *cache = &dev->cache;
497 struct mlx5_ib_mr *mr = NULL;
498 struct mlx5_cache_ent *ent;
4c25b7a3 499 int last_umr_cache_entry;
e126ba97
EC
500 int c;
501 int i;
502
503 c = order2idx(dev, order);
8b7ff7f3 504 last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
4c25b7a3 505 if (c < 0 || c > last_umr_cache_entry) {
e126ba97
EC
506 mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
507 return NULL;
508 }
509
4c25b7a3 510 for (i = c; i <= last_umr_cache_entry; i++) {
e126ba97
EC
511 ent = &cache->ent[i];
512
513 mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
514
746b5583 515 spin_lock_irq(&ent->lock);
e126ba97
EC
516 if (!list_empty(&ent->head)) {
517 mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
518 list);
519 list_del(&mr->list);
520 ent->cur--;
746b5583 521 spin_unlock_irq(&ent->lock);
e126ba97
EC
522 if (ent->cur < ent->limit)
523 queue_work(cache->wq, &ent->work);
524 break;
525 }
746b5583 526 spin_unlock_irq(&ent->lock);
e126ba97
EC
527
528 queue_work(cache->wq, &ent->work);
e126ba97
EC
529 }
530
531 if (!mr)
532 cache->ent[c].miss++;
533
534 return mr;
535}
536
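/*
 * Return a cache-allocated MR to its entry's free list.  The MKey is first
 * invalidated with a UMR unreg WQE; if that UMR fails, the MKey is destroyed
 * with a direct destroy command instead of being put back in the cache
 * (see the commit subject above).
 */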
49780d42 537void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
e126ba97
EC
538{
539 struct mlx5_mr_cache *cache = &dev->cache;
540 struct mlx5_cache_ent *ent;
541 int shrink = 0;
542 int c;
543
544 if (!mr->allocated_from_cache)
545 return;
546
e126ba97 547 c = order2idx(dev, mr->order);
548 WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
549
550 if (unreg_umr(dev, mr)) {
551 mr->allocated_from_cache = false;
552 destroy_mkey(dev, mr);
553 ent = &cache->ent[c];
554 if (ent->cur < ent->limit)
555 queue_work(cache->wq, &ent->work);
556 return;
557 }
49780d42 558
e126ba97 559 ent = &cache->ent[c];
746b5583 560 spin_lock_irq(&ent->lock);
e126ba97
EC
561 list_add_tail(&mr->list, &ent->head);
562 ent->cur++;
563 if (ent->cur > 2 * ent->limit)
564 shrink = 1;
746b5583 565 spin_unlock_irq(&ent->lock);
e126ba97
EC
566
567 if (shrink)
568 queue_work(cache->wq, &ent->work);
569}
570
571static void clean_keys(struct mlx5_ib_dev *dev, int c)
572{
e126ba97
EC
573 struct mlx5_mr_cache *cache = &dev->cache;
574 struct mlx5_cache_ent *ent = &cache->ent[c];
65edd0e7 575 struct mlx5_ib_mr *tmp_mr;
e126ba97 576 struct mlx5_ib_mr *mr;
65edd0e7 577 LIST_HEAD(del_list);
e126ba97 578
3c461911 579 cancel_delayed_work(&ent->dwork);
e126ba97 580 while (1) {
746b5583 581 spin_lock_irq(&ent->lock);
e126ba97 582 if (list_empty(&ent->head)) {
746b5583 583 spin_unlock_irq(&ent->lock);
65edd0e7 584 break;
e126ba97
EC
585 }
586 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
65edd0e7 587 list_move(&mr->list, &del_list);
e126ba97
EC
588 ent->cur--;
589 ent->size--;
746b5583 590 spin_unlock_irq(&ent->lock);
65edd0e7
DJ
591 mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
592 }
593
594#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
ccffa545 595 synchronize_srcu(&dev->mr_srcu);
65edd0e7
DJ
596#endif
597
598 list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
599 list_del(&mr->list);
600 kfree(mr);
e126ba97
EC
601 }
602}
603
12cc1a02
LR
604static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
605{
6a4d00be 606 if (!mlx5_debugfs_root || dev->is_rep)
12cc1a02
LR
607 return;
608
609 debugfs_remove_recursive(dev->cache.root);
610 dev->cache.root = NULL;
611}
612
73eb8f03 613static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
e126ba97
EC
614{
615 struct mlx5_mr_cache *cache = &dev->cache;
616 struct mlx5_cache_ent *ent;
73eb8f03 617 struct dentry *dir;
e126ba97
EC
618 int i;
619
6a4d00be 620 if (!mlx5_debugfs_root || dev->is_rep)
73eb8f03 621 return;
e126ba97 622
9603b61d 623 cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
e126ba97
EC
624
625 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
626 ent = &cache->ent[i];
627 sprintf(ent->name, "%d", ent->order);
73eb8f03
GKH
628 dir = debugfs_create_dir(ent->name, cache->root);
629 debugfs_create_file("size", 0600, dir, ent, &size_fops);
630 debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
631 debugfs_create_u32("cur", 0400, dir, &ent->cur);
632 debugfs_create_u32("miss", 0600, dir, &ent->miss);
e126ba97 633 }
e126ba97
EC
634}
635
e99e88a9 636static void delay_time_func(struct timer_list *t)
746b5583 637{
e99e88a9 638 struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
746b5583
EC
639
640 dev->fill_delay = 0;
641}
642
e126ba97
EC
643int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
644{
645 struct mlx5_mr_cache *cache = &dev->cache;
646 struct mlx5_cache_ent *ent;
e126ba97
EC
647 int i;
648
6bc1a656 649 mutex_init(&dev->slow_path_mutex);
3c856c82 650 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
e126ba97
EC
651 if (!cache->wq) {
652 mlx5_ib_warn(dev, "failed to create work queue\n");
653 return -ENOMEM;
654 }
655
e355477e 656 mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
e99e88a9 657 timer_setup(&dev->delay_timer, delay_time_func, 0);
e126ba97 658 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
e126ba97
EC
659 ent = &cache->ent[i];
660 INIT_LIST_HEAD(&ent->head);
661 spin_lock_init(&ent->lock);
662 ent->order = i + 2;
663 ent->dev = dev;
49780d42 664 ent->limit = 0;
e126ba97 665
49780d42 666 init_completion(&ent->compl);
e126ba97
EC
667 INIT_WORK(&ent->work, cache_work_func);
668 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
49780d42 669
8b7ff7f3 670 if (i > MR_CACHE_LAST_STD_ENTRY) {
81713d37 671 mlx5_odp_init_mr_cache_entry(ent);
49780d42 672 continue;
81713d37 673 }
49780d42 674
8b7ff7f3 675 if (ent->order > mr_cache_max_order(dev))
49780d42
AK
676 continue;
677
678 ent->page = PAGE_SHIFT;
679 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
680 MLX5_IB_UMR_OCTOWORD;
681 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
682 if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
6a4d00be 683 !dev->is_rep &&
49780d42
AK
684 mlx5_core_is_pf(dev->mdev))
685 ent->limit = dev->mdev->profile->mr_cache[i].limit;
686 else
687 ent->limit = 0;
013c2403 688 queue_work(cache->wq, &ent->work);
e126ba97
EC
689 }
690
73eb8f03 691 mlx5_mr_cache_debugfs_init(dev);
12cc1a02 692
e126ba97
EC
693 return 0;
694}
695
696int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
697{
698 int i;
699
32927e28
MB
700 if (!dev->cache.wq)
701 return 0;
702
e126ba97 703 dev->cache.stopped = 1;
3c461911 704 flush_workqueue(dev->cache.wq);
e126ba97
EC
705
706 mlx5_mr_cache_debugfs_cleanup(dev);
e355477e 707 mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
e126ba97
EC
708
709 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
710 clean_keys(dev, i);
711
3c461911 712 destroy_workqueue(dev->cache.wq);
746b5583 713 del_timer_sync(&dev->delay_timer);
3c461911 714
e126ba97
EC
715 return 0;
716}
717
718struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
719{
720 struct mlx5_ib_dev *dev = to_mdev(pd->device);
ec22eb53 721 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
9603b61d 722 struct mlx5_core_dev *mdev = dev->mdev;
e126ba97 723 struct mlx5_ib_mr *mr;
ec22eb53
SM
724 void *mkc;
725 u32 *in;
e126ba97
EC
726 int err;
727
728 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
729 if (!mr)
730 return ERR_PTR(-ENOMEM);
731
ec22eb53 732 in = kzalloc(inlen, GFP_KERNEL);
e126ba97
EC
733 if (!in) {
734 err = -ENOMEM;
735 goto err_free;
736 }
737
ec22eb53
SM
738 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
739
cdbd0d2b 740 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
ec22eb53
SM
741 MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
742 MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
743 MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
744 MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
745 MLX5_SET(mkc, mkc, lr, 1);
e126ba97 746
ec22eb53
SM
747 MLX5_SET(mkc, mkc, length64, 1);
748 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
749 MLX5_SET(mkc, mkc, qpn, 0xffffff);
750 MLX5_SET64(mkc, mkc, start_addr, 0);
751
752 err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
e126ba97
EC
753 if (err)
754 goto err_in;
755
756 kfree(in);
aa8e08d2 757 mr->mmkey.type = MLX5_MKEY_MR;
a606b0f6
MB
758 mr->ibmr.lkey = mr->mmkey.key;
759 mr->ibmr.rkey = mr->mmkey.key;
e126ba97
EC
760 mr->umem = NULL;
761
762 return &mr->ibmr;
763
764err_in:
765 kfree(in);
766
767err_free:
768 kfree(mr);
769
770 return ERR_PTR(err);
771}
772
7b4cdaae 773static int get_octo_len(u64 addr, u64 len, int page_shift)
e126ba97 774{
7b4cdaae 775 u64 page_size = 1ULL << page_shift;
e126ba97
EC
776 u64 offset;
777 int npages;
778
779 offset = addr & (page_size - 1);
7b4cdaae 780 npages = ALIGN(len + offset, page_size) >> page_shift;
e126ba97
EC
781 return (npages + 1) / 2;
782}
783
8b7ff7f3 784static int mr_cache_max_order(struct mlx5_ib_dev *dev)
e126ba97 785{
7d0cc6ed 786 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
8b7ff7f3 787 return MR_CACHE_LAST_STD_ENTRY + 2;
4c25b7a3
MD
788 return MLX5_MAX_UMR_SHIFT;
789}
790
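/*
 * Pin the user memory with ib_umem_get() and report the page count,
 * effective page shift, number of pages at that shift (ncont) and the
 * order used to size the MKey built on top of it.
 */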
791static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
792 u64 start, u64 length, int access_flags,
793 struct ib_umem **umem, int *npages, int *page_shift,
794 int *ncont, int *order)
395a8e4c 795{
b4bd701a 796 struct ib_umem *u;
14ab8896
AB
797 int err;
798
b4bd701a
LR
799 *umem = NULL;
800
b0ea0fa5 801 u = ib_umem_get(udata, start, length, access_flags, 0);
b4bd701a 802 err = PTR_ERR_OR_ZERO(u);
f3f134f5 803 if (err) {
b4bd701a 804 mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
14ab8896 805 return err;
395a8e4c
NO
806 }
807
b4bd701a 808 mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
762f899a 809 page_shift, ncont, order);
395a8e4c
NO
810 if (!*npages) {
811 mlx5_ib_warn(dev, "avoid zero region\n");
b4bd701a 812 ib_umem_release(u);
14ab8896 813 return -EINVAL;
395a8e4c
NO
814 }
815
b4bd701a
LR
816 *umem = u;
817
395a8e4c
NO
818 mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
819 *npages, *ncont, *order, *page_shift);
820
14ab8896 821 return 0;
395a8e4c
NO
822}
823
add08d76 824static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
e126ba97 825{
add08d76
CH
826 struct mlx5_ib_umr_context *context =
827 container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
e126ba97 828
add08d76
CH
829 context->status = wc->status;
830 complete(&context->done);
831}
e126ba97 832
add08d76
CH
833static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
834{
835 context->cqe.done = mlx5_ib_umr_done;
836 context->status = -1;
837 init_completion(&context->done);
e126ba97
EC
838}
839
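/*
 * Post a UMR work request on the dedicated UMR QP and sleep until its
 * completion arrives; access to the QP is serialized by umrc->sem.
 */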
840static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
841 struct mlx5_umr_wr *umrwr)
842{
843 struct umr_common *umrc = &dev->umrc;
d34ac5cd 844 const struct ib_send_wr *bad;
d5ea2df9
BJ
845 int err;
846 struct mlx5_ib_umr_context umr_context;
847
848 mlx5_ib_init_umr_context(&umr_context);
849 umrwr->wr.wr_cqe = &umr_context.cqe;
850
851 down(&umrc->sem);
852 err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
853 if (err) {
854 mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
855 } else {
856 wait_for_completion(&umr_context.done);
857 if (umr_context.status != IB_WC_SUCCESS) {
858 mlx5_ib_warn(dev, "reg umr failed (%u)\n",
859 umr_context.status);
860 err = -EFAULT;
861 }
862 }
863 up(&umrc->sem);
864 return err;
865}
866
ff740aef
IL
867static struct mlx5_ib_mr *alloc_mr_from_cache(
868 struct ib_pd *pd, struct ib_umem *umem,
e126ba97
EC
869 u64 virt_addr, u64 len, int npages,
870 int page_shift, int order, int access_flags)
871{
872 struct mlx5_ib_dev *dev = to_mdev(pd->device);
e126ba97 873 struct mlx5_ib_mr *mr;
096f7e72 874 int err = 0;
e126ba97
EC
875 int i;
876
746b5583 877 for (i = 0; i < 1; i++) {
e126ba97
EC
878 mr = alloc_cached_mr(dev, order);
879 if (mr)
880 break;
881
882 err = add_keys(dev, order2idx(dev, order), 1);
746b5583
EC
883 if (err && err != -EAGAIN) {
884 mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
e126ba97
EC
885 break;
886 }
887 }
888
889 if (!mr)
890 return ERR_PTR(-EAGAIN);
891
7d0cc6ed
AK
892 mr->ibmr.pd = pd;
893 mr->umem = umem;
894 mr->access_flags = access_flags;
895 mr->desc_size = sizeof(struct mlx5_mtt);
a606b0f6
MB
896 mr->mmkey.iova = virt_addr;
897 mr->mmkey.size = len;
898 mr->mmkey.pd = to_mpd(pd)->pdn;
b475598a 899
e126ba97 900 return mr;
e126ba97
EC
901}
902
7d0cc6ed
AK
903static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
904 void *xlt, int page_shift, size_t size,
905 int flags)
832a6b06
HE
906{
907 struct mlx5_ib_dev *dev = mr->dev;
832a6b06 908 struct ib_umem *umem = mr->umem;
c8d75a98 909
81713d37 910 if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
c8d75a98
MD
911 if (!umr_can_use_indirect_mkey(dev))
912 return -EPERM;
81713d37
AK
913 mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
914 return npages;
915 }
7d0cc6ed
AK
916
917 npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
918
919 if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
920 __mlx5_ib_populate_pas(dev, umem, page_shift,
921 idx, npages, xlt,
922 MLX5_IB_MTT_PRESENT);
923 /* Clear padding after the pages
924 * brought from the umem.
925 */
926 memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
927 size - npages * sizeof(struct mlx5_mtt));
928 }
929
930 return npages;
931}
932
933#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
934 MLX5_UMR_MTT_ALIGNMENT)
935#define MLX5_SPARE_UMR_CHUNK 0x10000
936
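/*
 * Update a window of the MKey's translation table (XLT) with UMR WQEs.
 * The descriptors are staged in a DMA-mapped buffer and pushed in chunks
 * of at most MLX5_MAX_UMR_CHUNK bytes, falling back to a smaller spare
 * allocation or the preallocated emergency page when allocation fails.
 */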
937int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
938 int page_shift, int flags)
939{
940 struct mlx5_ib_dev *dev = mr->dev;
9b0c289e 941 struct device *ddev = dev->ib_dev.dev.parent;
832a6b06 942 int size;
7d0cc6ed 943 void *xlt;
832a6b06 944 dma_addr_t dma;
e622f2f4 945 struct mlx5_umr_wr wr;
832a6b06
HE
946 struct ib_sge sg;
947 int err = 0;
81713d37
AK
948 int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
949 ? sizeof(struct mlx5_klm)
950 : sizeof(struct mlx5_mtt);
7d0cc6ed
AK
951 const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
952 const int page_mask = page_align - 1;
832a6b06
HE
953 size_t pages_mapped = 0;
954 size_t pages_to_map = 0;
955 size_t pages_iter = 0;
7d0cc6ed 956 gfp_t gfp;
c44ef998 957 bool use_emergency_page = false;
832a6b06 958
c8d75a98
MD
959 if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
960 !umr_can_use_indirect_mkey(dev))
961 return -EPERM;
832a6b06
HE
962
963 /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
7d0cc6ed
AK
964 * so we need to align the offset and length accordingly
965 */
966 if (idx & page_mask) {
967 npages += idx & page_mask;
968 idx &= ~page_mask;
832a6b06
HE
969 }
970
7d0cc6ed
AK
971 gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
972 gfp |= __GFP_ZERO | __GFP_NOWARN;
832a6b06 973
7d0cc6ed
AK
974 pages_to_map = ALIGN(npages, page_align);
975 size = desc_size * pages_to_map;
976 size = min_t(int, size, MLX5_MAX_UMR_CHUNK);
832a6b06 977
7d0cc6ed
AK
978 xlt = (void *)__get_free_pages(gfp, get_order(size));
979 if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
 980 mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
981 size, get_order(size), MLX5_SPARE_UMR_CHUNK);
982
983 size = MLX5_SPARE_UMR_CHUNK;
984 xlt = (void *)__get_free_pages(gfp, get_order(size));
832a6b06 985 }
7d0cc6ed
AK
986
987 if (!xlt) {
7d0cc6ed 988 mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
c44ef998 989 xlt = (void *)mlx5_ib_get_xlt_emergency_page();
7d0cc6ed 990 size = PAGE_SIZE;
7d0cc6ed 991 memset(xlt, 0, size);
c44ef998 992 use_emergency_page = true;
7d0cc6ed
AK
993 }
994 pages_iter = size / desc_size;
995 dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
832a6b06 996 if (dma_mapping_error(ddev, dma)) {
7d0cc6ed 997 mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
832a6b06 998 err = -ENOMEM;
7d0cc6ed 999 goto free_xlt;
832a6b06
HE
1000 }
1001
7d0cc6ed
AK
1002 sg.addr = dma;
1003 sg.lkey = dev->umrc.pd->local_dma_lkey;
1004
1005 memset(&wr, 0, sizeof(wr));
1006 wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
1007 if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
1008 wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1009 wr.wr.sg_list = &sg;
1010 wr.wr.num_sge = 1;
1011 wr.wr.opcode = MLX5_IB_WR_UMR;
1012
1013 wr.pd = mr->ibmr.pd;
1014 wr.mkey = mr->mmkey.key;
1015 wr.length = mr->mmkey.size;
1016 wr.virt_addr = mr->mmkey.iova;
1017 wr.access_flags = mr->access_flags;
1018 wr.page_shift = page_shift;
1019
832a6b06
HE
1020 for (pages_mapped = 0;
1021 pages_mapped < pages_to_map && !err;
7d0cc6ed 1022 pages_mapped += pages_iter, idx += pages_iter) {
438b228e 1023 npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
832a6b06 1024 dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
438b228e 1025 npages = populate_xlt(mr, idx, npages, xlt,
7d0cc6ed 1026 page_shift, size, flags);
832a6b06
HE
1027
1028 dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
1029
7d0cc6ed
AK
1030 sg.length = ALIGN(npages * desc_size,
1031 MLX5_UMR_MTT_ALIGNMENT);
1032
1033 if (pages_mapped + pages_iter >= pages_to_map) {
1034 if (flags & MLX5_IB_UPD_XLT_ENABLE)
1035 wr.wr.send_flags |=
1036 MLX5_IB_SEND_UMR_ENABLE_MR |
1037 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
1038 MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
1039 if (flags & MLX5_IB_UPD_XLT_PD ||
1040 flags & MLX5_IB_UPD_XLT_ACCESS)
1041 wr.wr.send_flags |=
1042 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1043 if (flags & MLX5_IB_UPD_XLT_ADDR)
1044 wr.wr.send_flags |=
1045 MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
1046 }
832a6b06 1047
7d0cc6ed 1048 wr.offset = idx * desc_size;
31616255 1049 wr.xlt_size = sg.length;
832a6b06 1050
d5ea2df9 1051 err = mlx5_ib_post_send_wait(dev, &wr);
832a6b06
HE
1052 }
1053 dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
1054
7d0cc6ed 1055free_xlt:
c44ef998
IL
1056 if (use_emergency_page)
1057 mlx5_ib_put_xlt_emergency_page();
832a6b06 1058 else
7d0cc6ed 1059 free_pages((unsigned long)xlt, get_order(size));
832a6b06
HE
1060
1061 return err;
1062}
832a6b06 1063
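/*
 * Slow-path registration: build the full create_mkey command (optionally
 * with the page list inlined) and execute it synchronously in firmware,
 * bypassing the UMR/cache path.
 */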
1064/*
1065 * If ibmr is NULL it will be allocated by reg_create.
1066 * Else, the given ibmr will be used.
1067 */
1068static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
1069 u64 virt_addr, u64 length,
1070 struct ib_umem *umem, int npages,
ff740aef
IL
1071 int page_shift, int access_flags,
1072 bool populate)
e126ba97
EC
1073{
1074 struct mlx5_ib_dev *dev = to_mdev(pd->device);
e126ba97 1075 struct mlx5_ib_mr *mr;
ec22eb53
SM
1076 __be64 *pas;
1077 void *mkc;
e126ba97 1078 int inlen;
ec22eb53 1079 u32 *in;
e126ba97 1080 int err;
938fe83c 1081 bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
e126ba97 1082
395a8e4c 1083 mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
e126ba97
EC
1084 if (!mr)
1085 return ERR_PTR(-ENOMEM);
1086
ff740aef
IL
1087 mr->ibmr.pd = pd;
1088 mr->access_flags = access_flags;
1089
1090 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1091 if (populate)
1092 inlen += sizeof(*pas) * roundup(npages, 2);
1b9a07ee 1093 in = kvzalloc(inlen, GFP_KERNEL);
e126ba97
EC
1094 if (!in) {
1095 err = -ENOMEM;
1096 goto err_1;
1097 }
ec22eb53 1098 pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
ff740aef 1099 if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
c438fde1
AK
1100 mlx5_ib_populate_pas(dev, umem, page_shift, pas,
1101 pg_cap ? MLX5_IB_MTT_PRESENT : 0);
e126ba97 1102
ec22eb53 1103 /* The pg_access bit allows setting the access flags
cc149f75 1104 * in the page list submitted with the command. */
ec22eb53
SM
1105 MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
1106
1107 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
ff740aef 1108 MLX5_SET(mkc, mkc, free, !populate);
cdbd0d2b 1109 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
ec22eb53
SM
1110 MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
1111 MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
1112 MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
1113 MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
1114 MLX5_SET(mkc, mkc, lr, 1);
8b7ff7f3 1115 MLX5_SET(mkc, mkc, umr_en, 1);
ec22eb53
SM
1116
1117 MLX5_SET64(mkc, mkc, start_addr, virt_addr);
1118 MLX5_SET64(mkc, mkc, len, length);
1119 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1120 MLX5_SET(mkc, mkc, bsf_octword_size, 0);
1121 MLX5_SET(mkc, mkc, translations_octword_size,
7b4cdaae 1122 get_octo_len(virt_addr, length, page_shift));
ec22eb53
SM
1123 MLX5_SET(mkc, mkc, log_page_size, page_shift);
1124 MLX5_SET(mkc, mkc, qpn, 0xffffff);
ff740aef
IL
1125 if (populate) {
1126 MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
7b4cdaae 1127 get_octo_len(virt_addr, length, page_shift));
ff740aef 1128 }
ec22eb53
SM
1129
1130 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
e126ba97
EC
1131 if (err) {
1132 mlx5_ib_warn(dev, "create mkey failed\n");
1133 goto err_2;
1134 }
aa8e08d2 1135 mr->mmkey.type = MLX5_MKEY_MR;
49780d42 1136 mr->desc_size = sizeof(struct mlx5_mtt);
7eae20db 1137 mr->dev = dev;
479163f4 1138 kvfree(in);
e126ba97 1139
a606b0f6 1140 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
e126ba97
EC
1141
1142 return mr;
1143
1144err_2:
479163f4 1145 kvfree(in);
e126ba97
EC
1146
1147err_1:
395a8e4c
NO
1148 if (!ibmr)
1149 kfree(mr);
e126ba97
EC
1150
1151 return ERR_PTR(err);
1152}
1153
ac2f7e62 1154static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
395a8e4c
NO
1155 int npages, u64 length, int access_flags)
1156{
1157 mr->npages = npages;
1158 atomic_add(npages, &dev->mdev->priv.reg_pages);
a606b0f6
MB
1159 mr->ibmr.lkey = mr->mmkey.key;
1160 mr->ibmr.rkey = mr->mmkey.key;
395a8e4c 1161 mr->ibmr.length = length;
56e11d62 1162 mr->access_flags = access_flags;
395a8e4c
NO
1163}
1164
3b113a1e
AL
1165static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
1166 u64 length, int acc, int mode)
6c29f57e
AL
1167{
1168 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1169 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1170 struct mlx5_core_dev *mdev = dev->mdev;
1171 struct mlx5_ib_mr *mr;
1172 void *mkc;
1173 u32 *in;
1174 int err;
1175
1176 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1177 if (!mr)
1178 return ERR_PTR(-ENOMEM);
1179
1180 in = kzalloc(inlen, GFP_KERNEL);
1181 if (!in) {
1182 err = -ENOMEM;
1183 goto err_free;
1184 }
1185
1186 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1187
3b113a1e
AL
1188 MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
1189 MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
6c29f57e
AL
1190 MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
1191 MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
1192 MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
1193 MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
1194 MLX5_SET(mkc, mkc, lr, 1);
1195
1196 MLX5_SET64(mkc, mkc, len, length);
1197 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1198 MLX5_SET(mkc, mkc, qpn, 0xffffff);
3b113a1e 1199 MLX5_SET64(mkc, mkc, start_addr, start_addr);
6c29f57e
AL
1200
1201 err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
1202 if (err)
1203 goto err_in;
1204
1205 kfree(in);
1206
1207 mr->umem = NULL;
ac2f7e62 1208 set_mr_fields(dev, mr, 0, length, acc);
6c29f57e
AL
1209
1210 return &mr->ibmr;
1211
1212err_in:
1213 kfree(in);
1214
1215err_free:
1216 kfree(mr);
1217
1218 return ERR_PTR(err);
1219}
1220
813e90b1
MS
1221int mlx5_ib_advise_mr(struct ib_pd *pd,
1222 enum ib_uverbs_advise_mr_advice advice,
1223 u32 flags,
1224 struct ib_sge *sg_list,
1225 u32 num_sge,
1226 struct uverbs_attr_bundle *attrs)
1227{
1228 if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
1229 advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE)
1230 return -EOPNOTSUPP;
1231
1232 return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
1233 sg_list, num_sge);
1234}
1235
6c29f57e
AL
1236struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
1237 struct ib_dm_mr_attr *attr,
1238 struct uverbs_attr_bundle *attrs)
1239{
1240 struct mlx5_ib_dm *mdm = to_mdm(dm);
3b113a1e
AL
1241 struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
1242 u64 start_addr = mdm->dev_addr + attr->offset;
1243 int mode;
1244
1245 switch (mdm->type) {
1246 case MLX5_IB_UAPI_DM_TYPE_MEMIC:
1247 if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
1248 return ERR_PTR(-EINVAL);
1249
1250 mode = MLX5_MKC_ACCESS_MODE_MEMIC;
1251 start_addr -= pci_resource_start(dev->pdev, 0);
1252 break;
25c13324
AL
1253 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
1254 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
1255 if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
1256 return ERR_PTR(-EINVAL);
1257
1258 mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
1259 break;
3b113a1e 1260 default:
6c29f57e 1261 return ERR_PTR(-EINVAL);
3b113a1e 1262 }
6c29f57e 1263
3b113a1e
AL
1264 return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
1265 attr->access_flags, mode);
6c29f57e
AL
1266}
1267
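/*
 * User-space MR registration entry point: implicit ODP MRs are handled
 * separately; otherwise the umem is pinned and the MKey is taken from the
 * UMR cache when possible, falling back to reg_create() under
 * slow_path_mutex.
 */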
1268struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1269 u64 virt_addr, int access_flags,
1270 struct ib_udata *udata)
1271{
1272 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1273 struct mlx5_ib_mr *mr = NULL;
c8d75a98 1274 bool populate_mtts = false;
e126ba97
EC
1275 struct ib_umem *umem;
1276 int page_shift;
1277 int npages;
1278 int ncont;
1279 int order;
1280 int err;
1281
1b19b951 1282 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
ea30f013 1283 return ERR_PTR(-EOPNOTSUPP);
1b19b951 1284
900a6d79
EC
1285 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1286 start, virt_addr, length, access_flags);
81713d37 1287
13859d5d
LR
1288 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
1289 length == U64_MAX) {
81713d37
AK
1290 if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
1291 !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1292 return ERR_PTR(-EINVAL);
1293
b0ea0fa5 1294 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
4289861d
LR
1295 if (IS_ERR(mr))
1296 return ERR_CAST(mr);
81713d37
AK
1297 return &mr->ibmr;
1298 }
81713d37 1299
b0ea0fa5
JG
1300 err = mr_umem_get(dev, udata, start, length, access_flags, &umem,
1301 &npages, &page_shift, &ncont, &order);
e126ba97 1302
ff740aef 1303 if (err < 0)
14ab8896 1304 return ERR_PTR(err);
e126ba97 1305
c8d75a98 1306 if (use_umr(dev, order)) {
ff740aef
IL
1307 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
1308 page_shift, order, access_flags);
e126ba97 1309 if (PTR_ERR(mr) == -EAGAIN) {
d23a8baf 1310 mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
e126ba97
EC
1311 mr = NULL;
1312 }
c8d75a98 1313 populate_mtts = false;
ff740aef
IL
1314 } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
1315 if (access_flags & IB_ACCESS_ON_DEMAND) {
1316 err = -EINVAL;
d23a8baf 1317 pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
ff740aef
IL
1318 goto error;
1319 }
c8d75a98 1320 populate_mtts = true;
e126ba97
EC
1321 }
1322
6bc1a656 1323 if (!mr) {
c8d75a98
MD
1324 if (!umr_can_modify_entity_size(dev))
1325 populate_mtts = true;
6bc1a656 1326 mutex_lock(&dev->slow_path_mutex);
395a8e4c 1327 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
c8d75a98 1328 page_shift, access_flags, populate_mtts);
6bc1a656
ML
1329 mutex_unlock(&dev->slow_path_mutex);
1330 }
e126ba97
EC
1331
1332 if (IS_ERR(mr)) {
1333 err = PTR_ERR(mr);
1334 goto error;
1335 }
1336
a606b0f6 1337 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
e126ba97
EC
1338
1339 mr->umem = umem;
ac2f7e62 1340 set_mr_fields(dev, mr, npages, length, access_flags);
e126ba97 1341
395a8e4c 1342 update_odp_mr(mr);
b4cfe447 1343
c8d75a98 1344 if (!populate_mtts) {
ff740aef
IL
1345 int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
1346
1347 if (access_flags & IB_ACCESS_ON_DEMAND)
1348 update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;
e126ba97 1349
ff740aef
IL
1350 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
1351 update_xlt_flags);
fbcd4983 1352
ff740aef 1353 if (err) {
fbcd4983 1354 dereg_mr(dev, mr);
ff740aef
IL
1355 return ERR_PTR(err);
1356 }
1357 }
1358
a6bc3875 1359 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
13859d5d 1360 mr->live = 1;
a6bc3875
MS
1361 atomic_set(&mr->num_pending_prefetch, 0);
1362 }
13859d5d 1363
ff740aef 1364 return &mr->ibmr;
e126ba97
EC
1365error:
1366 ib_umem_release(umem);
1367 return ERR_PTR(err);
1368}
1369
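/*
 * Invalidate a cached MKey with a UMR WQE (MLX5_IB_SEND_UMR_DISABLE_MR)
 * so it can be recycled.  Skipped when the device is in internal error
 * state.
 */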
1370static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1371{
89ea94a7 1372 struct mlx5_core_dev *mdev = dev->mdev;
0025b0bd 1373 struct mlx5_umr_wr umrwr = {};
e126ba97 1374
1375 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1376 return 0;
1377
6a053953 1378 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR;
1379 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1380 umrwr.mkey = mr->mmkey.key;
6a053953 1381 umrwr.ignore_free_state = 1;
e126ba97 1382
d5ea2df9 1383 return mlx5_ib_post_send_wait(dev, &umrwr);
e126ba97
EC
1384}
1385
7d0cc6ed 1386static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
56e11d62
NO
1387 int access_flags, int flags)
1388{
1389 struct mlx5_ib_dev *dev = to_mdev(pd->device);
56e11d62 1390 struct mlx5_umr_wr umrwr = {};
56e11d62
NO
1391 int err;
1392
56e11d62
NO
1393 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1394
7d0cc6ed
AK
1395 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1396 umrwr.mkey = mr->mmkey.key;
56e11d62 1397
31616255 1398 if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
56e11d62 1399 umrwr.pd = pd;
56e11d62 1400 umrwr.access_flags = access_flags;
31616255 1401 umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
56e11d62
NO
1402 }
1403
d5ea2df9 1404 err = mlx5_ib_post_send_wait(dev, &umrwr);
56e11d62 1405
56e11d62
NO
1406 return err;
1407}
1408
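/*
 * Re-register an existing user MR.  When the new translation still fits
 * the current MKey the update is done in place with UMR (rereg_umr() or
 * mlx5_ib_update_xlt()); otherwise the MKey is torn down and recreated
 * via reg_create().
 */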
1409int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1410 u64 length, u64 virt_addr, int new_access_flags,
1411 struct ib_pd *new_pd, struct ib_udata *udata)
1412{
1413 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
1414 struct mlx5_ib_mr *mr = to_mmr(ib_mr);
1415 struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
1416 int access_flags = flags & IB_MR_REREG_ACCESS ?
1417 new_access_flags :
1418 mr->access_flags;
56e11d62 1419 int page_shift = 0;
7d0cc6ed 1420 int upd_flags = 0;
56e11d62
NO
1421 int npages = 0;
1422 int ncont = 0;
1423 int order = 0;
b4bd701a 1424 u64 addr, len;
56e11d62
NO
1425 int err;
1426
1427 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1428 start, virt_addr, length, access_flags);
1429
7d0cc6ed
AK
1430 atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
1431
b4bd701a
LR
1432 if (!mr->umem)
1433 return -EINVAL;
1434
1435 if (flags & IB_MR_REREG_TRANS) {
1436 addr = virt_addr;
1437 len = length;
1438 } else {
1439 addr = mr->umem->address;
1440 len = mr->umem->length;
1441 }
1442
56e11d62
NO
1443 if (flags != IB_MR_REREG_PD) {
1444 /*
1445 * Replace umem. This needs to be done whether or not UMR is
1446 * used.
1447 */
1448 flags |= IB_MR_REREG_TRANS;
1449 ib_umem_release(mr->umem);
b4bd701a 1450 mr->umem = NULL;
b0ea0fa5
JG
1451 err = mr_umem_get(dev, udata, addr, len, access_flags,
1452 &mr->umem, &npages, &page_shift, &ncont,
1453 &order);
4638a3b2
LR
1454 if (err)
1455 goto err;
56e11d62
NO
1456 }
1457
1458 if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
1459 /*
1460 * UMR can't be used - MKey needs to be replaced.
1461 */
eeea6953 1462 if (mr->allocated_from_cache)
56e11d62 1463 err = unreg_umr(dev, mr);
eeea6953 1464 else
56e11d62 1465 err = destroy_mkey(dev, mr);
56e11d62 1466 if (err)
4638a3b2 1467 goto err;
56e11d62
NO
1468
1469 mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
ff740aef 1470 page_shift, access_flags, true);
56e11d62 1471
4638a3b2
LR
1472 if (IS_ERR(mr)) {
1473 err = PTR_ERR(mr);
1474 mr = to_mmr(ib_mr);
1475 goto err;
1476 }
56e11d62 1477
8b7ff7f3 1478 mr->allocated_from_cache = 0;
13859d5d
LR
1479 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1480 mr->live = 1;
56e11d62
NO
1481 } else {
1482 /*
1483 * Send a UMR WQE
1484 */
7d0cc6ed
AK
1485 mr->ibmr.pd = pd;
1486 mr->access_flags = access_flags;
1487 mr->mmkey.iova = addr;
1488 mr->mmkey.size = len;
1489 mr->mmkey.pd = to_mpd(pd)->pdn;
1490
1491 if (flags & IB_MR_REREG_TRANS) {
1492 upd_flags = MLX5_IB_UPD_XLT_ADDR;
1493 if (flags & IB_MR_REREG_PD)
1494 upd_flags |= MLX5_IB_UPD_XLT_PD;
1495 if (flags & IB_MR_REREG_ACCESS)
1496 upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
1497 err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
1498 upd_flags);
1499 } else {
1500 err = rereg_umr(pd, mr, access_flags, flags);
1501 }
1502
4638a3b2
LR
1503 if (err)
1504 goto err;
56e11d62
NO
1505 }
1506
ac2f7e62 1507 set_mr_fields(dev, mr, npages, len, access_flags);
56e11d62 1508
56e11d62 1509 update_odp_mr(mr);
56e11d62 1510 return 0;
4638a3b2
LR
1511
1512err:
836a0fbb
LR
1513 ib_umem_release(mr->umem);
1514 mr->umem = NULL;
1515
4638a3b2
LR
1516 clean_mr(dev, mr);
1517 return err;
56e11d62
NO
1518}
1519
8a187ee5
SG
1520static int
1521mlx5_alloc_priv_descs(struct ib_device *device,
1522 struct mlx5_ib_mr *mr,
1523 int ndescs,
1524 int desc_size)
1525{
1526 int size = ndescs * desc_size;
1527 int add_size;
1528 int ret;
1529
1530 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1531
1532 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1533 if (!mr->descs_alloc)
1534 return -ENOMEM;
1535
1536 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1537
9b0c289e 1538 mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
8a187ee5 1539 size, DMA_TO_DEVICE);
9b0c289e 1540 if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
8a187ee5
SG
1541 ret = -ENOMEM;
1542 goto err;
1543 }
1544
1545 return 0;
1546err:
1547 kfree(mr->descs_alloc);
1548
1549 return ret;
1550}
1551
1552static void
1553mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1554{
1555 if (mr->descs) {
1556 struct ib_device *device = mr->ibmr.device;
1557 int size = mr->max_descs * mr->desc_size;
1558
9b0c289e 1559 dma_unmap_single(device->dev.parent, mr->desc_map,
8a187ee5
SG
1560 size, DMA_TO_DEVICE);
1561 kfree(mr->descs_alloc);
1562 mr->descs = NULL;
1563 }
1564}
1565
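/*
 * clean_mr() releases the signature PSVs and private descriptors and, for
 * non-cached MRs, destroys the MKey; dereg_mr() below additionally tears
 * down ODP state, releases the umem and returns cached MKeys to the pool.
 */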
eeea6953 1566static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
e126ba97 1567{
8b7ff7f3 1568 int allocated_from_cache = mr->allocated_from_cache;
e126ba97 1569
8b91ffc1
SG
1570 if (mr->sig) {
1571 if (mlx5_core_destroy_psv(dev->mdev,
1572 mr->sig->psv_memory.psv_idx))
1573 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1574 mr->sig->psv_memory.psv_idx);
1575 if (mlx5_core_destroy_psv(dev->mdev,
1576 mr->sig->psv_wire.psv_idx))
1577 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1578 mr->sig->psv_wire.psv_idx);
1579 kfree(mr->sig);
1580 mr->sig = NULL;
1581 }
1582
8a187ee5
SG
1583 mlx5_free_priv_descs(mr);
1584
eeea6953
LR
1585 if (!allocated_from_cache)
1586 destroy_mkey(dev, mr);
6aec21f6
HE
1587}
1588
eeea6953 1589static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
6aec21f6 1590{
6aec21f6
HE
1591 int npages = mr->npages;
1592 struct ib_umem *umem = mr->umem;
1593
8b4d5bc5 1594 if (is_odp_mr(mr)) {
597ecc5a
JG
1595 struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
1596
a6bc3875
MS
1597 /* Prevent new page faults and
1598 * prefetch requests from succeeding
1599 */
b4cfe447 1600 mr->live = 0;
a6bc3875
MS
1601
1602 /* dequeue pending prefetch requests for the mr */
1603 if (atomic_read(&mr->num_pending_prefetch))
1604 flush_workqueue(system_unbound_wq);
1605 WARN_ON(atomic_read(&mr->num_pending_prefetch));
1606
6aec21f6
HE
1607 /* Wait for all running page-fault handlers to finish. */
1608 synchronize_srcu(&dev->mr_srcu);
b4cfe447 1609 /* Destroy all page mappings */
597ecc5a 1610 if (umem_odp->page_list)
d2183c6f
JG
1611 mlx5_ib_invalidate_range(umem_odp,
1612 ib_umem_start(umem_odp),
1613 ib_umem_end(umem_odp));
81713d37
AK
1614 else
1615 mlx5_ib_free_implicit_mr(mr);
b4cfe447
HE
1616 /*
1617 * We kill the umem before the MR for ODP,
1618 * so that there will not be any invalidations in
1619 * flight, looking at the *mr struct.
1620 */
1621 ib_umem_release(umem);
1622 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1623
1624 /* Avoid double-freeing the umem. */
1625 umem = NULL;
1626 }
8b4d5bc5 1627
fbcd4983 1628 clean_mr(dev, mr);
6aec21f6 1629
 1630 /*
 1631 * We should unregister the DMA address from the HCA before
 1632 * removing the DMA mapping.
 1633 */
1634 mlx5_mr_cache_free(dev, mr);
836a0fbb
LR
1635 ib_umem_release(umem);
1636 if (umem)
6aec21f6 1637 atomic_sub(npages, &dev->mdev->priv.reg_pages);
836a0fbb 1638
f3f134f5
LR
1639 if (!mr->allocated_from_cache)
1640 kfree(mr);
e126ba97
EC
1641}
1642
c4367a26 1643int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
fbcd4983 1644{
6c984472
MG
1645 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1646
de0ae958
IR
1647 if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
1648 dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
1649 dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
1650 }
6c984472
MG
1651
1652 dereg_mr(to_mdev(ibmr->device), mmr);
1653
eeea6953 1654 return 0;
fbcd4983
IL
1655}
1656
7796d2a3
MG
1657static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
1658 int access_mode, int page_shift)
1659{
1660 void *mkc;
1661
1662 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1663
1664 MLX5_SET(mkc, mkc, free, 1);
1665 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1666 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1667 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1668 MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
1669 MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
1670 MLX5_SET(mkc, mkc, umr_en, 1);
1671 MLX5_SET(mkc, mkc, log_page_size, page_shift);
1672}
1673
1674static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1675 int ndescs, int desc_size, int page_shift,
1676 int access_mode, u32 *in, int inlen)
1677{
1678 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1679 int err;
1680
1681 mr->access_mode = access_mode;
1682 mr->desc_size = desc_size;
1683 mr->max_descs = ndescs;
1684
1685 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
1686 if (err)
1687 return err;
1688
1689 mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);
1690
1691 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
1692 if (err)
1693 goto err_free_descs;
1694
1695 mr->mmkey.type = MLX5_MKEY_MR;
1696 mr->ibmr.lkey = mr->mmkey.key;
1697 mr->ibmr.rkey = mr->mmkey.key;
1698
1699 return 0;
1700
1701err_free_descs:
1702 mlx5_free_priv_descs(mr);
1703 return err;
1704}
1705
6c984472 1706static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
de0ae958
IR
1707 u32 max_num_sg, u32 max_num_meta_sg,
1708 int desc_size, int access_mode)
3121e3c4 1709{
ec22eb53 1710 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
6c984472 1711 int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
7796d2a3 1712 int page_shift = 0;
ec22eb53 1713 struct mlx5_ib_mr *mr;
ec22eb53 1714 u32 *in;
b005d316 1715 int err;
3121e3c4
SG
1716
1717 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1718 if (!mr)
1719 return ERR_PTR(-ENOMEM);
1720
7796d2a3
MG
1721 mr->ibmr.pd = pd;
1722 mr->ibmr.device = pd->device;
1723
ec22eb53 1724 in = kzalloc(inlen, GFP_KERNEL);
3121e3c4
SG
1725 if (!in) {
1726 err = -ENOMEM;
1727 goto err_free;
1728 }
1729
de0ae958 1730 if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
7796d2a3 1731 page_shift = PAGE_SHIFT;
3121e3c4 1732
7796d2a3
MG
1733 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
1734 access_mode, in, inlen);
6c984472
MG
1735 if (err)
1736 goto err_free_in;
6c984472 1737
6c984472
MG
1738 mr->umem = NULL;
1739 kfree(in);
1740
1741 return mr;
1742
6c984472
MG
1743err_free_in:
1744 kfree(in);
1745err_free:
1746 kfree(mr);
1747 return ERR_PTR(err);
1748}
1749
7796d2a3
MG
1750static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1751 int ndescs, u32 *in, int inlen)
1752{
1753 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
1754 PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
1755 inlen);
1756}
1757
1758static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1759 int ndescs, u32 *in, int inlen)
1760{
1761 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
1762 0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
1763}
1764
1765static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1766 int max_num_sg, int max_num_meta_sg,
1767 u32 *in, int inlen)
1768{
1769 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1770 u32 psv_index[2];
1771 void *mkc;
1772 int err;
1773
1774 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
1775 if (!mr->sig)
1776 return -ENOMEM;
1777
1778 /* create mem & wire PSVs */
1779 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
1780 if (err)
1781 goto err_free_sig;
1782
1783 mr->sig->psv_memory.psv_idx = psv_index[0];
1784 mr->sig->psv_wire.psv_idx = psv_index[1];
1785
1786 mr->sig->sig_status_checked = true;
1787 mr->sig->sig_err_exists = false;
1788 /* Next UMR, Arm SIGERR */
1789 ++mr->sig->sigerr_count;
1790 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
1791 sizeof(struct mlx5_klm),
1792 MLX5_MKC_ACCESS_MODE_KLMS);
1793 if (IS_ERR(mr->klm_mr)) {
1794 err = PTR_ERR(mr->klm_mr);
1795 goto err_destroy_psv;
1796 }
1797 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
1798 sizeof(struct mlx5_mtt),
1799 MLX5_MKC_ACCESS_MODE_MTT);
1800 if (IS_ERR(mr->mtt_mr)) {
1801 err = PTR_ERR(mr->mtt_mr);
1802 goto err_free_klm_mr;
1803 }
1804
1805 /* Set bsf descriptors for mkey */
1806 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1807 MLX5_SET(mkc, mkc, bsf_en, 1);
1808 MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
1809
1810 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
1811 MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
1812 if (err)
1813 goto err_free_mtt_mr;
1814
1815 return 0;
1816
1817err_free_mtt_mr:
1818 dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
1819 mr->mtt_mr = NULL;
1820err_free_klm_mr:
1821 dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
1822 mr->klm_mr = NULL;
1823err_destroy_psv:
1824 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
1825 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1826 mr->sig->psv_memory.psv_idx);
1827 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
1828 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1829 mr->sig->psv_wire.psv_idx);
1830err_free_sig:
1831 kfree(mr->sig);
1832
1833 return err;
1834}
1835
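/*
 * Common allocation path for ib_alloc_mr() and ib_alloc_mr_integrity():
 * allocate the mlx5_ib_mr and the create_mkey input, then dispatch on
 * mr_type to build the matching descriptor layout.
 */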
1836static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
1837 enum ib_mr_type mr_type, u32 max_num_sg,
1838 u32 max_num_meta_sg)
1839{
1840 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1841 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1842 int ndescs = ALIGN(max_num_sg, 4);
1843 struct mlx5_ib_mr *mr;
1844 u32 *in;
1845 int err;
1846
1847 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1848 if (!mr)
1849 return ERR_PTR(-ENOMEM);
1850
1851 in = kzalloc(inlen, GFP_KERNEL);
1852 if (!in) {
1853 err = -ENOMEM;
1854 goto err_free;
1855 }
1856
1857 mr->ibmr.device = pd->device;
1858 mr->umem = NULL;
3121e3c4 1859
1860 switch (mr_type) {
1861 case IB_MR_TYPE_MEM_REG:
1862 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
1863 break;
1864 case IB_MR_TYPE_SG_GAPS:
1865 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
1866 break;
1867 case IB_MR_TYPE_INTEGRITY:
1868 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
1869 max_num_meta_sg, in, inlen);
1870 break;
1871 default:
1872 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
1873 err = -EINVAL;
1874 }
1875
3121e3c4 1876 if (err)
7796d2a3 1877 goto err_free_in;
3121e3c4 1878
1879 kfree(in);
1880
1881 return &mr->ibmr;
1882
1883err_free_in:
1884 kfree(in);
1885err_free:
1886 kfree(mr);
1887 return ERR_PTR(err);
1888}
1889
1890struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
1891 u32 max_num_sg, struct ib_udata *udata)
1892{
1893 return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
1894}
1895
1896struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
1897 u32 max_num_sg, u32 max_num_meta_sg)
1898{
1899 return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
1900 max_num_meta_sg);
1901}
1902
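/*
 * Allocate a memory window: validate the user's mlx5_ib_alloc_mw request,
 * then create a free (unpopulated), UMR-enabled mkey in KLM access mode.
 * Type-2 windows additionally enable remote invalidation (en_rinval).
 */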
1903struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1904 struct ib_udata *udata)
1905{
1906 struct mlx5_ib_dev *dev = to_mdev(pd->device);
ec22eb53 1907 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
d2370e0a 1908 struct mlx5_ib_mw *mw = NULL;
1909 u32 *in = NULL;
1910 void *mkc;
1911 int ndescs;
1912 int err;
1913 struct mlx5_ib_alloc_mw req = {};
1914 struct {
1915 __u32 comp_mask;
1916 __u32 response_length;
1917 } resp = {};
1918
1919 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1920 if (err)
1921 return ERR_PTR(err);
1922
1923 if (req.comp_mask || req.reserved1 || req.reserved2)
1924 return ERR_PTR(-EOPNOTSUPP);
1925
1926 if (udata->inlen > sizeof(req) &&
1927 !ib_is_udata_cleared(udata, sizeof(req),
1928 udata->inlen - sizeof(req)))
1929 return ERR_PTR(-EOPNOTSUPP);
1930
1931 ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
1932
1933 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
ec22eb53 1934 in = kzalloc(inlen, GFP_KERNEL);
1935 if (!mw || !in) {
1936 err = -ENOMEM;
1937 goto free;
1938 }
1939
1940 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1941
1942 MLX5_SET(mkc, mkc, free, 1);
1943 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1944 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1945 MLX5_SET(mkc, mkc, umr_en, 1);
1946 MLX5_SET(mkc, mkc, lr, 1);
cdbd0d2b 1947 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
1948 MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
1949 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1950
1951 err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
1952 if (err)
1953 goto free;
1954
aa8e08d2 1955 mw->mmkey.type = MLX5_MKEY_MW;
d2370e0a 1956 mw->ibmw.rkey = mw->mmkey.key;
db570d7d 1957 mw->ndescs = ndescs;
1958
1959 resp.response_length = min(offsetof(typeof(resp), response_length) +
1960 sizeof(resp.response_length), udata->outlen);
1961 if (resp.response_length) {
1962 err = ib_copy_to_udata(udata, &resp, resp.response_length);
1963 if (err) {
1964 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
1965 goto free;
1966 }
1967 }
1968
1969 kfree(in);
1970 return &mw->ibmw;
1971
1972free:
1973 kfree(mw);
1974 kfree(in);
1975 return ERR_PTR(err);
1976}
1977
1978int mlx5_ib_dealloc_mw(struct ib_mw *mw)
1979{
1980 struct mlx5_ib_mw *mmw = to_mmw(mw);
1981 int err;
1982
1983 err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
1984 &mmw->mmkey);
1985 if (!err)
1986 kfree(mmw);
1987 return err;
1988}
1989
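/*
 * Report the signature status of a signature-enabled MR.  A pending
 * signature error is copied into mr_status and then cleared, so each error
 * is reported only once.
 */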
1990int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
1991 struct ib_mr_status *mr_status)
1992{
1993 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1994 int ret = 0;
1995
1996 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
1997 pr_err("Invalid status check mask\n");
1998 ret = -EINVAL;
1999 goto done;
2000 }
2001
2002 mr_status->fail_status = 0;
2003 if (check_mask & IB_MR_CHECK_SIG_STATUS) {
2004 if (!mmr->sig) {
2005 ret = -EINVAL;
2006 pr_err("signature status check requested on a non-signature enabled MR\n");
2007 goto done;
2008 }
2009
2010 mmr->sig->sig_status_checked = true;
2011 if (!mmr->sig->sig_err_exists)
2012 goto done;
2013
2014 if (ibmr->lkey == mmr->sig->err_item.key)
2015 memcpy(&mr_status->sig_err, &mmr->sig->err_item,
2016 sizeof(mr_status->sig_err));
2017 else {
2018 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
2019 mr_status->sig_err.sig_err_offset = 0;
2020 mr_status->sig_err.key = mmr->sig->err_item.key;
2021 }
2022
2023 mmr->sig->sig_err_exists = false;
2024 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
2025 }
2026
2027done:
2028 return ret;
2029}
8a187ee5 2030
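/*
 * Fast path for map_mr_sg_pi: when the data (and, if present, the metadata)
 * fits in a single DMA-mapped SG entry, describe it directly so it can be
 * accessed through the PD's local_dma_lkey without building MTT/KLM
 * descriptors or issuing a UMR.
 */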
2031static int
2032mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2033 int data_sg_nents, unsigned int *data_sg_offset,
2034 struct scatterlist *meta_sg, int meta_sg_nents,
2035 unsigned int *meta_sg_offset)
2036{
2037 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2038 unsigned int sg_offset = 0;
2039 int n = 0;
2040
2041 mr->meta_length = 0;
2042 if (data_sg_nents == 1) {
2043 n++;
2044 mr->ndescs = 1;
2045 if (data_sg_offset)
2046 sg_offset = *data_sg_offset;
2047 mr->data_length = sg_dma_len(data_sg) - sg_offset;
2048 mr->data_iova = sg_dma_address(data_sg) + sg_offset;
2049 if (meta_sg_nents == 1) {
2050 n++;
2051 mr->meta_ndescs = 1;
2052 if (meta_sg_offset)
2053 sg_offset = *meta_sg_offset;
2054 else
2055 sg_offset = 0;
2056 mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
2057 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
2058 }
2059 ibmr->length = mr->data_length + mr->meta_length;
2060 }
2061
2062 return n;
2063}
2064
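/*
 * Translate a data SG list (and an optional metadata SG list) into KLM
 * descriptors.  Each entry is described with the PD's local_dma_lkey and a
 * byte-accurate length, so SG elements need not be page aligned.  Returns
 * the total number of descriptors consumed.
 */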
2065static int
2066mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
2067 struct scatterlist *sgl,
ff2ba993 2068 unsigned short sg_nents,
2069 unsigned int *sg_offset_p,
2070 struct scatterlist *meta_sgl,
2071 unsigned short meta_sg_nents,
2072 unsigned int *meta_sg_offset_p)
2073{
2074 struct scatterlist *sg = sgl;
2075 struct mlx5_klm *klms = mr->descs;
9aa8b321 2076 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
b005d316 2077 u32 lkey = mr->ibmr.pd->local_dma_lkey;
6c984472 2078 int i, j = 0;
b005d316 2079
ff2ba993 2080 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
b005d316 2081 mr->ibmr.length = 0;
2082
2083 for_each_sg(sgl, sg, sg_nents, i) {
99975cd4 2084 if (unlikely(i >= mr->max_descs))
b005d316 2085 break;
2086 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
2087 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
b005d316 2088 klms[i].key = cpu_to_be32(lkey);
0a49f2c3 2089 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2090
2091 sg_offset = 0;
2092 }
2093
2094 if (sg_offset_p)
2095 *sg_offset_p = sg_offset;
2096
2097 mr->ndescs = i;
2098 mr->data_length = mr->ibmr.length;
2099
2100 if (meta_sg_nents) {
2101 sg = meta_sgl;
2102 sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
2103 for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
2104 if (unlikely(i + j >= mr->max_descs))
2105 break;
2106 klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
2107 sg_offset);
2108 klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
2109 sg_offset);
2110 klms[i + j].key = cpu_to_be32(lkey);
2111 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2112
2113 sg_offset = 0;
2114 }
2115 if (meta_sg_offset_p)
2116 *meta_sg_offset_p = sg_offset;
2117
2118 mr->meta_ndescs = j;
2119 mr->meta_length = mr->ibmr.length - mr->data_length;
2120 }
2121
2122 return i + j;
2123}
2124
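/*
 * ib_sg_to_pages() callback: append one page address (with read/write
 * enable bits) to the MR's descriptor array.
 */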
2125static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
2126{
2127 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2128 __be64 *descs;
2129
2130 if (unlikely(mr->ndescs == mr->max_descs))
2131 return -ENOMEM;
2132
2133 descs = mr->descs;
2134 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2135
2136 return 0;
2137}
2138
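/*
 * Same as mlx5_set_page(), but used for the metadata pass of a PI MR:
 * metadata pages are appended after the already mapped data descriptors.
 */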
2139static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
2140{
2141 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2142 __be64 *descs;
2143
2144 if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
2145 return -ENOMEM;
2146
2147 descs = mr->descs;
2148 descs[mr->ndescs + mr->meta_ndescs++] =
2149 cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2150
2151 return 0;
2152}
2153
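/*
 * Map data + metadata onto the internal MTT PI MR.  Data pages are laid out
 * first, metadata pages follow, and pi_iova records where the metadata
 * starts relative to the first data page (see the comment at the pi_iova
 * calculation below).
 */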
2154static int
2155mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2156 int data_sg_nents, unsigned int *data_sg_offset,
2157 struct scatterlist *meta_sg, int meta_sg_nents,
2158 unsigned int *meta_sg_offset)
2159{
2160 struct mlx5_ib_mr *mr = to_mmr(ibmr);
de0ae958 2161 struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
2162 int n;
2163
2164 pi_mr->ndescs = 0;
2165 pi_mr->meta_ndescs = 0;
2166 pi_mr->meta_length = 0;
2167
2168 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2169 pi_mr->desc_size * pi_mr->max_descs,
2170 DMA_TO_DEVICE);
2171
2172 pi_mr->ibmr.page_size = ibmr->page_size;
2173 n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
2174 mlx5_set_page);
2175 if (n != data_sg_nents)
2176 return n;
2177
2563e2f3 2178 pi_mr->data_iova = pi_mr->ibmr.iova;
2179 pi_mr->data_length = pi_mr->ibmr.length;
2180 pi_mr->ibmr.length = pi_mr->data_length;
2181 ibmr->length = pi_mr->data_length;
2182
2183 if (meta_sg_nents) {
2184 u64 page_mask = ~((u64)ibmr->page_size - 1);
2563e2f3 2185 u64 iova = pi_mr->data_iova;
2186
2187 n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
2188 meta_sg_offset, mlx5_set_page_pi);
2189
2190 pi_mr->meta_length = pi_mr->ibmr.length;
2191 /*
2192 * PI address for the HW is the offset of the metadata address
2193 * relative to the first data page address.
2194 * It equals the first data page address + the size of the data pages +
2195 * the metadata offset within the first metadata page.
2196 */
2197 pi_mr->pi_iova = (iova & page_mask) +
2198 pi_mr->ndescs * ibmr->page_size +
2199 (pi_mr->ibmr.iova & ~page_mask);
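 /*
 * Illustrative example (hypothetical values): with a 4K page_size, a
 * data iova of 0x10400 (page offset 0x400), three mapped data pages
 * and a metadata buffer starting 0x80 bytes into its first page,
 * pi_iova = 0x10000 + 3 * 0x1000 + 0x80 = 0x13080.
 */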
2200 /*
2201 * In order to use one MTT MR for data and metadata, we register
2202 * also the gaps between the end of the data and the start of
2203 * the metadata (the sig MR will verify that the HW will access
2204 * to right addresses). This mapping is safe because we use
2205 * internal mkey for the registration.
2206 */
2207 pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
2208 pi_mr->ibmr.iova = iova;
2209 ibmr->length += pi_mr->meta_length;
2210 }
2211
2212 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2213 pi_mr->desc_size * pi_mr->max_descs,
2214 DMA_TO_DEVICE);
2215
2216 return n;
2217}
2218
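/*
 * Map data + metadata onto the internal KLM PI MR.  The resulting region is
 * zero based: the data starts at iova 0 and the metadata immediately
 * follows it at pi_iova == data_length.
 */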
2219static int
2220mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2221 int data_sg_nents, unsigned int *data_sg_offset,
2222 struct scatterlist *meta_sg, int meta_sg_nents,
2223 unsigned int *meta_sg_offset)
2224{
2225 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2226 struct mlx5_ib_mr *pi_mr = mr->klm_mr;
2227 int n;
2228
2229 pi_mr->ndescs = 0;
2230 pi_mr->meta_ndescs = 0;
2231 pi_mr->meta_length = 0;
2232
2233 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2234 pi_mr->desc_size * pi_mr->max_descs,
2235 DMA_TO_DEVICE);
2236
2237 n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
2238 meta_sg, meta_sg_nents, meta_sg_offset);
2239
2240 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2241 pi_mr->desc_size * pi_mr->max_descs,
2242 DMA_TO_DEVICE);
2243
6c984472 2244 /* This is a zero-based memory region */
2563e2f3 2245 pi_mr->data_iova = 0;
6c984472 2246 pi_mr->ibmr.iova = 0;
de0ae958 2247 pi_mr->pi_iova = pi_mr->data_length;
6c984472 2248 ibmr->length = pi_mr->ibmr.length;
6c984472 2249
2250 return n;
2251}
6c984472 2252
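/*
 * Map the data and protection scatterlists of an IB_MR_TYPE_INTEGRITY MR.
 * Three strategies are tried in decreasing order of efficiency: direct PA
 * mapping, the internal MTT PI MR, and finally the internal KLM PI MR.
 */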
2253int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2254 int data_sg_nents, unsigned int *data_sg_offset,
2255 struct scatterlist *meta_sg, int meta_sg_nents,
2256 unsigned int *meta_sg_offset)
2257{
2258 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2563e2f3 2259 struct mlx5_ib_mr *pi_mr = NULL;
2260 int n;
2261
2262 WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
2263
2264 mr->ndescs = 0;
2265 mr->data_length = 0;
2266 mr->data_iova = 0;
2267 mr->meta_ndescs = 0;
2268 mr->pi_iova = 0;
2269 /*
2270 * As a performance optimization, if possible, there is no need to
2271 * perform a UMR operation to register the data/metadata buffers.
2272 * First try to map the sg lists to PA descriptors with local_dma_lkey.
2273 * Fall back to UMR only in case of failure.
2274 */
2275 n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2276 data_sg_offset, meta_sg, meta_sg_nents,
2277 meta_sg_offset);
2278 if (n == data_sg_nents + meta_sg_nents)
2279 goto out;
2280 /*
2281 * As a performance optimization, if possible, there is no need to map
2282 * the sg lists to KLM descriptors. First try to map the sg lists to MTT
2283 * descriptors and fall back to KLM only in case of failure.
2284 * It's more efficient for the HW to work with MTT descriptors
2285 * (especially under high load).
2286 * Use KLM (indirect access) only if it's mandatory.
2287 */
2563e2f3 2288 pi_mr = mr->mtt_mr;
2289 n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2290 data_sg_offset, meta_sg, meta_sg_nents,
2291 meta_sg_offset);
2292 if (n == data_sg_nents + meta_sg_nents)
2293 goto out;
2294
2295 pi_mr = mr->klm_mr;
2296 n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2297 data_sg_offset, meta_sg, meta_sg_nents,
2298 meta_sg_offset);
2299 if (unlikely(n != data_sg_nents + meta_sg_nents))
2300 return -ENOMEM;
2301
2302out:
2303 /* This is a zero-based memory region */
2304 ibmr->iova = 0;
2305 mr->pi_mr = pi_mr;
2306 if (pi_mr)
2307 ibmr->sig_attrs->meta_length = pi_mr->meta_length;
2308 else
2309 ibmr->sig_attrs->meta_length = mr->meta_length;
de0ae958 2310
2311 return 0;
2312}
2313
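/*
 * Standard ib_map_mr_sg() implementation: sync the descriptor buffer for
 * CPU access, build KLM descriptors for KLM-mode MRs (e.g. SG_GAPS) or page
 * descriptors otherwise, then hand the buffer back to the device.
 */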
ff2ba993 2314int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
9aa8b321 2315 unsigned int *sg_offset)
2316{
2317 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2318 int n;
2319
2320 mr->ndescs = 0;
2321
2322 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
2323 mr->desc_size * mr->max_descs,
2324 DMA_TO_DEVICE);
2325
ec22eb53 2326 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
2327 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
2328 NULL);
b005d316 2329 else
2330 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
2331 mlx5_set_page);
2332
2333 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
2334 mr->desc_size * mr->max_descs,
2335 DMA_TO_DEVICE);
2336
2337 return n;
2338}