/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
}

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static bool use_umr(struct mlx5_ib_dev *dev, int order)
{
	return order <= mr_cache_max_order(dev) &&
	       umr_can_modify_entity_size(dev);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

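/*
 * Completion callback for the asynchronous mkey creation issued by
 * add_keys(). On success the MR is added to its cache bucket and the mkey
 * is inserted into the device mkey table; on failure the fill timer is
 * armed to throttle further cache filling.
 */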
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}

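/* Asynchronously create 'num' new cache mkeys for cache bucket 'c'. */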
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
		MLX5_SET(mkc, mkc, access_mode_4_2,
			 (ent->access_mode >> 2) & 0x7);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

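/*
 * Per-bucket maintenance: grow a bucket while it is below 2 * limit, and
 * lazily shrink it once it has stayed oversized and the CPU is otherwise
 * idle.
 */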
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage collection
		 * task. Such a task is intended to be run when no other active
		 * processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and postpone
		 * the garbage collection work to try to run in the next cycle,
		 * in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

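/*
 * Take an MR from a specific cache bucket, waiting for new mkeys to be
 * created if the bucket is currently empty.
 */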
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return NULL;
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}

	if (unreg_umr(dev, mr))
		return;

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root || dev->rep)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			goto err;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			goto err;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			goto err;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			goto err;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			goto err;
	}

	return 0;
err:
	mlx5_mr_cache_debugfs_cleanup(dev);

	return -ENOMEM;
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}

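/*
 * Set up the MR cache buckets and start filling them from the "mkey_cache"
 * workqueue. A debugfs failure is reported but is not treated as fatal.
 */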
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int err;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		queue_work(cache->wq, &ent->work);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->rep &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	/*
	 * We don't want to fail driver if debugfs failed to initialize,
	 * so we are not forwarding error to the user.
	 */

	return 0;
}

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int total = 0;
	int i;
	int j;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0; j < 1000; j++) {
			if (!ent->pending)
				break;
			msleep(50);
		}
	}
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;
	}

	if (total)
		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
	else
		mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	if (!dev->cache.wq)
		return 0;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

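/*
 * Pin the user memory covering [start, start + length) and report its page
 * layout: total pages, the best page shift, the page count at that shift
 * and its order.
 */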
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem,
		       int *npages, int *page_shift, int *ncont,
		       int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *u;
	int err;

	*umem = NULL;

	u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
	err = PTR_ERR_OR_ZERO(u);
	if (err) {
		mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
		return err;
	}

	mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(u);
		return -EINVAL;
	}

	*umem = u;

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

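/* Post a UMR work request and wait for its completion to be signalled. */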
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

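/*
 * Fill one chunk of the XLT buffer: KLM entries for indirect (ODP) mkeys,
 * otherwise MTT entries taken from the umem page list.
 */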
static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		if (!umr_can_use_indirect_mkey(dev))
			return -EPERM;
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

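/*
 * Update an mkey's translation table (XLT) with UMR work requests. The
 * descriptors are staged through a bounce buffer in chunks of at most
 * MLX5_MAX_UMR_CHUNK bytes, falling back to the smaller spare chunk size
 * or the emergency page when memory is tight.
 */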
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			? sizeof(struct mlx5_klm)
			: sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
					  u64 length, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MEMIC & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2,
		 (MLX5_MKC_ACCESS_MODE_MEMIC >> 2) & 0x7);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr,
		   memic_addr - pci_resource_start(dev->mdev->pdev, 0));

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	mr->umem = NULL;
	set_mr_fileds(dev, mr, 0, length, acc);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	u64 memic_addr;

	if (attr->access_flags & ~MLX5_IB_DM_ALLOWED_ACCESS)
		return ERR_PTR(-EINVAL);

	memic_addr = mdm->dev_addr + attr->offset;

	return mlx5_ib_get_memic_mr(pd, memic_addr, attr->length,
				    attr->access_flags);
}

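/*
 * Register a user memory region. When possible the MR is taken from the
 * cache and mapped with UMR; otherwise the mkey is created with a firmware
 * command under slow_path_mutex.
 */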
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool populate_mtts = false;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (!start && length == U64_MAX) {
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}
#endif

	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
			  &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	if (use_umr(dev, order)) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
		populate_mtts = false;
	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		if (access_flags & IB_ACCESS_ON_DEMAND) {
			err = -EINVAL;
			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
			goto error;
		}
		populate_mtts = true;
	}

	if (!mr) {
		if (!umr_can_modify_entity_size(dev))
			populate_mtts = true;
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, populate_mtts);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fileds(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	if (!populate_mtts) {
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);

		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	mr->live = 1;
#endif
	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

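/* Return the mkey to the free state with a UMR WQE so it can be recycled. */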
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_umr_wr umrwr = {};

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	return mlx5_ib_post_send_wait(dev, &umrwr);
}

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}

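/*
 * Re-register a user MR. Depending on the requested changes the MR is
 * either updated in place with UMR work requests or its mkey is replaced
 * entirely via reg_create().
 */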
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	u64 addr, len;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

	if (!mr->umem)
		return -EINVAL;

	if (flags & IB_MR_REREG_TRANS) {
		addr = virt_addr;
		len = length;
	} else {
		addr = mr->umem->address;
		len = mr->umem->length;
	}

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = NULL;
		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
				  &npages, &page_shift, &ncont, &order);
		if (err)
			goto err;
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->allocated_from_cache)
			err = unreg_umr(dev, mr);
		else
			err = destroy_mkey(dev, mr);
		if (err)
			goto err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags, true);

		if (IS_ERR(mr)) {
			err = PTR_ERR(mr);
			mr = to_mmr(ib_mr);
			goto err;
		}

		mr->allocated_from_cache = 0;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		mr->live = 1;
#endif
	} else {
		/*
		 * Send a UMR WQE
		 */
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}

		if (err)
			goto err;
	}

	set_mr_fileds(dev, mr, npages, len, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif
	return 0;

err:
	if (mr->umem) {
		ib_umem_release(mr->umem);
		mr->umem = NULL;
	}
	clean_mr(dev, mr);
	return err;
}

static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dev.parent, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

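/* Release everything attached to an MR except its umem, which is handled
 * by dereg_mr().
 */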
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int allocated_from_cache = mr->allocated_from_cache;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!allocated_from_cache)
		destroy_mkey(dev, mr);
}

static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		if (umem->odp_data->page_list)
			mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
						 ib_umem_end(umem));
		else
			mlx5_ib_free_implicit_mr(mr);
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(dev, mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	if (!mr->allocated_from_cache)
		kfree(mr);
	else
		mlx5_mr_cache_free(dev, mr);
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr));
	return 0;
}

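/*
 * Allocate a non-cached MR for fast registration: MTT based descriptors for
 * IB_MR_TYPE_MEM_REG, KLM based for SG_GAPS and signature MRs.
 */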
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_mtt));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(struct mlx5_mtt);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode_1_0, mr->access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mr->access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);

	mr->ibmr.device = pd->device;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_destroy_psv;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				     &mmw->mmkey);
	if (!err)
		kfree(mmw);
	return err;
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

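/* Translate a scatterlist into the KLM descriptor list of a KLM-based MR. */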
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}
	mr->ndescs = i;

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	return i;
}

static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}