/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		/* Wait until all page fault handlers using the mr complete. */
		synchronize_srcu(&dev->mr_srcu);

	return err;
}

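/*
 * Map an MR allocation order to an index into the cache entry array;
 * orders below the smallest cached order map to entry 0.
 */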
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

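/*
 * Check whether the MKey's cached translation capacity, (1 << order)
 * adapter pages, still covers the requested range, so a UMR MTT update
 * can be used instead of re-creating the MKey.
 */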
static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (is_odp_mr(mr)) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		to_ib_umem_odp(mr->umem)->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}

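/*
 * Completion handler for the asynchronous MKey creation issued by
 * add_keys(): on success, assign an 8-bit variant key, add the MR to
 * its cache entry's free list and publish the MKey in the mkey table.
 */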
static void reg_mr_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5_ib_mr *mr =
		container_of(context, struct mlx5_ib_mr, cb_work);
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct xarray *mkeys = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(
		MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	xa_lock_irqsave(mkeys, flags);
	err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey, GFP_ATOMIC));
	xa_unlock_irqrestore(mkeys, flags);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}

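/*
 * Asynchronously create 'num' MKeys for cache entry 'c'; completions
 * are handled by reg_mr_callback(). At most MAX_PENDING_REG_MR
 * creations may be outstanding per entry at any time.
 */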
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
		MLX5_SET(mkc, mkc, access_mode_4_2,
			 (ent->access_mode >> 2) & 0x7);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       &dev->async_ctx, in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, &mr->cb_work);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

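/*
 * Destroy up to 'num' MKeys from cache entry 'c', waiting out any
 * in-flight ODP page fault handlers before freeing the MR structures.
 */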
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		synchronize_srcu(&dev->mr_srcu);

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

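/*
 * debugfs interface: each cache entry gets a directory named after its
 * order, holding writable "size" and "limit" controls backed by the
 * fops below, plus "cur" and "miss" counters.
 */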
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

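/*
 * Keep a cache entry near its limit: refill asynchronously while below
 * 2 * limit, and lazily shrink (only after 300 seconds without
 * additions, and when the CPU is otherwise idle) once above it.
 */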
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to run when no
		 * other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user
		 * tasks to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and
		 * postpone the garbage collection work to the next cycle,
		 * in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

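/*
 * Allocate an MR from the given cache entry, waiting for a refill if
 * the entry's free list is empty. Entries above MR_CACHE_LAST_STD_ENTRY
 * are set up for ODP use in mlx5_mr_cache_init().
 */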
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return NULL;
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

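/*
 * Return a cache-allocated MR to its entry's free list. The MKey is
 * invalidated with a UMR WQE first; if that fails, it is destroyed
 * instead of being recycled.
 */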
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	if (!mr->allocated_from_cache)
		return;

	c = order2idx(dev, mr->order);
	WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);

	if (unreg_umr(dev, mr)) {
		mr->allocated_from_cache = false;
		destroy_mkey(dev, mr);
		ent = &cache->ent[c];
		if (ent->cur < ent->limit)
			queue_work(cache->wq, &ent->work);
		return;
	}

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct dentry *dir;
	int i;

	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		dir = debugfs_create_dir(ent->name, cache->root);
		debugfs_create_file("size", 0600, dir, ent, &size_fops);
		debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
		debugfs_create_u32("cur", 0400, dir, &ent->cur);
		debugfs_create_u32("miss", 0600, dir, &ent->miss);
	}
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}

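/*
 * Create the MR cache: an ordered workqueue plus MAX_MR_CACHE_ENTRIES
 * entries, where entry i holds MRs of order i + 2. Standard entries
 * are filled up to their profile-defined limits; entries above
 * MR_CACHE_LAST_STD_ENTRY are reserved for ODP.
 */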
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->is_rep &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
		queue_work(cache->wq, &ent->work);
	}

	mlx5_mr_cache_debugfs_init(dev);

	return 0;
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	if (!dev->cache.wq)
		return 0;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);
	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

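/*
 * Number of octowords (16-byte units) needed for the MTT list covering
 * 'len' bytes starting at 'addr'; each octoword holds two 8-byte
 * translation entries.
 */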
static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

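/*
 * Pin (or, for ODP, register) the user memory behind the requested
 * range and report its page accounting: total pages, best page shift,
 * number of contiguous blocks at that shift, and allocation order.
 */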
static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
		       u64 start, u64 length, int access_flags,
		       struct ib_umem **umem, int *npages, int *page_shift,
		       int *ncont, int *order)
{
	struct ib_umem *u;

	*umem = NULL;

	if (access_flags & IB_ACCESS_ON_DEMAND) {
		struct ib_umem_odp *odp;

		odp = ib_umem_odp_get(udata, start, length, access_flags);
		if (IS_ERR(odp)) {
			mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
				    PTR_ERR(odp));
			return PTR_ERR(odp);
		}

		u = &odp->umem;

		*page_shift = odp->page_shift;
		*ncont = ib_umem_odp_num_pages(odp);
		*npages = *ncont << (*page_shift - PAGE_SHIFT);
		if (order)
			*order = ilog2(roundup_pow_of_two(*ncont));
	} else {
		u = ib_umem_get(udata, start, length, access_flags, 0);
		if (IS_ERR(u)) {
			mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
			return PTR_ERR(u);
		}

		mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
				   page_shift, ncont, order);
	}

	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(u);
		return -EINVAL;
	}

	*umem = u;

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

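/*
 * Post a UMR work request on the dedicated UMR QP and sleep until its
 * completion arrives; the semaphore throttles concurrent UMR users.
 */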
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	const struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		if (!umr_can_use_indirect_mkey(dev))
			return -EPERM;
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

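/*
 * Rewrite a window of the MR's translation table (MTTs, or KLMs for
 * indirect MKeys) via UMR, chunking the transfer through a DMA-mapped
 * scratch buffer. Under memory pressure this falls back to a smaller
 * spare chunk and finally to the driver's emergency page.
 */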
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			? sizeof(struct mlx5_klm)
			: sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
				       u64 length, int acc, int mode)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	mr->umem = NULL;
	set_mr_fields(dev, mr, 0, length, acc);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs)
{
	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE)
		return -EOPNOTSUPP;

	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
					  sg_list, num_sge);
}

struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
	u64 start_addr = mdm->dev_addr + attr->offset;
	int mode;

	switch (mdm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_MEMIC;
		start_addr -= pci_resource_start(dev->pdev, 0);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
				 attr->access_flags, mode);
}

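/*
 * Entry point for userspace MR registration. When UMR is usable and
 * the region's order fits the cache, take a pre-created MKey from the
 * cache and program its translation with a UMR WQE; otherwise fall
 * back to synchronous MKey creation in reg_create().
 */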
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool use_umr;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
	    length == U64_MAX) {
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}

	err = mr_umem_get(dev, udata, start, length, access_flags, &umem,
			  &npages, &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	use_umr = mlx5_ib_can_use_umr(dev, true);

	if (order <= mr_cache_max_order(dev) && use_umr) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		if (access_flags & IB_ACCESS_ON_DEMAND) {
			err = -EINVAL;
			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
			goto error;
		}
		use_umr = false;
	}

	if (!mr) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, !use_umr);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

	update_odp_mr(mr);

	if (use_umr) {
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);

		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		mr->live = 1;
		atomic_set(&mr->num_pending_prefetch, 0);
	}

	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_umr_wr umrwr = {};

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.pd = dev->umrc.pd;
	umrwr.mkey = mr->mmkey.key;
	umrwr.ignore_free_state = 1;

	return mlx5_ib_post_send_wait(dev, &umrwr);
}

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}

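/*
 * Re-register an existing user MR: update the PD, access flags and/or
 * translation in place with UMR when possible, otherwise tear the MKey
 * down and re-create it via reg_create().
 */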
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	u64 addr, len;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

	if (!mr->umem)
		return -EINVAL;

	if (flags & IB_MR_REREG_TRANS) {
		addr = virt_addr;
		len = length;
	} else {
		addr = mr->umem->address;
		len = mr->umem->length;
	}

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace the umem. This needs to be done whether or not
		 * UMR is used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = NULL;
		err = mr_umem_get(dev, udata, addr, len, access_flags,
				  &mr->umem, &npages, &page_shift, &ncont,
				  &order);
		if (err)
			goto err;
	}

	if (!mlx5_ib_can_use_umr(dev, true) ||
	    (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
		/*
		 * UMR can't be used - the MKey needs to be replaced.
		 */
		if (mr->allocated_from_cache)
			err = unreg_umr(dev, mr);
		else
			err = destroy_mkey(dev, mr);
		if (err)
			goto err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags, true);

		if (IS_ERR(mr)) {
			err = PTR_ERR(mr);
			mr = to_mmr(ib_mr);
			goto err;
		}

		mr->allocated_from_cache = 0;
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			mr->live = 1;
	} else {
		/*
		 * Send a UMR WQE to patch the existing MKey in place.
		 */
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}

		if (err)
			goto err;
	}

	set_mr_fields(dev, mr, npages, len, access_flags);

	update_odp_mr(mr);
	return 0;

err:
	ib_umem_release(mr->umem);
	mr->umem = NULL;

	clean_mr(dev, mr);
	return err;
}

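/*
 * Editor's note: a minimal sketch of how the rereg path above is typically
 * reached. This is a hypothetical user-space caller (rdma-core), not part of
 * this file; the uverbs layer translates it into mlx5_ib_rereg_user_mr().
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len, IBV_ACCESS_LOCAL_WRITE);
 *
 *	// Re-register the same MR over a new buffer; the driver either
 *	// patches the existing mkey with a UMR WQE or, when UMR cannot be
 *	// used, destroys the mkey and creates a fresh one via reg_create().
 *	ibv_rereg_mr(mr, IBV_REREG_MR_CHANGE_TRANSLATION, NULL,
 *		     new_buf, new_len, 0);
 */
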
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

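/*
 * Editor's note: a worked example for the alignment logic above, with
 * illustrative values. With MLX5_UMR_ALIGN == 2048 and ARCH_KMALLOC_MINALIGN
 * of, say, 8, add_size is 2040. A request for 32 MTT descriptors of 8 bytes
 * (size == 256) therefore allocates 256 + 2040 bytes, and PTR_ALIGN() picks
 * the first 2048-byte-aligned address inside that buffer, which is guaranteed
 * to leave at least `size` usable bytes before the end of the allocation.
 */
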
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dev.parent, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int allocated_from_cache = mr->allocated_from_cache;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	if (!allocated_from_cache) {
		destroy_mkey(dev, mr);
		mlx5_free_priv_descs(mr);
	}
}

static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

	if (is_odp_mr(mr)) {
		struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);

		/* Prevent new page faults and prefetch requests from
		 * succeeding.
		 */
		mr->live = 0;

		/* Dequeue pending prefetch requests for the MR. */
		if (atomic_read(&mr->num_pending_prefetch))
			flush_workqueue(system_unbound_wq);
		WARN_ON(atomic_read(&mr->num_pending_prefetch));

		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		if (!umem_odp->is_implicit_odp)
			mlx5_ib_invalidate_range(umem_odp,
						 ib_umem_start(umem_odp),
						 ib_umem_end(umem_odp));
		else
			mlx5_ib_free_implicit_mr(mr);
		/*
		 * We kill the umem before the MR for ODP, so that there
		 * will not be any invalidations in flight that still look
		 * at the *mr struct.
		 */
		ib_umem_odp_release(umem_odp);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}

	clean_mr(dev, mr);

	/*
	 * We should unregister the DMA address from the HCA before
	 * removing the DMA mapping.
	 */
	mlx5_mr_cache_free(dev, mr);
	ib_umem_release(umem);
	if (umem)
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

	if (!mr->allocated_from_cache)
		kfree(mr);
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);

	if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
		dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
		dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
	}

	dereg_mr(to_mdev(ibmr->device), mmr);

	return 0;
}

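/*
 * Editor's note: a hedged sketch of a hypothetical kernel ULP caller; the
 * core ib_dereg_mr() entry point dispatches to mlx5_ib_dereg_mr() above,
 * which in turn tears down PSVs, the pi_mr pair (for integrity MRs), the
 * mkey, and the private descriptor buffer.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	// ... use the MR ...
 *	ib_dereg_mr(mr);
 */
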
static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
				   int access_mode, int page_shift)
{
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
}

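/*
 * Editor's note (the editor's reading of the fields, not normative hardware
 * documentation): `free` == 1 creates the mkey in the free state so that it
 * can later be taken into use, and repeatedly recycled, by UMR WQEs rather
 * than by firmware commands, while qpn == 0xffffff marks the mkey as not
 * bound to any particular QP.
 */
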
static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				  int ndescs, int desc_size, int page_shift,
				  int access_mode, u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	mr->access_mode = access_mode;
	mr->desc_size = desc_size;
	mr->max_descs = ndescs;

	err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
	if (err)
		return err;

	mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_free_descs;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;

	return 0;

err_free_descs:
	mlx5_free_priv_descs(mr);
	return err;
}

static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
				u32 max_num_sg, u32 max_num_meta_sg,
				int desc_size, int access_mode)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
	int page_shift = 0;
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->ibmr.device = pd->device;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		page_shift = PAGE_SHIFT;

	err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
				     access_mode, in, inlen);
	if (err)
		goto err_free_in;

	mr->umem = NULL;
	kfree(in);

	return mr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
				      PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
				      inlen);
}

static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
				      0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
}

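/*
 * Editor's note: the two helpers above differ only in the descriptor type.
 * An mlx5_mtt is a compact 8-byte entry that can only describe whole,
 * equally sized pages (hence PAGE_SHIFT), while an mlx5_klm is a 16-byte
 * {byte count, key, address} triple that can describe segments with
 * arbitrary offsets and gaps - which is what IB_MR_TYPE_SG_GAPS needs.
 */
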
static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				      int max_num_sg, int max_num_meta_sg,
				      u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	u32 psv_index[2];
	void *mkc;
	int err;

	mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
	if (!mr->sig)
		return -ENOMEM;

	/* create mem & wire PSVs */
	err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
	if (err)
		goto err_free_sig;

	mr->sig->psv_memory.psv_idx = psv_index[0];
	mr->sig->psv_wire.psv_idx = psv_index[1];

	mr->sig->sig_status_checked = true;
	mr->sig->sig_err_exists = false;
	/* Next UMR, Arm SIGERR */
	++mr->sig->sigerr_count;
	mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_klm),
					 MLX5_MKC_ACCESS_MODE_KLMS);
	if (IS_ERR(mr->klm_mr)) {
		err = PTR_ERR(mr->klm_mr);
		goto err_destroy_psv;
	}
	mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_mtt),
					 MLX5_MKC_ACCESS_MODE_MTT);
	if (IS_ERR(mr->mtt_mr)) {
		err = PTR_ERR(mr->mtt_mr);
		goto err_free_klm_mr;
	}

	/* Set bsf descriptors for mkey */
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, bsf_en, 1);
	MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);

	err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
				     MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
	if (err)
		goto err_free_mtt_mr;

	return 0;

err_free_mtt_mr:
	dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
	mr->mtt_mr = NULL;
err_free_klm_mr:
	dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
	mr->klm_mr = NULL;
err_destroy_psv:
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
			     mr->sig->psv_memory.psv_idx);
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
			     mr->sig->psv_wire.psv_idx);
err_free_sig:
	kfree(mr->sig);

	return err;
}

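/*
 * Editor's note: an integrity MR is thus a small bundle of resources: two
 * PSVs (memory and wire domains), a KLM pi_mr and an MTT pi_mr for mapping
 * the data + protection scatterlists, and the signature-enabled mkey itself
 * (4 KLMs, BSF enabled). The error unwinding above releases them in the
 * reverse order of creation.
 */
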
static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
					enum ib_mr_type mr_type, u32 max_num_sg,
					u32 max_num_meta_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mr->ibmr.device = pd->device;
	mr->umem = NULL;

	switch (mr_type) {
	case IB_MR_TYPE_MEM_REG:
		err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_SG_GAPS:
		err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_INTEGRITY:
		err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
						 max_num_meta_sg, in, inlen);
		break;
	default:
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
	}

	if (err)
		goto err_free_in;

	kfree(in);

	return &mr->ibmr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg, struct ib_udata *udata)
{
	return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
}

struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg, u32 max_num_meta_sg)
{
	return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
				  max_num_meta_sg);
}

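/*
 * Editor's note: a minimal sketch of a hypothetical ULP allocating a
 * fast-registration MR through the core API; ib_alloc_mr() dispatches to
 * mlx5_ib_alloc_mr() above.
 *
 *	struct ib_mr *mr;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);	// up to 32 pages
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */
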
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, type == IB_MW_TYPE_2);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}

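/*
 * Editor's note: this verb is reached from user space - the function parses
 * udata unconditionally - e.g. via a hypothetical rdma-core caller such as:
 *
 *	struct ibv_mw *mw = ibv_alloc_mw(pd, IBV_MW_TYPE_2);
 *
 * Type-2 windows get en_rinval set, which the editor reads as enabling
 * remote invalidation of the window's rkey.
 */
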
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				     &mmw->mmkey);
	if (!err)
		kfree(mmw);
	return err;
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key) {
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		} else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

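/*
 * Editor's note: a hedged usage sketch. After a signature-enabled transfer
 * completes, a hypothetical ULP would poll the MR for protection-information
 * errors through the core wrapper, which calls mlx5_ib_check_mr_status()
 * above:
 *
 *	struct ib_mr_status st;
 *
 *	if (!ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &st) &&
 *	    (st.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		pr_err("sig error type %d at offset %llu\n",
 *		       st.sig_err.err_type,
 *		       (unsigned long long)st.sig_err.sig_err_offset);
 */
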
static int
mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			int data_sg_nents, unsigned int *data_sg_offset,
			struct scatterlist *meta_sg, int meta_sg_nents,
			unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	unsigned int sg_offset = 0;
	int n = 0;

	mr->meta_length = 0;
	if (data_sg_nents == 1) {
		n++;
		mr->ndescs = 1;
		if (data_sg_offset)
			sg_offset = *data_sg_offset;
		mr->data_length = sg_dma_len(data_sg) - sg_offset;
		mr->data_iova = sg_dma_address(data_sg) + sg_offset;
		if (meta_sg_nents == 1) {
			n++;
			mr->meta_ndescs = 1;
			if (meta_sg_offset)
				sg_offset = *meta_sg_offset;
			else
				sg_offset = 0;
			mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
			mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
		}
		ibmr->length = mr->data_length + mr->meta_length;
	}

	return n;
}

static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p,
		   struct scatterlist *meta_sgl,
		   unsigned short meta_sg_nents,
		   unsigned int *meta_sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i, j = 0;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	mr->ndescs = i;
	mr->data_length = mr->ibmr.length;

	if (meta_sg_nents) {
		sg = meta_sgl;
		sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
		for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
			if (unlikely(i + j >= mr->max_descs))
				break;
			klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
						     sg_offset);
			klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
							 sg_offset);
			klms[i + j].key = cpu_to_be32(lkey);
			mr->ibmr.length += sg_dma_len(sg) - sg_offset;

			sg_offset = 0;
		}
		if (meta_sg_offset_p)
			*meta_sg_offset_p = sg_offset;

		mr->meta_ndescs = j;
		mr->meta_length = mr->ibmr.length - mr->data_length;
	}

	return i + j;
}

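/*
 * Editor's note: a worked example of the conversion above, with made-up
 * numbers. Two DMA-mapped data segments of 0x1000 bytes at 0xa000 and
 * 0xc000 become:
 *
 *	klms[0] = { .va = 0xa000, .bcount = 0x1000, .key = local_dma_lkey }
 *	klms[1] = { .va = 0xc000, .bcount = 0x1000, .key = local_dma_lkey }
 *
 * with mr->ibmr.iova == 0xa000 and mr->ibmr.length == 0x2000. Unlike MTT
 * pages, the segments need not be page-aligned or gap-free.
 */
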
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs + mr->meta_ndescs++] =
		cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

static int
mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
	int n;

	pi_mr->ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	pi_mr->ibmr.page_size = ibmr->page_size;
	n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
			   mlx5_set_page);
	if (n != data_sg_nents)
		return n;

	pi_mr->data_iova = pi_mr->ibmr.iova;
	pi_mr->data_length = pi_mr->ibmr.length;
	pi_mr->ibmr.length = pi_mr->data_length;
	ibmr->length = pi_mr->data_length;

	if (meta_sg_nents) {
		u64 page_mask = ~((u64)ibmr->page_size - 1);
		u64 iova = pi_mr->data_iova;

		n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
				    meta_sg_offset, mlx5_set_page_pi);

		pi_mr->meta_length = pi_mr->ibmr.length;
		/*
		 * The PI address for the HW is expressed relative to the
		 * first data page address: it equals the first data page
		 * address + the size of the data pages + the metadata
		 * offset within the first metadata page.
		 */
		pi_mr->pi_iova = (iova & page_mask) +
				 pi_mr->ndescs * ibmr->page_size +
				 (pi_mr->ibmr.iova & ~page_mask);
		/*
		 * In order to use one MTT MR for both data and metadata, we
		 * also register the gap between the end of the data and the
		 * start of the metadata (the sig MR verifies that the HW
		 * accesses the right addresses). This mapping is safe
		 * because we use an internal mkey for the registration.
		 */
		pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
		pi_mr->ibmr.iova = iova;
		ibmr->length += pi_mr->meta_length;
	}

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}

static int
mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->klm_mr;
	int n;

	pi_mr->ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
			       meta_sg, meta_sg_nents, meta_sg_offset);

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	/* This is a zero-based memory region */
	pi_mr->data_iova = 0;
	pi_mr->ibmr.iova = 0;
	pi_mr->pi_iova = pi_mr->data_length;
	ibmr->length = pi_mr->ibmr.length;

	return n;
}

int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = NULL;
	int n;

	WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);

	mr->ndescs = 0;
	mr->data_length = 0;
	mr->data_iova = 0;
	mr->meta_ndescs = 0;
	mr->pi_iova = 0;
	/*
	 * As a performance optimization, avoid performing a UMR operation
	 * to register the data/metadata buffers when possible. First try
	 * to map the sg lists to PA descriptors with local_dma_lkey, and
	 * fall back to UMR only in case of a failure.
	 */
	n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				    data_sg_offset, meta_sg, meta_sg_nents,
				    meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;
	/*
	 * As a performance optimization, avoid mapping the sg lists to KLM
	 * descriptors when possible. First try to map the sg lists to MTT
	 * descriptors and fall back to KLM only in case of a failure.
	 * It is more efficient for the HW to work with MTT descriptors
	 * (especially under high load); use KLM (indirect access) only
	 * when it is mandatory.
	 */
	pi_mr = mr->mtt_mr;
	n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;

	pi_mr = mr->klm_mr;
	n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (unlikely(n != data_sg_nents + meta_sg_nents))
		return -ENOMEM;

out:
	/* This is a zero-based memory region */
	ibmr->iova = 0;
	mr->pi_mr = pi_mr;
	if (pi_mr)
		ibmr->sig_attrs->meta_length = pi_mr->meta_length;
	else
		ibmr->sig_attrs->meta_length = mr->meta_length;

	return 0;
}

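/*
 * Editor's note: a hedged sketch of how the three-stage mapping above (PA
 * first, then MTT, then KLM) is driven by a hypothetical integrity-aware
 * ULP through the core wrapper; this driver returns 0 on success:
 *
 *	if (ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
 *			    meta_sg, meta_nents, NULL, PAGE_SIZE))
 *		return -EINVAL;
 */
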
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
				       NULL);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}
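
/*
 * Editor's note: a hedged sketch of the classic fast-registration flow a
 * hypothetical ULP would use around mlx5_ib_map_mr_sg(); the core
 * ib_map_mr_sg() wrapper sets ibmr->page_size before dispatching here and
 * returns the number of mapped sg entries.
 *
 *	int n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
 *	if (n != sg_nents)
 *		return n < 0 ? n : -EINVAL;
 *	// ... post an IB_WR_REG_MR work request to make the mapping live
 */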