/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void
create_mkey_callback(int status, struct mlx5_async_work *context);

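/*
 * Translate IB access flags into the matching mkey context bits, opt in
 * to relaxed ordering where the device supports it, and set the PD and
 * start address of the region.
 */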
static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
					  struct ib_pd *pd)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);

	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
		MLX5_SET(mkc, mkc, relaxed_ordering_write,
			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
		MLX5_SET(mkc, mkc, relaxed_ordering_read,
			 !!(acc & IB_ACCESS_RELAXED_ORDERING));

	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);
}

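/*
 * Pick the low 8 bits of the mkey (the "variant") from a rolling
 * per-device counter and store them in both the command input and the
 * mkey struct; the upper bits come later from the HW-assigned mkey index.
 */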
static void
assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
		    u32 *in)
{
	u8 key = atomic_inc_return(&dev->mkey_var);
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, mkey_7_0, key);
	mkey->key = key;
}

static int
mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
		    u32 *in, int inlen)
{
	assign_mkey_variant(dev, mkey, in);
	return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen);
}

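/*
 * Asynchronous counterpart of mlx5_ib_create_mkey(): the CREATE_MKEY
 * command is issued through the async context and completes in
 * create_mkey_callback().
 */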
static int
mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
		       struct mlx5_core_mkey *mkey,
		       struct mlx5_async_ctx *async_ctx,
		       u32 *in, int inlen, u32 *out, int outlen,
		       struct mlx5_async_work *context)
{
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	assign_mkey_variant(dev, mkey, in);
	return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
				create_mkey_callback, context);
}

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));

	return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}

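/*
 * True if a page list for [start, start + length) still fits in the
 * translation table this MR was created with (2^order adapter pages).
 */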
static inline bool mlx5_ib_pas_fits_in_mr(struct mlx5_ib_mr *mr, u64 start,
					  u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

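/*
 * Completion handler for asynchronously created cache mkeys: on success
 * the MR joins its cache bucket, on failure the fill machinery is
 * throttled for a second via fill_delay and the delay timer.
 */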
static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5_ib_mr *mr =
		container_of(context, struct mlx5_ib_mr, cb_work);
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_cache_ent *ent = mr->cache_ent;
	unsigned long flags;

	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		spin_lock_irqsave(&ent->lock, flags);
		ent->pending--;
		WRITE_ONCE(dev->fill_delay, 1);
		spin_unlock_irqrestore(&ent->lock, flags);
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->mmkey.key |= mlx5_idx_to_mkey(
		MLX5_GET(create_mkey_out, mr->out, mkey_index));

	WRITE_ONCE(dev->cache.last_add, jiffies);

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->available_mrs++;
	ent->total_mrs++;
	/* If we are doing fill_to_high_water then keep going. */
	queue_adjust_cache_locked(ent);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
}

static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
{
	struct mlx5_ib_mr *mr;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return NULL;
	mr->order = ent->order;
	mr->cache_ent = ent;
	mr->dev = ent->dev;

	set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);

	MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
	MLX5_SET(mkc, mkc, log_page_size, ent->page);
	return mr;
}

/* Asynchronously schedule new MRs to be populated in the cache. */
static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		mr = alloc_cache_mr(ent, mkc);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		spin_lock_irq(&ent->lock);
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			spin_unlock_irq(&ent->lock);
			kfree(mr);
			break;
		}
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
					     &ent->dev->async_ctx, in, inlen,
					     mr->out, sizeof(mr->out),
					     &mr->cb_work);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

/* Synchronously create an MR in the cache */
static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
{
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	mr = alloc_cache_mr(ent, mkc);
	if (!mr) {
		err = -ENOMEM;
		goto free_in;
	}

	err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto free_mr;

	mr->mmkey.type = MLX5_MKEY_MR;
	WRITE_ONCE(ent->dev->cache.last_add, jiffies);
	spin_lock_irq(&ent->lock);
	ent->total_mrs++;
	spin_unlock_irq(&ent->lock);
	kfree(in);
	return mr;
free_mr:
	kfree(mr);
free_in:
	kfree(in);
	return ERR_PTR(err);
}

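/*
 * Destroy one cached MR. Called with ent->lock held; the lock is
 * dropped around the destroy command and re-acquired before returning.
 */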
static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_mr *mr;

	lockdep_assert_held(&ent->lock);
	if (list_empty(&ent->head))
		return;
	mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
	list_del(&mr->list);
	ent->available_mrs--;
	ent->total_mrs--;
	spin_unlock_irq(&ent->lock);
	mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
	kfree(mr);
	spin_lock_irq(&ent->lock);
}

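/*
 * Walk available_mrs toward @target, adding keys in batches or removing
 * them one at a time, retrying after a short sleep whenever
 * MAX_PENDING_REG_MR throttles add_keys() with -EAGAIN. With
 * @limit_fill the target is recomputed as 2 * ent->limit on each pass.
 */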
static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
				bool limit_fill)
{
	int err;

	lockdep_assert_held(&ent->lock);

	while (true) {
		if (limit_fill)
			target = ent->limit * 2;
		if (target == ent->available_mrs + ent->pending)
			return 0;
		if (target > ent->available_mrs + ent->pending) {
			u32 todo = target - (ent->available_mrs + ent->pending);

			spin_unlock_irq(&ent->lock);
			err = add_keys(ent, todo);
			if (err == -EAGAIN)
				usleep_range(3000, 5000);
			spin_lock_irq(&ent->lock);
			if (err) {
				if (err != -EAGAIN)
					return err;
			} else
				return 0;
		} else {
			remove_cache_mr_locked(ent);
		}
	}
}

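/*
 * debugfs knobs: "size" resizes a bucket to a requested total number of
 * MRs, "limit" sets its low water mark.
 */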
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	u32 target;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &target);
	if (err)
		return err;

	/*
	 * Target is the new value of total_mrs the user requests, however we
	 * cannot free MRs that are in use. Compute the target value for
	 * available_mrs.
	 */
	spin_lock_irq(&ent->lock);
	if (target < ent->total_mrs - ent->available_mrs) {
		err = -EINVAL;
		goto err_unlock;
	}
	target = target - (ent->total_mrs - ent->available_mrs);
	if (target < ent->limit || target > ent->limit * 2) {
		err = -EINVAL;
		goto err_unlock;
	}
	err = resize_available_mrs(ent, target, false);
	if (err)
		goto err_unlock;
	spin_unlock_irq(&ent->lock);

	return count;

err_unlock:
	spin_unlock_irq(&ent->lock);
	return err;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	u32 var;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &var);
	if (err)
		return err;

	/*
	 * Upon set we immediately fill the cache to the high water mark
	 * implied by the limit.
	 */
	spin_lock_irq(&ent->lock);
	ent->limit = var;
	err = resize_available_mrs(ent, 0, true);
	spin_unlock_irq(&ent->lock);
	if (err)
		return err;
	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

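/* True if any cache bucket is currently below its low water mark. */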
static bool someone_adding(struct mlx5_mr_cache *cache)
{
	unsigned int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		struct mlx5_cache_ent *ent = &cache->ent[i];
		bool ret;

		spin_lock_irq(&ent->lock);
		ret = ent->available_mrs < ent->limit;
		spin_unlock_irq(&ent->lock);
		if (ret)
			return true;
	}
	return false;
}

/*
 * Check if the bucket is outside the high/low water mark and schedule an async
 * update. The cache refill has hysteresis: once the low water mark is hit it
 * is refilled up to the high mark.
 */
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{
	lockdep_assert_held(&ent->lock);

	if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
		return;
	if (ent->available_mrs < ent->limit) {
		ent->fill_to_high_water = true;
		queue_work(ent->dev->cache.wq, &ent->work);
	} else if (ent->fill_to_high_water &&
		   ent->available_mrs + ent->pending < 2 * ent->limit) {
		/*
		 * Once we start populating due to hitting a low water mark
		 * continue until we pass the high water mark.
		 */
		queue_work(ent->dev->cache.wq, &ent->work);
	} else if (ent->available_mrs == 2 * ent->limit) {
		ent->fill_to_high_water = false;
	} else if (ent->available_mrs > 2 * ent->limit) {
		/* Queue deletion of excess entries */
		ent->fill_to_high_water = false;
		if (ent->pending)
			queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
					   msecs_to_jiffies(1000));
		else
			queue_work(ent->dev->cache.wq, &ent->work);
	}
}

static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int err;

	spin_lock_irq(&ent->lock);
	if (ent->disabled)
		goto out;

	if (ent->fill_to_high_water &&
	    ent->available_mrs + ent->pending < 2 * ent->limit &&
	    !READ_ONCE(dev->fill_delay)) {
		spin_unlock_irq(&ent->lock);
		err = add_keys(ent, 1);
		spin_lock_irq(&ent->lock);
		if (ent->disabled)
			goto out;
		if (err) {
			/*
			 * EAGAIN only happens if pending is positive, so we
			 * will be rescheduled from reg_mr_callback(). The only
			 * failure path here is ENOMEM.
			 */
			if (err != -EAGAIN) {
				mlx5_ib_warn(
					dev,
					"command failed order %d, err %d\n",
					ent->order, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			}
		}
	} else if (ent->available_mrs > 2 * ent->limit) {
		bool need_delay;

		/*
		 * The remove_cache_mr() logic is performed as a garbage
		 * collection task. Such a task is intended to run when no
		 * other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_cache_mr() and
		 * postpone the garbage collection work to the next cycle, in
		 * order to free CPU resources to other tasks.
		 */
		spin_unlock_irq(&ent->lock);
		need_delay = need_resched() || someone_adding(cache) ||
			     time_after(jiffies,
					READ_ONCE(cache->last_add) + 300 * HZ);
		spin_lock_irq(&ent->lock);
		if (ent->disabled)
			goto out;
		if (need_delay)
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		remove_cache_mr_locked(ent);
		queue_adjust_cache_locked(ent);
	}
out:
	spin_unlock_irq(&ent->lock);
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

/* Allocate a special entry from the cache */
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       unsigned int entry, int access_flags)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;

	if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY ||
		    entry >= ARRAY_SIZE(cache->ent)))
		return ERR_PTR(-EINVAL);

	/* Matches access in alloc_cache_mr() */
	if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
		return ERR_PTR(-EOPNOTSUPP);

	ent = &cache->ent[entry];
	spin_lock_irq(&ent->lock);
	if (list_empty(&ent->head)) {
		spin_unlock_irq(&ent->lock);
		mr = create_cache_mr(ent);
		if (IS_ERR(mr))
			return mr;
	} else {
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->available_mrs--;
		queue_adjust_cache_locked(ent);
		spin_unlock_irq(&ent->lock);
	}
	mr->access_flags = access_flags;
	return mr;
}

/* Return an MR already available in the cache */
static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
{
	struct mlx5_ib_dev *dev = req_ent->dev;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent = req_ent;

	/* Try larger MR pools from the cache to satisfy the allocation */
	for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) {
		mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order,
			    ent - dev->cache.ent);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->available_mrs--;
			queue_adjust_cache_locked(ent);
			spin_unlock_irq(&ent->lock);
			break;
		}
		queue_adjust_cache_locked(ent);
		spin_unlock_irq(&ent->lock);
	}

	if (!mr)
		req_ent->miss++;

	return mr;
}

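/* Remove an MR from cache accounting before it is destroyed. */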
static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
{
	struct mlx5_cache_ent *ent = mr->cache_ent;

	mr->cache_ent = NULL;
	spin_lock_irq(&ent->lock);
	ent->total_mrs--;
	spin_unlock_irq(&ent->lock);
}

void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_cache_ent *ent = mr->cache_ent;

	if (!ent)
		return;

	if (mlx5_mr_cache_invalidate(mr)) {
		detach_mr_from_cache(mr);
		destroy_mkey(dev, mr);
		return;
	}

	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->available_mrs++;
	queue_adjust_cache_locked(ent);
	spin_unlock_irq(&ent->lock);
}

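/*
 * Drain bucket @c: move every cached MR to a private list, destroy the
 * mkeys outside the lock, then free the MR structures.
 */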
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->available_mrs--;
		ent->total_mrs--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct dentry *dir;
	int i;

	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		dir = debugfs_create_dir(ent->name, cache->root);
		debugfs_create_file("size", 0600, dir, ent, &size_fops);
		debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
		debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
		debugfs_create_u32("miss", 0600, dir, &ent->miss);
	}
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	WRITE_ONCE(dev->fill_delay, 0);
}

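/*
 * Create the cache workqueue and initialize every bucket. Standard
 * buckets hold UMR-capable MTT MRs of order i + 2; entries past
 * MR_CACHE_LAST_STD_ENTRY are set up by mlx5_odp_init_mr_cache_entry()
 * for ODP use.
 */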
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
		    mlx5_ib_can_load_pas_with_umr(dev, 0))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
		spin_lock_irq(&ent->lock);
		queue_adjust_cache_locked(ent);
		spin_unlock_irq(&ent->lock);
	}

	mlx5_mr_cache_debugfs_init(dev);

	return 0;
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	unsigned int i;

	if (!dev->cache.wq)
		return 0;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		struct mlx5_cache_ent *ent = &dev->cache.ent[i];

		spin_lock_irq(&ent->lock);
		ent->disabled = true;
		spin_unlock_irq(&ent->lock);
		cancel_work_sync(&ent->work);
		cancel_delayed_work_sync(&ent->dwork);
	}

	mlx5_mr_cache_debugfs_cleanup(dev);
	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

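/*
 * A DMA MR is a physical-address mkey with length64 set, covering the
 * whole address space with no translation table.
 */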
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, length64, 1);
	set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

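/*
 * Number of 16-byte octowords needed for the page list: each octoword
 * holds two 8-byte translation entries, hence the (npages + 1) / 2.
 */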
static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

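/*
 * Pin (or, for ODP, take a reference on) the user memory and report its
 * page accounting: total pages, best page shift, number of pages at
 * that shift, and the allocation order.
 */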
static int mr_umem_get(struct mlx5_ib_dev *dev, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem, int *npages,
		       int *page_shift, int *ncont, int *order)
{
	struct ib_umem *u;

	*umem = NULL;

	if (access_flags & IB_ACCESS_ON_DEMAND) {
		struct ib_umem_odp *odp;

		odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
				      &mlx5_mn_ops);
		if (IS_ERR(odp)) {
			mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
				    PTR_ERR(odp));
			return PTR_ERR(odp);
		}

		u = &odp->umem;

		*page_shift = odp->page_shift;
		*ncont = ib_umem_odp_num_pages(odp);
		*npages = *ncont << (*page_shift - PAGE_SHIFT);
		if (order)
			*order = ilog2(roundup_pow_of_two(*ncont));
	} else {
		u = ib_umem_get(&dev->ib_dev, start, length, access_flags);
		if (IS_ERR(u)) {
			mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
			return PTR_ERR(u);
		}

		mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
				   page_shift, ncont, order);
	}

	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(u);
		return -EINVAL;
	}

	*umem = u;

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

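/*
 * Post a UMR work request on the dedicated UMR QP and sleep until its
 * completion arrives; the semaphore bounds the number of in-flight WRs.
 */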
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	const struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
						      unsigned int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return &cache->ent[0];
	order = order - cache->ent[0].order;
	if (order > MR_CACHE_LAST_STD_ENTRY)
		return NULL;
	return &cache->ent[order];
}

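/*
 * Take an MR from the bucket matching @order, creating one synchronously
 * on a cache miss, and bind it to this registration's PD and range.
 */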
static struct mlx5_ib_mr *
alloc_mr_from_cache(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr,
		    u64 len, int npages, int page_shift, unsigned int order,
		    int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_cache_ent *ent = mr_cache_ent_from_order(dev, order);
	struct mlx5_ib_mr *mr;

	if (!ent)
		return ERR_PTR(-E2BIG);

	/* Matches access in alloc_cache_mr() */
	if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
		return ERR_PTR(-EOPNOTSUPP);

	mr = get_cache_mr(ent);
	if (!mr) {
		mr = create_cache_mr(ent);
		if (IS_ERR(mr))
			return mr;
	}

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

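/*
 * Write a range of translation entries (MTTs, or KLMs for indirect
 * mkeys) for @mr: entries are staged in a DMA-mapped scratch buffer and
 * pushed to the device with one UMR work request per chunk.
 */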
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			? sizeof(struct mlx5_klm)
			: sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	size_t size_to_map = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	if (mr->umem->is_odp) {
		if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
			struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
			size_t max_pages = ib_umem_odp_num_pages(odp) - idx;

			pages_to_map = min_t(size_t, pages_to_map, max_pages);
		}
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		size_to_map = npages * desc_size;
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		if (mr->umem->is_odp) {
			mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
		} else {
			__mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx,
					       npages, xlt,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages
			 * brought from the umem.
			 */
			memset(xlt + size_to_map, 0, size - size_to_map);
		}
		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Otherwise, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate) {
		if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) {
			err = -EINVAL;
			goto err_2;
		}
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
	}

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	set_mkc_access_pd_addr_fields(mkc, access_flags, virt_addr,
				      populate ? pd : dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

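/* Register a range of device memory (MEMIC or SW ICM) as an MR. */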
static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
				       u64 length, int acc, int mode)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET64(mkc, mkc, len, length);
	set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	mr->umem = NULL;
	set_mr_fields(dev, mr, 0, length, acc);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs)
{
	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
		return -EOPNOTSUPP;

	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
					  sg_list, num_sge);
}

struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
	u64 start_addr = mdm->dev_addr + attr->offset;
	int mode;

	switch (mdm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_MEMIC;
		start_addr -= pci_resource_start(dev->pdev, 0);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
				 attr->access_flags, mode);
}

struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool xlt_with_umr;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	xlt_with_umr = mlx5_ib_can_load_pas_with_umr(dev, length);
	/* ODP requires xlt update via umr to work. */
	if (!xlt_with_umr && (access_flags & IB_ACCESS_ON_DEMAND))
		return ERR_PTR(-EINVAL);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
	    length == U64_MAX) {
		if (virt_addr != start)
			return ERR_PTR(-EINVAL);
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}

	err = mr_umem_get(dev, start, length, access_flags, &umem,
			  &npages, &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	if (xlt_with_umr) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (IS_ERR(mr))
			mr = NULL;
	}

	if (!mr) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, !xlt_with_umr);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

	if (xlt_with_umr) {
		/*
		 * If the MR was created with reg_create then it will be
		 * configured properly but left disabled. It is safe to go ahead
		 * and configure it again via UMR while enabling it.
		 */
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);
		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

	if (is_odp_mr(mr)) {
		to_ib_umem_odp(mr->umem)->private = mr;
		init_waitqueue_head(&mr->q_deferred_work);
		atomic_set(&mr->num_deferred_work, 0);
		err = xa_err(xa_store(&dev->odp_mkeys,
				      mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
				      GFP_KERNEL));
		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

09689703 JG |
1462 | /** |
1463 | * mlx5_mr_cache_invalidate - Fence all DMA on the MR | |
1464 | * @mr: The MR to fence | |
1465 | * | |
1466 | * Upon return the NIC will not be doing any DMA to the pages under the MR, | |
1467 | * and any DMA inprogress will be completed. Failure of this function | |
1468 | * indicates the HW has failed catastrophically. | |
1469 | */ | |
1470 | int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr) | |
e126ba97 | 1471 | { |
0025b0bd | 1472 | struct mlx5_umr_wr umrwr = {}; |
e126ba97 | 1473 | |
09689703 | 1474 | if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) |
89ea94a7 MG |
1475 | return 0; |
1476 | ||
9ec4483a YH |
1477 | umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | |
1478 | MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; | |
7d0cc6ed | 1479 | umrwr.wr.opcode = MLX5_IB_WR_UMR; |
09689703 | 1480 | umrwr.pd = mr->dev->umrc.pd; |
7d0cc6ed | 1481 | umrwr.mkey = mr->mmkey.key; |
6a053953 | 1482 | umrwr.ignore_free_state = 1; |
e126ba97 | 1483 | |
09689703 | 1484 | return mlx5_ib_post_send_wait(mr->dev, &umrwr); |
e126ba97 EC |
1485 | } |
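/*
 * A minimal sketch (not driver code) of a hypothetical caller relying on
 * the fencing contract documented above: once mlx5_mr_cache_invalidate()
 * returns 0 the mkey is disabled and the device can no longer DMA to the
 * pages, so releasing the umem is safe.
 */
static int fence_then_release_sketch(struct mlx5_ib_mr *mr)
{
	int err = mlx5_mr_cache_invalidate(mr);

	if (err)
		return err;		/* HW failed; do not free the pages */
	ib_umem_release(mr->umem);	/* safe: no DMA can target them now */
	mr->umem = NULL;
	return 0;
}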
1486 | ||
7d0cc6ed | 1487 | static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, |
56e11d62 NO |
1488 | int access_flags, int flags) |
1489 | { | |
1490 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | |
56e11d62 | 1491 | struct mlx5_umr_wr umrwr = {}; |
56e11d62 NO |
1492 | int err; |
1493 | ||
56e11d62 NO |
1494 | umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE; |
1495 | ||
7d0cc6ed AK |
1496 | umrwr.wr.opcode = MLX5_IB_WR_UMR; |
1497 | umrwr.mkey = mr->mmkey.key; | |
56e11d62 | 1498 | |
31616255 | 1499 | if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) { |
56e11d62 | 1500 | umrwr.pd = pd; |
56e11d62 | 1501 | umrwr.access_flags = access_flags; |
31616255 | 1502 | umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; |
56e11d62 NO |
1503 | } |
1504 | ||
d5ea2df9 | 1505 | err = mlx5_ib_post_send_wait(dev, &umrwr); |
56e11d62 | 1506 | |
56e11d62 NO |
1507 | return err; |
1508 | } | |
1509 | ||
1510 | int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, | |
1511 | u64 length, u64 virt_addr, int new_access_flags, | |
1512 | struct ib_pd *new_pd, struct ib_udata *udata) | |
1513 | { | |
1514 | struct mlx5_ib_dev *dev = to_mdev(ib_mr->device); | |
1515 | struct mlx5_ib_mr *mr = to_mmr(ib_mr); | |
1516 | struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd; | |
1517 | int access_flags = flags & IB_MR_REREG_ACCESS ? | |
1518 | new_access_flags : | |
1519 | mr->access_flags; | |
56e11d62 | 1520 | int page_shift = 0; |
7d0cc6ed | 1521 | int upd_flags = 0; |
56e11d62 NO |
1522 | int npages = 0; |
1523 | int ncont = 0; | |
1524 | int order = 0; | |
b4bd701a | 1525 | u64 addr, len; |
56e11d62 NO |
1526 | int err; |
1527 | ||
1528 | mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", | |
1529 | start, virt_addr, length, access_flags); | |
1530 | ||
7d0cc6ed AK |
1531 | atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); |
1532 | ||
b4bd701a LR |
1533 | if (!mr->umem) |
1534 | return -EINVAL; | |
1535 | ||
880505cf JG |
1536 | if (is_odp_mr(mr)) |
1537 | return -EOPNOTSUPP; | |
1538 | ||
b4bd701a LR |
1539 | if (flags & IB_MR_REREG_TRANS) { |
1540 | addr = virt_addr; | |
1541 | len = length; | |
1542 | } else { | |
1543 | addr = mr->umem->address; | |
1544 | len = mr->umem->length; | |
1545 | } | |
1546 | ||
56e11d62 NO |
1547 | if (flags != IB_MR_REREG_PD) { |
1548 | /* | |
1549 | * Replace umem. This needs to be done whether or not UMR is | |
1550 | * used. | |
1551 | */ | |
1552 | flags |= IB_MR_REREG_TRANS; | |
1553 | ib_umem_release(mr->umem); | |
b4bd701a | 1554 | mr->umem = NULL; |
c320e527 MS |
1555 | err = mr_umem_get(dev, addr, len, access_flags, &mr->umem, |
1556 | &npages, &page_shift, &ncont, &order); | |
4638a3b2 LR |
1557 | if (err) |
1558 | goto err; | |
56e11d62 NO |
1559 | } |
1560 | ||
8383da3e JG |
1561 | if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags, |
1562 | access_flags) || | |
1563 | !mlx5_ib_can_load_pas_with_umr(dev, len) || | |
1564 | (flags & IB_MR_REREG_TRANS && | |
1565 | !mlx5_ib_pas_fits_in_mr(mr, addr, len))) { | |
56e11d62 NO |
1566 | /* |
1567 | * UMR can't be used - MKey needs to be replaced. | |
1568 | */ | |
b91e1751 | 1569 | if (mr->cache_ent) |
1769c4c5 JG |
1570 | detach_mr_from_cache(mr); |
1571 | err = destroy_mkey(dev, mr); | |
56e11d62 | 1572 | if (err) |
4638a3b2 | 1573 | goto err; |
56e11d62 NO |
1574 | |
1575 | mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont, | |
ff740aef | 1576 | page_shift, access_flags, true); |
56e11d62 | 1577 | |
4638a3b2 LR |
1578 | if (IS_ERR(mr)) { |
1579 | err = PTR_ERR(mr); | |
1580 | mr = to_mmr(ib_mr); | |
1581 | goto err; | |
1582 | } | |
56e11d62 NO |
1583 | } else { |
1584 | /* | |
1585 | * Send a UMR WQE | |
1586 | */ | |
7d0cc6ed AK |
1587 | mr->ibmr.pd = pd; |
1588 | mr->access_flags = access_flags; | |
1589 | mr->mmkey.iova = addr; | |
1590 | mr->mmkey.size = len; | |
1591 | mr->mmkey.pd = to_mpd(pd)->pdn; | |
1592 | ||
1593 | if (flags & IB_MR_REREG_TRANS) { | |
1594 | upd_flags = MLX5_IB_UPD_XLT_ADDR; | |
1595 | if (flags & IB_MR_REREG_PD) | |
1596 | upd_flags |= MLX5_IB_UPD_XLT_PD; | |
1597 | if (flags & IB_MR_REREG_ACCESS) | |
1598 | upd_flags |= MLX5_IB_UPD_XLT_ACCESS; | |
1599 | err = mlx5_ib_update_xlt(mr, 0, npages, page_shift, | |
1600 | upd_flags); | |
1601 | } else { | |
1602 | err = rereg_umr(pd, mr, access_flags, flags); | |
1603 | } | |
1604 | ||
4638a3b2 LR |
1605 | if (err) |
1606 | goto err; | |
56e11d62 NO |
1607 | } |
1608 | ||
ac2f7e62 | 1609 | set_mr_fields(dev, mr, npages, len, access_flags); |
56e11d62 | 1610 | |
56e11d62 | 1611 | return 0; |
4638a3b2 LR |
1612 | |
1613 | err: | |
836a0fbb LR |
1614 | ib_umem_release(mr->umem); |
1615 | mr->umem = NULL; | |
1616 | ||
4638a3b2 LR |
1617 | clean_mr(dev, mr); |
1618 | return err; | |
56e11d62 NO |
1619 | } |
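/*
 * A condensed sketch of the rereg decision above, with hypothetical
 * predicate parameters standing in for the mlx5_ib_can_* helpers: the fast
 * UMR path is only taken when the new access flags are UMR-reconfigurable,
 * the PAS list is small enough to load via UMR, and a translation change
 * still fits inside the existing mkey; otherwise the mkey is destroyed and
 * recreated through reg_create().
 */
enum rereg_path_sketch {
	REREG_VIA_UMR,		/* update the existing mkey in place */
	REREG_RECREATE_MKEY,	/* slow path: destroy_mkey() + reg_create() */
};

static enum rereg_path_sketch pick_rereg_path_sketch(bool can_reconfig_with_umr,
						     bool can_load_pas_with_umr,
						     bool rereg_trans,
						     bool pas_fit_in_mr)
{
	if (!can_reconfig_with_umr || !can_load_pas_with_umr ||
	    (rereg_trans && !pas_fit_in_mr))
		return REREG_RECREATE_MKEY;
	return REREG_VIA_UMR;
}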
1620 | ||
8a187ee5 SG |
1621 | static int |
1622 | mlx5_alloc_priv_descs(struct ib_device *device, | |
1623 | struct mlx5_ib_mr *mr, | |
1624 | int ndescs, | |
1625 | int desc_size) | |
1626 | { | |
1627 | int size = ndescs * desc_size; | |
1628 | int add_size; | |
1629 | int ret; | |
1630 | ||
1631 | add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0); | |
1632 | ||
1633 | mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); | |
1634 | if (!mr->descs_alloc) | |
1635 | return -ENOMEM; | |
1636 | ||
1637 | mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); | |
1638 | ||
9b0c289e | 1639 | mr->desc_map = dma_map_single(device->dev.parent, mr->descs, |
8a187ee5 | 1640 | size, DMA_TO_DEVICE); |
9b0c289e | 1641 | if (dma_mapping_error(device->dev.parent, mr->desc_map)) { |
8a187ee5 SG |
1642 | ret = -ENOMEM; |
1643 | goto err; | |
1644 | } | |
1645 | ||
1646 | return 0; | |
1647 | err: | |
1648 | kfree(mr->descs_alloc); | |
1649 | ||
1650 | return ret; | |
1651 | } | |
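/*
 * A sketch of the alignment arithmetic above, with a hypothetical helper
 * and plain integers instead of pointers. kmalloc() only guarantees
 * ARCH_KMALLOC_MINALIGN, so padding the request by
 * (MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN) guarantees that a 2048-byte
 * boundary with `size` usable bytes after it exists somewhere in the
 * buffer, whatever address kmalloc() returned; descs_alloc keeps the
 * original pointer so kfree() gets the address kmalloc() handed out.
 */
static unsigned long align_up_2048_sketch(unsigned long addr)
{
	/* The same rounding PTR_ALIGN(ptr, MLX5_UMR_ALIGN) performs. */
	return (addr + MLX5_UMR_ALIGN - 1) &
	       ~((unsigned long)MLX5_UMR_ALIGN - 1);
}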
1652 | ||
1653 | static void | |
1654 | mlx5_free_priv_descs(struct mlx5_ib_mr *mr) | |
1655 | { | |
1656 | if (mr->descs) { | |
1657 | struct ib_device *device = mr->ibmr.device; | |
1658 | int size = mr->max_descs * mr->desc_size; | |
1659 | ||
9b0c289e | 1660 | dma_unmap_single(device->dev.parent, mr->desc_map, |
8a187ee5 SG |
1661 | size, DMA_TO_DEVICE); |
1662 | kfree(mr->descs_alloc); | |
1663 | mr->descs = NULL; | |
1664 | } | |
1665 | } | |
1666 | ||
eeea6953 | 1667 | static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) |
e126ba97 | 1668 | { |
8b91ffc1 SG |
1669 | if (mr->sig) { |
1670 | if (mlx5_core_destroy_psv(dev->mdev, | |
1671 | mr->sig->psv_memory.psv_idx)) | |
1672 | mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", | |
1673 | mr->sig->psv_memory.psv_idx); | |
1674 | if (mlx5_core_destroy_psv(dev->mdev, | |
1675 | mr->sig->psv_wire.psv_idx)) | |
1676 | mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", | |
1677 | mr->sig->psv_wire.psv_idx); | |
50211ec9 | 1678 | xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key)); |
8b91ffc1 SG |
1679 | kfree(mr->sig); |
1680 | mr->sig = NULL; | |
1681 | } | |
1682 | ||
b91e1751 | 1683 | if (!mr->cache_ent) { |
eeea6953 | 1684 | destroy_mkey(dev, mr); |
b9332dad YH |
1685 | mlx5_free_priv_descs(mr); |
1686 | } | |
6aec21f6 HE |
1687 | } |
1688 | ||
eeea6953 | 1689 | static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) |
6aec21f6 | 1690 | { |
6aec21f6 HE |
1691 | int npages = mr->npages; |
1692 | struct ib_umem *umem = mr->umem; | |
1693 | ||
09689703 JG |
1694 | /* Stop all DMA */ |
1695 | if (is_odp_mr(mr)) | |
1696 | mlx5_ib_fence_odp_mr(mr); | |
1697 | else | |
1698 | clean_mr(dev, mr); | |
8b4d5bc5 | 1699 | |
b91e1751 | 1700 | if (mr->cache_ent) |
09689703 JG |
1701 | mlx5_mr_cache_free(dev, mr); |
1702 | else | |
1703 | kfree(mr); | |
6aec21f6 | 1704 | |
836a0fbb | 1705 | ib_umem_release(umem); |
09689703 | 1706 | atomic_sub(npages, &dev->mdev->priv.reg_pages); |
836a0fbb | 1707 | |
e126ba97 EC |
1708 | } |
1709 | ||
c4367a26 | 1710 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) |
fbcd4983 | 1711 | { |
6c984472 MG |
1712 | struct mlx5_ib_mr *mmr = to_mmr(ibmr); |
1713 | ||
de0ae958 IR |
1714 | if (ibmr->type == IB_MR_TYPE_INTEGRITY) { |
1715 | dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr); | |
1716 | dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr); | |
1717 | } | |
6c984472 | 1718 | |
5256edcb JG |
1719 | if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) { |
1720 | mlx5_ib_free_implicit_mr(mmr); | |
1721 | return 0; | |
1722 | } | |
1723 | ||
6c984472 MG |
1724 | dereg_mr(to_mdev(ibmr->device), mmr); |
1725 | ||
eeea6953 | 1726 | return 0; |
fbcd4983 IL |
1727 | } |
1728 | ||
7796d2a3 MG |
1729 | static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs, |
1730 | int access_mode, int page_shift) | |
1731 | { | |
1732 | void *mkc; | |
1733 | ||
1734 | mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); | |
1735 | ||
8383da3e JG |
1736 | /* This is only used from the kernel, so setting the PD is OK. */ |
1737 | set_mkc_access_pd_addr_fields(mkc, 0, 0, pd); | |
7796d2a3 | 1738 | MLX5_SET(mkc, mkc, free, 1); |
7796d2a3 MG |
1739 | MLX5_SET(mkc, mkc, translations_octword_size, ndescs); |
1740 | MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3); | |
1741 | MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7); | |
1742 | MLX5_SET(mkc, mkc, umr_en, 1); | |
1743 | MLX5_SET(mkc, mkc, log_page_size, page_shift); | |
1744 | } | |
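/*
 * A sketch of the access-mode split performed above (hypothetical
 * helpers): the 5-bit access mode is carried by two mkey context fields,
 * bits [1:0] in access_mode_1_0 and bits [4:2] in access_mode_4_2, and
 * joining the two fields recovers the original value for any mode below 32.
 */
static void access_mode_split_sketch(int access_mode, u8 *mode_1_0,
				     u8 *mode_4_2)
{
	*mode_1_0 = access_mode & 0x3;		/* bits [1:0] */
	*mode_4_2 = (access_mode >> 2) & 0x7;	/* bits [4:2] */
}

static int access_mode_join_sketch(u8 mode_1_0, u8 mode_4_2)
{
	return (mode_4_2 << 2) | mode_1_0;
}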
1745 | ||
1746 | static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, | |
1747 | int ndescs, int desc_size, int page_shift, | |
1748 | int access_mode, u32 *in, int inlen) | |
1749 | { | |
1750 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | |
1751 | int err; | |
1752 | ||
1753 | mr->access_mode = access_mode; | |
1754 | mr->desc_size = desc_size; | |
1755 | mr->max_descs = ndescs; | |
1756 | ||
1757 | err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); | |
1758 | if (err) | |
1759 | return err; | |
1760 | ||
1761 | mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift); | |
1762 | ||
fc6a9f86 | 1763 | err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); |
7796d2a3 MG |
1764 | if (err) |
1765 | goto err_free_descs; | |
1766 | ||
1767 | mr->mmkey.type = MLX5_MKEY_MR; | |
1768 | mr->ibmr.lkey = mr->mmkey.key; | |
1769 | mr->ibmr.rkey = mr->mmkey.key; | |
1770 | ||
1771 | return 0; | |
1772 | ||
1773 | err_free_descs: | |
1774 | mlx5_free_priv_descs(mr); | |
1775 | return err; | |
1776 | } | |
1777 | ||
6c984472 | 1778 | static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd, |
de0ae958 IR |
1779 | u32 max_num_sg, u32 max_num_meta_sg, |
1780 | int desc_size, int access_mode) | |
3121e3c4 | 1781 | { |
ec22eb53 | 1782 | int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); |
6c984472 | 1783 | int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4); |
7796d2a3 | 1784 | int page_shift = 0; |
ec22eb53 | 1785 | struct mlx5_ib_mr *mr; |
ec22eb53 | 1786 | u32 *in; |
b005d316 | 1787 | int err; |
3121e3c4 SG |
1788 | |
1789 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); | |
1790 | if (!mr) | |
1791 | return ERR_PTR(-ENOMEM); | |
1792 | ||
7796d2a3 MG |
1793 | mr->ibmr.pd = pd; |
1794 | mr->ibmr.device = pd->device; | |
1795 | ||
ec22eb53 | 1796 | in = kzalloc(inlen, GFP_KERNEL); |
3121e3c4 SG |
1797 | if (!in) { |
1798 | err = -ENOMEM; | |
1799 | goto err_free; | |
1800 | } | |
1801 | ||
de0ae958 | 1802 | if (access_mode == MLX5_MKC_ACCESS_MODE_MTT) |
7796d2a3 | 1803 | page_shift = PAGE_SHIFT; |
3121e3c4 | 1804 | |
7796d2a3 MG |
1805 | err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift, |
1806 | access_mode, in, inlen); | |
6c984472 MG |
1807 | if (err) |
1808 | goto err_free_in; | |
6c984472 | 1809 | |
6c984472 MG |
1810 | mr->umem = NULL; |
1811 | kfree(in); | |
1812 | ||
1813 | return mr; | |
1814 | ||
6c984472 MG |
1815 | err_free_in: |
1816 | kfree(in); | |
1817 | err_free: | |
1818 | kfree(mr); | |
1819 | return ERR_PTR(err); | |
1820 | } | |
1821 | ||
7796d2a3 MG |
1822 | static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, |
1823 | int ndescs, u32 *in, int inlen) | |
1824 | { | |
1825 | return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt), | |
1826 | PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in, | |
1827 | inlen); | |
1828 | } | |
1829 | ||
1830 | static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, | |
1831 | int ndescs, u32 *in, int inlen) | |
1832 | { | |
1833 | return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm), | |
1834 | 0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen); | |
1835 | } | |
1836 | ||
1837 | static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, | |
1838 | int max_num_sg, int max_num_meta_sg, | |
1839 | u32 *in, int inlen) | |
1840 | { | |
1841 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | |
1842 | u32 psv_index[2]; | |
1843 | void *mkc; | |
1844 | int err; | |
1845 | ||
1846 | mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); | |
1847 | if (!mr->sig) | |
1848 | return -ENOMEM; | |
1849 | ||
1850 | /* create mem & wire PSVs */ | |
1851 | err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index); | |
1852 | if (err) | |
1853 | goto err_free_sig; | |
1854 | ||
1855 | mr->sig->psv_memory.psv_idx = psv_index[0]; | |
1856 | mr->sig->psv_wire.psv_idx = psv_index[1]; | |
1857 | ||
1858 | mr->sig->sig_status_checked = true; | |
1859 | mr->sig->sig_err_exists = false; | |
1860 | /* Arm SIGERR on the next UMR */ |
1861 | ++mr->sig->sigerr_count; | |
1862 | mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, | |
1863 | sizeof(struct mlx5_klm), | |
1864 | MLX5_MKC_ACCESS_MODE_KLMS); | |
1865 | if (IS_ERR(mr->klm_mr)) { | |
1866 | err = PTR_ERR(mr->klm_mr); | |
1867 | goto err_destroy_psv; | |
1868 | } | |
1869 | mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, | |
1870 | sizeof(struct mlx5_mtt), | |
1871 | MLX5_MKC_ACCESS_MODE_MTT); | |
1872 | if (IS_ERR(mr->mtt_mr)) { | |
1873 | err = PTR_ERR(mr->mtt_mr); | |
1874 | goto err_free_klm_mr; | |
1875 | } | |
1876 | ||
1877 | /* Set bsf descriptors for mkey */ | |
1878 | mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); | |
1879 | MLX5_SET(mkc, mkc, bsf_en, 1); | |
1880 | MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE); | |
1881 | ||
1882 | err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0, | |
1883 | MLX5_MKC_ACCESS_MODE_KLMS, in, inlen); | |
1884 | if (err) | |
1885 | goto err_free_mtt_mr; | |
1886 | ||
50211ec9 JG |
1887 | err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), |
1888 | mr->sig, GFP_KERNEL)); | |
1889 | if (err) | |
1890 | goto err_free_descs; | |
7796d2a3 MG |
1891 | return 0; |
1892 | ||
50211ec9 JG |
1893 | err_free_descs: |
1894 | destroy_mkey(dev, mr); | |
1895 | mlx5_free_priv_descs(mr); | |
7796d2a3 MG |
1896 | err_free_mtt_mr: |
1897 | dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr); | |
1898 | mr->mtt_mr = NULL; | |
1899 | err_free_klm_mr: | |
1900 | dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr); | |
1901 | mr->klm_mr = NULL; | |
1902 | err_destroy_psv: | |
1903 | if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) | |
1904 | mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", | |
1905 | mr->sig->psv_memory.psv_idx); | |
1906 | if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) | |
1907 | mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", | |
1908 | mr->sig->psv_wire.psv_idx); | |
1909 | err_free_sig: | |
1910 | kfree(mr->sig); | |
1911 | ||
1912 | return err; | |
1913 | } | |
1914 | ||
6c984472 MG |
1915 | static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd, |
1916 | enum ib_mr_type mr_type, u32 max_num_sg, | |
1917 | u32 max_num_meta_sg) | |
1918 | { | |
1919 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | |
1920 | int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); | |
1921 | int ndescs = ALIGN(max_num_sg, 4); | |
1922 | struct mlx5_ib_mr *mr; | |
6c984472 MG |
1923 | u32 *in; |
1924 | int err; | |
1925 | ||
1926 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); | |
1927 | if (!mr) | |
1928 | return ERR_PTR(-ENOMEM); | |
1929 | ||
1930 | in = kzalloc(inlen, GFP_KERNEL); | |
1931 | if (!in) { | |
1932 | err = -ENOMEM; | |
1933 | goto err_free; | |
1934 | } | |
1935 | ||
7796d2a3 MG |
1936 | mr->ibmr.device = pd->device; |
1937 | mr->umem = NULL; | |
3121e3c4 | 1938 | |
7796d2a3 MG |
1939 | switch (mr_type) { |
1940 | case IB_MR_TYPE_MEM_REG: | |
1941 | err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen); | |
1942 | break; | |
1943 | case IB_MR_TYPE_SG_GAPS: | |
1944 | err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen); | |
1945 | break; | |
1946 | case IB_MR_TYPE_INTEGRITY: | |
1947 | err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg, | |
1948 | max_num_meta_sg, in, inlen); | |
1949 | break; | |
1950 | default: | |
9bee178b SG |
1951 | mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type); |
1952 | err = -EINVAL; | |
3121e3c4 SG |
1953 | } |
1954 | ||
3121e3c4 | 1955 | if (err) |
7796d2a3 | 1956 | goto err_free_in; |
3121e3c4 | 1957 | |
3121e3c4 SG |
1958 | kfree(in); |
1959 | ||
1960 | return &mr->ibmr; | |
1961 | ||
3121e3c4 SG |
1962 | err_free_in: |
1963 | kfree(in); | |
1964 | err_free: | |
1965 | kfree(mr); | |
1966 | return ERR_PTR(err); | |
1967 | } | |
1968 | ||
6c984472 | 1969 | struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
42a3b153 | 1970 | u32 max_num_sg) |
6c984472 MG |
1971 | { |
1972 | return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0); | |
1973 | } | |
1974 | ||
1975 | struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd, | |
1976 | u32 max_num_sg, u32 max_num_meta_sg) | |
1977 | { | |
1978 | return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg, | |
1979 | max_num_meta_sg); | |
1980 | } | |
1981 | ||
d18bb3e1 | 1982 | int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) |
d2370e0a | 1983 | { |
d18bb3e1 | 1984 | struct mlx5_ib_dev *dev = to_mdev(ibmw->device); |
ec22eb53 | 1985 | int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); |
d18bb3e1 | 1986 | struct mlx5_ib_mw *mw = to_mmw(ibmw); |
ec22eb53 SM |
1987 | u32 *in = NULL; |
1988 | void *mkc; | |
d2370e0a MB |
1989 | int ndescs; |
1990 | int err; | |
1991 | struct mlx5_ib_alloc_mw req = {}; | |
1992 | struct { | |
1993 | __u32 comp_mask; | |
1994 | __u32 response_length; | |
1995 | } resp = {}; | |
1996 | ||
1997 | err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); | |
1998 | if (err) | |
d18bb3e1 | 1999 | return err; |
d2370e0a MB |
2000 | |
2001 | if (req.comp_mask || req.reserved1 || req.reserved2) | |
d18bb3e1 | 2002 | return -EOPNOTSUPP; |
d2370e0a MB |
2003 | |
2004 | if (udata->inlen > sizeof(req) && | |
2005 | !ib_is_udata_cleared(udata, sizeof(req), | |
2006 | udata->inlen - sizeof(req))) | |
d18bb3e1 | 2007 | return -EOPNOTSUPP; |
d2370e0a MB |
2008 | |
2009 | ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4); | |
2010 | ||
ec22eb53 | 2011 | in = kzalloc(inlen, GFP_KERNEL); |
d18bb3e1 | 2012 | if (!in) { |
d2370e0a MB |
2013 | err = -ENOMEM; |
2014 | goto free; | |
2015 | } | |
2016 | ||
ec22eb53 SM |
2017 | mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); |
2018 | ||
2019 | MLX5_SET(mkc, mkc, free, 1); | |
2020 | MLX5_SET(mkc, mkc, translations_octword_size, ndescs); | |
d18bb3e1 | 2021 | MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn); |
ec22eb53 SM |
2022 | MLX5_SET(mkc, mkc, umr_en, 1); |
2023 | MLX5_SET(mkc, mkc, lr, 1); | |
cdbd0d2b | 2024 | MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS); |
d18bb3e1 | 2025 | MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2))); |
ec22eb53 SM |
2026 | MLX5_SET(mkc, mkc, qpn, 0xffffff); |
2027 | ||
fc6a9f86 | 2028 | err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen); |
d2370e0a MB |
2029 | if (err) |
2030 | goto free; | |
2031 | ||
aa8e08d2 | 2032 | mw->mmkey.type = MLX5_MKEY_MW; |
d18bb3e1 | 2033 | ibmw->rkey = mw->mmkey.key; |
db570d7d | 2034 | mw->ndescs = ndescs; |
d2370e0a | 2035 | |
70c1430f LR |
2036 | resp.response_length = |
2037 | min(offsetofend(typeof(resp), response_length), udata->outlen); | |
d2370e0a MB |
2038 | if (resp.response_length) { |
2039 | err = ib_copy_to_udata(udata, &resp, resp.response_length); | |
d18bb3e1 LR |
2040 | if (err) |
2041 | goto free_mkey; | |
d2370e0a MB |
2042 | } |
2043 | ||
806b101b JG |
2044 | if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { |
2045 | err = xa_err(xa_store(&dev->odp_mkeys, | |
2046 | mlx5_base_mkey(mw->mmkey.key), &mw->mmkey, | |
2047 | GFP_KERNEL)); | |
2048 | if (err) | |
2049 | goto free_mkey; | |
2050 | } | |
2051 | ||
d2370e0a | 2052 | kfree(in); |
d18bb3e1 | 2053 | return 0; |
d2370e0a | 2054 | |
806b101b JG |
2055 | free_mkey: |
2056 | mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey); | |
d2370e0a | 2057 | free: |
d2370e0a | 2058 | kfree(in); |
d18bb3e1 | 2059 | return err; |
d2370e0a MB |
2060 | } |
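/*
 * A sketch of the compat rule that ib_is_udata_cleared() enforces above,
 * written over a plain buffer (hypothetical helper): a newer userspace may
 * pass a larger input struct than the kernel understands, and the request
 * is accepted only if every byte beyond the known fields is zero, so a
 * meaningful new field is never silently ignored.
 */
static bool trailing_bytes_cleared_sketch(const u8 *buf, size_t len,
					  size_t known)
{
	size_t i;

	for (i = known; i < len; i++)
		if (buf[i])
			return false;	/* unknown non-zero field: reject */
	return true;
}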
2061 | ||
2062 | int mlx5_ib_dealloc_mw(struct ib_mw *mw) | |
2063 | { | |
04177915 | 2064 | struct mlx5_ib_dev *dev = to_mdev(mw->device); |
d2370e0a | 2065 | struct mlx5_ib_mw *mmw = to_mmw(mw); |
d2370e0a | 2066 | |
04177915 | 2067 | if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { |
806b101b | 2068 | xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)); |
04177915 JG |
2069 | /* |
2070 | * pagefault_single_data_segment() may be accessing mmw under | |
2071 | * SRCU if the user bound an ODP MR to this MW. | |
2072 | */ | |
806b101b | 2073 | synchronize_srcu(&dev->odp_srcu); |
04177915 JG |
2074 | } |
2075 | ||
d18bb3e1 | 2076 | return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey); |
d2370e0a MB |
2077 | } |
2078 | ||
d5436ba0 SG |
2079 | int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, |
2080 | struct ib_mr_status *mr_status) | |
2081 | { | |
2082 | struct mlx5_ib_mr *mmr = to_mmr(ibmr); | |
2083 | int ret = 0; | |
2084 | ||
2085 | if (check_mask & ~IB_MR_CHECK_SIG_STATUS) { | |
2086 | pr_err("Invalid status check mask\n"); | |
2087 | ret = -EINVAL; | |
2088 | goto done; | |
2089 | } | |
2090 | ||
2091 | mr_status->fail_status = 0; | |
2092 | if (check_mask & IB_MR_CHECK_SIG_STATUS) { | |
2093 | if (!mmr->sig) { | |
2094 | ret = -EINVAL; | |
2095 | pr_err("signature status check requested on a non-signature enabled MR\n"); | |
2096 | goto done; | |
2097 | } | |
2098 | ||
2099 | mmr->sig->sig_status_checked = true; | |
2100 | if (!mmr->sig->sig_err_exists) | |
2101 | goto done; | |
2102 | ||
2103 | if (ibmr->lkey == mmr->sig->err_item.key) | |
2104 | memcpy(&mr_status->sig_err, &mmr->sig->err_item, | |
2105 | sizeof(mr_status->sig_err)); | |
2106 | else { | |
2107 | mr_status->sig_err.err_type = IB_SIG_BAD_GUARD; | |
2108 | mr_status->sig_err.sig_err_offset = 0; | |
2109 | mr_status->sig_err.key = mmr->sig->err_item.key; | |
2110 | } | |
2111 | ||
2112 | mmr->sig->sig_err_exists = false; | |
2113 | mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS; | |
2114 | } | |
2115 | ||
2116 | done: | |
2117 | return ret; | |
2118 | } | |
8a187ee5 | 2119 | |
2563e2f3 MG |
2120 | static int |
2121 | mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, | |
2122 | int data_sg_nents, unsigned int *data_sg_offset, | |
2123 | struct scatterlist *meta_sg, int meta_sg_nents, | |
2124 | unsigned int *meta_sg_offset) | |
2125 | { | |
2126 | struct mlx5_ib_mr *mr = to_mmr(ibmr); | |
2127 | unsigned int sg_offset = 0; | |
2128 | int n = 0; | |
2129 | ||
2130 | mr->meta_length = 0; | |
2131 | if (data_sg_nents == 1) { | |
2132 | n++; | |
2133 | mr->ndescs = 1; | |
2134 | if (data_sg_offset) | |
2135 | sg_offset = *data_sg_offset; | |
2136 | mr->data_length = sg_dma_len(data_sg) - sg_offset; | |
2137 | mr->data_iova = sg_dma_address(data_sg) + sg_offset; | |
2138 | if (meta_sg_nents == 1) { | |
2139 | n++; | |
2140 | mr->meta_ndescs = 1; | |
2141 | if (meta_sg_offset) | |
2142 | sg_offset = *meta_sg_offset; | |
2143 | else | |
2144 | sg_offset = 0; | |
2145 | mr->meta_length = sg_dma_len(meta_sg) - sg_offset; | |
2146 | mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; | |
2147 | } | |
2148 | ibmr->length = mr->data_length + mr->meta_length; | |
2149 | } | |
2150 | ||
2151 | return n; | |
2152 | } | |
2153 | ||
b005d316 SG |
2154 | static int |
2155 | mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, | |
2156 | struct scatterlist *sgl, | |
ff2ba993 | 2157 | unsigned short sg_nents, |
6c984472 MG |
2158 | unsigned int *sg_offset_p, |
2159 | struct scatterlist *meta_sgl, | |
2160 | unsigned short meta_sg_nents, | |
2161 | unsigned int *meta_sg_offset_p) | |
b005d316 SG |
2162 | { |
2163 | struct scatterlist *sg = sgl; | |
2164 | struct mlx5_klm *klms = mr->descs; | |
9aa8b321 | 2165 | unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; |
b005d316 | 2166 | u32 lkey = mr->ibmr.pd->local_dma_lkey; |
6c984472 | 2167 | int i, j = 0; |
b005d316 | 2168 | |
ff2ba993 | 2169 | mr->ibmr.iova = sg_dma_address(sg) + sg_offset; |
b005d316 | 2170 | mr->ibmr.length = 0; |
b005d316 SG |
2171 | |
2172 | for_each_sg(sgl, sg, sg_nents, i) { | |
99975cd4 | 2173 | if (unlikely(i >= mr->max_descs)) |
b005d316 | 2174 | break; |
ff2ba993 CH |
2175 | klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset); |
2176 | klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); | |
b005d316 | 2177 | klms[i].key = cpu_to_be32(lkey); |
0a49f2c3 | 2178 | mr->ibmr.length += sg_dma_len(sg) - sg_offset; |
ff2ba993 CH |
2179 | |
2180 | sg_offset = 0; | |
b005d316 SG |
2181 | } |
2182 | ||
9aa8b321 BVA |
2183 | if (sg_offset_p) |
2184 | *sg_offset_p = sg_offset; | |
2185 | ||
6c984472 MG |
2186 | mr->ndescs = i; |
2187 | mr->data_length = mr->ibmr.length; | |
2188 | ||
2189 | if (meta_sg_nents) { | |
2190 | sg = meta_sgl; | |
2191 | sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0; | |
2192 | for_each_sg(meta_sgl, sg, meta_sg_nents, j) { | |
2193 | if (unlikely(i + j >= mr->max_descs)) | |
2194 | break; | |
2195 | klms[i + j].va = cpu_to_be64(sg_dma_address(sg) + | |
2196 | sg_offset); | |
2197 | klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) - | |
2198 | sg_offset); | |
2199 | klms[i + j].key = cpu_to_be32(lkey); | |
2200 | mr->ibmr.length += sg_dma_len(sg) - sg_offset; | |
2201 | ||
2202 | sg_offset = 0; | |
2203 | } | |
2204 | if (meta_sg_offset_p) | |
2205 | *meta_sg_offset_p = sg_offset; | |
2206 | ||
2207 | mr->meta_ndescs = j; | |
2208 | mr->meta_length = mr->ibmr.length - mr->data_length; | |
2209 | } | |
2210 | ||
2211 | return i + j; | |
b005d316 SG |
2212 | } |
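/*
 * A minimal sketch of the KLM list built by mlx5_ib_sg_to_klms() above,
 * with hypothetical types and host-endian fields (the real mlx5_klm
 * descriptors are big-endian on the wire): one {va, byte count, lkey}
 * triple per scatter entry, where the caller's starting offset is consumed
 * by the first entry only.
 */
struct klm_sketch {
	u64 va;
	u32 bcount;
	u32 key;
};

static int build_klm_list_sketch(struct klm_sketch *klms, int max_descs,
				 const u64 *addrs, const u32 *lens, int nents,
				 u32 lkey, unsigned int offset)
{
	int i;

	for (i = 0; i < nents && i < max_descs; i++) {
		klms[i].va = addrs[i] + offset;
		klms[i].bcount = lens[i] - offset;
		klms[i].key = lkey;
		offset = 0;	/* only the first entry is offset-adjusted */
	}
	return i;		/* number of descriptors produced */
}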
2213 | ||
8a187ee5 SG |
2214 | static int mlx5_set_page(struct ib_mr *ibmr, u64 addr) |
2215 | { | |
2216 | struct mlx5_ib_mr *mr = to_mmr(ibmr); | |
2217 | __be64 *descs; | |
2218 | ||
2219 | if (unlikely(mr->ndescs == mr->max_descs)) | |
2220 | return -ENOMEM; | |
2221 | ||
2222 | descs = mr->descs; | |
2223 | descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); | |
2224 | ||
2225 | return 0; | |
2226 | } | |
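/*
 * A sketch of the descriptor value written by mlx5_set_page() above:
 * `page_addr` is a page-aligned DMA address, so its low bits are zero and
 * are free to carry the read/write enable flags (MLX5_EN_RD/MLX5_EN_WR
 * occupy low-order bits in the real layout).
 */
static u64 mtt_desc_sketch(u64 page_addr)
{
	return page_addr | MLX5_EN_RD | MLX5_EN_WR;
}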
2227 | ||
de0ae958 IR |
2228 | static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr) |
2229 | { | |
2230 | struct mlx5_ib_mr *mr = to_mmr(ibmr); | |
2231 | __be64 *descs; | |
2232 | ||
2233 | if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs)) | |
2234 | return -ENOMEM; | |
2235 | ||
2236 | descs = mr->descs; | |
2237 | descs[mr->ndescs + mr->meta_ndescs++] = | |
2238 | cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); | |
2239 | ||
2240 | return 0; | |
2241 | } | |
2242 | ||
2243 | static int | |
2244 | mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, | |
6c984472 MG |
2245 | int data_sg_nents, unsigned int *data_sg_offset, |
2246 | struct scatterlist *meta_sg, int meta_sg_nents, | |
2247 | unsigned int *meta_sg_offset) | |
2248 | { | |
2249 | struct mlx5_ib_mr *mr = to_mmr(ibmr); | |
de0ae958 | 2250 | struct mlx5_ib_mr *pi_mr = mr->mtt_mr; |
6c984472 MG |
2251 | int n; |
2252 | ||
de0ae958 IR |
2253 | pi_mr->ndescs = 0; |
2254 | pi_mr->meta_ndescs = 0; | |
2255 | pi_mr->meta_length = 0; | |
2256 | ||
2257 | ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, | |
2258 | pi_mr->desc_size * pi_mr->max_descs, | |
2259 | DMA_TO_DEVICE); | |
2260 | ||
2261 | pi_mr->ibmr.page_size = ibmr->page_size; | |
2262 | n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset, | |
2263 | mlx5_set_page); | |
2264 | if (n != data_sg_nents) | |
2265 | return n; | |
2266 | ||
2563e2f3 | 2267 | pi_mr->data_iova = pi_mr->ibmr.iova; |
de0ae958 IR |
2268 | pi_mr->data_length = pi_mr->ibmr.length; |
2269 | pi_mr->ibmr.length = pi_mr->data_length; | |
2270 | ibmr->length = pi_mr->data_length; | |
2271 | ||
2272 | if (meta_sg_nents) { | |
2273 | u64 page_mask = ~((u64)ibmr->page_size - 1); | |
2563e2f3 | 2274 | u64 iova = pi_mr->data_iova; |
de0ae958 IR |
2275 | |
2276 | n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents, | |
2277 | meta_sg_offset, mlx5_set_page_pi); | |
2278 | ||
2279 | pi_mr->meta_length = pi_mr->ibmr.length; | |
2280 | /* | |
2281 | * PI address for the HW is the offset of the metadata address | |
2282 | * relative to the first data page address. | |
2283 | * It equals the first data page address + the size of the data pages + |
2284 | * the metadata offset within the first metadata page. |
2285 | */ | |
2286 | pi_mr->pi_iova = (iova & page_mask) + | |
2287 | pi_mr->ndescs * ibmr->page_size + | |
2288 | (pi_mr->ibmr.iova & ~page_mask); | |
2289 | /* | |
2290 | * In order to use one MTT MR for data and metadata, we also |
2291 | * register the gaps between the end of the data and the start of |
2292 | * the metadata (the sig MR will verify that the HW accesses the |
2293 | * right addresses). This mapping is safe because we use an |
2294 | * internal mkey for the registration. |
2295 | */ | |
2296 | pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova; | |
2297 | pi_mr->ibmr.iova = iova; | |
2298 | ibmr->length += pi_mr->meta_length; | |
2299 | } | |
2300 | ||
2301 | ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, | |
2302 | pi_mr->desc_size * pi_mr->max_descs, | |
2303 | DMA_TO_DEVICE); | |
2304 | ||
2305 | return n; | |
2306 | } | |
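/*
 * A worked example of the pi_iova computation above, with made-up numbers
 * and a hypothetical helper. Assume a 4 KiB MR page size, data starting at
 * iova 0x10234 (offset 0x234 into its page), two data pages mapped
 * (ndescs == 2), and metadata starting at offset 0x56 into its own page:
 *
 *   pi_iova = (0x10234 & ~0xfffULL) + 2 * 0x1000 + 0x56
 *           = 0x10000 + 0x2000 + 0x56 = 0x12056
 *
 * i.e. the metadata address the HW uses sits right after the registered
 * data pages, adjusted by the metadata's in-page offset.
 */
static u64 pi_iova_sketch(u64 data_iova, u32 ndescs, u64 page_size,
			  u64 meta_iova)
{
	u64 page_mask = ~(page_size - 1);

	return (data_iova & page_mask) + (u64)ndescs * page_size +
	       (meta_iova & ~page_mask);
}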
2307 | ||
2308 | static int | |
2309 | mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, | |
2310 | int data_sg_nents, unsigned int *data_sg_offset, | |
2311 | struct scatterlist *meta_sg, int meta_sg_nents, | |
2312 | unsigned int *meta_sg_offset) | |
2313 | { | |
2314 | struct mlx5_ib_mr *mr = to_mmr(ibmr); | |
2315 | struct mlx5_ib_mr *pi_mr = mr->klm_mr; | |
2316 | int n; | |
6c984472 MG |
2317 | |
2318 | pi_mr->ndescs = 0; | |
2319 | pi_mr->meta_ndescs = 0; | |
2320 | pi_mr->meta_length = 0; | |
2321 | ||
2322 | ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, | |
2323 | pi_mr->desc_size * pi_mr->max_descs, | |
2324 | DMA_TO_DEVICE); | |
2325 | ||
2326 | n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset, | |
2327 | meta_sg, meta_sg_nents, meta_sg_offset); | |
2328 | ||
de0ae958 IR |
2329 | ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, |
2330 | pi_mr->desc_size * pi_mr->max_descs, | |
2331 | DMA_TO_DEVICE); | |
2332 | ||
6c984472 | 2333 | /* This is a zero-based memory region */ |
2563e2f3 | 2334 | pi_mr->data_iova = 0; |
6c984472 | 2335 | pi_mr->ibmr.iova = 0; |
de0ae958 | 2336 | pi_mr->pi_iova = pi_mr->data_length; |
6c984472 | 2337 | ibmr->length = pi_mr->ibmr.length; |
6c984472 | 2338 | |
de0ae958 IR |
2339 | return n; |
2340 | } | |
6c984472 | 2341 | |
de0ae958 IR |
2342 | int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, |
2343 | int data_sg_nents, unsigned int *data_sg_offset, | |
2344 | struct scatterlist *meta_sg, int meta_sg_nents, | |
2345 | unsigned int *meta_sg_offset) | |
2346 | { | |
2347 | struct mlx5_ib_mr *mr = to_mmr(ibmr); | |
2563e2f3 | 2348 | struct mlx5_ib_mr *pi_mr = NULL; |
de0ae958 IR |
2349 | int n; |
2350 | ||
2351 | WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY); | |
2352 | ||
2563e2f3 MG |
2353 | mr->ndescs = 0; |
2354 | mr->data_length = 0; | |
2355 | mr->data_iova = 0; | |
2356 | mr->meta_ndescs = 0; | |
2357 | mr->pi_iova = 0; | |
2358 | /* | |
2359 | * As a performance optimization, when possible, there is no need to |
2360 | * perform a UMR operation to register the data/metadata buffers. |
2361 | * First try to map the sg lists to PA descriptors with local_dma_lkey, |
2362 | * and fall back to UMR only in case of a failure. |
2363 | */ | |
2364 | n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents, | |
2365 | data_sg_offset, meta_sg, meta_sg_nents, | |
2366 | meta_sg_offset); | |
2367 | if (n == data_sg_nents + meta_sg_nents) | |
2368 | goto out; | |
de0ae958 IR |
2369 | /* |
2370 | * As a performance optimization, when possible, there is no need to map |
2371 | * the sg lists to KLM descriptors. First try to map the sg lists to MTT |
2372 | * descriptors and fall back to KLM only in case of a failure. |
2373 | * It's more efficient for the HW to work with MTT descriptors |
2374 | * (especially under high load). |
2375 | * Use KLM (indirect access) only if it's mandatory. | |
2376 | */ | |
2563e2f3 | 2377 | pi_mr = mr->mtt_mr; |
de0ae958 IR |
2378 | n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents, |
2379 | data_sg_offset, meta_sg, meta_sg_nents, | |
2380 | meta_sg_offset); | |
2381 | if (n == data_sg_nents + meta_sg_nents) | |
2382 | goto out; | |
2383 | ||
2384 | pi_mr = mr->klm_mr; | |
2385 | n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents, | |
2386 | data_sg_offset, meta_sg, meta_sg_nents, | |
2387 | meta_sg_offset); | |
6c984472 MG |
2388 | if (unlikely(n != data_sg_nents + meta_sg_nents)) |
2389 | return -ENOMEM; | |
2390 | ||
de0ae958 IR |
2391 | out: |
2392 | /* This is a zero-based memory region */ |
2393 | ibmr->iova = 0; | |
2394 | mr->pi_mr = pi_mr; | |
2563e2f3 MG |
2395 | if (pi_mr) |
2396 | ibmr->sig_attrs->meta_length = pi_mr->meta_length; | |
2397 | else | |
2398 | ibmr->sig_attrs->meta_length = mr->meta_length; | |
de0ae958 | 2399 | |
6c984472 MG |
2400 | return 0; |
2401 | } | |
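/*
 * A condensed sketch of the three-step mapping strategy above
 * (hypothetical callback type and helper): try PA first (no UMR needed at
 * all), then MTT, then KLM, keeping the first scheme that consumes every
 * data and metadata entry. The real function additionally records which
 * pi_mr ended up being used and zeroes ibmr->iova.
 */
typedef int (*pi_map_fn_sketch)(struct ib_mr *ibmr,
				struct scatterlist *data_sg,
				int data_sg_nents,
				unsigned int *data_sg_offset,
				struct scatterlist *meta_sg,
				int meta_sg_nents,
				unsigned int *meta_sg_offset);

static int map_pi_with_fallback_sketch(const pi_map_fn_sketch *mappers,
				       int nmappers, struct ib_mr *ibmr,
				       struct scatterlist *data_sg,
				       int data_sg_nents,
				       unsigned int *data_sg_offset,
				       struct scatterlist *meta_sg,
				       int meta_sg_nents,
				       unsigned int *meta_sg_offset)
{
	int i;

	for (i = 0; i < nmappers; i++) {
		/* Success means every data and metadata entry was mapped. */
		if (mappers[i](ibmr, data_sg, data_sg_nents, data_sg_offset,
			       meta_sg, meta_sg_nents, meta_sg_offset) ==
		    data_sg_nents + meta_sg_nents)
			return 0;	/* cheapest scheme that fits wins */
	}
	return -ENOMEM;			/* matches the error path above */
}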
2402 | ||
ff2ba993 | 2403 | int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, |
9aa8b321 | 2404 | unsigned int *sg_offset) |
8a187ee5 SG |
2405 | { |
2406 | struct mlx5_ib_mr *mr = to_mmr(ibmr); | |
2407 | int n; | |
2408 | ||
2409 | mr->ndescs = 0; | |
2410 | ||
2411 | ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, | |
2412 | mr->desc_size * mr->max_descs, | |
2413 | DMA_TO_DEVICE); | |
2414 | ||
ec22eb53 | 2415 | if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) |
6c984472 MG |
2416 | n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0, |
2417 | NULL); | |
b005d316 | 2418 | else |
ff2ba993 CH |
2419 | n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, |
2420 | mlx5_set_page); | |
8a187ee5 SG |
2421 | |
2422 | ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, | |
2423 | mr->desc_size * mr->max_descs, | |
2424 | DMA_TO_DEVICE); | |
2425 | ||
2426 | return n; | |
2427 | } |