/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mlx4_ib.h"

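/*
 * Translate ib_verbs access flags (IB_ACCESS_*) into the MLX4_PERM_*
 * bits the HCA expects in its memory protection table (MPT) entries.
 * Local read access is always granted.
 */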
static u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
               (acc & IB_ACCESS_MW_BIND       ? MLX4_PERM_BIND_MW      : 0) |
               MLX4_PERM_LOCAL_READ;
}

static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
        switch (type) {
        case IB_MW_TYPE_1:      return MLX4_MW_TYPE_1;
        case IB_MW_TYPE_2:      return MLX4_MW_TYPE_2;
        default:                return -1;
        }
}

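/*
 * Allocate a DMA MR: iova 0 and length ~0ull make it span the whole
 * address space, so the returned lkey can be used for local DMA
 * without registering each buffer individually.
 */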
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx4_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
                            ~0ull, convert_access(acc), 0, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

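/*
 * Walk the DMA-mapped scatterlist of a user memory region and write
 * the addresses of its pages into the MR's memory translation table
 * (MTT), batched one PAGE_SIZE worth of entries at a time.
 */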
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
                           struct ib_umem *umem)
{
        u64 *pages;
        int i, k, entry;
        int n;
        int len;
        int err = 0;
        struct scatterlist *sg;

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        i = n = 0;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                len = sg_dma_len(sg) >> mtt->page_shift;
                for (k = 0; k < len; ++k) {
                        pages[i++] = sg_dma_address(sg) +
                                umem->page_size * k;
                        /*
                         * Be friendly to mlx4_write_mtt() and
                         * pass it chunks of appropriate size.
                         */
                        if (i == PAGE_SIZE / sizeof(u64)) {
                                err = mlx4_write_mtt(dev->dev, mtt, n,
                                                     i, pages);
                                if (err)
                                        goto out;
                                n += i;
                                i = 0;
                        }
                }
        }

        if (i)
                err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);

out:
        free_page((unsigned long) pages);
        return err;
}

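/*
 * Register a user memory region: pin it with ib_umem_get(), allocate
 * an MPT entry and MTTs for it, populate the MTTs, and enable the MR
 * in hardware.
 */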
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int shift;
        int err;
        int n;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        /*
         * Force registering the memory as writable: this is needed for
         * memory re-registration, and the HCA still enforces the access
         * rights actually requested.
         */
        mr->umem = ib_umem_get(pd->uobject->context, start, length,
                               access_flags | IB_ACCESS_LOCAL_WRITE, 0);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err_free;
        }

        n = ib_umem_page_count(mr->umem);
        shift = ilog2(mr->umem->page_size);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
                            convert_access(access_flags), n, shift, &mr->mmr);
        if (err)
                goto err_umem;

        err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
        if (err)
                goto err_mr;

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
        ib_umem_release(mr->umem);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

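/*
 * Re-register an existing user MR in place.  Depending on the flags,
 * this moves the MR to another PD, changes its access rights, and/or
 * pins a new address range, editing the MPT entry while it is held
 * away from hardware between mlx4_mr_hw_get_mpt() and
 * mlx4_mr_hw_put_mpt().
 */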
int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                          u64 start, u64 length, u64 virt_addr,
                          int mr_access_flags, struct ib_pd *pd,
                          struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(mr->device);
        struct mlx4_ib_mr *mmr = to_mmr(mr);
        struct mlx4_mpt_entry *mpt_entry;
        struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
        int err;

        /* Since this call and mlx4_ib_dereg_mr are synchronized via uverbs,
         * we assume that they cannot run concurrently.  Otherwise, a
         * race would exist.
         */
        err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
        if (err)
                return err;

        if (flags & IB_MR_REREG_PD) {
                err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
                                           to_mpd(pd)->pdn);
                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_ACCESS) {
                err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
                                               convert_access(mr_access_flags));
                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_TRANS) {
                int shift;
                int n;

                mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                ib_umem_release(mmr->umem);
                mmr->umem = ib_umem_get(mr->uobject->context, start, length,
                                        mr_access_flags |
                                        IB_ACCESS_LOCAL_WRITE,
                                        0);
                if (IS_ERR(mmr->umem)) {
                        err = PTR_ERR(mmr->umem);
                        /* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
                        mmr->umem = NULL;
                        goto release_mpt_entry;
                }
                n = ib_umem_page_count(mmr->umem);
                shift = ilog2(mmr->umem->page_size);

                err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
                                              virt_addr, length, n, shift,
                                              *pmpt_entry);
                if (err) {
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
                mmr->mmr.iova = virt_addr;
                mmr->mmr.size = length;

                err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
                if (err) {
                        mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
        }

        /* If we couldn't transfer the MR back to the HCA, just remember to
         * return a failure; dereg_mr will free the resources.
         */
        err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
        if (!err && flags & IB_MR_REREG_ACCESS)
                mmr->mmr.access = mr_access_flags;

release_mpt_entry:
        mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);

        return err;
}

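/*
 * Allocate the driver-private page list that mlx4_ib_map_mr_sg() fills
 * in.  The hardware wants the list aligned to MLX4_MR_PAGES_ALIGN, so
 * over-allocate by the worst-case slack beyond kmalloc()'s guaranteed
 * ARCH_KMALLOC_MINALIGN alignment, align the pointer by hand, and
 * DMA-map the aligned region once for the lifetime of the MR.
 */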
static int
mlx4_alloc_priv_pages(struct ib_device *device,
                      struct mlx4_ib_mr *mr,
                      int max_pages)
{
        int size = max_pages * sizeof(u64);
        int add_size;
        int ret;

        add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

        mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
        if (!mr->pages_alloc)
                return -ENOMEM;

        mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);

        mr->page_map = dma_map_single(device->dma_device, mr->pages,
                                      size, DMA_TO_DEVICE);

        if (dma_mapping_error(device->dma_device, mr->page_map)) {
                ret = -ENOMEM;
                goto err;
        }

        return 0;
err:
        kfree(mr->pages_alloc);

        return ret;
}

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
        if (mr->pages) {
                struct ib_device *device = mr->ibmr.device;
                int size = mr->max_pages * sizeof(u64);

                dma_unmap_single(device->dma_device, mr->page_map,
                                 size, DMA_TO_DEVICE);
                kfree(mr->pages_alloc);
                mr->pages = NULL;
        }
}

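/*
 * Destroy an MR: release the private page list if one was allocated,
 * free the hardware MR, and drop the umem pin for user-space MRs.
 */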
int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int ret;

        mlx4_free_priv_pages(mr);

        ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
        if (ret)
                return ret;
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        return 0;
}

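/* Allocate a type 1 or type 2 memory window and enable it in hardware. */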
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mw *mw;
        int err;

        mw = kmalloc(sizeof(*mw), GFP_KERNEL);
        if (!mw)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
                            to_mlx4_type(type), &mw->mmw);
        if (err)
                goto err_free;

        err = mlx4_mw_enable(dev->dev, &mw->mmw);
        if (err)
                goto err_mw;

        mw->ibmw.rkey = mw->mmw.key;

        return &mw->ibmw;

err_mw:
        mlx4_mw_free(dev->dev, &mw->mmw);

err_free:
        kfree(mw);

        return ERR_PTR(err);
}

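/*
 * Bind a memory window by posting an IB_WR_BIND_MW work request on the
 * QP.  ib_inc_rkey() bumps the variant byte of the rkey so that stale
 * rkeys from an earlier bind are not honored.
 */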
int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
                    struct ib_mw_bind *mw_bind)
{
        struct ib_bind_mw_wr wr;
        struct ib_send_wr *bad_wr;
        int ret;

        memset(&wr, 0, sizeof(wr));
        wr.wr.opcode     = IB_WR_BIND_MW;
        wr.wr.wr_id      = mw_bind->wr_id;
        wr.wr.send_flags = mw_bind->send_flags;
        wr.mw            = mw;
        wr.bind_info     = mw_bind->bind_info;
        wr.rkey          = ib_inc_rkey(mw->rkey);

        ret = mlx4_ib_post_send(qp, &wr.wr, &bad_wr);
        if (!ret)
                mw->rkey = wr.rkey;

        return ret;
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
        struct mlx4_ib_mw *mw = to_mmw(ibmw);

        mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
        kfree(mw);

        return 0;
}

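/*
 * Allocate an MR for fast registration.  Only IB_MR_TYPE_MEM_REG is
 * supported, limited to MLX4_MAX_FAST_REG_PAGES pages; the private
 * page list consumed by mlx4_ib_map_mr_sg() is allocated along with it.
 */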
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
                               enum ib_mr_type mr_type,
                               u32 max_num_sg)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int err;

        if (mr_type != IB_MR_TYPE_MEM_REG ||
            max_num_sg > MLX4_MAX_FAST_REG_PAGES)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
                            max_num_sg, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
        if (err)
                goto err_free_mr;

        mr->max_pages = max_num_sg;

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_free_pl;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_free_pl:
        mlx4_free_priv_pages(mr);
err_free_mr:
        (void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}

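/*
 * Allocate a page list for fast-register work requests: a plain
 * kmalloc'ed copy that the caller fills in, plus a DMA-coherent
 * shadow (mapped_page_list) that the HCA reads.  The mapped address
 * is expected to be 64-byte aligned, hence the WARN_ON.
 */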
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
                                                               int page_list_len)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_fast_reg_page_list *mfrpl;
        int size = page_list_len * sizeof(u64);

        if (page_list_len > MLX4_MAX_FAST_REG_PAGES)
                return ERR_PTR(-EINVAL);

        mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
        if (!mfrpl)
                return ERR_PTR(-ENOMEM);

        mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
        if (!mfrpl->ibfrpl.page_list)
                goto err_free;

        mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist->
                                                     pdev->dev,
                                                     size, &mfrpl->map,
                                                     GFP_KERNEL);
        if (!mfrpl->mapped_page_list)
                goto err_free;

        WARN_ON(mfrpl->map & 0x3f);

        return &mfrpl->ibfrpl;

err_free:
        kfree(mfrpl->ibfrpl.page_list);
        kfree(mfrpl);
        return ERR_PTR(-ENOMEM);
}

void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
        struct mlx4_ib_dev *dev = to_mdev(page_list->device);
        struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
        int size = page_list->max_page_list_len * sizeof(u64);

        dma_free_coherent(&dev->dev->persist->pdev->dev, size,
                          mfrpl->mapped_page_list,
                          mfrpl->map);
        kfree(mfrpl->ibfrpl.page_list);
        kfree(mfrpl);
}

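/*
 * Allocate an FMR (fast memory region), a kernel-only MR that can be
 * cheaply remapped via mlx4_ib_map_phys_fmr() without a full
 * deregister/register cycle.
 */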
struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
                                 struct ib_fmr_attr *fmr_attr)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_fmr *fmr;
        int err = -ENOMEM;

        fmr = kmalloc(sizeof(*fmr), GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
                             fmr_attr->max_pages, fmr_attr->max_maps,
                             fmr_attr->page_shift, &fmr->mfmr);
        if (err)
                goto err_free;

        err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
        if (err)
                goto err_mr;

        fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

        return &fmr->ibfmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
        kfree(fmr);

        return ERR_PTR(err);
}

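/*
 * Remap an FMR to a new page list and iova; pointers to the ib_fmr's
 * lkey/rkey are passed down so the core can update the keys.
 */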
int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                         int npages, u64 iova)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

        return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
                                 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

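/*
 * Unmap a batch of FMRs.  All FMRs on the list must belong to the same
 * device, because a single SYNC_TPT firmware command is issued at the
 * end to synchronize the HCA's translation and protection tables.
 */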
int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *ibfmr;
        int err;
        struct mlx4_dev *mdev = NULL;

        list_for_each_entry(ibfmr, fmr_list, list) {
                if (mdev && to_mdev(ibfmr->device)->dev != mdev)
                        return -EINVAL;
                mdev = to_mdev(ibfmr->device)->dev;
        }

        if (!mdev)
                return 0;

        list_for_each_entry(ibfmr, fmr_list, list) {
                struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

                mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
        }

        /*
         * Make sure all MPT status updates are visible before issuing
         * the SYNC_TPT firmware command.
         */
        wmb();

        err = mlx4_SYNC_TPT(mdev);
        if (err)
                pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);

        return 0;
}

int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
        int err;

        err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);
        if (!err)
                kfree(ifmr);

        return err;
}

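/*
 * Callback for ib_sg_to_pages(): store one page address in the private
 * page list in the big-endian, MLX4_MTT_FLAG_PRESENT-tagged format the
 * HCA expects.
 */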
static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);

        if (unlikely(mr->npages == mr->max_pages))
                return -ENOMEM;

        mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

        return 0;
}

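/*
 * Translate a scatterlist into the MR's private page list.  The list
 * stays DMA-mapped for the MR's entire lifetime, so ownership is
 * bounced to the CPU and back around the update with dma_sync calls.
 */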
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr,
                      struct scatterlist *sg,
                      int sg_nents)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int rc;

        mr->npages = 0;

        ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
                                   sizeof(u64) * mr->max_pages,
                                   DMA_TO_DEVICE);

        rc = ib_sg_to_pages(ibmr, sg, sg_nents, mlx4_set_page);

        ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
                                      sizeof(u64) * mr->max_pages,
                                      DMA_TO_DEVICE);

        return rc;
}