/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

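/*
 * An MR/MW key and its MTPT index are byte rotations of each other:
 * hw_index_to_key() rotates the 32-bit index left by 8 bits and
 * key_to_hw_index() rotates it back.
 */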
static u32 hw_index_to_key(int ind)
{
	return ((u32)ind >> 24) | ((u32)ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

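/*
 * alloc_mr_key() - reserve an MTPT index from the mtpt_ida allocator,
 * derive the MR key from it and get the matching MTPT HEM table entry.
 */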
static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int err;
	int id;

	/* Allocate a key for mr from mr_table */
	id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(ibdev, "failed to alloc id for MR key, id(%d)\n", id);
		return -ENOMEM;
	}

	mr->key = hw_index_to_key(id); /* MR key */

	err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
				 (unsigned long)id);
	if (err) {
		ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
		goto err_free_bitmap;
	}

	return 0;
err_free_bitmap:
	ida_free(&mtpt_ida->ida, id);
	return err;
}

static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	unsigned long obj = key_to_hw_index(mr->key);

	hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
	ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)obj);
}

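/*
 * alloc_mr_pbl() - build the buffer attributes for the MR's page buffer
 * list (PBL) and create the MTR that maps it. For a fast-register MR only
 * the MTT is allocated here; the pages are attached later at map time.
 */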
static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
			struct ib_udata *udata, u64 start)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	bool is_fast = mr->type == MR_TYPE_FRMR;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
	buf_attr.page_shift = is_fast ? PAGE_SHIFT :
			      hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = mr->size;
	buf_attr.region[0].hopnum = mr->pbl_hop_num;
	buf_attr.region_count = 1;
	buf_attr.user_access = mr->access;
	/* fast MR's buffer is allocated before mapping, not at creation */
	buf_attr.mtt_only = is_fast;

	err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
				  hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
				  udata, start);
	if (err)
		ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
	else
		mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;

	return err;
}

static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
					      key_to_hw_index(mr->key) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n",
				   ret);
	}

	free_mr_pbl(hr_dev, mr);
	free_mr_key(hr_dev, mr);
}

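/*
 * hns_roce_mr_enable() - write the MR's MPT context into a command mailbox
 * and issue CREATE_MPT so the hardware can start using the region.
 */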
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	int ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (mr->type != MR_TYPE_FRMR)
		ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
	else
		ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
	if (ret) {
		dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
		goto err_page;
	}

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
		goto err_page;
	}

	mr->enabled = 1;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;

	ida_init(&mtpt_ida->ida);
	mtpt_ida->max = hr_dev->caps.num_mtpts - 1;
	mtpt_ida->min = hr_dev->caps.reserved_mrws;
}

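/*
 * hns_roce_get_dma_mr() - create the verbs DMA MR for a PD. A DMA MR needs
 * only an MTPT entry and key; no PBL is built for it.
 */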
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_DMA;
	mr->pd = to_hr_pd(pd)->pdn;
	mr->access = acc;

	/* Allocate memory region key */
	hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
	ret = alloc_mr_key(hr_dev, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;
err_mr:
	free_mr_key(hr_dev, mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

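/*
 * hns_roce_reg_user_mr() - register a userspace memory region: reserve an
 * MTPT entry, pin the user buffer into a PBL, then enable the MPT context.
 */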
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->iova = virt_addr;
	mr->size = length;
	mr->pd = to_hr_pd(pd)->pdn;
	mr->access = access_flags;
	mr->type = MR_TYPE_MR;

	ret = alloc_mr_key(hr_dev, mr);
	if (ret)
		goto err_alloc_mr;

	ret = alloc_mr_pbl(hr_dev, mr, udata, start);
	if (ret)
		goto err_alloc_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_alloc_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_alloc_pbl:
	free_mr_pbl(hr_dev, mr);
err_alloc_key:
	free_mr_key(hr_dev, mr);
err_alloc_mr:
	kfree(mr);
	return ERR_PTR(ret);
}

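/*
 * hns_roce_rereg_user_mr() - modify an existing user MR in place: query and
 * destroy the current MPT, update the requested fields (PD, access flags
 * and/or translation), then recreate the MPT. Returns NULL on success
 * because the ib_mr itself is reused.
 */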
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
				     u64 length, u64 virt_addr,
				     int mr_access_flags, struct ib_pd *pd,
				     struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ib_dev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_cmd_mailbox *mailbox;
	unsigned long mtpt_idx;
	int ret;

	if (!mr->enabled)
		return ERR_PTR(-EINVAL);

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return ERR_CAST(mailbox);

	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
				mtpt_idx);
	if (ret)
		goto free_cmd_mbox;

	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
				      mtpt_idx);
	if (ret)
		ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);

	mr->enabled = 0;
	mr->iova = virt_addr;
	mr->size = length;

	if (flags & IB_MR_REREG_PD)
		mr->pd = to_hr_pd(pd)->pdn;

	if (flags & IB_MR_REREG_ACCESS)
		mr->access = mr_access_flags;

	if (flags & IB_MR_REREG_TRANS) {
		free_mr_pbl(hr_dev, mr);
		ret = alloc_mr_pbl(hr_dev, mr, udata, start);
		if (ret) {
			ibdev_err(ib_dev, "failed to alloc mr PBL, ret = %d.\n",
				  ret);
			goto free_cmd_mbox;
		}
	}

	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, mailbox->buf);
	if (ret) {
		ibdev_err(ib_dev, "failed to write mtpt, ret = %d.\n", ret);
		goto free_cmd_mbox;
	}

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
				     mtpt_idx);
	if (ret) {
		ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
		goto free_cmd_mbox;
	}

	mr->enabled = 1;

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	if (ret)
		return ERR_PTR(ret);
	return NULL;
}

int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	if (hr_dev->hw->dereg_mr)
		hr_dev->hw->dereg_mr(hr_dev);

	hns_roce_mr_free(hr_dev, mr);
	kfree(mr);

	return 0;
}

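/*
 * hns_roce_alloc_mr() - allocate a fast-register MR (FRMR). The PBL MTT is
 * reserved up front for max_num_sg pages; the pages themselves are filled
 * in later by hns_roce_map_mr_sg().
 */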
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
		dev_err(dev, "max_num_sg larger than %d\n",
			HNS_ROCE_FRMR_MAX_PA);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_FRMR;
	mr->pd = to_hr_pd(pd)->pdn;
	mr->size = max_num_sg * (1 << PAGE_SHIFT);

	/* Allocate memory region key */
	ret = alloc_mr_key(hr_dev, mr);
	if (ret)
		goto err_free;

	ret = alloc_mr_pbl(hr_dev, mr, NULL, 0);
	if (ret)
		goto err_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->ibmr.length = mr->size;

	return &mr->ibmr;

err_pbl:
	free_mr_pbl(hr_dev, mr);
err_key:
	free_mr_key(hr_dev, mr);
err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
		mr->page_list[mr->npages++] = addr;
		return 0;
	}

	return -ENOBUFS;
}

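/*
 * hns_roce_map_mr_sg() - convert a scatterlist to page addresses via
 * ib_sg_to_pages()/hns_roce_set_page() and write them into the FRMR's PBL
 * MTT. Returns the number of pages mapped on success.
 */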
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
	int ret = 0;

	mr->npages = 0;
	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
				 sizeof(dma_addr_t), GFP_KERNEL);
	if (!mr->page_list)
		return ret;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
		goto err_page_list;
	}

	mtr->hem_cfg.region[0].offset = 0;
	mtr->hem_cfg.region[0].count = mr->npages;
	mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
	mtr->hem_cfg.region_count = 1;
	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
	if (ret) {
		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
		ret = 0;
	} else {
		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
		ret = mr->npages;
	}

err_page_list:
	kvfree(mr->page_list);
	mr->page_list = NULL;

	return ret;
}

static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mw *mw)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (mw->enabled) {
		ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
					      key_to_hw_index(mw->rkey) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);

		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mw->rkey));
	}

	ida_free(&hr_dev->mr_table.mtpt_ida.ida,
		 (int)key_to_hw_index(mw->rkey));
}

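/*
 * hns_roce_mw_enable() - get the MW's MTPT HEM entry, fill its MPT context
 * through a command mailbox and issue CREATE_MPT to activate the memory
 * window.
 */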
static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mw *mw)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
	int ret;

	/* prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
	if (ret) {
		dev_err(dev, "MW write mtpt failed!\n");
		goto err_page;
	}

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mw->enabled = 1;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);

	return ret;
}

int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mw *mw = to_hr_mw(ibmw);
	int ret;
	int id;

	/* Allocate a key for mw from mr_table */
	id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(ibdev, "failed to alloc id for MW key, id(%d)\n", id);
		return -ENOMEM;
	}

	mw->rkey = hw_index_to_key(id);

	ibmw->rkey = mw->rkey;
	mw->pdn = to_hr_pd(ibmw->pd)->pdn;
	mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	ret = hns_roce_mw_enable(hr_dev, mw);
	if (ret)
		goto err_mw;

	return 0;

err_mw:
	hns_roce_mw_free(hr_dev, mw);
	return ret;
}

int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);

	hns_roce_mw_free(hr_dev, mw);
	return 0;
}

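/*
 * mtr_map_region() - write up to max_count page addresses of one buffer
 * region into the MTT entries found by hns_roce_hem_list_find_mtt().
 * Returns the number of pages written.
 */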
static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  struct hns_roce_buf_region *region, dma_addr_t *pages,
			  int max_count)
{
	int count, npage;
	int offset, end;
	__le64 *mtts;
	u64 addr;
	int i;

	offset = region->offset;
	end = offset + region->count;
	npage = 0;
	while (offset < end && npage < max_count) {
		count = 0;
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  offset, &count);
		if (!mtts)
			return -ENOBUFS;

		for (i = 0; i < count && npage < max_count; i++) {
			addr = pages[npage];

			mtts[i] = cpu_to_le64(addr);
			npage++;
		}
		offset += count;
	}

	return npage;
}

static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
{
	int i;

	for (i = 0; i < attr->region_count; i++)
		if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
		    attr->region[i].hopnum > 0)
			return true;

	/* Because the mtr has only one root base address, a hopnum of 0 means
	 * that the root base address equals the first buffer address, so all
	 * the allocated memory must lie in one continuous space accessed in
	 * direct mode.
	 */
	return false;
}

static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
{
	size_t size = 0;
	int i;

	for (i = 0; i < attr->region_count; i++)
		size += attr->region[i].size;

	return size;
}

/*
 * Check whether the given pages lie in a continuous address space.
 * Returns 0 on success, or the index of the first non-contiguous page.
 */
static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
					 unsigned int page_shift)
{
	size_t page_size = 1 << page_shift;
	int i;

	for (i = 1; i < page_count; i++)
		if (pages[i] - pages[i - 1] != page_size)
			return i;

	return 0;
}

static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	/* release user buffers */
	if (mtr->umem) {
		ib_umem_release(mtr->umem);
		mtr->umem = NULL;
	}

	/* release kernel buffers */
	if (mtr->kmem) {
		hns_roce_buf_free(hr_dev, mtr->kmem);
		mtr->kmem = NULL;
	}
}

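/*
 * mtr_alloc_bufs() - allocate the backing buffer for an MTR: pin a umem for
 * a userspace request, or allocate a kernel buffer otherwise.
 */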
static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  struct hns_roce_buf_attr *buf_attr,
			  struct ib_udata *udata, unsigned long user_addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	size_t total_size;

	total_size = mtr_bufs_size(buf_attr);

	if (udata) {
		mtr->kmem = NULL;
		mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
					buf_attr->user_access);
		if (IS_ERR_OR_NULL(mtr->umem)) {
			ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
				  PTR_ERR(mtr->umem));
			return -ENOMEM;
		}
	} else {
		mtr->umem = NULL;
		mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
					       buf_attr->page_shift,
					       mtr->hem_cfg.is_direct ?
					       HNS_ROCE_BUF_DIRECT : 0);
		if (IS_ERR(mtr->kmem)) {
			ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
				  PTR_ERR(mtr->kmem));
			return PTR_ERR(mtr->kmem);
		}
	}

	return 0;
}

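/*
 * mtr_map_bufs() - collect the DMA addresses of the umem or kmem pages into
 * a temporary array and write them into the MTT via hns_roce_mtr_map(). In
 * direct mode the pages are also checked for contiguity.
 */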
static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			int page_count, unsigned int page_shift)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	dma_addr_t *pages;
	int npage;
	int ret;

	/* alloc a tmp array to store buffer's dma address */
	pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	if (mtr->umem)
		npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count,
					       mtr->umem, page_shift);
	else
		npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count,
					       mtr->kmem, page_shift);

	if (npage != page_count) {
		ibdev_err(ibdev, "failed to get mtr page %d != %d.\n", npage,
			  page_count);
		ret = -ENOBUFS;
		goto err_alloc_list;
	}

	if (mtr->hem_cfg.is_direct && npage > 1) {
		ret = mtr_check_direct_pages(pages, npage, page_shift);
		if (ret) {
			ibdev_err(ibdev, "failed to check %s page: %d / %d.\n",
				  mtr->umem ? "umtr" : "kmtr", ret, npage);
			ret = -ENOBUFS;
			goto err_alloc_list;
		}
	}

	ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
	if (ret)
		ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);

err_alloc_list:
	kvfree(pages);

	return ret;
}

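/*
 * hns_roce_mtr_map() - write the caller's page address array into the MTR.
 * In direct mode only the root base address is recorded; otherwise each
 * region with a non-zero hopnum is mapped through mtr_map_region().
 */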
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, unsigned int page_cnt)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_region *r;
	unsigned int i, mapped_cnt;
	int ret = 0;

	/*
	 * Only use the first page address as root ba when hopnum is 0, this
	 * is because the addresses of all pages are consecutive in this case.
	 */
	if (mtr->hem_cfg.is_direct) {
		mtr->hem_cfg.root_ba = pages[0];
		return 0;
	}

	for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
	     mapped_cnt < page_cnt; i++) {
		r = &mtr->hem_cfg.region[i];
		/* if hopnum is 0, no need to map pages in this region */
		if (!r->hopnum) {
			mapped_cnt += r->count;
			continue;
		}

		if (r->offset + r->count > page_cnt) {
			ret = -EINVAL;
			ibdev_err(ibdev,
				  "failed to check mtr%u count %u + %u > %u.\n",
				  i, r->offset, r->count, page_cnt);
			return ret;
		}

		ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
				     page_cnt - mapped_cnt);
		if (ret < 0) {
			ibdev_err(ibdev,
				  "failed to map mtr%u offset %u, ret = %d.\n",
				  i, r->offset, ret);
			return ret;
		}
		mapped_cnt += ret;
		ret = 0;
	}

	if (mapped_cnt < page_cnt) {
		ret = -ENOBUFS;
		ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
			  mapped_cnt, page_cnt);
	}

	return ret;
}

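/*
 * hns_roce_mtr_find() - copy up to mtt_max MTT entries starting at the given
 * buffer offset into mtt_buf and optionally return the root base address.
 * In direct mode the addresses are computed from root_ba instead of being
 * read from the MTT. Returns the number of entries copied.
 */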
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	int mtt_count, left;
	u32 start_index;
	int total = 0;
	__le64 *mtts;
	u32 npage;
	u64 addr;

	if (!mtt_buf || mtt_max < 1)
		goto done;

	/* no mtt memory in direct mode, so just return the buffer address */
	if (cfg->is_direct) {
		start_index = offset >> HNS_HW_PAGE_SHIFT;
		for (mtt_count = 0; mtt_count < cfg->region_count &&
		     total < mtt_max; mtt_count++) {
			npage = cfg->region[mtt_count].offset;
			if (npage < start_index)
				continue;

			addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
			mtt_buf[total] = addr;

			total++;
		}

		goto done;
	}

	start_index = offset >> cfg->buf_pg_shift;
	left = mtt_max;
	while (left > 0) {
		mtt_count = 0;
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  start_index + total,
						  &mtt_count);
		if (!mtts || !mtt_count)
			goto done;

		npage = min(mtt_count, left);
		left -= npage;
		for (mtt_count = 0; mtt_count < npage; mtt_count++)
			mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
	}

done:
	if (base_addr)
		*base_addr = cfg->root_ba;

	return total;
}

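/*
 * mtr_init_buf_cfg() - translate the caller's buffer attributes into the
 * MTR's HEM config: pick direct or multi-hop addressing, compute the page
 * shift and per-region page offsets/counts, and return the total page count.
 */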
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
			    struct hns_roce_buf_attr *attr,
			    struct hns_roce_hem_cfg *cfg,
			    unsigned int *buf_page_shift, u64 unaligned_size)
{
	struct hns_roce_buf_region *r;
	u64 first_region_padding;
	int page_cnt, region_cnt;
	unsigned int page_shift;
	size_t buf_size;

	/* If mtt is disabled, all pages must be within a continuous range */
	cfg->is_direct = !mtr_has_mtt(attr);
	buf_size = mtr_bufs_size(attr);
	if (cfg->is_direct) {
		/* When the HEM buffer uses 0-level addressing, the page size
		 * equals the whole buffer size. The buffer is split into small
		 * pages that are used to check whether adjacent units lie in
		 * a continuous space; their size is fixed to 4K as required
		 * by the hns ROCEE.
		 */
		page_shift = HNS_HW_PAGE_SHIFT;

		/* The ROCEE requires the page size to be 4K * 2 ^ N. */
		cfg->buf_pg_count = 1;
		cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
			order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
		first_region_padding = 0;
	} else {
		page_shift = attr->page_shift;
		cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unaligned_size,
						 1 << page_shift);
		cfg->buf_pg_shift = page_shift;
		first_region_padding = unaligned_size;
	}

	/* Convert buffer size to page index and page count for each region and
	 * the buffer's offset needs to be appended to the first region.
	 */
	for (page_cnt = 0, region_cnt = 0; region_cnt < attr->region_count &&
	     region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
		r = &cfg->region[region_cnt];
		r->offset = page_cnt;
		buf_size = hr_hw_page_align(attr->region[region_cnt].size +
					    first_region_padding);
		r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
		first_region_padding = 0;
		page_cnt += r->count;
		r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
					     r->count);
	}

	cfg->region_count = region_cnt;
	*buf_page_shift = page_shift;

	return page_cnt;
}

static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			 unsigned int ba_page_shift)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	int ret;

	hns_roce_hem_list_init(&mtr->hem_list);
	if (!cfg->is_direct) {
		ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
						cfg->region, cfg->region_count,
						ba_page_shift);
		if (ret)
			return ret;
		cfg->root_ba = mtr->hem_list.root_ba;
		cfg->ba_pg_shift = ba_page_shift;
	} else {
		cfg->ba_pg_shift = cfg->buf_pg_shift;
	}

	return 0;
}

static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
}

/**
 * hns_roce_mtr_create - Create hns memory translate region.
 *
 * @hr_dev: RoCE device struct pointer
 * @mtr: memory translate region
 * @buf_attr: buffer attribute for creating mtr
 * @ba_page_shift: page shift for multi-hop base address table
 * @udata: user space context, if it's NULL, means kernel space
 * @user_addr: userspace virtual address to start at
 */
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int ba_page_shift, struct ib_udata *udata,
			unsigned long user_addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int buf_page_shift = 0;
	int buf_page_cnt;
	int ret;

	buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
					&buf_page_shift,
					udata ? user_addr & ~PAGE_MASK : 0);
	if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) {
		ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n",
			  buf_page_cnt, buf_page_shift);
		return -EINVAL;
	}

	ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
		return ret;
	}

	/* The caller has its own buffer list and invokes the hns_roce_mtr_map()
	 * to finish the MTT configuration.
	 */
	if (buf_attr->mtt_only) {
		mtr->umem = NULL;
		mtr->kmem = NULL;
		return 0;
	}

	ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc mtr bufs, ret = %d.\n", ret);
		goto err_alloc_mtt;
	}

	/* Write buffer's dma address to MTT */
	ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift);
	if (ret)
		ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
	else
		return 0;

	mtr_free_bufs(hr_dev, mtr);
err_alloc_mtt:
	mtr_free_mtt(hr_dev, mtr);
	return ret;
}

void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	/* release multi-hop addressing resource */
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);

	/* free buffers */
	mtr_free_bufs(hr_dev, mtr);
}