IB/umem: remove the dmasync argument to ib_umem_get
drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/list.h>
#include <linux/slab.h>

#include "pvrdma.h"

/**
 * pvrdma_get_dma_mr - get a DMA memory region
 * @pd: protection domain
 * @acc: access flags
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret;

	/* Support only LOCAL_WRITE flag for DMA MRs */
	if (acc & ~IB_ACCESS_LOCAL_WRITE) {
		dev_warn(&dev->pdev->dev,
			 "unsupported dma mr access flags %#x\n", acc);
		return ERR_PTR(-EOPNOTSUPP);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = acc;
	cmd->flags = PVRDMA_MR_FLAG_DMA;

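	/*
	 * Post the CREATE_MR command to the device; on success the
	 * response carries the device-assigned MR handle and keys.
	 */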
	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not get DMA mem region, error: %d\n", ret);
		kfree(mr);
		return ERR_PTR(ret);
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;
}

/**
 * pvrdma_reg_user_mr - register a userspace memory region
 * @pd: protection domain
 * @start: starting address
 * @length: length of region
 * @virt_addr: I/O virtual address
 * @access_flags: access flags for memory region
 * @udata: user data
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				 u64 virt_addr, int access_flags,
				 struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr = NULL;
	struct ib_umem *umem;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret, npages;

	if (length == 0 || length > dev->dsr->caps.max_mr_size) {
		dev_warn(&dev->pdev->dev, "invalid mem region length\n");
		return ERR_PTR(-EINVAL);
	}

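	/* Pin and map the userspace memory backing [start, start + length). */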
	umem = ib_umem_get(udata, start, length, access_flags);
	if (IS_ERR(umem)) {
		dev_warn(&dev->pdev->dev,
			 "could not get umem for mem region\n");
		return ERR_CAST(umem);
	}

	npages = ib_umem_num_pages(umem);
	if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
			 npages);
		ret = -EINVAL;
		goto err_umem;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto err_umem;
	}

	mr->mmr.iova = virt_addr;
	mr->mmr.size = length;
	mr->umem = umem;

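	/*
	 * Build the page directory the device uses to translate this MR:
	 * size it for npages entries, then fill it with the DMA addresses
	 * of the pinned umem pages.
	 */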
	ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
	if (ret)
		goto err_pdir;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->start = start;
	cmd->length = length;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = access_flags;
	cmd->nchunks = npages;
	cmd->pdir_dma = mr->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not register mem region, error: %d\n", ret);
		goto err_pdir;
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
err_umem:
	ib_umem_release(umem);
	kfree(mr);

	return ERR_PTR(ret);
}

/**
 * pvrdma_alloc_mr - allocate a memory region
 * @pd: protection domain
 * @mr_type: type of memory region
 * @max_num_sg: maximum number of pages
 * @udata: user data
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			      u32 max_num_sg, struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int size = max_num_sg * sizeof(u64);
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > PVRDMA_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->pages = kzalloc(size, GFP_KERNEL);
	if (!mr->pages) {
		ret = -ENOMEM;
		goto freemr;
	}

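	/*
	 * A fast-register MR has no pages yet: the page directory is
	 * sized up front for max_num_sg entries, and the actual page
	 * addresses are supplied later through pvrdma_map_mr_sg().
	 */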
	ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "failed to allocate page dir for mr\n");
		ret = -ENOMEM;
		goto freepages;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = 0;
	cmd->flags = PVRDMA_MR_FLAG_FRMR;
	cmd->nchunks = max_num_sg;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create FR mem region, error: %d\n", ret);
		goto freepdir;
	}

	mr->max_pages = max_num_sg;
	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;
	mr->page_shift = PAGE_SHIFT;
	mr->umem = NULL;

	return &mr->ibmr;

freepdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
freepages:
	kfree(mr->pages);
freemr:
	kfree(mr);
	return ERR_PTR(ret);
}

/**
 * pvrdma_dereg_mr - deregister a memory region
 * @ibmr: memory region
 * @udata: user data
 *
 * @return: 0 on success.
 */
int pvrdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR;
	cmd->mr_handle = mr->mmr.mr_handle;
	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "could not deregister mem region, error: %d\n", ret);

	pvrdma_page_dir_cleanup(dev, &mr->pdir);
	ib_umem_release(mr->umem);

	kfree(mr->pages);
	kfree(mr);

	return 0;
}

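/*
 * Per-page callback for ib_sg_to_pages(): invoked once for each
 * page-sized chunk of the scatterlist, it stashes the address in
 * mr->pages until mr->max_pages entries have been collected.
 */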
static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	int ret;

	mr->npages = 0;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
	if (ret < 0)
		dev_warn(&dev->pdev->dev, "could not map sg to pages\n");

	return ret;
}