/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include <rdma/ib_umem.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_smi.h>

#include "ipath_verbs.h"

/* Fast memory region */
struct ipath_fmr {
	struct ib_fmr ibfmr;
	u8 page_shift;
	struct ipath_mregion mr;	/* must be last */
};

static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct ipath_fmr, ibfmr);
}

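/*
 * Layout note: the ipath_mregion at the end of each MR/FMR struct is
 * followed in the same allocation by an array of pointers to
 * first-level segment tables (mr.map[]), each table holding
 * IPATH_SEGSZ (vaddr, length) segments.  This is why the
 * ipath_mregion "must be last": alloc_mr() and ipath_alloc_fmr()
 * below size the allocation as sizeof(*mr) + m * sizeof(mr->mr.map[0]).
 */
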
/**
 * ipath_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see ipath_dma.c).
 */
struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ipath_mr *mr;
	struct ib_mr *ret;

	mr = kzalloc(sizeof *mr, GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;

bail:
	return ret;
}

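/*
 * Note: because the region is kzalloc()ed above, mr->mr.lkey stays 0;
 * the driver's key checks treat lkey == 0 as the pass-through kernel
 * DMA key (see ipath_keys.c), so no segment tables are needed here and
 * the note above about ib_dma_mapping_ops applies.
 */
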
static struct ipath_mr *alloc_mr(int count,
				 struct ipath_lkey_table *lk_table)
{
	struct ipath_mr *mr;
	int m, i = 0;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
	if (!mr)
		goto done;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
		if (!mr->mr.map[i])
			goto bail;
	}
	mr->mr.mapsz = m;

	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	if (!ipath_alloc_lkey(lk_table, &mr->mr))
		goto bail;
	mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;

	goto done;

bail:
	while (i)
		kfree(mr->mr.map[--i]);
	kfree(mr);
	mr = NULL;

done:
	return mr;
}

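/*
 * Sizing example (hypothetical numbers): if IPATH_SEGSZ works out to
 * 128 segments per table, then count == 300 pages gives
 * m = (300 + 127) / 128 = 3 first-level tables, the last of which is
 * only partially filled.
 */
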
/**
 * ipath_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the InfiniPath driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				u64 virt_addr, int mr_access_flags,
				struct ib_udata *udata)
{
	struct ipath_mr *mr;
	struct ib_umem *umem;
	int n, m, entry;
	struct scatterlist *sg;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	n = umem->nmap;
	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->mr.max_segs = n;
	mr->umem = umem;

	m = 0;
	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		void *vaddr;

		vaddr = page_address(sg_page(sg));
		if (!vaddr) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = umem->page_size;
		n++;
		if (n == IPATH_SEGSZ) {
			m++;
			n = 0;
		}
	}
	ret = &mr->ibmr;

bail:
	return ret;
}

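/*
 * The loop above flattens the pinned umem scatterlist into the
 * two-level table: m indexes the first-level table and n the segment
 * within it, with n wrapping to 0 each time a table fills.  A
 * userspace ibv_reg_mr() call is what ultimately lands here via the
 * uverbs reg_user_mr entry point.
 */
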
/**
 * ipath_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by ipath_get_dma_mr()
 * or ipath_reg_user_mr().
 */
int ipath_dereg_mr(struct ib_mr *ibmr)
{
	struct ipath_mr *mr = to_imr(ibmr);
	int i;

	ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
	i = mr->mr.mapsz;
	while (i)
		kfree(mr->mr.map[--i]);

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);
	return 0;
}

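/*
 * Teardown order above: the lkey is released first so the region can
 * no longer be looked up, then the segment tables are freed, and
 * finally the umem pin is dropped (only present if this MR came from
 * ipath_reg_user_mr()).
 */
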
/**
 * ipath_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			       struct ib_fmr_attr *fmr_attr)
{
	struct ipath_fmr *fmr;
	int m, i = 0;
	struct ib_fmr *ret;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
	if (!fmr)
		goto bail;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
					 GFP_KERNEL);
		if (!fmr->mr.map[i])
			goto bail;
	}
	fmr->mr.mapsz = m;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
		goto bail;
	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.pd = pd;
	fmr->mr.user_base = 0;
	fmr->mr.iova = 0;
	fmr->mr.length = 0;
	fmr->mr.offset = 0;
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
	goto done;

bail:
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	ret = ERR_PTR(-ENOMEM);

done:
	return ret;
}

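/*
 * Unlike a regular MR, an FMR is allocated once with capacity for
 * fmr_attr->max_pages pages and is then mapped and unmapped repeatedly
 * via ipath_map_phys_fmr()/ipath_unmap_fmr(), avoiding a fresh
 * allocation and lkey assignment per mapping.
 */
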
/**
 * ipath_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		       int list_len, u64 iova)
{
	struct ipath_fmr *fmr = to_ifmr(ibfmr);
	struct ipath_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == IPATH_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}

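/*
 * Example (hypothetical values): with page_shift == 12, ps == 4096;
 * mapping list_len == 2 pages sets fmr->mr.length to 8192 and fills
 * segs[0] and segs[1] of the first segment table with the two
 * addresses from page_list[].
 */
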
/**
 * ipath_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int ipath_unmap_fmr(struct list_head *fmr_list)
{
	struct ipath_fmr *fmr;
	struct ipath_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

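/*
 * Unmapping only zeroes the base, iova and length under the lkey
 * table lock, which invalidates the current mapping; the segment
 * tables and the lkey/rkey stay allocated so the FMR can be remapped
 * cheaply by a later ipath_map_phys_fmr().
 */
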
/**
 * ipath_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct ipath_fmr *fmr = to_ifmr(ibfmr);
	int i;

	ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
	i = fmr->mr.mapsz;
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	return 0;
}