/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_pack.h>
#include <rdma/ib_smi.h>

#include "ipath_verbs.h"

/* Fast memory region */
struct ipath_fmr {
	struct ib_fmr ibfmr;
	u8 page_shift;
	struct ipath_mregion mr;	/* must be last */
};

static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct ipath_fmr, ibfmr);
}
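
/*
 * Note on the "must be last" constraint above: struct ipath_mregion
 * (see ipath_verbs.h) ends in a variable-length array of pointers to
 * first-level segment tables (mr.map[]).  Allocations therefore size
 * the containing struct plus the needed number of first-level
 * pointers, as done in alloc_mr() and ipath_alloc_fmr() below.
 */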

/**
 * ipath_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ipath_mr *mr;
	struct ib_mr *ret;

	mr = kzalloc(sizeof *mr, GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;

bail:
	return ret;
}
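
/*
 * Example (illustrative only, not part of this driver): a kernel ULP
 * reaches this entry point through the core verbs layer rather than
 * by calling it directly, e.g.:
 *
 *	struct ib_mr *mr;
 *
 *	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */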

static struct ipath_mr *alloc_mr(int count,
				 struct ipath_lkey_table *lk_table)
{
	struct ipath_mr *mr;
	int m, i = 0;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
	if (!mr)
		goto done;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
		if (!mr->mr.map[i])
			goto bail;
	}
	mr->mr.mapsz = m;

	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	if (!ipath_alloc_lkey(lk_table, &mr->mr))
		goto bail;
	mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;

	goto done;

bail:
	while (i) {
		i--;
		kfree(mr->mr.map[i]);
	}
	kfree(mr);
	mr = NULL;

done:
	return mr;
}
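
/*
 * Worked example of the sizing in alloc_mr(): with IPATH_SEGSZ
 * segments per first-level table, a request for
 * count = IPATH_SEGSZ + 1 gives
 * m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ = 2, i.e. the integer
 * arithmetic rounds up so the final, partially filled table is still
 * allocated.
 */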

/**
 * ipath_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags for this memory region
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
				struct ib_phys_buf *buffer_list,
				int num_phys_buf, int acc, u64 *iova_start)
{
	struct ipath_mr *mr;
	int n, m, i;
	struct ib_mr *ret;

	mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
	if (mr == NULL) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = acc;
	mr->mr.max_segs = num_phys_buf;

	m = 0;
	n = 0;
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr =
			phys_to_virt(buffer_list[i].addr);
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		n++;
		if (n == IPATH_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;

bail:
	return ret;
}
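
/*
 * Example (illustrative only): registering a single physically
 * contiguous buffer through the core verbs layer, which dispatches to
 * ipath_reg_phys_mr().  Error handling trimmed for brevity;
 * buf_phys_addr and buf_size are caller-supplied values.
 *
 *	struct ib_phys_buf pbuf = {
 *		.addr = buf_phys_addr,
 *		.size = buf_size,
 *	};
 *	u64 iova = buf_phys_addr;
 *	struct ib_mr *mr;
 *
 *	mr = ib_reg_phys_mr(pd, &pbuf, 1,
 *			    IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *			    &iova);
 */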

/**
 * ipath_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @region: the user memory region
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the InfiniPath driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
				int mr_access_flags, struct ib_udata *udata)
{
	struct ipath_mr *mr;
	struct ib_umem_chunk *chunk;
	int n, m, i;
	struct ib_mr *ret;

	if (region->length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	n = 0;
	list_for_each_entry(chunk, &region->chunk_list, list)
		n += chunk->nents;

	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.user_base = region->user_base;
	mr->mr.iova = region->virt_base;
	mr->mr.length = region->length;
	mr->mr.offset = region->offset;
	mr->mr.access_flags = mr_access_flags;
	mr->mr.max_segs = n;

	m = 0;
	n = 0;
	list_for_each_entry(chunk, &region->chunk_list, list) {
		for (i = 0; i < chunk->nmap; i++) {
			mr->mr.map[m]->segs[n].vaddr =
				page_address(chunk->page_list[i].page);
			mr->mr.map[m]->segs[n].length = region->page_size;
			n++;
			if (n == IPATH_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	ret = &mr->ibmr;

bail:
	return ret;
}
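
/*
 * Illustrative call path (not code in this file): a userspace
 * ibv_reg_mr(pd, buf, len, access) call is translated by the uverbs
 * layer into an ib_umem describing the pinned pages and arrives here
 * as @region; the double loop above simply flattens the umem's
 * page chunks into the driver's two-level segment tables.
 */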

/**
 * ipath_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by ipath_get_dma_mr(),
 * ipath_reg_phys_mr(), or ipath_reg_user_mr().
 */
int ipath_dereg_mr(struct ib_mr *ibmr)
{
	struct ipath_mr *mr = to_imr(ibmr);
	int i;

	ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
	i = mr->mr.mapsz;
	while (i) {
		i--;
		kfree(mr->mr.map[i]);
	}
	kfree(mr);
	return 0;
}

/**
 * ipath_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			       struct ib_fmr_attr *fmr_attr)
{
	struct ipath_fmr *fmr;
	int m, i = 0;
	struct ib_fmr *ret;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
	if (!fmr)
		goto bail;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
					 GFP_KERNEL);
		if (!fmr->mr.map[i])
			goto bail;
	}
	fmr->mr.mapsz = m;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
		goto bail;
	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.user_base = 0;
	fmr->mr.iova = 0;
	fmr->mr.length = 0;
	fmr->mr.offset = 0;
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
	goto done;

bail:
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	ret = ERR_PTR(-ENOMEM);

done:
	return ret;
}
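
/*
 * Example (illustrative only): allocating an FMR through the core
 * verbs layer, which dispatches to ipath_alloc_fmr().  The attribute
 * values are arbitrary.
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr;
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE |
 *			   IB_ACCESS_REMOTE_WRITE, &attr);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 */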

/**
 * ipath_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		       int list_len, u64 iova)
{
	struct ipath_fmr *fmr = to_ifmr(ibfmr);
	struct ipath_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = phys_to_virt(page_list[i]);
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == IPATH_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}
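
/*
 * Example (illustrative only): remapping an FMR onto two pages through
 * the core verbs layer.  The page list holds physical addresses, one
 * per (1 << page_shift)-sized page; page0_addr, page1_addr, and
 * io_virtual_addr are caller-supplied values.
 *
 *	u64 pages[2] = { page0_addr, page1_addr };
 *	int err;
 *
 *	err = ib_map_phys_fmr(fmr, pages, 2, io_virtual_addr);
 *	if (err)
 *		return err;
 */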

/**
 * ipath_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int ipath_unmap_fmr(struct list_head *fmr_list)
{
	struct ipath_fmr *fmr;
	struct ipath_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}
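
/*
 * Example (illustrative only): batching several FMRs onto a list and
 * unmapping them in one call, as FMR pool users typically do; fmr_a
 * and fmr_b are struct ib_fmr pointers owned by the caller.
 *
 *	LIST_HEAD(fmr_list);
 *
 *	list_add_tail(&fmr_a->list, &fmr_list);
 *	list_add_tail(&fmr_b->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 */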

/**
 * ipath_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct ipath_fmr *fmr = to_ifmr(ibfmr);
	int i;

	ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
	i = fmr->mr.mapsz;
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	return 0;
}