drivers/net/ethernet/mellanox/mlx4/mr.c

/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

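/*
 * MTT segments are managed with a small buddy allocator: bits[o] is a
 * bitmap of free blocks of order o (counted in segments) and num_free[o]
 * counts them.  mlx4_buddy_alloc() finds the smallest free block of at
 * least the requested order, splits it down to that order and returns the
 * first segment number of the block, or -1 if nothing is free.
 */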
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

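/*
 * Return a block to the buddy allocator, merging it with its buddy and
 * moving up one order at a time for as long as the buddy is also free.
 */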
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

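/*
 * Allocate one free-block bitmap per order (low orders have the largest
 * bitmaps, so fall back to vzalloc when kcalloc fails) and start with a
 * single free block of the maximum order covering the whole table.
 */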
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(s * sizeof(long));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		if (is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

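/*
 * Reserve a power-of-two range of MTT entries.  The buddy allocator works
 * in units of segments of (1 << log_mtts_per_seg) entries, so the MTT
 * order is first converted to a segment order; the ICM backing the range
 * is then referenced with mlx4_table_get_range().
 */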
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

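/*
 * On a multi-function (SR-IOV) device the allocation goes through the
 * wrapped ALLOC_RES command so that the master's resource tracker accounts
 * for it; on a single-function device the range comes straight from the
 * local buddy allocator.
 */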
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

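/*
 * Initialize an MTT descriptor for a buffer of npages pages: the page
 * count is rounded up to a power of two to get mtt->order and a matching
 * MTT range is reserved.  npages == 0 describes a physically contiguous
 * buffer that needs no translation (mtt->order stays -1).
 */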
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

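/*
 * The user-visible memory key is the hardware MPT index rotated left by one
 * byte.  mlx4_map_phys_fmr() below bumps the index by num_mpts on every
 * remap, which changes the visible key while the MPT slot actually used
 * (index & (num_mpts - 1)) stays the same.
 */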
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

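/*
 * SW2HW_MPT hands a software-built MPT entry (in a command mailbox) over to
 * the firmware; HW2SW_MPT takes it back, with a NULL mailbox setting the op
 * modifier so that the entry contents are not read back.
 */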
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova = iova;
	mr->size = size;
	mr->pd = pd;
	mr->access = access;
	mr->enabled = MLX4_MPT_DISABLED;
	mr->key = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mpt_reserve(dev);
}

void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}

static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_release(dev, index);
}

int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}

static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mpt_alloc_icm(dev, index);
}

void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	return __mlx4_mpt_free_icm(dev, index);
}

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mpt_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

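/*
 * Move an enabled MR back to software ownership.  HW2SW_MPT fails if memory
 * windows are still bound to the region, in which case the error is passed
 * back to the caller and the MR is left untouched.
 */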
static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err) {
			mlx4_warn(dev, "HW2SW_MPT failed (%d),", err);
			mlx4_warn(dev, "MR has MWs bound to it.\n");
			return err;
		}

		mr->enabled = MLX4_MPT_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);

	return 0;
}

int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int ret;

	ret = mlx4_mr_free_reserved(dev, mr);
	if (ret)
		return ret;
	if (mr->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mpt_release(dev, key_to_hw_index(mr->key));

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

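/*
 * Make a reserved MR usable by the HCA: pin its dMPT ICM entry, build the
 * MPT entry in a mailbox (physical vs. translated, fast-register vs.
 * software-owned) and hand it to the firmware with SW2HW_MPT.
 */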
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;
	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
				       MLX4_MPT_FLAG_REGION |
				       mr->access);

	mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start = cpu_to_be64(mr->iova);
	mpt_entry->length = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
								&mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

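/*
 * Write a chunk of MTT entries directly into the DMA-mapped MTT table,
 * bracketing the CPU stores with dma_sync_single_for_cpu()/_for_device().
 * Callers keep each chunk within a single ICM page (see __mlx4_write_mtt).
 */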
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}

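/*
 * The first chunk is sized so that it ends exactly on an ICM page boundary;
 * subsequent chunks are at most one full page of MTT entries, so each call
 * to mlx4_write_mtt_chunk() stays within a single page.
 */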
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages -= chunk;
		start_index += chunk;
		page_list += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}

int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
							   MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages -= chunk;
			start_index += chunk;
			page_list += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

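/*
 * Memory windows share the MPT namespace with memory regions: a window is a
 * reserved MPT entry that is later enabled with the REGION flag left clear
 * (see mlx4_mw_enable() below).
 */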
int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
		  struct mlx4_mw *mw)
{
	u32 index;

	if ((type == MLX4_MW_TYPE_1 &&
	     !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
	    (type == MLX4_MW_TYPE_2 &&
	     !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
		return -ENOTSUPP;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	mw->key = hw_index_to_key(index);
	mw->pd = pd;
	mw->type = type;
	mw->enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);

int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
	 * off, thus creating a memory window and not a memory region.
	 */
	mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key));
	mpt_entry->pd_flags = cpu_to_be32(mw->pd);
	if (mw->type == MLX4_MW_TYPE_2) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mw->key) &
			     (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mw->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mw_enable);

void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	int err;

	if (mw->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mw->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);

		mw->enabled = MLX4_MPT_EN_SW;
	}
	if (mw->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);

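/*
 * Set up the MPT index bitmap and the MTT buddy allocator, then take the
 * firmware-reserved MTT entries out of circulation by allocating a range
 * that covers dev->caps.reserved_mtts.
 */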
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2((u32)dev->caps.num_mtts /
			      (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small.\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}

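/*
 * Remap an FMR: take the MPT out of hardware ownership by writing
 * MLX4_MPT_STATUS_SW into its first byte, rewrite the MTT entries and the
 * relevant MPT fields in place, then flip the status back to
 * MLX4_MPT_STATUS_HW.  The wmb()s order the status writes against the
 * entry updates.
 */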
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key = cpu_to_be32(key);
	fmr->mpt->lkey = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);

int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages = max_pages;
	fmr->max_maps = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);

	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	(void) mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (!fmr->maps)
		return;

	fmr->maps = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
		       " failed (%d)\n", err);
		return;
	}

	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(fmr->mr.key) &
			     (dev->caps.num_mpts - 1));
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err) {
		printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
		       err);
		return;
	}
	fmr->mr.enabled = MLX4_MPT_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	int ret;

	if (fmr->maps)
		return -EBUSY;

	ret = mlx4_mr_free(dev, &fmr->mr);
	if (ret)
		return ret;
	fmr->mr.enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
			MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);