/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/errno.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
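
/*
 * Software handle for a range of MTT (memory translation table)
 * segments: which buddy allocator the range came from, the
 * allocation order, and the first segment number.
 */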
struct mthca_mtt {
	struct mthca_buddy *buddy;
	int                 order;
	u32                 first_seg;
};

/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mthca_mpt_entry {
	__be32 flags;
	__be32 page_size;
	__be32 key;
	__be32 pd;
	__be64 start;
	__be64 length;
	__be32 lkey;
	__be32 window_count;
	__be32 window_count_limit;
	__be64 mtt_seg;
	__be32 mtt_sz;		/* Arbel only */
	u32    reserved[2];
} __packed;

#define MTHCA_MPT_FLAG_SW_OWNS       (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO           (1 << 17)
#define MTHCA_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MTHCA_MPT_FLAG_PHYSICAL      (1 <<  9)
#define MTHCA_MPT_FLAG_REGION        (1 <<  8)

#define MTHCA_MTT_FLAG_PRESENT       1

#define MTHCA_MPT_STATUS_SW 0xF0
#define MTHCA_MPT_STATUS_HW 0x00

#define SINAI_FMR_KEY_INC 0x1000000

/*
 * Buddy allocator for MTT segments (currently not very efficient
 * since it doesn't keep a free list and just searches linearly
 * through the bitmaps)
 */
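
/*
 * Invariant: buddy->bits[o] has one bit per block of 2^o segments,
 * set iff that block is free; buddy->num_free[o] counts the set bits
 * at order o so empty orders can be skipped without scanning.
 */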
static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	__clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	/* Split the block down to the requested order, marking the
	 * buddy of each split half free. */
	while (o > order) {
		--o;
		seg <<= 1;
		__set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}
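
/*
 * Free a block of 2^order segments, merging it with its buddy at
 * each order as long as that buddy is also free.
 */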
static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		__clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	__set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}
static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
	int i;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		buddy->bits[i] = bitmap_zalloc(1 << (buddy->max_order - i),
					       GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;
	}

	/* Initially a single free block covers the whole table. */
	__set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		bitmap_free(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}
static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		bitmap_free(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}
static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
				 struct mthca_buddy *buddy)
{
	u32 seg = mthca_buddy_alloc(buddy, order);

	if (seg == -1)
		return -1;

	/* Memfree HCAs keep the MTT table in host memory (ICM); make
	 * sure the pages backing this range are mapped. */
	if (mthca_is_memfree(dev))
		if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
					  seg + (1 << order) - 1)) {
			mthca_buddy_free(buddy, seg, order);
			seg = -1;
		}

	return seg;
}
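
/*
 * Each MTT segment holds mtt_seg_size / 8 eight-byte entries, so the
 * loop below rounds "size" entries up to a power-of-two number of
 * segments and derives the matching buddy order.
 */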
static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
					   struct mthca_buddy *buddy)
{
	struct mthca_mtt *mtt;
	int i;

	if (size <= 0)
		return ERR_PTR(-EINVAL);

	mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
	if (!mtt)
		return ERR_PTR(-ENOMEM);

	mtt->buddy = buddy;
	mtt->order = 0;
	for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
		++mtt->order;

	mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
	if (mtt->first_seg == -1) {
		kfree(mtt);
		return ERR_PTR(-ENOMEM);
	}

	return mtt;
}
struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
{
	return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
}
void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
{
	if (!mtt)
		return;

	mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);

	mthca_table_put_range(dev, dev->mr_table.mtt_table,
			      mtt->first_seg,
			      mtt->first_seg + (1 << mtt->order) - 1);

	kfree(mtt);
}
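
/*
 * Write MTT entries through the WRITE_MTT firmware command.  The
 * mailbox holds the 64-bit destination MTT address, one reserved
 * quadword, and then up to MTHCA_MAILBOX_SIZE / 8 - 2 entries.
 */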
static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
			     int start_index, u64 *buffer_list, int list_len)
{
	struct mthca_mailbox *mailbox;
	__be64 *mtt_entry;
	int err = 0;
	int i;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mtt_entry = mailbox->buf;

	while (list_len > 0) {
		mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
					   mtt->first_seg * dev->limits.mtt_seg_size +
					   start_index * 8);
		mtt_entry[1] = 0;
		for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
			mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
						       MTHCA_MTT_FLAG_PRESENT);

		/*
		 * If we have an odd number of entries to write, add
		 * one more dummy entry for firmware efficiency.
		 */
		if (i & 1)
			mtt_entry[i + 2] = 0;

		err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1);
		if (err) {
			mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
			goto out;
		}

		list_len    -= i;
		start_index += i;
		buffer_list += i;
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
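
/*
 * Largest number of MTT entries mthca_write_mtt() may write per
 * chunk: the command path reserves two mailbox slots, and the Arbel
 * direct-write path must stay within a single page.
 */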
int mthca_write_mtt_size(struct mthca_dev *dev)
{
	if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
	    !(dev->mthca_flags & MTHCA_FLAG_FMR))
		/*
		 * Be friendly to WRITE_MTT command
		 * and leave two empty slots for the
		 * index and reserved fields of the
		 * mailbox.
		 */
		return PAGE_SIZE / sizeof (u64) - 2;

	/* For Arbel, all MTTs must fit in the same page. */
	return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
}
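
/*
 * When FMRs are enabled the MTT entries are written directly rather
 * than through a firmware command: Tavor via the MMIO mapping of HCA
 * memory set up in mthca_init_mr_table(), Arbel via the ICM page in
 * host memory.
 */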
static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
				      struct mthca_mtt *mtt, int start_index,
				      u64 *buffer_list, int list_len)
{
	u64 __iomem *mtts;
	int i;

	mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
		start_index * sizeof (u64);

	for (i = 0; i < list_len; ++i)
		mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
				  mtts + i);
}
static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
				      struct mthca_mtt *mtt, int start_index,
				      u64 *buffer_list, int list_len)
{
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;
	int s = start_index * sizeof (u64);

	/* For Arbel, all MTTs must fit in the same page. */
	BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
	/* Require full segments */
	BUG_ON(s % dev->limits.mtt_seg_size);

	mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
				s / dev->limits.mtt_seg_size, &dma_handle);

	BUG_ON(!mtts);

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				list_len * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < list_len; ++i)
		mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   list_len * sizeof (u64), DMA_TO_DEVICE);
}
int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
		    int start_index, u64 *buffer_list, int list_len)
{
	int size = mthca_write_mtt_size(dev);
	int chunk;

	if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
	    !(dev->mthca_flags & MTHCA_FLAG_FMR))
		return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len);

	while (list_len > 0) {
		chunk = min(size, list_len);
		if (mthca_is_memfree(dev))
			mthca_arbel_write_mtt_seg(dev, mtt, start_index,
						  buffer_list, chunk);
		else
			mthca_tavor_write_mtt_seg(dev, mtt, start_index,
						  buffer_list, chunk);

		list_len    -= chunk;
		start_index += chunk;
		buffer_list += chunk;
	}

	return 0;
}
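
/*
 * Tavor uses memory keys equal to the hardware MPT index.  Arbel
 * keys are the index rotated left by 8 bits (key_to_hw_index is the
 * inverse rotation), and adjust_key() remaps the index when the
 * Sinai throughput optimization is active.
 */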
static inline u32 tavor_hw_index_to_key(u32 ind)
{
	return ind;
}

static inline u32 tavor_key_to_hw_index(u32 key)
{
	return key;
}

static inline u32 arbel_hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static inline u32 arbel_key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
{
	if (mthca_is_memfree(dev))
		return arbel_hw_index_to_key(ind);
	else
		return tavor_hw_index_to_key(ind);
}

static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
{
	if (mthca_is_memfree(dev))
		return arbel_key_to_hw_index(key);
	else
		return tavor_key_to_hw_index(key);
}

static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
{
	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		return ((key << 20) & 0x800000) | (key & 0x7fffff);
	else
		return key;
}
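
/*
 * Build an MPT entry in a mailbox and pass ownership to the HCA with
 * SW2HW_MPT.  A NULL mr->mtt marks a direct physical region: the
 * PHYSICAL flag is set and no MTT address is written.
 */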
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
		   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
	struct mthca_mailbox *mailbox;
	struct mthca_mpt_entry *mpt_entry;
	u32 key;
	int i;
	int err;

	WARN_ON(buffer_size_shift >= 32);

	key = mthca_alloc(&dev->mr_table.mpt_alloc);
	if (key == -1)
		return -ENOMEM;
	key = adjust_key(dev, key);
	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
		if (err)
			goto err_out_mpt_free;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_table;
	}
	mpt_entry = mailbox->buf;

	mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
				       MTHCA_MPT_FLAG_MIO         |
				       MTHCA_MPT_FLAG_REGION      |
				       access);
	if (!mr->mtt)
		mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);

	mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
	mpt_entry->key       = cpu_to_be32(key);
	mpt_entry->pd        = cpu_to_be32(pd);
	mpt_entry->start     = cpu_to_be64(iova);
	mpt_entry->length    = cpu_to_be64(total_size);

	memset(&mpt_entry->lkey, 0,
	       sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));

	if (mr->mtt)
		mpt_entry->mtt_seg =
			cpu_to_be64(dev->mr_table.mtt_base +
				    mr->mtt->first_seg * dev->limits.mtt_seg_size);

	if (0) {
		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
		for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	err = mthca_SW2HW_MPT(dev, mailbox,
			      key & (dev->limits.num_mpts - 1));
	if (err) {
		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_out_mailbox;
	}

	mthca_free_mailbox(dev, mailbox);
	return err;

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_table:
	mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
	mthca_free(&dev->mr_table.mpt_alloc, key);
	return err;
}
int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
			   u32 access, struct mthca_mr *mr)
{
	mr->mtt = NULL;
	return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
}
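
/*
 * Register a physically discontiguous region: allocate MTT segments,
 * write the page list into them, then create an MPT entry that
 * points at the first segment.
 */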
int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
			u64 *buffer_list, int buffer_size_shift,
			int list_len, u64 iova, u64 total_size,
			u32 access, struct mthca_mr *mr)
{
	int err;

	mr->mtt = mthca_alloc_mtt(dev, list_len);
	if (IS_ERR(mr->mtt))
		return PTR_ERR(mr->mtt);

	err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
	if (err) {
		mthca_free_mtt(dev, mr->mtt);
		return err;
	}

	err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
			     total_size, access, mr);
	if (err)
		mthca_free_mtt(dev, mr->mtt);

	return err;
}
/* Free mr */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
	mthca_table_put(dev, dev->mr_table.mpt_table,
			key_to_hw_index(dev, lkey));

	mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}
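
/*
 * Take the MPT entry back into software ownership with HW2SW_MPT
 * before releasing the key and any MTT segments behind it.
 */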
void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
	int err;

	err = mthca_HW2SW_MPT(dev, NULL,
			      key_to_hw_index(dev, mr->ibmr.lkey) &
			      (dev->limits.num_mpts - 1));
	if (err)
		mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);

	mthca_free_region(dev, mr->ibmr.lkey);
	mthca_free_mtt(dev, mr->mtt);
}
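
/*
 * Set up the MR tables: the MPT key allocator, the MTT buddy
 * allocator, and (on Tavor-family HCAs with FMR support) direct
 * mappings of the MPT and MTT tables plus a separate buddy
 * reserved for FMRs.
 */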
int mthca_init_mr_table(struct mthca_dev *dev)
{
	phys_addr_t addr;
	int mpts, mtts, err, i;

	err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
			       dev->limits.num_mpts,
			       ~0, dev->limits.reserved_mrws);
	if (err)
		return err;

	if (!mthca_is_memfree(dev) &&
	    (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
		dev->limits.fmr_reserved_mtts = 0;
	else
		dev->mthca_flags |= MTHCA_FLAG_FMR;

	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		mthca_dbg(dev, "Memory key throughput optimization activated.\n");

	err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
			       fls(dev->limits.num_mtt_segs - 1));
	if (err)
		goto err_mtt_buddy;

	dev->mr_table.tavor_fmr.mpt_base = NULL;
	dev->mr_table.tavor_fmr.mtt_base = NULL;

	if (dev->limits.fmr_reserved_mtts) {
		i = fls(dev->limits.fmr_reserved_mtts - 1);

		if (i >= 31) {
			mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n");
			err = -EINVAL;
			goto err_fmr_mpt;
		}
		mpts = mtts = 1 << i;
	} else {
		mtts = dev->limits.num_mtt_segs;
		mpts = dev->limits.num_mpts;
	}

	if (!mthca_is_memfree(dev) &&
	    (dev->mthca_flags & MTHCA_FLAG_FMR)) {

		addr = pci_resource_start(dev->pdev, 4) +
			((pci_resource_len(dev->pdev, 4) - 1) &
			 dev->mr_table.mpt_base);

		dev->mr_table.tavor_fmr.mpt_base =
			ioremap(addr, mpts * sizeof(struct mthca_mpt_entry));

		if (!dev->mr_table.tavor_fmr.mpt_base) {
			mthca_warn(dev, "MPT ioremap for FMR failed.\n");
			err = -ENOMEM;
			goto err_fmr_mpt;
		}

		addr = pci_resource_start(dev->pdev, 4) +
			((pci_resource_len(dev->pdev, 4) - 1) &
			 dev->mr_table.mtt_base);

		dev->mr_table.tavor_fmr.mtt_base =
			ioremap(addr, mtts * dev->limits.mtt_seg_size);
		if (!dev->mr_table.tavor_fmr.mtt_base) {
			mthca_warn(dev, "MTT ioremap for FMR failed.\n");
			err = -ENOMEM;
			goto err_fmr_mtt;
		}
	}

	if (dev->limits.fmr_reserved_mtts) {
		err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, fls(mtts - 1));
		if (err)
			goto err_fmr_mtt_buddy;

		/* Prevent regular MRs from using FMR keys */
		err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, fls(mtts - 1));
		if (err)
			goto err_reserve_fmr;

		dev->mr_table.fmr_mtt_buddy =
			&dev->mr_table.tavor_fmr.mtt_buddy;
	} else
		dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;

	/* FMR table is always the first, take reserved MTTs out of there */
	if (dev->limits.reserved_mtts) {
		i = fls(dev->limits.reserved_mtts - 1);

		if (mthca_alloc_mtt_range(dev, i,
					  dev->mr_table.fmr_mtt_buddy) == -1) {
			mthca_warn(dev, "MTT table of order %d is too small.\n",
				   dev->mr_table.fmr_mtt_buddy->max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
err_reserve_fmr:
	if (dev->limits.fmr_reserved_mtts)
		mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

err_fmr_mtt_buddy:
	if (dev->mr_table.tavor_fmr.mtt_base)
		iounmap(dev->mr_table.tavor_fmr.mtt_base);

err_fmr_mtt:
	if (dev->mr_table.tavor_fmr.mpt_base)
		iounmap(dev->mr_table.tavor_fmr.mpt_base);

err_fmr_mpt:
	mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

err_mtt_buddy:
	mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);

	return err;
}
void mthca_cleanup_mr_table(struct mthca_dev *dev)
{
	/* XXX check if any MRs are still allocated? */
	if (dev->limits.fmr_reserved_mtts)
		mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

	mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

	if (dev->mr_table.tavor_fmr.mtt_base)
		iounmap(dev->mr_table.tavor_fmr.mtt_base);
	if (dev->mr_table.tavor_fmr.mpt_base)
		iounmap(dev->mr_table.tavor_fmr.mpt_base);

	mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
}