[PATCH] IB: Update current firmware versions in mthca driver
[linux-block.git] drivers/infiniband/hw/mthca/mthca_memfree.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#include <linux/mm.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
        MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
        MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

struct mthca_user_db_table {
        struct semaphore mutex;
        struct {
                u64                uvirt;
                struct scatterlist mem;
                int                refcount;
        } page[0];
};

void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
{
        struct mthca_icm_chunk *chunk, *tmp;
        int i;

        if (!icm)
                return;

        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                if (chunk->nsg > 0)
                        pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
                                     PCI_DMA_BIDIRECTIONAL);

                for (i = 0; i < chunk->npages; ++i)
                        __free_pages(chunk->mem[i].page,
                                     get_order(chunk->mem[i].length));

                kfree(chunk);
        }

        kfree(icm);
}

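/*
 * Allocate backing memory for an ICM area: grab npages pages in the
 * highest-order blocks the page allocator will give us (dropping the
 * order on failure) and chain them on icm->chunk_list, mapping each
 * chunk's scatterlist for DMA as it fills up.
 */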
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
                                  unsigned int gfp_mask)
{
        struct mthca_icm *icm;
        struct mthca_icm_chunk *chunk = NULL;
        int cur_order;

        icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
        if (!icm)
                return icm;

        icm->refcount = 0;
        INIT_LIST_HEAD(&icm->chunk_list);

        cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

        while (npages > 0) {
                if (!chunk) {
                        chunk = kmalloc(sizeof *chunk,
                                        gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                        if (!chunk)
                                goto fail;

                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                while (1 << cur_order > npages)
                        --cur_order;

                chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
                if (chunk->mem[chunk->npages].page) {
                        chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
                        chunk->mem[chunk->npages].offset = 0;

                        if (++chunk->npages == MTHCA_ICM_CHUNK_LEN) {
                                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                                        chunk->npages,
                                                        PCI_DMA_BIDIRECTIONAL);

                                if (chunk->nsg <= 0)
                                        goto fail;

                                chunk = NULL;
                        }

                        npages -= 1 << cur_order;
                } else {
                        --cur_order;
                        if (cur_order < 0)
                                goto fail;
                }
        }

        if (chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
        }

        return icm;

fail:
        mthca_free_icm(dev, icm);
        return NULL;
}

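/*
 * Each ICM table is mapped in MTHCA_TABLE_CHUNK_SIZE pieces.
 * mthca_table_get() allocates and maps the chunk that holds object
 * 'obj' on first use and takes a reference on it; mthca_table_put()
 * drops the reference and unmaps the chunk when it reaches zero.
 */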
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
        int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
        int ret = 0;
        u8 status;

        down(&table->mutex);

        if (table->icm[i]) {
                ++table->icm[i]->refcount;
                goto out;
        }

        table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                        (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                        __GFP_NOWARN);
        if (!table->icm[i]) {
                ret = -ENOMEM;
                goto out;
        }

        if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                          &status) || status) {
                mthca_free_icm(dev, table->icm[i]);
                table->icm[i] = NULL;
                ret = -ENOMEM;
                goto out;
        }

        ++table->icm[i]->refcount;

out:
        up(&table->mutex);
        return ret;
}

void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
        int i;
        u8 status;

        if (!mthca_is_memfree(dev))
                return;

        i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;

        down(&table->mutex);

        if (--table->icm[i]->refcount == 0) {
                mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
                mthca_free_icm(dev, table->icm[i]);
                table->icm[i] = NULL;
        }

        up(&table->mutex);
}

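/*
 * Return the kernel virtual address of object 'obj' in a lowmem ICM
 * table by walking the scatterlist of the chunk that maps it, or NULL
 * if the table lives in highmem or the chunk is not mapped.
 */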
void *mthca_table_find(struct mthca_icm_table *table, int obj)
{
        int idx, offset, i;
        struct mthca_icm_chunk *chunk;
        struct mthca_icm *icm;
        struct page *page = NULL;

        if (!table->lowmem)
                return NULL;

        down(&table->mutex);

        idx = (obj & (table->num_obj - 1)) * table->obj_size;
        icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
        offset = idx % MTHCA_TABLE_CHUNK_SIZE;

        if (!icm)
                goto out;

        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        if (chunk->mem[i].length >= offset) {
                                page = chunk->mem[i].page;
                                break;
                        }
                        offset -= chunk->mem[i].length;
                }
        }

out:
        up(&table->mutex);
        return page ? lowmem_page_address(page) + offset : NULL;
}

int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
                          int start, int end)
{
        int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
        int i, err;

        for (i = start; i <= end; i += inc) {
                err = mthca_table_get(dev, table, i);
                if (err)
                        goto fail;
        }

        return 0;

fail:
        while (i > start) {
                i -= inc;
                mthca_table_put(dev, table, i);
        }

        return err;
}

void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
                           int start, int end)
{
        int i;

        if (!mthca_is_memfree(dev))
                return;

        for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
                mthca_table_put(dev, table, i);
}

struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
                                              u64 virt, int obj_size,
                                              int nobj, int reserved,
                                              int use_lowmem)
{
        struct mthca_icm_table *table;
        int num_icm;
        int i;
        u8 status;

        num_icm = obj_size * nobj / MTHCA_TABLE_CHUNK_SIZE;

        table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
        if (!table)
                return NULL;

        table->virt     = virt;
        table->num_icm  = num_icm;
        table->num_obj  = nobj;
        table->obj_size = obj_size;
        table->lowmem   = use_lowmem;
        init_MUTEX(&table->mutex);

        for (i = 0; i < num_icm; ++i)
                table->icm[i] = NULL;

        for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
                table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                                (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                                __GFP_NOWARN);
                if (!table->icm[i])
                        goto err;
                if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                  &status) || status) {
                        mthca_free_icm(dev, table->icm[i]);
                        table->icm[i] = NULL;
                        goto err;
                }

                /*
                 * Add a reference to this ICM chunk so that it never
                 * gets freed (since it contains reserved firmware objects).
                 */
                ++table->icm[i]->refcount;
        }

        return table;

err:
        for (i = 0; i < num_icm; ++i)
                if (table->icm[i]) {
                        mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                        MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
                        mthca_free_icm(dev, table->icm[i]);
                }

        kfree(table);

        return NULL;
}

void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
        int i;
        u8 status;

        for (i = 0; i < table->num_icm; ++i)
                if (table->icm[i]) {
                        mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                        MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
                        mthca_free_icm(dev, table->icm[i]);
                }

        kfree(table);
}

static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
{
        return dev->uar_table.uarc_base +
                uar->index * dev->uar_table.uarc_size +
                page * 4096;
}

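/*
 * Map a userspace doorbell page: pin the page with get_user_pages(),
 * DMA-map it, and hand its bus address to the HCA so that it backs the
 * UAR context page that holds this doorbell record.
 */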
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
                      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
        int ret = 0;
        u8 status;
        int i;

        if (!mthca_is_memfree(dev))
                return 0;

        if (index < 0 || index > dev->uar_table.uarc_size / 8)
                return -EINVAL;

        down(&db_tab->mutex);

        i = index / MTHCA_DB_REC_PER_PAGE;

        if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE) ||
            (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
            (uaddr & 4095)) {
                ret = -EINVAL;
                goto out;
        }

        if (db_tab->page[i].refcount) {
                ++db_tab->page[i].refcount;
                goto out;
        }

        ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
                             &db_tab->page[i].mem.page, NULL);
        if (ret < 0)
                goto out;

        db_tab->page[i].mem.length = 4096;
        db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;

        ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
        if (ret < 0) {
                put_page(db_tab->page[i].mem.page);
                goto out;
        }

        ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
                                 mthca_uarc_virt(dev, uar, i), &status);
        if (!ret && status)
                ret = -EINVAL;
        if (ret) {
                pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
                put_page(db_tab->page[i].mem.page);
                goto out;
        }

        db_tab->page[i].uvirt    = uaddr;
        db_tab->page[i].refcount = 1;

out:
        up(&db_tab->mutex);
        return ret;
}

void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
                         struct mthca_user_db_table *db_tab, int index)
{
        if (!mthca_is_memfree(dev))
                return;

        /*
         * To make our bookkeeping simpler, we don't unmap DB
         * pages until we clean up the whole db table.
         */

        down(&db_tab->mutex);

        --db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;

        up(&db_tab->mutex);
}

struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
{
        struct mthca_user_db_table *db_tab;
        int npages;
        int i;

        if (!mthca_is_memfree(dev))
                return NULL;

        npages = dev->uar_table.uarc_size / 4096;
        db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
        if (!db_tab)
                return ERR_PTR(-ENOMEM);

        init_MUTEX(&db_tab->mutex);
        for (i = 0; i < npages; ++i) {
                db_tab->page[i].refcount = 0;
                db_tab->page[i].uvirt    = 0;
        }

        return db_tab;
}

void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
                               struct mthca_user_db_table *db_tab)
{
        int i;
        u8 status;

        if (!mthca_is_memfree(dev))
                return;

        for (i = 0; i < dev->uar_table.uarc_size / 4096; ++i) {
                if (db_tab->page[i].uvirt) {
                        mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
                        pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
                        put_page(db_tab->page[i].mem.page);
                }
        }
}

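/*
 * Kernel doorbell records live in UARC pages mapped for the driver's
 * own UAR.  Group 1 records (CQ arm and SQ) are handed out from the
 * low end of the page array and group 2 records (CQ set_ci, RQ, SRQ)
 * from the high end; max_group1 and min_group2 track where the two
 * regions currently meet.
 */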
int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db)
{
        int group;
        int start, end, dir;
        int i, j;
        struct mthca_db_page *page;
        int ret = 0;
        u8 status;

        down(&dev->db_tab->mutex);

        switch (type) {
        case MTHCA_DB_TYPE_CQ_ARM:
        case MTHCA_DB_TYPE_SQ:
                group = 0;
                start = 0;
                end   = dev->db_tab->max_group1;
                dir   = 1;
                break;

        case MTHCA_DB_TYPE_CQ_SET_CI:
        case MTHCA_DB_TYPE_RQ:
        case MTHCA_DB_TYPE_SRQ:
                group = 1;
                start = dev->db_tab->npages - 1;
                end   = dev->db_tab->min_group2;
                dir   = -1;
                break;

        default:
                ret = -EINVAL;
                goto out;
        }

        for (i = start; i != end; i += dir)
                if (dev->db_tab->page[i].db_rec &&
                    !bitmap_full(dev->db_tab->page[i].used,
                                 MTHCA_DB_REC_PER_PAGE)) {
                        page = dev->db_tab->page + i;
                        goto found;
                }

        if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
                ret = -ENOMEM;
                goto out;
        }

        page = dev->db_tab->page + end;
        page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096,
                                          &page->mapping, GFP_KERNEL);
        if (!page->db_rec) {
                ret = -ENOMEM;
                goto out;
        }
        memset(page->db_rec, 0, 4096);

        ret = mthca_MAP_ICM_page(dev, page->mapping,
                                 mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
        if (!ret && status)
                ret = -EINVAL;
        if (ret) {
                dma_free_coherent(&dev->pdev->dev, 4096,
                                  page->db_rec, page->mapping);
                goto out;
        }

        bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);
        if (group == 0)
                ++dev->db_tab->max_group1;
        else
                --dev->db_tab->min_group2;

found:
        j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
        set_bit(j, page->used);

        if (group == 1)
                j = MTHCA_DB_REC_PER_PAGE - 1 - j;

        ret = i * MTHCA_DB_REC_PER_PAGE + j;

        page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

        *db = (u32 *) &page->db_rec[j];

out:
        up(&dev->db_tab->mutex);

        return ret;
}

void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
        int i, j;
        struct mthca_db_page *page;
        u8 status;

        i = db_index / MTHCA_DB_REC_PER_PAGE;
        j = db_index % MTHCA_DB_REC_PER_PAGE;

        page = dev->db_tab->page + i;

        down(&dev->db_tab->mutex);

        page->db_rec[j] = 0;
        if (i >= dev->db_tab->min_group2)
                j = MTHCA_DB_REC_PER_PAGE - 1 - j;
        clear_bit(j, page->used);

        if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
            i >= dev->db_tab->max_group1 - 1) {
                mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

                dma_free_coherent(&dev->pdev->dev, 4096,
                                  page->db_rec, page->mapping);
                page->db_rec = NULL;

                if (i == dev->db_tab->max_group1) {
                        --dev->db_tab->max_group1;
                        /* XXX may be able to unmap more pages now */
                }
                if (i == dev->db_tab->min_group2)
                        ++dev->db_tab->min_group2;
        }

        up(&dev->db_tab->mutex);
}

int mthca_init_db_tab(struct mthca_dev *dev)
{
        int i;

        if (!mthca_is_memfree(dev))
                return 0;

        dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
        if (!dev->db_tab)
                return -ENOMEM;

        init_MUTEX(&dev->db_tab->mutex);

        dev->db_tab->npages     = dev->uar_table.uarc_size / 4096;
        dev->db_tab->max_group1 = 0;
        dev->db_tab->min_group2 = dev->db_tab->npages - 1;

        dev->db_tab->page = kmalloc(dev->db_tab->npages *
                                    sizeof *dev->db_tab->page,
                                    GFP_KERNEL);
        if (!dev->db_tab->page) {
                kfree(dev->db_tab);
                return -ENOMEM;
        }

        for (i = 0; i < dev->db_tab->npages; ++i)
                dev->db_tab->page[i].db_rec = NULL;

        return 0;
}

void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
        int i;
        u8 status;

        if (!mthca_is_memfree(dev))
                return;

        /*
         * Because we don't always free our UARC pages when they
         * become empty to make mthca_free_db() simpler we need to
         * make a sweep through the doorbell pages and free any
         * leftover pages now.
         */
        for (i = 0; i < dev->db_tab->npages; ++i) {
                if (!dev->db_tab->page[i].db_rec)
                        continue;

                if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
                        mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

                mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

                dma_free_coherent(&dev->pdev->dev, 4096,
                                  dev->db_tab->page[i].db_rec,
                                  dev->db_tab->page[i].mapping);
        }

        kfree(dev->db_tab->page);
        kfree(dev->db_tab);
}