/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* Crude resource management */
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/ratelimit.h>
#include "iw_cxgb4.h"

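/*
 * Seed the qid table with one id per doorbell/GTS page: only qids whose
 * low qpmask bits are zero are made allocatable; the sibling ids on the
 * same page are handed out from the per-ucontext caches further below.
 */
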
static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
        u32 i;

        if (c4iw_id_table_alloc(&rdev->resource.qid_table,
                                rdev->lldi.vr->qp.start,
                                rdev->lldi.vr->qp.size,
                                rdev->lldi.vr->qp.size, 0))
                return -ENOMEM;

        for (i = rdev->lldi.vr->qp.start;
             i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
                if (!(i & rdev->qpmask))
                        c4iw_id_free(&rdev->resource.qid_table, i);
        return 0;
}

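/*
 * Example of the seeding above, with hypothetical values: if
 * qpmask == 3, qids 4..7 share one db/gts page, so only qid 4 enters
 * the global table; 5, 6 and 7 are later queued on the allocating
 * ucontext's cqids/qpids lists by c4iw_get_cqid()/c4iw_get_qpid().
 */
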
/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
        int err = 0;

        err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
                                  C4IW_ID_TABLE_F_RANDOM);
        if (err)
                goto tpt_err;
        err = c4iw_init_qid_table(rdev);
        if (err)
                goto qid_err;
        err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
                                  nr_pdid, 1, 0);
        if (err)
                goto pdid_err;
        return 0;
 pdid_err:
        c4iw_id_table_free(&rdev->resource.qid_table);
 qid_err:
        c4iw_id_table_free(&rdev->resource.tpt_table);
 tpt_err:
        return -ENOMEM;
}

/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
        u32 entry;

        entry = c4iw_id_alloc(id_table);
        if (entry == (u32)(-1))
                return 0;
        return entry;
}

void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
        pr_debug("entry 0x%x\n", entry);
        c4iw_id_free(id_table, entry);
}

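/*
 * CQ id allocation: reuse a cached id from the ucontext's cqids list
 * when one is available; otherwise allocate a fresh db/gts page worth
 * of ids from the global table and cache the extras on both lists.
 */
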
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->cqids)) {
                entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
                                   entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_table);
                if (!qid)
                        goto out;
                mutex_lock(&rdev->stats.lock);
                rdev->stats.qid.cur += rdev->qpmask + 1;
                mutex_unlock(&rdev->stats.lock);
                for (i = qid+1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }

                /*
                 * now put the same ids on the qp list since they all
                 * map to the same db/gts page.
                 */
                entry = kmalloc(sizeof *entry, GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->qpids);
                for (i = qid+1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        pr_debug("qid 0x%x\n", qid);
        mutex_lock(&rdev->stats.lock);
        if (rdev->stats.qid.cur > rdev->stats.qid.max)
                rdev->stats.qid.max = rdev->stats.qid.cur;
        mutex_unlock(&rdev->stats.lock);
        return qid;
}

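/*
 * Freed cqids are only parked on the ucontext cache here; nothing in
 * this file returns them to the global qid_table. They are reclaimed
 * when the ucontext itself is released elsewhere in the driver.
 */
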
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return;
        pr_debug("qid 0x%x\n", qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->cqids);
        mutex_unlock(&uctx->lock);
}

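/*
 * QP id allocation mirrors c4iw_get_cqid(): take a cached id from the
 * ucontext's qpids list if possible, otherwise carve a new db/gts page
 * out of the global table and cache the sibling ids on both lists.
 */
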
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->qpids)) {
                entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
                                   entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_table);
                if (!qid) {
                        mutex_lock(&rdev->stats.lock);
                        rdev->stats.qid.fail++;
                        mutex_unlock(&rdev->stats.lock);
                        goto out;
                }
                mutex_lock(&rdev->stats.lock);
                rdev->stats.qid.cur += rdev->qpmask + 1;
                mutex_unlock(&rdev->stats.lock);
                for (i = qid+1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }

                /*
                 * now put the same ids on the cq list since they all
                 * map to the same db/gts page.
                 */
                entry = kmalloc(sizeof *entry, GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->cqids);
                /* qid itself was queued just above, so start at qid+1 */
                for (i = qid+1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        pr_debug("qid 0x%x\n", qid);
        mutex_lock(&rdev->stats.lock);
        if (rdev->stats.qid.cur > rdev->stats.qid.max)
                rdev->stats.qid.max = rdev->stats.qid.cur;
        mutex_unlock(&rdev->stats.lock);
        return qid;
}

void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return;
        pr_debug("qid 0x%x\n", qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->qpids);
        mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
        c4iw_id_table_free(&rscp->tpt_table);
        c4iw_id_table_free(&rscp->qid_table);
        c4iw_id_table_free(&rscp->pdid_table);
}

/*
 * PBL Memory Manager. Uses Linux generic allocator.
 */

#define MIN_PBL_SHIFT 8			/* 256B == min PBL size (32 entries) */

u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);

        pr_debug("addr 0x%x size %d\n", (u32)addr, size);
        mutex_lock(&rdev->stats.lock);
        if (addr) {
                rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
                if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
                        rdev->stats.pbl.max = rdev->stats.pbl.cur;
                kref_get(&rdev->pbl_kref);
        } else {
                rdev->stats.pbl.fail++;
        }
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
}

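/*
 * Every successful PBL allocation takes a reference on pbl_kref;
 * c4iw_pblpool_free() and c4iw_pblpool_destroy() drop it, so the pool
 * is destroyed (and pbl_compl completed) only after the last
 * outstanding allocation has been returned.
 */
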
static void destroy_pblpool(struct kref *kref)
{
        struct c4iw_rdev *rdev;

        rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
        gen_pool_destroy(rdev->pbl_pool);
        complete(&rdev->pbl_compl);
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        pr_debug("addr 0x%x size %d\n", addr, size);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
        kref_put(&rdev->pbl_kref, destroy_pblpool);
}

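/*
 * Populate the pool with the adapter's PBL range. gen_pool_add()
 * allocates tracking state that grows with chunk size, so it can fail
 * for very large chunks; on failure the chunk size is halved and
 * retried, and we give up (still reporting success for whatever was
 * added) once chunks of 1024 << MIN_PBL_SHIFT or less are rejected.
 */
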
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
        unsigned pbl_start, pbl_chunk, pbl_top;

        rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
        if (!rdev->pbl_pool)
                return -ENOMEM;

        pbl_start = rdev->lldi.vr->pbl.start;
        pbl_chunk = rdev->lldi.vr->pbl.size;
        pbl_top = pbl_start + pbl_chunk;

        while (pbl_start < pbl_top) {
                pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
                if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
                        pr_debug("failed to add PBL chunk (%x/%x)\n",
                                 pbl_start, pbl_chunk);
                        if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
                                pr_warn("Failed to add all PBL chunks (%x/%x)\n",
                                        pbl_start, pbl_top - pbl_start);
                                return 0;
                        }
                        pbl_chunk >>= 1;
                } else {
                        pr_debug("added PBL chunk (%x/%x)\n",
                                 pbl_start, pbl_chunk);
                        pbl_start += pbl_chunk;
                }
        }

        return 0;
}

void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
        kref_put(&rdev->pbl_kref, destroy_pblpool);
}

/*
 * RQT Memory Manager. Uses Linux generic allocator.
 */

#define MIN_RQT_SHIFT 10		/* 1KB == min RQT size (16 entries) */

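/*
 * Callers pass RQT sizes in hardware entries of 64 bytes each, hence
 * the size << 6 conversions below when talking to the byte-addressed
 * generic allocator.
 */
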
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);

        pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6);
        if (!addr)
                pr_warn_ratelimited("%s: Out of RQT memory\n",
                                    pci_name(rdev->lldi.pdev));
        mutex_lock(&rdev->stats.lock);
        if (addr) {
                rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
                if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
                        rdev->stats.rqt.max = rdev->stats.rqt.cur;
                kref_get(&rdev->rqt_kref);
        } else {
                rdev->stats.rqt.fail++;
        }
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
}

static void destroy_rqtpool(struct kref *kref)
{
        struct c4iw_rdev *rdev;

        rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
        gen_pool_destroy(rdev->rqt_pool);
        complete(&rdev->rqt_compl);
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        pr_debug("addr 0x%x size %d\n", addr, size << 6);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
        kref_put(&rdev->rqt_kref, destroy_rqtpool);
}

int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
        unsigned rqt_start, rqt_chunk, rqt_top;

        rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
        if (!rdev->rqt_pool)
                return -ENOMEM;

        rqt_start = rdev->lldi.vr->rq.start;
        rqt_chunk = rdev->lldi.vr->rq.size;
        rqt_top = rqt_start + rqt_chunk;

        while (rqt_start < rqt_top) {
                rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
                if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
                        pr_debug("failed to add RQT chunk (%x/%x)\n",
                                 rqt_start, rqt_chunk);
                        if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
                                pr_warn("Failed to add all RQT chunks (%x/%x)\n",
                                        rqt_start, rqt_top - rqt_start);
                                return 0;
                        }
                        rqt_chunk >>= 1;
                } else {
                        pr_debug("added RQT chunk (%x/%x)\n",
                                 rqt_start, rqt_chunk);
                        rqt_start += rqt_chunk;
                }
        }

        return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
        kref_put(&rdev->rqt_kref, destroy_rqtpool);
}

/*
 * On-chip QP memory. Uses Linux generic allocator.
 */

#define MIN_OCQP_SHIFT 12		/* 4KB == min ocqp size */

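/*
 * Note: unlike the PBL and RQT pools, the OCQP pool is not reference
 * counted; c4iw_ocqp_pool_destroy() tears down the gen_pool directly.
 */
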
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);

        pr_debug("addr 0x%x size %d\n", (u32)addr, size);
        if (addr) {
                mutex_lock(&rdev->stats.lock);
                rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
                if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max)
                        rdev->stats.ocqp.max = rdev->stats.ocqp.cur;
                mutex_unlock(&rdev->stats.lock);
        }
        return (u32)addr;
}

void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        pr_debug("addr 0x%x size %d\n", addr, size);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
}

int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
{
        unsigned start, chunk, top;

        rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
        if (!rdev->ocqp_pool)
                return -ENOMEM;

        start = rdev->lldi.vr->ocq.start;
        chunk = rdev->lldi.vr->ocq.size;
        top = start + chunk;

        while (start < top) {
                chunk = min(top - start + 1, chunk);
                if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
                        pr_debug("failed to add OCQP chunk (%x/%x)\n",
                                 start, chunk);
                        if (chunk <= 1024 << MIN_OCQP_SHIFT) {
                                pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
                                        start, top - start);
                                return 0;
                        }
                        chunk >>= 1;
                } else {
                        pr_debug("added OCQP chunk (%x/%x)\n",
                                 start, chunk);
                        start += chunk;
                }
        }
        return 0;
}

void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
{
        gen_pool_destroy(rdev->ocqp_pool);
}