/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
 */

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

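/*
 * Hardware SRQ context layouts.  One of these is built in a mailbox
 * buffer and handed to the firmware by the SW2HW_SRQ command: the
 * first for Tavor-family HCAs, the second for mem-free (Arbel-mode)
 * HCAs.
 */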
struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be32 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};

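/*
 * Map a WQE index to its address.  The SRQ buffer is either a single
 * contiguous ("direct") allocation or a list of pages; in the paged
 * case the byte offset (n << wqe_shift) is split into a page index
 * and an offset within that page.
 */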
static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use an offset of 4
 * because in the Tavor case, posting a WQE may overwrite the first
 * four bytes of the previous WQE.  The offset avoids corrupting our
 * free list if the WQE has already completed and been put on the free
 * list when we post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + 4);
}

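/*
 * The context initializers below pick the doorbell page (UAR) based
 * on who owns the SRQ: SRQs created from userspace use the process's
 * mapped UAR, while kernel SRQs use the driver's own UAR.
 */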
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd = cpu_to_be32(pd->pd_num);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize;

	memset(context, 0, sizeof *context);

	logsize = long_log2(srq->max) + srq->wqe_shift;
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

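/*
 * Allocate and initialize the kernel-side SRQ buffer.  For SRQs
 * created from userspace this is a no-op: the consumer allocates the
 * buffer itself and the kernel only sets up the hardware context.
 */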
static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		wqe = get_wqe(srq, i);

		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}

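/*
 * Create an SRQ: allocate an SRQ number, an ICM table entry and a
 * doorbell record (on mem-free HCAs), the WQE buffer, and a mailbox
 * in which the SRQ context is built; then hand the SRQ over to the
 * hardware with SW2HW_SRQ and publish it in the srq_table array for
 * event dispatch.
 */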
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	u8 status;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr > 16 << 20 || attr->max_sge > 64)
		return -EINVAL;

	srq->max = attr->max_wr;
	srq->max_gs = attr->max_sge;
	srq->counter = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);

	ds = min(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));
	srq->wqe_shift = long_log2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	atomic_set(&srq->refcount, 1);
	init_waitqueue_head(&srq->wait);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free = srq->max - 1;

	return 0;

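/*
 * Error unwinding: the labels below undo the steps above in reverse
 * order, with entry points further down unwinding progressively less
 * state.
 */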
err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
	/* Report the mthca_array_set() failure, not the cleanup result */
	err = -ENOMEM;

err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}

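/*
 * Destroy an SRQ: take it back from the hardware with HW2SW_SRQ,
 * remove it from the srq_table array, wait for any other users (for
 * example event handlers) to drop their references, then free the
 * buffer, doorbell record and SRQ number.
 */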
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	spin_unlock_irq(&dev->srq_table.lock);

	atomic_dec(&srq->refcount);
	wait_event(srq->wait, !atomic_read(&srq->refcount));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}

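/*
 * Modify an SRQ.  Only arming the SRQ limit is supported.  A consumer
 * reaches this through the ib_modify_srq() verb; a minimal sketch
 * (error handling omitted, and the limit value of 16 is just an
 * example):
 *
 *	struct ib_srq_attr attr = { .srq_limit = 16 };
 *
 *	ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
 *
 * Once fewer than srq_limit WQEs remain posted, the HCA raises an
 * IB_EVENT_SRQ_LIMIT_REACHED asynchronous event and disarms the
 * limit.
 */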
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret;
	u8 status;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
		if (ret)
			return ret;
		if (status)
			return -EINVAL;
	}

	return 0;
}

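/*
 * Dispatch an asynchronous event to an SRQ's event handler.  The
 * reference count is bumped under the table lock so that the SRQ
 * cannot be freed while the handler runs.
 */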
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device = &dev->ib_dev;
	event.event = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	if (atomic_dec_and_test(&srq->refcount))
		wake_up(&srq->wait);
}

/*
 * Return a completed WQE to the tail of the SRQ free list.  This
 * function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	if (likely(srq->first_free >= 0))
		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
	else
		srq->first_free = ind;

	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}

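/*
 * Post a list of receive WRs to a Tavor SRQ.  Each WQE is taken from
 * the head of the free list, filled with the caller's scatter list,
 * linked to its predecessor by updating the previous WQE's
 * nda_op/ee_nds words, and finally a doorbell is rung to tell the
 * HCA how many descriptors were posted.
 *
 * Consumers reach this through the ib_post_srq_recv() verb; a
 * minimal single-segment sketch, where dma_addr, len and lkey are
 * placeholders for a buffer covered by a registered MR (error
 * handling omitted):
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *	struct ib_recv_wr wr = { .wr_id = 1, .sg_list = &sge, .num_sge = 1 };
 *	struct ib_recv_wr *bad_wr;
 *
 *	ib_post_srq_recv(srq, &wr, &bad_wr);
 */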
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		if (next_ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << srq->wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind] = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
		doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);

		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

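/*
 * Post receive WRs to a mem-free (Arbel) SRQ.  Instead of ringing a
 * doorbell register, the doorbell record in memory (*srq->db) is
 * updated with the new WQE counter once the descriptors have been
 * written out.
 */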
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		if (next_ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->nda_op =
			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		srq->wrid[ind] = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

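/*
 * Set up the SRQ number allocator and the srqn -> mthca_srq lookup
 * array at driver initialization time; both are torn down again by
 * mthca_cleanup_srq_table().
 */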
int __devinit mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}