/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
 */

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be32 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};

static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use an offset of 4
 * because in the Tavor case, posting a WQE may overwrite the first
 * four bytes of the previous WQE.  The offset avoids corrupting our
 * free list if the WQE has already completed and been put on the free
 * list when we post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + 4);
}

static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize;

	memset(context, 0, sizeof *context);

	logsize = long_log2(srq->max) + srq->wqe_shift;
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentinel value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		wqe = get_wqe(srq, i);

		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}

int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	u8 status;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr > 16 << 20 || attr->max_sge > 64)
		return -EINVAL;

	srq->max     = attr->max_wr;
	srq->max_gs  = attr->max_sge;
	srq->counter = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);

	ds = min(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));
	srq->wqe_shift = long_log2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	atomic_set(&srq->refcount, 1);
	init_waitqueue_head(&srq->wait);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	err = mthca_array_set(&dev->srq_table.srq,
			      srq->srqn & (dev->limits.num_srqs - 1),
			      srq);
	if (err) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	return 0;

err_out_free_srq:
	/*
	 * Don't reuse err for the cleanup command's return value here;
	 * we want to report the original failure to the caller.
	 */
	if (mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status))
		mthca_warn(dev, "HW2SW_SRQ failed during cleanup\n");
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}

void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	spin_unlock_irq(&dev->srq_table.lock);

	atomic_dec(&srq->refcount);
	wait_event(srq->wait, !atomic_read(&srq->refcount));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret;
	u8 status;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
		if (ret)
			return ret;
		if (status)
			return -EINVAL;
	}

	return 0;
}

void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	if (atomic_dec_and_test(&srq->refcount))
		wake_up(&srq->wait);
}

/*
 * Return a completed receive WQE to the SRQ free list.  This function
 * must be called with IRQs disabled, since it takes srq->lock with a
 * plain spin_lock().
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	if (likely(srq->first_free >= 0))
		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
	else
		srq->first_free = ind;

	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}

int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);
		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << srq->wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
		doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);

		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe      = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		((struct mthca_next_seg *) wqe)->nda_op =
			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int __devinit mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}