IB/ipath: Wait for PIO available interrupt

drivers/infiniband/hw/ipath/ipath_ruc.c
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ipath_kernel.h"

/*
 * Convert the AETH RNR timeout code into the number of milliseconds.
 */
const u32 ib_ipath_rnr_table[32] = {
	656,			/* 0 */
	1,			/* 1 */
	1,			/* 2 */
	1,			/* 3 */
	1,			/* 4 */
	1,			/* 5 */
	1,			/* 6 */
	1,			/* 7 */
	1,			/* 8 */
	1,			/* 9 */
	1,			/* A */
	1,			/* B */
	1,			/* C */
	1,			/* D */
	2,			/* E */
	2,			/* F */
	3,			/* 10 */
	4,			/* 11 */
	6,			/* 12 */
	8,			/* 13 */
	11,			/* 14 */
	16,			/* 15 */
	21,			/* 16 */
	31,			/* 17 */
	41,			/* 18 */
	62,			/* 19 */
	82,			/* 1A */
	123,			/* 1B */
	164,			/* 1C */
	246,			/* 1D */
	328,			/* 1E */
	492			/* 1F */
};
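
/*
 * Note: these values appear to follow the IBTA AETH RNR NAK timer field
 * encodings (code 0 is 655.36 ms, code 0x1F is 491.52 ms), rounded up
 * to whole milliseconds.
 */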

/**
 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
 * @qp: the QP
 *
 * XXX Use a simple list for now.  We might need a priority
 * queue if we have lots of QPs waiting for RNR timeouts
 * but that should be rare.
 */
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&dev->rnrwait))
		list_add(&qp->timerwait, &dev->rnrwait);
	else {
		struct list_head *l = &dev->rnrwait;
		struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
						  timerwait);

		while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
			qp->s_rnr_timeout -= nqp->s_rnr_timeout;
			l = l->next;
			if (l->next == &dev->rnrwait)
				break;
			nqp = list_entry(l->next, struct ipath_qp,
					 timerwait);
		}
		list_add(&qp->timerwait, l);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);
}
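
/*
 * The RNR wait list is delta encoded: each queued QP's s_rnr_timeout
 * holds the delay remaining after all of the entries ahead of it, so a
 * periodic timer need only decrement the head entry.  For example,
 * absolute timeouts of 2, 5, and 9 ms would be stored as 2, 3, and 4.
 */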

static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe)
{
	int user = to_ipd(qp->ibqp.pd)->user;
	int i, j, ret;
	struct ib_wc wc;

	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if ((user && wqe->sg_list[i].lkey == 0) ||
		    !ipath_lkey_ok(qp, &qp->r_sg_list[j], &wqe->sg_list[i],
				   IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	qp->r_sge.sge = qp->r_sg_list[0];
	qp->r_sge.sg_list = qp->r_sg_list + 1;
	qp->r_sge.num_sge = j;
	ret = 1;
	goto bail;

bad_lkey:
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	/* Signal solicited completion event. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
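
/*
 * Note the bad_lkey handling above: a receive WQE that fails LKEY
 * validation is consumed and completed with IB_WC_LOC_PROT_ERR rather
 * than moving the QP to an error state, so ipath_get_rwqe() below can
 * simply try the next WQE.
 */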

/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct ipath_rq *rq;
	struct ipath_rwq *wq;
	struct ipath_srq *srq;
	struct ipath_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	do {
		if (unlikely(tail == wq->head)) {
			spin_unlock_irqrestore(&rq->lock, flags);
			ret = 0;
			goto bail;
		}
		wqe = get_rwqe_ptr(rq, tail);
		if (++tail >= rq->size)
			tail = 0;
	} while (!wr_id_only && !init_sge(qp, wqe));
	qp->r_wr_id = wqe->wr_id;
	wq->tail = tail;

	ret = 1;
	qp->r_wrid_valid = 1;
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
	spin_unlock_irqrestore(&rq->lock, flags);

bail:
	return ret;
}
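
/*
 * Note that wq->head and wq->tail are validated before use: the work
 * queue indices are writable from user space, so a corrupt index must
 * not walk the kernel off the end of the queue.  When an SRQ limit is
 * armed, the count of remaining WQEs is computed under rq->lock and the
 * IB_EVENT_SRQ_LIMIT_REACHED handler is invoked only after dropping it.
 */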

/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the loopback QP
 *
 * This is called from ipath_do_ruc_send() to forward a WQE addressed
 * to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ipath_swqe *wqe;
	struct ipath_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;

	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
	if (!qp) {
		dev->n_pkt_drops++;
		return;
	}

again:
	spin_lock_irqsave(&sqp->s_lock, flags);

	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK) ||
	    sqp->s_rnr_timeout) {
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/* Get the next send request. */
	if (sqp->s_last == sqp->s_head) {
		/* Send work queue is empty. */
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 */
	wqe = get_swqe_ptr(sqp, sqp->s_last);
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	wc.wc_flags = 0;
	wc.imm_data = 0;

	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			/* Handle RNR NAK */
			if (qp->ibqp.qp_type == IB_QPT_UC)
				goto send_comp;
			if (sqp->s_rnr_retry == 0) {
				wc.status = IB_WC_RNR_RETRY_EXC_ERR;
				goto err;
			}
			if (sqp->s_rnr_retry_cnt < 7)
				sqp->s_rnr_retry--;
			dev->n_rnr_naks++;
			sqp->s_rnr_timeout =
				ib_ipath_rnr_table[qp->r_min_rnr_timer];
			ipath_insert_rnr_queue(sqp);
			goto done;
		}
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (wqe->length == 0)
			break;
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_WRITE))) {
		acc_err:
			wc.status = IB_WC_REM_ACCESS_ERR;
		err:
			wc.wr_id = wqe->wr.wr_id;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = 0;
			wc.qp = &sqp->ibqp;
			wc.src_qp = sqp->remote_qpn;
			wc.pkey_index = 0;
			wc.slid = sqp->remote_ah_attr.dlid;
			wc.sl = sqp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			spin_lock_irqsave(&sqp->s_lock, flags);
			ipath_sqerror_qp(sqp, &wc);
			spin_unlock_irqrestore(&sqp->s_lock, flags);
			goto done;
		}
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
					    wqe->wr.wr.atomic.remote_addr,
					    wqe->wr.wr.atomic.rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->wr.wr.atomic.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->wr.wr.atomic.swap);
		goto send_comp;

	default:
		goto done;
	}

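	/*
	 * Copy the payload from the sender's SGE list to the receiver's.
	 * sge->n and sge->m walk the memory region's two-level segment
	 * map (mr->map[m]->segs[n], IPATH_SEGSZ segments per map entry)
	 * when a single SGE spans multiple MR segments.
	 */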
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		BUG_ON(len == 0);
		ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
	    wqe->wr.opcode == IB_WR_RDMA_READ)
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.vendor_err = 0;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc.pkey_index = 0;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.dlid_path_bits = 0;
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;

	if (!(sqp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
		wc.wr_id = wqe->wr.wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc.vendor_err = 0;
		wc.byte_len = wqe->length;
		wc.qp = &sqp->ibqp;
		wc.src_qp = 0;
		wc.pkey_index = 0;
		wc.slid = 0;
		wc.sl = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		ipath_cq_enter(to_icq(sqp->ibqp.send_cq), &wc, 0);
	}

	/* Update s_last now that we are finished with the SWQE */
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (++sqp->s_last >= sqp->s_size)
		sqp->s_last = 0;
	spin_unlock_irqrestore(&sqp->s_lock, flags);
	goto again;

done:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

static int want_buffer(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}
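
/*
 * want_buffer() arms the "PIO buffer available" interrupt by setting
 * IPATH_S_PIOINTBUFAVAIL in the cached send control value and writing
 * it to the chip's kr_sendctrl register, so the hardware will raise an
 * interrupt once a send buffer frees up.
 */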

/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 */
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&qp->piowait))
		list_add_tail(&qp->piowait, &dev->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);
	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, ipath_ib_piobufavail()
	 * could be called.  If we are still in the tasklet function,
	 * tasklet_hi_schedule() will not call us until the next time
	 * tasklet_hi_schedule() is called.
	 * We leave the busy flag set so that another post send doesn't
	 * try to put the same QP on the piowait list again.
	 */
	want_buffer(dev->dd);
	dev->n_piowait++;
}
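
/*
 * The resume path: when a buffer frees up, the PIO available interrupt
 * fires and ipath_ib_piobufavail() (in ipath_verbs.c) is expected to
 * drain dev->piowait, rescheduling each waiting QP's send tasklet so
 * ipath_do_ruc_send() runs again.
 */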

/**
 * ipath_post_ruc_send - post RC and UC sends
 * @qp: the QP to post on
 * @wr: the work request to send
 */
int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	u32 next;
	int i, j;
	int acc;
	int ret;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) {
			ret = -EINVAL;
			goto bail;
		}
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		ret = -EINVAL;
		goto bail;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		ret = -EINVAL;
		goto bail;
	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
		ret = -EINVAL;
		goto bail;
	}
	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge) {
		ret = -ENOMEM;
		goto bail;
	}
	spin_lock_irqsave(&qp->s_lock, flags);
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
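	/*
	 * The send queue is a circular buffer with one slot always left
	 * empty: s_head == s_last means the queue is empty, so advancing
	 * s_head onto s_last would mean the queue is full.
	 */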
	if (next == qp->s_last) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EINVAL;
		goto bail;
	}

	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->ssn = qp->s_ssn++;
	wqe->sg_list[0].mr = NULL;
	wqe->sg_list[0].vaddr = NULL;
	wqe->sg_list[0].length = 0;
	wqe->sg_list[0].sge_length = 0;
	wqe->length = 0;
	acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0;
	for (i = 0, j = 0; i < wr->num_sge; i++) {
		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		if (wr->sg_list[i].length == 0)
			continue;
		if (!ipath_lkey_ok(qp, &wqe->sg_list[j], &wr->sg_list[i],
				   acc)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		wqe->length += wr->sg_list[i].length;
		j++;
	}
	wqe->wr.num_sge = j;
	qp->s_head = next;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	ipath_do_ruc_send((unsigned long) qp);

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_make_grh - construct a GRH header
 * @dev: a pointer to the ipath device
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
		   struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((6 << 28) |
			    (grh->traffic_class << 20) |
			    grh->flow_label);
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = 0x1B;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = dev->gid_prefix;
	hdr->sgid.global.interface_id = dev->dd->ipath_guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
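
/*
 * The return value is the GRH size in 32-bit words; the caller adds it
 * to qp->s_hdrwords (see ipath_do_ruc_send() below) so the LRH packet
 * length accounts for the extra header.
 */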

/**
 * ipath_do_ruc_send - perform a send on an RC or UC QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, after we drop the QP s_lock, two threads could send
 * packets out of order.
 */
void ipath_do_ruc_send(unsigned long data)
{
	struct ipath_qp *qp = (struct ipath_qp *)data;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u32 bth2;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	struct ipath_other_headers *ohdr;

	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_busy))
		goto bail;

	if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) {
		ipath_ruc_loopback(qp);
		goto clear;
	}

	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

again:
	/* Check for a constructed packet to be sent. */
	if (qp->s_hdrwords != 0) {
		/*
		 * If no PIO bufs are available, return.  An interrupt will
		 * call ipath_ib_piobufavail() when one is available.
		 */
		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
				     (u32 *) &qp->s_hdr, qp->s_cur_size,
				     qp->s_cur_sge)) {
			ipath_no_bufs_available(qp, dev);
			goto bail;
		}
		dev->n_unicast_xmit++;
		/* Record that we sent the packet and s_hdr is empty. */
		qp->s_hdrwords = 0;
	}
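	/*
	 * Note: if ipath_verbs_send() fails above, the constructed header
	 * stays staged in qp->s_hdr (s_hdrwords != 0), so when the PIO
	 * available interrupt reschedules this tasklet, the send resumes
	 * here without rebuilding the packet.
	 */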

	/*
	 * The lock is needed to synchronize between setting
	 * qp->s_ack_state, resend timer, and post_send().
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
	      ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
	      ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
		/*
		 * Clear the busy bit before unlocking to avoid races with
		 * adding new work queue items and then failing to process
		 * them.
		 */
		clear_bit(IPATH_S_BUSY, &qp->s_busy);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		goto bail;
	}

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Construct the header. */
	extra_bytes = (4 - qp->s_cur_size) & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = IPATH_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
						 &qp->remote_ah_attr.grh,
						 qp->s_hdrwords, nwords);
		lrh0 = IPATH_LRH_GRH;
	}
	lrh0 |= qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
				       SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
	bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);

	/* Check for more work to do. */
	goto again;

clear:
	clear_bit(IPATH_S_BUSY, &qp->s_busy);
bail:
	return;
}