/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"

static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static unsigned int qib_lkey_table_size = 16;
module_param_named(lkey_table_size, qib_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_qib_max_pds = 0xFFFF;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_qib_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_qib_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_qib_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_qib_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_qib_max_qps = 16384;
module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_qib_max_sges = 0x60;
module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_qib_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int ib_qib_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_qib_max_srqs = 1024;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_qib_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};

/*
 * System image GUID.
 */
__be64 ib_qib_sys_image_guid;

/**
 * qib_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: if true, release the MR of each fully consumed SGE
 */
void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

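/*
 * Note on the SGE walk above (shared by qib_skip_sge(), qib_count_sge()
 * and qib_copy_from_sge() below): each iteration consumes up to
 * min(length, sge->length, sge->sge_length) bytes from the current SGE.
 * When a whole SGE is consumed (sge_length == 0) the walk advances to
 * the next entry in sg_list; when only the current MR segment is
 * consumed (length == 0 on a registered MR) it steps to the next
 * segment in the MR's map[m]->segs[n] array, wrapping n at RVT_SEGSZ
 * entries per map.
 */
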
177/**
178 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
179 * @ss: the SGE state
180 * @length: the number of bytes to skip
181 */
7c2e11fe 182void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
f931551b 183{
7c2e11fe 184 struct rvt_sge *sge = &ss->sge;
f931551b
RC
185
186 while (length) {
187 u32 len = sge->length;
188
189 if (len > length)
190 len = length;
191 if (len > sge->sge_length)
192 len = sge->sge_length;
193 BUG_ON(len == 0);
194 sge->vaddr += len;
195 sge->length -= len;
196 sge->sge_length -= len;
197 if (sge->sge_length == 0) {
198 if (release)
7c2e11fe 199 rvt_put_mr(sge->mr);
f931551b
RC
200 if (--ss->num_sge)
201 *sge = *ss->sg_list++;
202 } else if (sge->length == 0 && sge->mr->lkey) {
7c2e11fe 203 if (++sge->n >= RVT_SEGSZ) {
f931551b
RC
204 if (++sge->m >= sge->mr->mapsz)
205 break;
206 sge->n = 0;
207 }
208 sge->vaddr =
209 sge->mr->map[sge->m]->segs[sge->n].vaddr;
210 sge->length =
211 sge->mr->map[sge->m]->segs[sge->n].length;
212 }
213 length -= len;
214 }
215}
216
/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the rvt_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sg_list = ss->sg_list;
	struct rvt_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 ndesc = 1;	/* count the header */

	while (length) {
		u32 len = sge.length;

		if (len > length)
			len = length;
		if (len > sge.sge_length)
			len = sge.sge_length;
		BUG_ON(len == 0);
		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
		    (len != length && (len & (sizeof(u32) - 1)))) {
			ndesc = 0;
			break;
		}
		ndesc++;
		sge.vaddr += len;
		sge.length -= len;
		sge.sge_length -= len;
		if (sge.sge_length == 0) {
			if (--num_sge)
				sge = *sg_list++;
		} else if (sge.length == 0 && sge.mr->lkey) {
			if (++sge.n >= RVT_SEGSZ) {
				if (++sge.m >= sge.mr->mapsz)
					break;
				sge.n = 0;
			}
			sge.vaddr =
				sge.mr->map[sge.m]->segs[sge.n].vaddr;
			sge.length =
				sge.mr->map[sge.m]->segs[sge.n].length;
		}
		length -= len;
	}
	return ndesc;
}

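/*
 * A return of 0 from qib_count_sge() means some segment start or
 * intermediate segment length is not dword aligned; the send DMA
 * engine requires dword alignment, so the caller
 * (qib_verbs_send_dma()) falls back to allocating a bounce buffer and
 * copying the header plus payload into it instead of mapping the SGEs
 * directly.
 */
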
/*
 * Copy from the SGEs to the data buffer.
 */
static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(data, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_qp_rcv - process an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		       int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;

	spin_lock(&qp->r_lock);

	/* Check for valid receive state. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto unlock;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (ib_qib_disable_sma)
			break;
		/* FALLTHROUGH */
	case IB_QPT_UD:
		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}

unlock:
	spin_unlock(&qp->r_lock);
}

/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
	struct qib_pportdata *ppd = rcd->ppd;
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct qib_ib_header *hdr = rhdr;
	struct qib_devdata *dd = ppd->dd;
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	struct qib_other_headers *ohdr;
	struct rvt_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* 24 == LRH+BTH+CRC */
	if (unlikely(tlen < 24))
		goto drop;

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		lid &= ~((1 << ppd->lmc) - 1);
		if (unlikely(lid != ppd->lid))
			goto drop;
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == QIB_LRH_GRH) {
		u32 vtf;

		ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else
		goto drop;

	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
#ifdef CONFIG_DEBUG_FS
	rcd->opstats->stats[opcode].n_bytes += tlen;
	rcd->opstats->stats[opcode].n_packets++;
#endif

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
	if (qp_num == QIB_MULTICAST_QPN) {
		struct rvt_mcast *mcast;
		struct rvt_mcast_qp *p;

		if (lnh != QIB_LRH_GRH)
			goto drop;
		mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
		if (mcast == NULL)
			goto drop;
		this_cpu_inc(ibp->pmastats->n_multicast_rcv);
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify rvt_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		rcu_read_lock();
		qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
		if (!qp) {
			rcu_read_unlock();
			goto drop;
		}
		this_cpu_inc(ibp->pmastats->n_unicast_rcv);
		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
		rcu_read_unlock();
	}
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}

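/*
 * LMC note for the unicast LID check in qib_ib_rcv(): the low
 * ppd->lmc bits of the destination LID are masked off before the
 * comparison, so a port with base LID L and LMC m accepts any of the
 * 2^m LIDs L..L+2^m-1.  For example, with LMC = 2 and base LID 0x10,
 * destination LIDs 0x10 through 0x13 are all delivered to this port.
 */
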
/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct qib_ibdev *dev = (struct qib_ibdev *) data;
	struct list_head *list = &dev->memwait;
	struct rvt_qp *qp = NULL;
	struct qib_qp_priv *priv = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	if (!list_empty(list)) {
		priv = list_entry(list->next, struct qib_qp_priv, iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

	if (qp) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & RVT_S_WAIT_KMEM) {
			qp->s_flags &= ~RVT_S_WAIT_KMEM;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

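/*
 * Reference counting pattern used by mem_timer() and the other wait
 * lists below: the QP's refcount is bumped while pending_lock is still
 * held, so the QP cannot be freed between dropping that lock and
 * re-acquiring s_lock to restart the send engine.  The final
 * atomic_dec_and_test()/wake_up() pair lets a destroyer sleeping on
 * qp->wait proceed once the last reference is dropped.
 */
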
static void update_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= RVT_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif

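/*
 * Worked example for the helpers above (little-endian case):
 * clear_upper_bytes(v, n, off) keeps the low n bytes of v, positioned
 * off bytes up, and zeroes everything else.  With v = 0xaabbccdd,
 * n = 2, off = 1: v << 16 = 0xccdd0000, then >> 8 = 0x00ccdd00, i.e.
 * bytes dd and cc land in byte lanes 1 and 2 of the output word.
 * copy_io() uses this to splice partial words when SGE boundaries are
 * not dword aligned.
 */
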
static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		BUG_ON(len == 0);
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l, extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			qib_pio_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		qib_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		qib_flush_wc();
	} else
		__raw_writel(last, piobuf);
}

static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
						    struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	spin_lock(&dev->rdi.pending_lock);

	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock(&dev->rdi.pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK &&
		    list_empty(&priv->iowait)) {
			dev->n_txwait++;
			qp->s_flags |= RVT_S_WAIT_TX;
			list_add_tail(&priv->iowait, &dev->txwait);
		}
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock(&dev->rdi.pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = ERR_PTR(-EBUSY);
	}
	return tx;
}

static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
						struct rvt_qp *qp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	/* assume the list is non-empty */
	if (likely(!list_empty(&dev->txreq_free))) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		/* call slow path to get the extra lock */
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
		tx = __get_txreq(dev, qp);
	}
	return tx;
}

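/*
 * Design note on the txreq allocators above: get_txreq() is the fast
 * path and takes only pending_lock, which suffices when a free txreq
 * is available.  Only when the free list is empty does __get_txreq()
 * (deliberately noinline) re-acquire both s_lock and pending_lock so
 * the QP can be queued on dev->txwait with RVT_S_WAIT_TX set; it
 * rechecks the free list under the stronger locking in case a txreq
 * was returned in the meantime.
 */
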
void qib_put_txreq(struct qib_verbs_txreq *tx)
{
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	struct qib_qp_priv *priv;
	unsigned long flags;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (tx->mr) {
		rvt_put_mr(tx->mr);
		tx->mr = NULL;
	}
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
		dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
				 tx->txreq.addr, tx->hdr_dwords << 2,
				 DMA_TO_DEVICE);
		kfree(tx->align_buf);
	}

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);

	/* Put struct back on free list */
	list_add(&tx->txreq.list, &dev->txreq_free);

	if (!list_empty(&dev->txwait)) {
		/* Wake up first QP wanting a free struct */
		priv = list_entry(dev->txwait.next, struct qib_qp_priv,
				  iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & RVT_S_WAIT_TX) {
			qp->s_flags &= ~RVT_S_WAIT_TX;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	} else
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
	struct rvt_qp *qp, *nqp;
	struct qib_qp_priv *qpp, *nqpp;
	struct rvt_qp *qps[20];
	struct qib_ibdev *dev;
	unsigned i, n;

	n = 0;
	dev = &ppd->dd->verbs_dev;
	spin_lock(&dev->rdi.pending_lock);

	/* Search wait list for first QP wanting DMA descriptors. */
	list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
		qp = qpp->owner;
		nqp = nqpp->owner;
		if (qp->port_num != ppd->port)
			continue;
		if (n == ARRAY_SIZE(qps))
			break;
		if (qpp->s_tx->txreq.sg_count > avail)
			break;
		avail -= qpp->s_tx->txreq.sg_count;
		list_del_init(&qpp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}

	spin_unlock(&dev->rdi.pending_lock);

	for (i = 0; i < n; i++) {
		qp = qps[i];
		spin_lock(&qp->s_lock);
		if (qp->s_flags & RVT_S_WAIT_DMA_DESC) {
			qp->s_flags &= ~RVT_S_WAIT_DMA_DESC;
			qib_schedule_send(qp);
		}
		spin_unlock(&qp->s_lock);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

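/*
 * Note the two-phase structure above: eligible QPs are collected into
 * the fixed qps[] array (at most 20 per call) while pending_lock is
 * held, then their sends are scheduled after the lock is dropped.
 * This keeps the pending_lock hold time short and avoids taking each
 * QP's s_lock while pending_lock is held.
 */
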
/*
 * This is called with ppd->sdma_lock held.
 */
static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
	struct qib_verbs_txreq *tx =
		container_of(cookie, struct qib_verbs_txreq, txreq);
	struct rvt_qp *qp = tx->qp;
	struct qib_qp_priv *priv = qp->priv;

	spin_lock(&qp->s_lock);
	if (tx->wqe)
		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct qib_ib_header *hdr;

		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
			hdr = &tx->align_buf->hdr;
		else {
			struct qib_ibdev *dev = to_idev(qp->ibqp.device);

			hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
		}
		qib_rc_send_complete(qp, hdr);
	}
	if (atomic_dec_and_test(&priv->s_dma_busy)) {
		if (qp->state == IB_QPS_RESET)
			wake_up(&priv->wait_dma);
		else if (qp->s_flags & RVT_S_WAIT_DMA) {
			qp->s_flags &= ~RVT_S_WAIT_DMA;
			qib_schedule_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);

	qib_put_txreq(tx);
}

static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= RVT_S_WAIT_KMEM;
			list_add_tail(&priv->iowait, &dev->memwait);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}

static int qib_verbs_send_dma(struct rvt_qp *qp, struct qib_ib_header *hdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_txreq *tx;
	struct qib_pio_header *phdr;
	u32 control;
	u32 ndesc;
	int ret;

	tx = priv->s_tx;
	if (tx) {
		priv->s_tx = NULL;
		/* resend previously constructed packet */
		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
		goto bail;
	}

	tx = get_txreq(dev, qp);
	if (IS_ERR(tx))
		goto bail_tx;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(hdr->lrh[0]) >> 12);
	tx->qp = qp;
	tx->wqe = qp->s_wqe;
	tx->mr = qp->s_rdma_mr;
	if (qp->s_rdma_mr)
		qp->s_rdma_mr = NULL;
	tx->txreq.callback = sdma_complete;
	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
	else
		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
	if (plen + 1 > dd->piosize2kmax_dwords)
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;

	if (len) {
		/*
		 * Don't try to DMA if it takes more descriptors than
		 * the queue holds.
		 */
		ndesc = qib_count_sge(ss, len);
		if (ndesc >= ppd->sdma_descq_cnt)
			ndesc = 0;
	} else
		ndesc = 1;
	if (ndesc) {
		phdr = &dev->pio_hdrs[tx->hdr_inx];
		phdr->pbc[0] = cpu_to_le32(plen);
		phdr->pbc[1] = cpu_to_le32(control);
		memcpy(&phdr->hdr, hdr, hdrwords << 2);
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
		tx->txreq.sg_count = ndesc;
		tx->txreq.addr = dev->pio_hdrs_phys +
			tx->hdr_inx * sizeof(struct qib_pio_header);
		tx->hdr_dwords = hdrwords + 2; /* add PBC length */
		ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
		goto bail;
	}

	/* Allocate a buffer and copy the header and payload to it. */
	tx->hdr_dwords = plen + 1;
	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
	if (!phdr)
		goto err_tx;
	phdr->pbc[0] = cpu_to_le32(plen);
	phdr->pbc[1] = cpu_to_le32(control);
	memcpy(&phdr->hdr, hdr, hdrwords << 2);
	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);

	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
					tx->hdr_dwords << 2, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
		goto map_err;
	tx->align_buf = phdr;
	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
	tx->txreq.sg_count = 1;
	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
	goto unaligned;

map_err:
	kfree(phdr);
err_tx:
	qib_put_txreq(tx);
	ret = wait_kmem(dev, qp);
unaligned:
	ibp->rvp.n_unaligned++;
bail:
	return ret;
bail_tx:
	ret = PTR_ERR(tx);
	goto bail;
}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, qib_ib_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			dev->n_piowait++;
			qp->s_flags |= RVT_S_WAIT_PIO;
			list_add_tail(&priv->iowait, &dev->piowait);
			dd = dd_from_dev(dev);
			dd->f_wantpiobuf_intr(dd, 1);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

static int qib_verbs_send_pio(struct rvt_qp *qp, struct qib_ib_header *ibhdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
	u32 *hdr = (u32 *) ibhdr;
	u32 __iomem *piobuf_orig;
	u32 __iomem *piobuf;
	u64 pbc;
	unsigned long flags;
	unsigned flush_wc;
	u32 control;
	u32 pbufn;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(ibhdr->lrh[0]) >> 12);
	pbc = ((u64) control << 32) | plen;
	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (unlikely(piobuf == NULL))
		return no_bufs_available(qp);

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);
	piobuf_orig = piobuf;
	piobuf += 2;

	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		if (flush_wc) {
			qib_flush_wc();
			qib_pio_copy(piobuf, hdr, hdrwords - 1);
			qib_flush_wc();
			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, hdr, hdrwords);
		goto done;
	}

	if (flush_wc)
		qib_flush_wc();
	qib_pio_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		if (flush_wc) {
			qib_pio_copy(piobuf, addr, dwords - 1);
			/* must flush early everything before trigger word */
			qib_flush_wc();
			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
			/* be sure trigger word is written */
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, addr, dwords);
		goto done;
	}
	copy_io(piobuf, ss, len, flush_wc);
done:
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
	}
	qib_sendbuf_done(dd, pbufn);
	if (qp->s_rdma_mr) {
		rvt_put_mr(qp->s_rdma_mr);
		qp->s_rdma_mr = NULL;
	}
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_rc_send_complete(qp, ibhdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
	return 0;
}

/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
 */
int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct rvt_sge_state *ss, u32 len)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	u32 plen;
	int ret;
	u32 dwords = (len + 3) >> 2;

	/*
	 * Calculate the send buffer trigger address.
	 * The +1 counts for the pbc control dword following the pbc length.
	 */
	plen = hdrwords + dwords + 1;

	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    !(dd->flags & QIB_HAS_SEND_DMA))
		ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
					 plen, dwords);
	else
		ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
					 plen, dwords);

	return ret;
}

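/*
 * plen arithmetic in qib_verbs_send(): dwords rounds the payload up to
 * whole 32-bit words ((len + 3) >> 2) and the +1 accounts for the PBC
 * control dword, so e.g. a 14-dword header with a 100-byte payload
 * gives plen = 14 + 25 + 1 = 40 dwords of send buffer.
 */
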
int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait)
{
	int ret;
	struct qib_devdata *dd = ppd->dd;

	if (!(dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_get_counters - get various chip counters
 * @ppd: the qlogic_ib port data
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs)
{
	int ret;

	if (!(ppd->dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
	cntrs->link_error_recovery_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection.  We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
	cntrs->port_rcv_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
	cntrs->port_rcv_remphys_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
	cntrs->port_xmit_discards =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
	cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
						    QIBPORTCNTR_WORDSEND);
	cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
						   QIBPORTCNTR_WORDRCV);
	cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
						       QIBPORTCNTR_PKTSEND);
	cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
						      QIBPORTCNTR_PKTRCV);
	cntrs->local_link_integrity_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
	cntrs->excessive_buffer_overrun_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
	cntrs->vl15_dropped =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_ib_piobufavail - callback when a PIO buffer is available
 * @dd: the device pointer
 *
 * This is called from qib_intr() at interrupt level when a PIO buffer is
 * available after qib_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
void qib_ib_piobufavail(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct list_head *list;
	struct rvt_qp *qps[5];
	struct rvt_qp *qp;
	unsigned long flags;
	unsigned i, n;
	struct qib_qp_priv *priv;

	list = &dev->piowait;
	n = 0;

	/*
	 * Note: checking that the piowait list is empty and clearing
	 * the buffer available interrupt needs to be atomic or we
	 * could end up with QPs on the wait list with the interrupt
	 * disabled.
	 */
	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	while (!list_empty(list)) {
		if (n == ARRAY_SIZE(qps))
			goto full;
		priv = list_entry(list->next, struct qib_qp_priv, iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}
	dd->f_wantpiobuf_intr(dd, 0);
full:
	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

	for (i = 0; i < n; i++) {
		qp = qps[i];

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & RVT_S_WAIT_PIO) {
			qp->s_flags &= ~RVT_S_WAIT_PIO;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify qib_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

static int qib_query_port(struct rvt_dev_info *rdi, u8 port_num,
			  struct ib_port_attr *props)
{
	struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = dd_from_dev(ibdev);
	struct qib_pportdata *ppd = &dd->pport[port_num - 1];
	enum ib_mtu mtu;
	u16 lid = ppd->lid;

	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = ppd->lmc;
	props->state = dd->f_iblink_state(ppd->lastibcstat);
	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
	props->active_width = ppd->link_width_active;
	/* See rate_show() */
	props->active_speed = ppd->link_speed_active;
	props->max_vl_num = qib_num_vls(ppd->vls_supported);

	props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	switch (ppd->ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;

	return 0;
}

static int qib_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	struct qib_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc, 64);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_qib_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}

static int qib_shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
{
	struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = dd_from_dev(ibdev);
	struct qib_pportdata *ppd = &dd->pport[port_num - 1];

	qib_set_linkstate(ppd, QIB_IB_LINKDOWN);

	return 0;
}

static int qib_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			   int guid_index, __be64 *guid)
{
	struct qib_ibport *ibp = container_of(rvp, struct qib_ibport, rvp);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

	if (guid_index == 0)
		*guid = ppd->guid;
	else if (guid_index < QIB_GUIDS_PER_PORT)
		*guid = ibp->guids[guid_index - 1];
	else
		return -EINVAL;

	return 0;
}

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	if (ah_attr->sl > 15)
		return -EINVAL;

	return 0;
}

static void qib_notify_new_ah(struct ib_device *ibdev,
			      struct ib_ah_attr *ah_attr,
			      struct rvt_ah *ah)
{
	struct qib_ibport *ibp;
	struct qib_pportdata *ppd;

	/*
	 * Do not trust reading anything from rvt_ah at this point as it is
	 * not done being set up. We can however modify things which we need
	 * to set.
	 */

	ibp = to_iport(ibdev, ah_attr->port_num);
	ppd = ppd_from_ibp(ibp);
	ah->vl = ibp->sl_to_vl[ah->attr.sl];
	ah->log_pmtu = ilog2(ppd->ibmtu);
}

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
{
	struct ib_ah_attr attr;
	struct ib_ah *ah = ERR_PTR(-EINVAL);
	struct rvt_qp *qp0;

	memset(&attr, 0, sizeof(attr));
	attr.dlid = dlid;
	attr.port_num = ppd_from_ibp(ibp)->port;
	rcu_read_lock();
	qp0 = rcu_dereference(ibp->rvp.qp[0]);
	if (qp0)
		ah = ib_create_ah(qp0->ibqp.pd, &attr);
	rcu_read_unlock();
	return ah;
}

/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
unsigned qib_get_npkeys(struct qib_devdata *dd)
{
	return ARRAY_SIZE(dd->rcd[0]->pkeys);
}

/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is set up if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned ret;

	/* dd->rcd null if mini_init or some init failures */
	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
		ret = 0;
	else
		ret = dd->rcd[ctxt]->pkeys[index];

	return ret;
}

static void init_ibport(struct qib_pportdata *ppd)
{
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = &ppd->ibport_data;

	spin_lock_init(&ibp->rvp.lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
	ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
		ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	/* Snapshot current HW counters to "clear" them. */
	qib_get_counters(ppd, &cntrs);
	ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
	ibp->z_link_error_recovery_counter =
		cntrs.link_error_recovery_counter;
	ibp->z_link_downed_counter = cntrs.link_downed_counter;
	ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
	ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
	ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
	ibp->z_port_xmit_data = cntrs.port_xmit_data;
	ibp->z_port_rcv_data = cntrs.port_rcv_data;
	ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
	ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	ibp->z_local_link_integrity_errors =
		cntrs.local_link_integrity_errors;
	ibp->z_excessive_buffer_overrun_errors =
		cntrs.excessive_buffer_overrun_errors;
	ibp->z_vl15_dropped = cntrs.vl15_dropped;
	RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}

/**
 * qib_fill_device_attr - Fill in rvt dev info device attributes.
 * @dd: the device data structure
 */
static void qib_fill_device_attr(struct qib_devdata *dd)
{
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

	memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));

	rdi->dparms.props.max_pd = ib_qib_max_pds;
	rdi->dparms.props.max_ah = ib_qib_max_ahs;
	rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	rdi->dparms.props.page_size_cap = PAGE_SIZE;
	rdi->dparms.props.vendor_id =
		QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
	rdi->dparms.props.vendor_part_id = dd->deviceid;
	rdi->dparms.props.hw_ver = dd->minrev;
	rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid;
	rdi->dparms.props.max_mr_size = ~0ULL;
	rdi->dparms.props.max_qp = ib_qib_max_qps;
	rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
	rdi->dparms.props.max_sge = ib_qib_max_sges;
	rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
	rdi->dparms.props.max_cq = ib_qib_max_cqs;
	rdi->dparms.props.max_cqe = ib_qib_max_cqes;
	rdi->dparms.props.max_ah = ib_qib_max_ahs;
	rdi->dparms.props.max_mr = rdi->lkey_table.max;
	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
	rdi->dparms.props.max_map_per_fmr = 32767;
	rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
	rdi->dparms.props.max_qp_init_rd_atom = 255;
	rdi->dparms.props.max_srq = ib_qib_max_srqs;
	rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs;
	rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges;
	rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
	rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
	rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps;
	rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
	rdi->dparms.props.max_total_mcast_qp_attach =
		rdi->dparms.props.max_mcast_qp_attach *
		rdi->dparms.props.max_mcast_grp;
}

/**
 * qib_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return 0 on success, or an errno on failure.
 */
int qib_register_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->rdi.ibdev;
	struct qib_pportdata *ppd = dd->pport;
	unsigned i, ctxt;
	int ret;

	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */
	setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);

	qpt_mask = dd->qpn_mask;

	INIT_LIST_HEAD(&dev->piowait);
	INIT_LIST_HEAD(&dev->dmawait);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);
	INIT_LIST_HEAD(&dev->txreq_free);

	if (ppd->sdma_descq_cnt) {
		dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
						   ppd->sdma_descq_cnt *
						   sizeof(struct qib_pio_header),
						   &dev->pio_hdrs_phys,
						   GFP_KERNEL);
		if (!dev->pio_hdrs) {
			ret = -ENOMEM;
			goto err_hdrs;
		}
	}

	for (i = 0; i < ppd->sdma_descq_cnt; i++) {
		struct qib_verbs_txreq *tx;

		tx = kzalloc(sizeof(*tx), GFP_KERNEL);
		if (!tx) {
			ret = -ENOMEM;
			goto err_tx;
		}
		tx->hdr_inx = i;
		list_add(&tx->txreq.list, &dev->txreq_free);
	}

	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_qib_sys_image_guid)
		ib_qib_sys_image_guid = ppd->guid;

	strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = ppd->guid;
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->dma_device = &dd->pcidev->dev;
	ibdev->modify_device = qib_modify_device;
	ibdev->process_mad = qib_process_mad;

	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
		 "Intel Infiniband HCA %s", init_utsname()->nodename);

	/*
	 * Fill in rvt info object.
	 */
	dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
	dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
	dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
	dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
	dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
	dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
	dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
	dd->verbs_dev.rdi.driver_f.qp_priv_free = qib_qp_priv_free;
	dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps;
	dd->verbs_dev.rdi.driver_f.notify_qp_reset = qib_notify_qp_reset;
	dd->verbs_dev.rdi.driver_f.do_send = qib_do_send;
	dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send;
	dd->verbs_dev.rdi.driver_f.quiesce_qp = qib_quiesce_qp;
	dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue;
	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
	dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp;
	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu;
	dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp;
	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr;
	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _qib_schedule_send;
	dd->verbs_dev.rdi.driver_f.query_port_state = qib_query_port;
	dd->verbs_dev.rdi.driver_f.shut_down_port = qib_shut_down_port;
	dd->verbs_dev.rdi.driver_f.cap_mask_chg = qib_cap_mask_chg;

	dd->verbs_dev.rdi.dparms.max_rdma_atomic = QIB_MAX_RDMA_ATOMIC;
	dd->verbs_dev.rdi.driver_f.get_guid_be = qib_get_guid_be;
	dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
	dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size;
	dd->verbs_dev.rdi.dparms.qpn_start = 1;
	dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP;
	dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */
	dd->verbs_dev.rdi.dparms.qpn_inc = 1;
	dd->verbs_dev.rdi.dparms.qos_shift = 1;
	dd->verbs_dev.rdi.dparms.psn_mask = QIB_PSN_MASK;
	dd->verbs_dev.rdi.dparms.psn_shift = QIB_PSN_SHIFT;
	dd->verbs_dev.rdi.dparms.psn_modify_mask = QIB_PSN_MASK;
	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
	dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
	dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;

	snprintf(dd->verbs_dev.rdi.dparms.cq_name,
		 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
		 "qib_cq%d", dd->unit);

	qib_fill_device_attr(dd);

	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ctxt = ppd->hw_pidx;
		rvt_init_port(&dd->verbs_dev.rdi,
			      &ppd->ibport_data.rvp,
			      i,
			      dd->rcd[ctxt]->pkeys);
	}

	ret = rvt_register_device(&dd->verbs_dev.rdi);
	if (ret)
		goto err_tx;

	ret = qib_verbs_register_sysfs(dd);
	if (ret)
		goto err_class;

	return ret;

err_class:
	rvt_unregister_device(&dd->verbs_dev.rdi);
err_tx:
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (ppd->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt *
				  sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
	qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
	return ret;
}

void qib_unregister_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;

	qib_verbs_unregister_sysfs(dd);

	rvt_unregister_device(&dd->verbs_dev.rdi);

	if (!list_empty(&dev->piowait))
		qib_dev_err(dd, "piowait list not empty!\n");
	if (!list_empty(&dev->dmawait))
		qib_dev_err(dd, "dmawait list not empty!\n");
	if (!list_empty(&dev->txwait))
		qib_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		qib_dev_err(dd, "memwait list not empty!\n");

	del_timer_sync(&dev->mem_timer);
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (dd->pport->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  dd->pport->sdma_descq_cnt *
				  sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
}

/**
 * _qib_schedule_send - schedule progress
 * @qp: the qp
 *
 * This schedules progress w/o regard to the s_flags.
 *
 * It is only used in post send, which doesn't hold
 * the s_lock.
 */
void _qib_schedule_send(struct rvt_qp *qp)
{
	struct qib_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_qp_priv *priv = qp->priv;

	queue_work(ppd->qib_wq, &priv->s_work);
}

/**
 * qib_schedule_send - schedule progress
 * @qp: the qp
 *
 * This schedules qp progress. The s_lock
 * should be held.
 */
void qib_schedule_send(struct rvt_qp *qp)
{
	if (qib_send_ok(qp))
		_qib_schedule_send(qp);
}