/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";
DEFINE_SPINLOCK(hfi1_devs_lock);
LIST_HEAD(hfi1_dev_list);
DEFINE_MUTEX(hfi1_mutex);       /* general driver use */
unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(HFI1_DEFAULT_MAX_MTU));

unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");
unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *, const struct kernel_param *);
static int hfi1_caps_get(char *, const struct kernel_param *);
static const struct kernel_param_ops cap_ops = {
        .set = hfi1_caps_set,
        .get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
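/*
 * Illustrative usage (not part of the original source): because cap_mask
 * is registered with S_IWUSR, it can be given at load time or rewritten
 * at runtime, in which case hfi1_caps_set() below filters the value:
 *
 *   modprobe hfi1 cap_mask=0x4c09a00cb9a            (made-up value)
 *   echo 0x4c09a00cb9a > /sys/module/hfi1/parameters/cap_mask
 *
 * Reserved, locked, and otherwise non-writable bits are silently dropped.
 */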
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
MODULE_VERSION(HFI1_DRIVER_VERSION);
/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
#define EGR_HEAD_UPDATE_THRESHOLD 16
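/*
 * A brief note (added for clarity): MAX_PKT_RECV is a power of two, so
 * "every MAX_PKT_RECV-th packet" can be detected with a mask rather than
 * a division in the hot path, e.g.:
 *
 *   if ((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)
 *           yield the CPU, or return RCV_PKT_LIMIT
 *
 * as done in process_rcv_packet() and skip_rcv_packet() below.
 */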
struct hfi1_ib_stats hfi1_stats;
static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
        int ret = 0;
        unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
                cap_mask = *cap_mask_ptr, value, diff,
                write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
                              HFI1_CAP_WRITABLE_MASK);

        ret = kstrtoul(val, 0, &value);
        if (ret) {
                pr_warn("Invalid module parameter value for 'cap_mask'\n");
                goto done;
        }
        /* Get the changed bits (except the locked bit) */
        diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

        /* Remove any bits that are not allowed to change after driver load */
        if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
                pr_warn("Ignoring non-writable capability bits %#lx\n",
                        diff & ~write_mask);
                diff &= write_mask;
        }

        /* Mask off any reserved bits */
        diff &= ~HFI1_CAP_RESERVED_MASK;
        /* Clear any previously set and changing bits */
        cap_mask &= ~diff;
        /* Update the bits with the new capability */
        cap_mask |= (value & diff);
        /* Check for any kernel/user restrictions */
        diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
                ((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
        cap_mask &= ~diff;
        /* Set the bitmask to the final set */
        *cap_mask_ptr = cap_mask;
done:
        return ret;
}
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
        unsigned long cap_mask = *(unsigned long *)kp->arg;

        cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
        cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

        return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}
const char *get_unit_name(int unit)
{
        static char iname[16];

        snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
        return iname;
}
const char *get_card_name(struct rvt_dev_info *rdi)
{
        struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
        struct hfi1_devdata *dd = container_of(ibdev,
                                               struct hfi1_devdata, verbs_dev);
        return get_unit_name(dd->unit);
}
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
        struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
        struct hfi1_devdata *dd = container_of(ibdev,
                                               struct hfi1_devdata, verbs_dev);
        return dd->pcidev;
}
/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
        struct hfi1_devdata *dd;
        struct hfi1_pportdata *ppd;
        unsigned long flags;
        int pidx, nunits_active = 0;

        spin_lock_irqsave(&hfi1_devs_lock, flags);
        list_for_each_entry(dd, &hfi1_dev_list, list) {
                if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase)
                        continue;
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;
                        if (ppd->lid && ppd->linkup) {
                                nunits_active++;
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
        return nunits_active;
}
/*
 * Return count of all units, optionally return in arguments
 * the number of usable (present) units, and the number of
 * ports that are up.
 */
int hfi1_count_units(int *npresentp, int *nupp)
{
        int nunits = 0, npresent = 0, nup = 0;
        struct hfi1_devdata *dd;
        unsigned long flags;
        int pidx;
        struct hfi1_pportdata *ppd;

        spin_lock_irqsave(&hfi1_devs_lock, flags);

        list_for_each_entry(dd, &hfi1_dev_list, list) {
                nunits++;
                if ((dd->flags & HFI1_PRESENT) && dd->kregbase)
                        npresent++;
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;
                        if (ppd->lid && ppd->linkup)
                                nup++;
                }
        }

        spin_unlock_irqrestore(&hfi1_devs_lock, flags);

        if (npresentp)
                *npresentp = npresent;
        if (nupp)
                *nupp = nup;

        return nunits;
}
/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
                               u8 *update)
{
        u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

        *update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
        return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
                        (offset * RCV_BUF_BLOCK_SIZE));
}
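/*
 * Worked example (illustrative, not from the original source): for an
 * RHF decoding to idx == 8 and offset == 2, the returned address is
 * rcvtids[8].addr + 2 * RCV_BUF_BLOCK_SIZE. Since egrbufs.threshold is
 * a power of two, *update is raised only when idx lands on a threshold
 * boundary with offset == 0, throttling eager-head updates.
 */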
/*
 * Validate and encode a given RcvArray buffer size.
 * The function will check whether the given size falls within
 * allowed size ranges for the respective type and, optionally,
 * return the proper encoding.
 */
inline int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
        if (unlikely(!PAGE_ALIGNED(size)))
                return 0;
        if (unlikely(size < MIN_EAGER_BUFFER))
                return 0;
        if (size >
            (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
                return 0;
        if (encoded)
                *encoded = ilog2(size / PAGE_SIZE) + 1;
        return 1;
}
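/*
 * Worked example (illustrative): with 4 KiB pages, a 64 KiB eager
 * buffer is accepted and encoded as ilog2(65536 / 4096) + 1 = 5;
 * a 9000-byte buffer fails the PAGE_ALIGNED() check and returns 0.
 */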
static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
                       struct hfi1_packet *packet)
{
        struct hfi1_message_header *rhdr = packet->hdr;
        u32 rte = rhf_rcv_type_err(packet->rhf);
        int lnh = be16_to_cpu(rhdr->lrh[0]) & 3;
        struct hfi1_ibport *ibp = &ppd->ibport_data;
        struct hfi1_devdata *dd = ppd->dd;
        struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

        if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
                return;

        if (packet->rhf & RHF_TID_ERR) {
                /* For TIDERR and RC QPs preemptively schedule a NAK */
                struct hfi1_ib_header *hdr = (struct hfi1_ib_header *)rhdr;
                struct hfi1_other_headers *ohdr = NULL;
                u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
                u16 lid = be16_to_cpu(hdr->lrh[1]);
                u32 qp_num;
                u32 rcv_flags = 0;

                /* Sanity check packet */
                if (tlen < 24)
                        goto drop;

                /* Check for GRH */
                if (lnh == HFI1_LRH_BTH) {
                        ohdr = &hdr->u.oth;
                } else if (lnh == HFI1_LRH_GRH) {
                        u32 vtf;

                        ohdr = &hdr->u.l.oth;
                        if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
                                goto drop;
                        vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
                        if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
                                goto drop;
                        rcv_flags |= HFI1_HAS_GRH;
                } else {
                        goto drop;
                }
                /* Get the destination QP number. */
                qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
                if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
                        struct rvt_qp *qp;
                        unsigned long flags;

                        rcu_read_lock();
                        qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
                        if (!qp) {
                                rcu_read_unlock();
                                goto drop;
                        }
                        /*
                         * Handle only RC QPs - for other QP types drop error
                         * packet.
                         */
                        spin_lock_irqsave(&qp->r_lock, flags);

                        /* Check for valid receive state. */
                        if (!(ib_rvt_state_ops[qp->state] &
                              RVT_PROCESS_RECV_OK)) {
                                ibp->rvp.n_pkt_drops++;
                        }
                        switch (qp->ibqp.qp_type) {
                        case IB_QPT_RC:
                                hfi1_rc_hdrerr(rcd, hdr, rcv_flags, qp);
                                break;
                        default:
                                /* For now don't handle any other QP types */
                                break;
                        }

                        spin_unlock_irqrestore(&qp->r_lock, flags);
                        rcu_read_unlock();
                } /* Unicast QP */
        } /* Valid packet with TIDErr */

        /* handle "RcvTypeErr" flags */
        switch (rte) {
        case RHF_RTE_ERROR_OP_CODE_ERR:
        {
                u32 opcode;
                void *ebuf = NULL;
                __be32 *bth = NULL;

                if (rhf_use_egr_bfr(packet->rhf))
                        ebuf = packet->ebuf;
                if (!ebuf)
                        goto drop; /* this should never happen */
                if (lnh == HFI1_LRH_BTH)
                        bth = (__be32 *)ebuf;
                else if (lnh == HFI1_LRH_GRH)
                        bth = (__be32 *)((char *)ebuf + sizeof(struct ib_grh));
                else
                        goto drop;

                opcode = be32_to_cpu(bth[0]) >> 24;
                opcode &= 0xff;

                if (opcode == IB_OPCODE_CNP) {
                        /*
                         * Only in pre-B0 h/w is the CNP_OPCODE handled
                         * via this code path.
                         */
                        struct rvt_qp *qp = NULL;
                        u32 lqpn, rqpn;
                        u16 rlid;
                        u8 svc_type, sl, sc5;

                        sc5 = (be16_to_cpu(rhdr->lrh[0]) >> 12) & 0xf;
                        if (rhf_dc_info(packet->rhf))
                                sc5 |= 0x10;
                        sl = ibp->sc_to_sl[sc5];

                        lqpn = be32_to_cpu(bth[1]) & RVT_QPN_MASK;
                        rcu_read_lock();
                        qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
                        if (!qp) {
                                rcu_read_unlock();
                                goto drop;
                        }

                        switch (qp->ibqp.qp_type) {
                        case IB_QPT_UD:
                                rlid = 0;
                                rqpn = 0;
                                svc_type = IB_CC_SVCTYPE_UD;
                                break;
                        case IB_QPT_UC:
                                rlid = be16_to_cpu(rhdr->lrh[3]);
                                rqpn = qp->remote_qpn;
                                svc_type = IB_CC_SVCTYPE_UC;
                                break;
                        default:
                                rcu_read_unlock();
                                goto drop;
                        }

                        process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
                        rcu_read_unlock();
                }

                packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
                break;
        }
        default:
                break;
        }

drop:
        return;
}
static inline void init_packet(struct hfi1_ctxtdata *rcd,
                               struct hfi1_packet *packet)
{
        packet->rsize = rcd->rcvhdrqentsize; /* words */
        packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
        packet->rcd = rcd;
        packet->updegr = 0;
        packet->etail = -1;
        packet->rhf_addr = get_rhf_addr(rcd);
        packet->rhf = rhf_to_cpu(packet->rhf_addr);
        packet->rhqoff = rcd->head;
        packet->numpkt = 0;
        packet->rcv_flags = 0;
}
static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
                        struct hfi1_other_headers *ohdr,
                        u64 rhf, u32 bth1, struct ib_grh *grh)
{
        struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        u32 rqpn = 0;
        u16 rlid;
        u8 sc5, svc_type;

        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_UD:
                rlid = be16_to_cpu(hdr->lrh[3]);
                rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
                svc_type = IB_CC_SVCTYPE_UD;
                break;
        case IB_QPT_UC:
                rlid = qp->remote_ah_attr.dlid;
                rqpn = qp->remote_qpn;
                svc_type = IB_CC_SVCTYPE_UC;
                break;
        case IB_QPT_RC:
                rlid = qp->remote_ah_attr.dlid;
                rqpn = qp->remote_qpn;
                svc_type = IB_CC_SVCTYPE_RC;
                break;
        default:
                return;
        }

        sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
        if (rhf_dc_info(rhf))
                sc5 |= 0x10;

        if (bth1 & HFI1_FECN_SMASK) {
                u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
                u16 dlid = be16_to_cpu(hdr->lrh[1]);

                return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc5, grh);
        }

        if (bth1 & HFI1_BECN_SMASK) {
                struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
                u32 lqpn = bth1 & RVT_QPN_MASK;
                u8 sl = ibp->sc_to_sl[sc5];

                process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
        }
}
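/*
 * Summary (comment added for clarity): a set FECN bit asks this end to
 * answer the sender with a CNP (return_cnp()), while a set BECN bit
 * signals that our own traffic is congesting the fabric, so the local
 * send side is throttled via process_becn().
 */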
struct ps_mdata {
        struct hfi1_ctxtdata *rcd;
        u32 rsize;
        u32 maxcnt;
        u32 ps_head;
        u32 ps_tail;
        u32 ps_seq;
};
static inline void init_ps_mdata(struct ps_mdata *mdata,
                                 struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;

        mdata->rcd = rcd;
        mdata->rsize = packet->rsize;
        mdata->maxcnt = packet->maxcnt;
        mdata->ps_head = packet->rhqoff;

        if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
                mdata->ps_tail = get_rcvhdrtail(rcd);
                if (rcd->ctxt == HFI1_CTRL_CTXT)
                        mdata->ps_seq = rcd->seq_cnt;
                else
                        mdata->ps_seq = 0; /* not used with DMA_RTAIL */
        } else {
                mdata->ps_tail = 0; /* used only with DMA_RTAIL */
                mdata->ps_seq = rcd->seq_cnt;
        }
}
static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
                          struct hfi1_ctxtdata *rcd)
{
        if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
                return mdata->ps_head == mdata->ps_tail;
        return mdata->ps_seq != rhf_rcv_seq(rhf);
}
static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
                          struct hfi1_ctxtdata *rcd)
{
        /*
         * Control context can potentially receive an invalid rhf.
         * Drop such packets.
         */
        if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
                return mdata->ps_seq != rhf_rcv_seq(rhf);

        return 0;
}
static inline void update_ps_mdata(struct ps_mdata *mdata,
                                   struct hfi1_ctxtdata *rcd)
{
        mdata->ps_head += mdata->rsize;
        if (mdata->ps_head >= mdata->maxcnt)
                mdata->ps_head = 0;

        /* Control context must do seq counting */
        if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
            (rcd->ctxt == HFI1_CTRL_CTXT)) {
                if (++mdata->ps_seq > 13)
                        mdata->ps_seq = 1;
        }
}
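/*
 * Worked example (illustrative): the RHF sequence field cycles
 * 1, 2, ..., 13, 1, 2, ... The software counter above wraps the same
 * way, so without a DMA'ed tail pointer an entry whose sequence does
 * not match the expected count has simply not been written yet.
 */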
/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs, or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 * This is declared as a macro to allow quick checking of the port to avoid
 * the overhead of a function call if not enabled.
 */
#define prescan_rxq(rcd, packet) \
        do { \
                if (rcd->ppd->cc_prescan) \
                        __prescan_rxq(packet); \
        } while (0)
static void __prescan_rxq(struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;
        struct ps_mdata mdata;

        init_ps_mdata(&mdata, packet);

        while (1) {
                struct hfi1_devdata *dd = rcd->dd;
                struct hfi1_ibport *ibp = &rcd->ppd->ibport_data;
                __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
                                   dd->rhf_offset;
                struct rvt_qp *qp;
                struct hfi1_ib_header *hdr;
                struct hfi1_other_headers *ohdr;
                struct ib_grh *grh = NULL;
                struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
                u64 rhf = rhf_to_cpu(rhf_addr);
                u32 etype = rhf_rcv_type(rhf), qpn, bth1;
                int is_ecn = 0;
                u8 lnh;

                if (ps_done(&mdata, rhf, rcd))
                        break;

                if (ps_skip(&mdata, rhf, rcd))
                        goto next;

                if (etype != RHF_RCV_TYPE_IB)
                        goto next;

                hdr = (struct hfi1_ib_header *)
                        hfi1_get_msgheader(dd, rhf_addr);
                lnh = be16_to_cpu(hdr->lrh[0]) & 3;

                if (lnh == HFI1_LRH_BTH) {
                        ohdr = &hdr->u.oth;
                } else if (lnh == HFI1_LRH_GRH) {
                        ohdr = &hdr->u.l.oth;
                        grh = &hdr->u.l.grh;
                } else {
                        goto next; /* just in case */
                }
                bth1 = be32_to_cpu(ohdr->bth[1]);
                is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));

                if (!is_ecn)
                        goto next;

                qpn = bth1 & RVT_QPN_MASK;
                rcu_read_lock();
                qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);

                if (!qp) {
                        rcu_read_unlock();
                        goto next;
                }

                process_ecn(qp, hdr, ohdr, rhf, bth1, grh);
                rcu_read_unlock();

                /* turn off BECN, FECN */
                bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK);
                ohdr->bth[1] = cpu_to_be32(bth1);
next:
                update_ps_mdata(&mdata, rcd);
        }
}
static inline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
        int ret = RCV_PKT_OK;

        /* Set up for the next packet */
        packet->rhqoff += packet->rsize;
        if (packet->rhqoff >= packet->maxcnt)
                packet->rhqoff = 0;

        packet->numpkt++;
        if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
                if (thread) {
                        cond_resched();
                } else {
                        ret = RCV_PKT_LIMIT;
                        this_cpu_inc(*packet->rcd->dd->rcv_limit);
                }
        }

        packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
                            packet->rcd->dd->rhf_offset;
        packet->rhf = rhf_to_cpu(packet->rhf_addr);

        return ret;
}
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
        int ret = RCV_PKT_OK;

        packet->hdr = hfi1_get_msgheader(packet->rcd->dd,
                                         packet->rhf_addr);
        packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
        packet->etype = rhf_rcv_type(packet->rhf);
        /* total length */
        packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
        /* retrieve eager buffer details */
        packet->ebuf = NULL;
        if (rhf_use_egr_bfr(packet->rhf)) {
                packet->etail = rhf_egr_index(packet->rhf);
                packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
                                          &packet->updegr);
                /*
                 * Prefetch the contents of the eager buffer. It is
                 * OK to send a negative length to prefetch_range().
                 * The +2 is the size of the RHF.
                 */
                prefetch_range(packet->ebuf,
                               packet->tlen - ((packet->rcd->rcvhdrqentsize -
                                               (rhf_hdrq_offset(packet->rhf)
                                                + 2)) * 4));
        }

        /*
         * Call a type specific handler for the packet. We
         * should be able to trust that etype won't be beyond
         * the range of valid indexes. If so something is really
         * wrong and we can probably just let things come
         * crashing down. There is no need to eat another
         * comparison in this performance critical code.
         */
        packet->rcd->dd->rhf_rcv_function_map[packet->etype](packet);
        packet->numpkt++;

        /* Set up for the next packet */
        packet->rhqoff += packet->rsize;
        if (packet->rhqoff >= packet->maxcnt)
                packet->rhqoff = 0;

        if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
                if (thread) {
                        cond_resched();
                } else {
                        ret = RCV_PKT_LIMIT;
                        this_cpu_inc(*packet->rcd->dd->rcv_limit);
                }
        }

        packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
                            packet->rcd->dd->rhf_offset;
        packet->rhf = rhf_to_cpu(packet->rhf_addr);

        return ret;
}
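/*
 * A minimal sketch (assumed layout, not copied from this file) of the
 * dispatch used above: rhf_rcv_function_map points at an array of
 * handlers indexed by the RHF receive type, along the lines of:
 *
 *   [RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected,
 *   [RHF_RCV_TYPE_EAGER]    = kdeth_process_eager,
 *   [RHF_RCV_TYPE_IB]       = process_receive_ib,
 *   [RHF_RCV_TYPE_ERROR]    = process_receive_error,
 *   [RHF_RCV_TYPE_BYPASS]   = process_receive_bypass,
 *
 * with every remaining index mapped to process_receive_invalid, which
 * is why no bounds check is needed here.
 */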
static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{
        /*
         * Update head regs etc., every 16 packets, if not last pkt,
         * to help prevent rcvhdrq overflows, when many packets
         * are processed and queue is nearly full.
         * Don't request an interrupt for intermediate updates.
         */
        if (!last && !(packet->numpkt & 0xf)) {
                update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
                               packet->etail, 0, 0);
                packet->updegr = 0;
        }
        packet->rcv_flags = 0;
}
static inline void finish_packet(struct hfi1_packet *packet)
{
        /*
         * Nothing we need to free for the packet.
         *
         * The only thing we need to do is a final update and call for an
         * interrupt
         */
        update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
                       packet->etail, rcv_intr_dynamic, packet->numpkt);
}
static inline void process_rcv_qp_work(struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd;
        struct rvt_qp *qp, *nqp;

        rcd = packet->rcd;
        rcd->head = packet->rhqoff;

        /*
         * Iterate over all QPs waiting to respond.
         * The list won't change since the IRQ is only run on one CPU.
         */
        list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
                list_del_init(&qp->rspwait);
                if (qp->r_flags & RVT_R_RSP_NAK) {
                        qp->r_flags &= ~RVT_R_RSP_NAK;
                        hfi1_send_rc_ack(rcd, qp, 0);
                }
                if (qp->r_flags & RVT_R_RSP_SEND) {
                        unsigned long flags;

                        qp->r_flags &= ~RVT_R_RSP_SEND;
                        spin_lock_irqsave(&qp->s_lock, flags);
                        if (ib_rvt_state_ops[qp->state] &
                                        RVT_PROCESS_OR_FLUSH_SEND)
                                hfi1_schedule_send(qp);
                        spin_unlock_irqrestore(&qp->s_lock, flags);
                }
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
}
/*
 * Handle receive interrupts when using the no dma rtail option.
 */
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
        u32 seq;
        int last = RCV_PKT_OK;
        struct hfi1_packet packet;

        init_packet(rcd, &packet);
        seq = rhf_rcv_seq(packet.rhf);
        if (seq != rcd->seq_cnt) {
                last = RCV_PKT_DONE;
                goto bail;
        }

        prescan_rxq(rcd, &packet);

        while (last == RCV_PKT_OK) {
                last = process_rcv_packet(&packet, thread);
                seq = rhf_rcv_seq(packet.rhf);
                if (++rcd->seq_cnt > 13)
                        rcd->seq_cnt = 1;
                if (seq != rcd->seq_cnt)
                        last = RCV_PKT_DONE;
                process_rcv_update(last, &packet);
        }
        process_rcv_qp_work(&packet);
bail:
        finish_packet(&packet);
        return last;
}
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
        u32 hdrqtail;
        int last = RCV_PKT_OK;
        struct hfi1_packet packet;

        init_packet(rcd, &packet);
        hdrqtail = get_rcvhdrtail(rcd);
        if (packet.rhqoff == hdrqtail) {
                last = RCV_PKT_DONE;
                goto bail;
        }
        smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

        prescan_rxq(rcd, &packet);

        while (last == RCV_PKT_OK) {
                last = process_rcv_packet(&packet, thread);
                if (packet.rhqoff == hdrqtail)
                        last = RCV_PKT_DONE;
                process_rcv_update(last, &packet);
        }
        process_rcv_qp_work(&packet);
bail:
        finish_packet(&packet);
        return last;
}
static inline void set_all_nodma_rtail(struct hfi1_devdata *dd)
{
        int i;

        for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
                dd->rcd[i]->do_interrupt =
                        &handle_receive_interrupt_nodma_rtail;
}

static inline void set_all_dma_rtail(struct hfi1_devdata *dd)
{
        int i;

        for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
                dd->rcd[i]->do_interrupt =
                        &handle_receive_interrupt_dma_rtail;
}

void set_all_slowpath(struct hfi1_devdata *dd)
{
        int i;

        /* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
        for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
                dd->rcd[i]->do_interrupt = &handle_receive_interrupt;
}
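/*
 * Summary (comment added for clarity): every context starts out on
 * handle_receive_interrupt(), the slow path below. Once that handler
 * learns whether the context uses a DMA'ed tail (DMA_RTAIL), it moves
 * all non-control kernel contexts onto the matching fast path above;
 * set_all_slowpath() reverts them, and the control context always
 * stays on the slow path.
 */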
static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
                                      struct hfi1_packet packet,
                                      struct hfi1_devdata *dd)
{
        struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
        struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd,
                                                             packet.rhf_addr);

        if (hdr2sc(hdr, packet.rhf) != 0xf) {
                int hwstate = read_logical_state(dd);

                if (hwstate != LSTATE_ACTIVE) {
                        dd_dev_info(dd, "Unexpected link state %d\n", hwstate);
                        return 0;
                }

                queue_work(rcd->ppd->hfi1_wq, lsaw);
                return 1;
        }
        return 0;
}
/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{
        struct hfi1_devdata *dd = rcd->dd;
        u32 hdrqtail;
        int needset, last = RCV_PKT_OK;
        struct hfi1_packet packet;
        int skip_pkt = 0;

        /* Control context will always use the slow path interrupt handler */
        needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;

        init_packet(rcd, &packet);

        if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
                u32 seq = rhf_rcv_seq(packet.rhf);

                if (seq != rcd->seq_cnt) {
                        last = RCV_PKT_DONE;
                        goto bail;
                }
                hdrqtail = 0;
        } else {
                hdrqtail = get_rcvhdrtail(rcd);
                if (packet.rhqoff == hdrqtail) {
                        last = RCV_PKT_DONE;
                        goto bail;
                }
                smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

                /*
                 * Control context can potentially receive an invalid
                 * rhf. Drop such packets.
                 */
                if (rcd->ctxt == HFI1_CTRL_CTXT) {
                        u32 seq = rhf_rcv_seq(packet.rhf);

                        if (seq != rcd->seq_cnt)
                                skip_pkt = 1;
                }
        }

        prescan_rxq(rcd, &packet);

        while (last == RCV_PKT_OK) {
                if (unlikely(dd->do_drop &&
                             atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
                             DROP_PACKET_ON)) {
                        dd->do_drop = 0;

                        /* On to the next packet */
                        packet.rhqoff += packet.rsize;
                        packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
                                          packet.rhqoff +
                                          dd->rhf_offset;
                        packet.rhf = rhf_to_cpu(packet.rhf_addr);

                } else if (skip_pkt) {
                        last = skip_rcv_packet(&packet, thread);
                        skip_pkt = 0;
                } else {
                        /* Auto activate link on non-SC15 packet receive */
                        if (unlikely(rcd->ppd->host_link_state ==
                                     HLS_UP_ARMED) &&
                            set_armed_to_active(rcd, packet, dd))
                                goto bail;
                        last = process_rcv_packet(&packet, thread);
                }

                if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
                        u32 seq = rhf_rcv_seq(packet.rhf);

                        if (++rcd->seq_cnt > 13)
                                rcd->seq_cnt = 1;
                        if (seq != rcd->seq_cnt)
                                last = RCV_PKT_DONE;
                        if (needset) {
                                dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
                                set_all_nodma_rtail(dd);
                                needset = 0;
                        }
                } else {
                        if (packet.rhqoff == hdrqtail)
                                last = RCV_PKT_DONE;
                        /*
                         * Control context can potentially receive an invalid
                         * rhf. Drop such packets.
                         */
                        if (rcd->ctxt == HFI1_CTRL_CTXT) {
                                u32 seq = rhf_rcv_seq(packet.rhf);

                                if (++rcd->seq_cnt > 13)
                                        rcd->seq_cnt = 1;
                                if (!last && (seq != rcd->seq_cnt))
                                        skip_pkt = 1;
                        }

                        if (needset) {
                                dd_dev_info(dd,
                                            "Switching to DMA_RTAIL\n");
                                set_all_dma_rtail(dd);
                                needset = 0;
                        }
                }

                process_rcv_update(last, &packet);
        }

        process_rcv_qp_work(&packet);

bail:
        /*
         * Always write head at end, and setup rcv interrupt, even
         * if no packets were processed.
         */
        finish_packet(&packet);
        return last;
}
/*
 * We may discover in the interrupt that the hardware link state has
 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
 * and we need to update the driver's notion of the link state. We cannot
 * run set_link_state from interrupt context, so we queue this function on
 * a workqueue.
 *
 * We delay the regular interrupt processing until after the state changes
 * so that the link will be in the correct state by the time any application
 * we wake up attempts to send a reply to any message it received.
 * (Subsequent receive interrupts may possibly force the wakeup before we
 * update the link state.)
 *
 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
 * so we're safe from use-after-free of the rcd.
 */
void receive_interrupt_work(struct work_struct *work)
{
        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
                                                  linkstate_active_work);
        struct hfi1_devdata *dd = ppd->dd;
        int i;

        /* Received non-SC15 packet implies neighbor_normal */
        ppd->neighbor_normal = 1;
        set_link_state(ppd, HLS_UP_ACTIVE);

        /*
         * Interrupt all kernel contexts that could have had an
         * interrupt during auto activation.
         */
        for (i = HFI1_CTRL_CTXT; i < dd->first_user_ctxt; i++)
                force_recv_intr(dd->rcd[i]);
}
/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return default_if_bad if the size is invalid.
 */
int mtu_to_enum(u32 mtu, int default_if_bad)
{
        switch (mtu) {
        case     0: return OPA_MTU_0;
        case   256: return OPA_MTU_256;
        case   512: return OPA_MTU_512;
        case  1024: return OPA_MTU_1024;
        case  2048: return OPA_MTU_2048;
        case  4096: return OPA_MTU_4096;
        case  8192: return OPA_MTU_8192;
        case 10240: return OPA_MTU_10240;
        }
        return default_if_bad;
}
u16 enum_to_mtu(int mtu)
{
        switch (mtu) {
        case OPA_MTU_0:     return 0;
        case OPA_MTU_256:   return 256;
        case OPA_MTU_512:   return 512;
        case OPA_MTU_1024:  return 1024;
        case OPA_MTU_2048:  return 2048;
        case OPA_MTU_4096:  return 4096;
        case OPA_MTU_8192:  return 8192;
        case OPA_MTU_10240: return 10240;
        default: return 0xffff;
        }
}
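/*
 * Worked example (illustrative): the two helpers are inverses over the
 * valid sizes:
 *
 *   mtu_to_enum(8192, OPA_MTU_2048) == OPA_MTU_8192
 *   enum_to_mtu(OPA_MTU_8192)       == 8192
 *   mtu_to_enum(9000, OPA_MTU_2048) == OPA_MTU_2048   (invalid size)
 *   enum_to_mtu(0x42)               == 0xffff         (invalid enum)
 */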
/*
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size. We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{
        struct hfi1_devdata *dd = ppd->dd;
        int i, drain, ret = 0, is_up = 0;

        ppd->ibmtu = 0;
        for (i = 0; i < ppd->vls_supported; i++)
                if (ppd->ibmtu < dd->vld[i].mtu)
                        ppd->ibmtu = dd->vld[i].mtu;
        ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);

        mutex_lock(&ppd->hls_lock);
        if (ppd->host_link_state == HLS_UP_INIT ||
            ppd->host_link_state == HLS_UP_ARMED ||
            ppd->host_link_state == HLS_UP_ACTIVE)
                is_up = 1;

        drain = !is_ax(dd) && is_up;

        if (drain)
                /*
                 * MTU is specified per-VL. To ensure that no packet gets
                 * stuck (due, e.g., to the MTU for the packet's VL being
                 * reduced), empty the per-VL FIFOs before adjusting MTU.
                 */
                ret = stop_drain_data_vls(dd);

        if (ret) {
                dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
                           __func__);
                goto err;
        }

        hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);

        if (drain)
                open_fill_data_vls(dd); /* reopen all VLs */

err:
        mutex_unlock(&ppd->hls_lock);

        return ret;
}
int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{
        struct hfi1_devdata *dd = ppd->dd;

        ppd->lid = lid;
        ppd->lmc = lmc;
        hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);

        dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid);

        return 0;
}
void shutdown_led_override(struct hfi1_pportdata *ppd)
{
        struct hfi1_devdata *dd = ppd->dd;

        /*
         * This pairs with the memory barrier in hfi1_start_led_override to
         * ensure that we read the correct state of LED beaconing represented
         * by led_override_timer_active
         */
        smp_rmb();
        if (atomic_read(&ppd->led_override_timer_active)) {
                del_timer_sync(&ppd->led_override_timer);
                atomic_set(&ppd->led_override_timer_active, 0);
                /* Ensure the atomic_set is visible to all CPUs */
                smp_wmb();
        }

        /* Hand control of the LED to the DC for normal operation */
        write_csr(dd, DCC_CFG_LED_CNTRL, 0);
}
static void run_led_override(unsigned long opaque)
{
        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
        struct hfi1_devdata *dd = ppd->dd;
        unsigned long timeout;
        int phase_idx;

        if (!(dd->flags & HFI1_INITTED))
                return;

        phase_idx = ppd->led_override_phase & 1;

        setextled(dd, phase_idx);

        timeout = ppd->led_override_vals[phase_idx];

        /* Set up for next phase */
        ppd->led_override_phase = !ppd->led_override_phase;

        mod_timer(&ppd->led_override_timer, jiffies + timeout);
}
/*
 * To have the LED blink in a particular pattern, provide timeon and timeoff
 * in milliseconds.
 * To turn off custom blinking and return to normal operation, use
 * shutdown_led_override()
 */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
                             unsigned int timeoff)
{
        if (!(ppd->dd->flags & HFI1_INITTED))
                return;

        /* Convert to jiffies for direct use in timer */
        ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
        ppd->led_override_vals[1] = msecs_to_jiffies(timeon);

        /* Arbitrarily start from LED on phase */
        ppd->led_override_phase = 1;

        /*
         * If the timer has not already been started, do so. Use a "quick"
         * timeout so the handler will be called soon to look at our request.
         */
        if (!timer_pending(&ppd->led_override_timer)) {
                setup_timer(&ppd->led_override_timer, run_led_override,
                            (unsigned long)ppd);
                ppd->led_override_timer.expires = jiffies + 1;
                add_timer(&ppd->led_override_timer);
                atomic_set(&ppd->led_override_timer_active, 1);
                /* Ensure the atomic_set is visible to all CPUs */
                smp_wmb();
        }
}
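/*
 * Usage sketch (illustrative, not from the original source): to beacon
 * a port's LED at roughly 2 s on / 1.5 s off, e.g. to help locate a
 * card, and later restore normal LED behavior:
 *
 *   hfi1_start_led_override(ppd, 2000, 1500);
 *   ...
 *   shutdown_led_override(ppd);
 */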
/*
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload). We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize. For
 * now, we only allow this if no user contexts are open that use chip resources
 */
int hfi1_reset_device(int unit)
{
        int ret, i;
        struct hfi1_devdata *dd = hfi1_lookup(unit);
        struct hfi1_pportdata *ppd;
        unsigned long flags;
        int pidx;

        if (!dd) {
                ret = -ENODEV;
                goto bail;
        }

        dd_dev_info(dd, "Reset on unit %u requested\n", unit);

        if (!dd->kregbase || !(dd->flags & HFI1_PRESENT)) {
                dd_dev_info(dd,
                            "Invalid unit number %u or not initialized or not present\n",
                            unit);
                ret = -ENXIO;
                goto bail;
        }

        spin_lock_irqsave(&dd->uctxt_lock, flags);
        if (dd->rcd)
                for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
                        if (!dd->rcd[i] || !dd->rcd[i]->cnt)
                                continue;
                        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
                        ret = -EBUSY;
                        goto bail;
                }
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                shutdown_led_override(ppd);
        }
        if (dd->flags & HFI1_HAS_SEND_DMA)
                sdma_exit(dd);

        hfi1_reset_cpu_counters(dd);

        ret = hfi1_init(dd, 1);

        if (ret)
                dd_dev_err(dd,
                           "Reinitialize unit %u after reset failed with %d\n",
                           unit, ret);
        else
                dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
                            unit);

bail:
        return ret;
}
void handle_eflags(struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;
        u32 rte = rhf_rcv_type_err(packet->rhf);

        rcv_hdrerr(rcd, rcd->ppd, packet);
        if (rhf_err_flags(packet->rhf))
                dd_dev_err(rcd->dd,
                           "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
                           rcd->ctxt, packet->rhf,
                           packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
                           packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
                           packet->rhf & RHF_DC_ERR ? "dc " : "",
                           packet->rhf & RHF_TID_ERR ? "tid " : "",
                           packet->rhf & RHF_LEN_ERR ? "len " : "",
                           packet->rhf & RHF_ECC_ERR ? "ecc " : "",
                           packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
                           packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
                           rte);
}
/*
 * The following functions are called by the interrupt handler. They are type
 * specific handlers for each packet type.
 */
int process_receive_ib(struct hfi1_packet *packet)
{
        trace_hfi1_rcvhdr(packet->rcd->ppd->dd,
                          packet->rcd->ctxt,
                          rhf_err_flags(packet->rhf),
                          rhf_rcv_type(packet->rhf),
                          packet->hlen,
                          packet->tlen,
                          packet->updegr,
                          rhf_egr_index(packet->rhf));

        if (unlikely(rhf_err_flags(packet->rhf))) {
                handle_eflags(packet);
                return RHF_RCV_CONTINUE;
        }

        hfi1_ib_rcv(packet);
        return RHF_RCV_CONTINUE;
}
int process_receive_bypass(struct hfi1_packet *packet)
{
        if (unlikely(rhf_err_flags(packet->rhf)))
                handle_eflags(packet);

        dd_dev_err(packet->rcd->dd,
                   "Bypass packets are not supported in normal operation. Dropping\n");
        return RHF_RCV_CONTINUE;
}
int process_receive_error(struct hfi1_packet *packet)
{
        handle_eflags(packet);

        if (unlikely(rhf_err_flags(packet->rhf)))
                dd_dev_err(packet->rcd->dd,
                           "Unhandled error packet received. Dropping.\n");

        return RHF_RCV_CONTINUE;
}
int kdeth_process_expected(struct hfi1_packet *packet)
{
        if (unlikely(rhf_err_flags(packet->rhf)))
                handle_eflags(packet);

        dd_dev_err(packet->rcd->dd,
                   "Unhandled expected packet received. Dropping.\n");
        return RHF_RCV_CONTINUE;
}
int kdeth_process_eager(struct hfi1_packet *packet)
{
        if (unlikely(rhf_err_flags(packet->rhf)))
                handle_eflags(packet);

        dd_dev_err(packet->rcd->dd,
                   "Unhandled eager packet received. Dropping.\n");
        return RHF_RCV_CONTINUE;
}
int process_receive_invalid(struct hfi1_packet *packet)
{
        dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
                   rhf_rcv_type(packet->rhf));
        return RHF_RCV_CONTINUE;
}