drivers/infiniband/hw/hfi1/driver.c
1 /*
2  * Copyright(c) 2015-2017 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47
48 #include <linux/spinlock.h>
49 #include <linux/pci.h>
50 #include <linux/io.h>
51 #include <linux/delay.h>
52 #include <linux/netdevice.h>
53 #include <linux/vmalloc.h>
54 #include <linux/module.h>
55 #include <linux/prefetch.h>
56 #include <rdma/ib_verbs.h>
57
58 #include "hfi.h"
59 #include "trace.h"
60 #include "qp.h"
61 #include "sdma.h"
62 #include "debugfs.h"
63 #include "vnic.h"
64
65 #undef pr_fmt
66 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
67
68 /*
69  * The buffer has to be longer than this string, so we can append
70  * board/chip information to it in the initialization code.
71  */
72 const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";
73
74 DEFINE_SPINLOCK(hfi1_devs_lock);
75 LIST_HEAD(hfi1_dev_list);
76 DEFINE_MUTEX(hfi1_mutex);       /* general driver use */
77
78 unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
79 module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
80 MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
81                  HFI1_DEFAULT_MAX_MTU));
82
83 unsigned int hfi1_cu = 1;
84 module_param_named(cu, hfi1_cu, uint, S_IRUGO);
85 MODULE_PARM_DESC(cu, "Credit return units");
86
87 unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
88 static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
89 static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
90 static const struct kernel_param_ops cap_ops = {
91         .set = hfi1_caps_set,
92         .get = hfi1_caps_get
93 };
94 module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
95 MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
96
97 MODULE_LICENSE("Dual BSD/GPL");
98 MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
99
100 /*
101  * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
102  */
103 #define MAX_PKT_RECV 64
104 /*
105  * MAX_PKT_RECV_THREAD is the max # of packets processed before
106  * the qp_wait_list queue is flushed.
107  */
108 #define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4)
109 #define EGR_HEAD_UPDATE_THRESHOLD 16
110
111 struct hfi1_ib_stats hfi1_stats;
112
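/*
 * hfi1_caps_set() - "cap_mask" module parameter set handler.
 *
 * Parses the user-supplied value, ignores bits that may no longer be
 * written once the capability mask has been locked, masks off reserved
 * bits, and clears any user capability whose required kernel
 * counterpart (HFI1_CAP_MUST_HAVE_KERN) is not also enabled.
 */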
113 static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
114 {
115         int ret = 0;
116         unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
117                 cap_mask = *cap_mask_ptr, value, diff,
118                 write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
119                               HFI1_CAP_WRITABLE_MASK);
120
121         ret = kstrtoul(val, 0, &value);
122         if (ret) {
123                 pr_warn("Invalid module parameter value for 'cap_mask'\n");
124                 goto done;
125         }
126         /* Get the changed bits (except the locked bit) */
127         diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);
128
129         /* Remove any bits that are not allowed to change after driver load */
130         if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
131                 pr_warn("Ignoring non-writable capability bits %#lx\n",
132                         diff & ~write_mask);
133                 diff &= write_mask;
134         }
135
136         /* Mask off any reserved bits */
137         diff &= ~HFI1_CAP_RESERVED_MASK;
138         /* Clear any previously set and changing bits */
139         cap_mask &= ~diff;
140         /* Update the bits with the new capability */
141         cap_mask |= (value & diff);
142         /* Check for any kernel/user restrictions */
143         diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
144                 ((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
145         cap_mask &= ~diff;
146         /* Set the bitmask to the final set */
147         *cap_mask_ptr = cap_mask;
148 done:
149         return ret;
150 }
151
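/*
 * hfi1_caps_get() - "cap_mask" module parameter get handler.
 *
 * Reports the current mask with the locked bit hidden and the
 * kernel-to-user (K2U) capabilities mirrored into the user half.
 */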
152 static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
153 {
154         unsigned long cap_mask = *(unsigned long *)kp->arg;
155
156         cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
157         cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);
158
159         return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
160 }
161
162 struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
163 {
164         struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
165         struct hfi1_devdata *dd = container_of(ibdev,
166                                                struct hfi1_devdata, verbs_dev);
167         return dd->pcidev;
168 }
169
170 /*
171  * Return count of units with at least one port ACTIVE.
172  */
173 int hfi1_count_active_units(void)
174 {
175         struct hfi1_devdata *dd;
176         struct hfi1_pportdata *ppd;
177         unsigned long flags;
178         int pidx, nunits_active = 0;
179
180         spin_lock_irqsave(&hfi1_devs_lock, flags);
181         list_for_each_entry(dd, &hfi1_dev_list, list) {
182                 if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1)
183                         continue;
184                 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
185                         ppd = dd->pport + pidx;
186                         if (ppd->lid && ppd->linkup) {
187                                 nunits_active++;
188                                 break;
189                         }
190                 }
191         }
192         spin_unlock_irqrestore(&hfi1_devs_lock, flags);
193         return nunits_active;
194 }
195
196 /*
197  * Get address of eager buffer from its index (allocated in chunks, not
198  * contiguous).
199  */
200 static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
201                                u8 *update)
202 {
203         u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);
204
205         *update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
206         return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
207                         (offset * RCV_BUF_BLOCK_SIZE));
208 }
209
210 static inline void *hfi1_get_header(struct hfi1_devdata *dd,
211                                     __le32 *rhf_addr)
212 {
213         u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
214
215         return (void *)(rhf_addr - dd->rhf_offset + offset);
216 }
217
218 static inline struct ib_header *hfi1_get_msgheader(struct hfi1_devdata *dd,
219                                                    __le32 *rhf_addr)
220 {
221         return (struct ib_header *)hfi1_get_header(dd, rhf_addr);
222 }
223
224 static inline struct hfi1_16b_header
225                 *hfi1_get_16B_header(struct hfi1_devdata *dd,
226                                      __le32 *rhf_addr)
227 {
228         return (struct hfi1_16b_header *)hfi1_get_header(dd, rhf_addr);
229 }
230
231 /*
232  * Validate and encode a given RcvArray buffer size.
233  * The function will check whether the given size falls within
234  * allowed size ranges for the respective type and, optionally,
235  * return the proper encoding.
236  */
237 int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
238 {
239         if (unlikely(!PAGE_ALIGNED(size)))
240                 return 0;
241         if (unlikely(size < MIN_EAGER_BUFFER))
242                 return 0;
243         if (size >
244             (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
245                 return 0;
246         if (encoded)
247                 *encoded = ilog2(size / PAGE_SIZE) + 1;
248         return 1;
249 }
250
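/*
 * rcv_hdrerr() - handle a packet flagged with a receive header error.
 *
 * For a TID error on a unicast packet destined to an RC QP, a NAK is
 * scheduled preemptively.  An opcode error carrying a CNP opcode is fed
 * to the congestion (BECN) processing code; only pre-B0 hardware takes
 * that path.  Other error packets are dropped here.
 */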
251 static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
252                        struct hfi1_packet *packet)
253 {
254         struct ib_header *rhdr = packet->hdr;
255         u32 rte = rhf_rcv_type_err(packet->rhf);
256         u32 mlid_base;
257         struct hfi1_ibport *ibp = rcd_to_iport(rcd);
258         struct hfi1_devdata *dd = ppd->dd;
259         struct hfi1_ibdev *verbs_dev = &dd->verbs_dev;
260         struct rvt_dev_info *rdi = &verbs_dev->rdi;
261
262         if ((packet->rhf & RHF_DC_ERR) &&
263             hfi1_dbg_fault_suppress_err(verbs_dev))
264                 return;
265
266         if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
267                 return;
268
269         if (packet->etype == RHF_RCV_TYPE_BYPASS) {
270                 goto drop;
271         } else {
272                 u8 lnh = ib_get_lnh(rhdr);
273
274                 mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE);
275                 if (lnh == HFI1_LRH_BTH) {
276                         packet->ohdr = &rhdr->u.oth;
277                 } else if (lnh == HFI1_LRH_GRH) {
278                         packet->ohdr = &rhdr->u.l.oth;
279                         packet->grh = &rhdr->u.l.grh;
280                 } else {
281                         goto drop;
282                 }
283         }
284
285         if (packet->rhf & RHF_TID_ERR) {
286                 /* For TIDERR and RC QPs preemptively schedule a NAK */
287                 u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
288                 u32 dlid = ib_get_dlid(rhdr);
289                 u32 qp_num;
290
291                 /* Sanity check packet */
292                 if (tlen < 24)
293                         goto drop;
294
295                 /* Check for GRH */
296                 if (packet->grh) {
297                         u32 vtf;
298                         struct ib_grh *grh = packet->grh;
299
300                         if (grh->next_hdr != IB_GRH_NEXT_HDR)
301                                 goto drop;
302                         vtf = be32_to_cpu(grh->version_tclass_flow);
303                         if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
304                                 goto drop;
305                 }
306
307                 /* Get the destination QP number. */
308                 qp_num = ib_bth_get_qpn(packet->ohdr);
309                 if (dlid < mlid_base) {
310                         struct rvt_qp *qp;
311                         unsigned long flags;
312
313                         rcu_read_lock();
314                         qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
315                         if (!qp) {
316                                 rcu_read_unlock();
317                                 goto drop;
318                         }
319
320                         /*
321                          * Handle only RC QPs - for other QP types drop error
322                          * packet.
323                          */
324                         spin_lock_irqsave(&qp->r_lock, flags);
325
326                         /* Check for valid receive state. */
327                         if (!(ib_rvt_state_ops[qp->state] &
328                               RVT_PROCESS_RECV_OK)) {
329                                 ibp->rvp.n_pkt_drops++;
330                         }
331
332                         switch (qp->ibqp.qp_type) {
333                         case IB_QPT_RC:
334                                 hfi1_rc_hdrerr(rcd, packet, qp);
335                                 break;
336                         default:
337                                 /* For now don't handle any other QP types */
338                                 break;
339                         }
340
341                         spin_unlock_irqrestore(&qp->r_lock, flags);
342                         rcu_read_unlock();
343                 } /* Unicast QP */
344         } /* Valid packet with TIDErr */
345
346         /* handle "RcvTypeErr" flags */
347         switch (rte) {
348         case RHF_RTE_ERROR_OP_CODE_ERR:
349         {
350                 void *ebuf = NULL;
351                 u8 opcode;
352
353                 if (rhf_use_egr_bfr(packet->rhf))
354                         ebuf = packet->ebuf;
355
356                 if (!ebuf)
357                         goto drop; /* this should never happen */
358
359                 opcode = ib_bth_get_opcode(packet->ohdr);
360                 if (opcode == IB_OPCODE_CNP) {
361                         /*
362                          * Only in pre-B0 h/w is the CNP_OPCODE handled
363                          * via this code path.
364                          */
365                         struct rvt_qp *qp = NULL;
366                         u32 lqpn, rqpn;
367                         u16 rlid;
368                         u8 svc_type, sl, sc5;
369
370                         sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
371                         sl = ibp->sc_to_sl[sc5];
372
373                         lqpn = ib_bth_get_qpn(packet->ohdr);
374                         rcu_read_lock();
375                         qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
376                         if (!qp) {
377                                 rcu_read_unlock();
378                                 goto drop;
379                         }
380
381                         switch (qp->ibqp.qp_type) {
382                         case IB_QPT_UD:
383                                 rlid = 0;
384                                 rqpn = 0;
385                                 svc_type = IB_CC_SVCTYPE_UD;
386                                 break;
387                         case IB_QPT_UC:
388                                 rlid = ib_get_slid(rhdr);
389                                 rqpn = qp->remote_qpn;
390                                 svc_type = IB_CC_SVCTYPE_UC;
391                                 break;
392                         default:
393                                 rcu_read_unlock();
394                                 goto drop;
395                         }
396
397                         process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
398                         rcu_read_unlock();
399                 }
400
401                 packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
402                 break;
403         }
404         default:
405                 break;
406         }
407
408 drop:
409         return;
410 }
411
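/* Initialize per-interrupt packet state from the receive context. */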
412 static inline void init_packet(struct hfi1_ctxtdata *rcd,
413                                struct hfi1_packet *packet)
414 {
415         packet->rsize = rcd->rcvhdrqentsize; /* words */
416         packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
417         packet->rcd = rcd;
418         packet->updegr = 0;
419         packet->etail = -1;
420         packet->rhf_addr = get_rhf_addr(rcd);
421         packet->rhf = rhf_to_cpu(packet->rhf_addr);
422         packet->rhqoff = rcd->head;
423         packet->numpkt = 0;
424 }
425
426 /* We support only two types - 9B and 16B for now */
427 static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
428         [HFI1_PKT_TYPE_9B] = &return_cnp,
429         [HFI1_PKT_TYPE_16B] = &return_cnp_16B
430 };
431
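/*
 * hfi1_process_ecn_slowpath() - handle FECN/BECN marks on a packet.
 *
 * Extracts the header fields according to the packet type (9B or 16B),
 * invokes the matching CNP handler when an FECN is present and @do_cnp
 * is set, and processes the BECN for non-multicast packets.
 */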
432 void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
433                                bool do_cnp)
434 {
435         struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
436         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
437         struct ib_other_headers *ohdr = pkt->ohdr;
438         struct ib_grh *grh = pkt->grh;
439         u32 rqpn = 0, bth1;
440         u16 pkey;
441         u32 rlid, slid, dlid = 0;
442         u8 hdr_type, sc, svc_type;
443         bool is_mcast = false;
444
445         /* can be called from prescan */
446         if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
447                 pkey = hfi1_16B_get_pkey(pkt->hdr);
448                 sc = hfi1_16B_get_sc(pkt->hdr);
449                 dlid = hfi1_16B_get_dlid(pkt->hdr);
450                 slid = hfi1_16B_get_slid(pkt->hdr);
451                 is_mcast = hfi1_is_16B_mcast(dlid);
452                 hdr_type = HFI1_PKT_TYPE_16B;
453         } else {
454                 pkey = ib_bth_get_pkey(ohdr);
455                 sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
456                 dlid = ib_get_dlid(pkt->hdr);
457                 slid = ib_get_slid(pkt->hdr);
458                 is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
459                            (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
460                 hdr_type = HFI1_PKT_TYPE_9B;
461         }
462
463         switch (qp->ibqp.qp_type) {
464         case IB_QPT_UD:
465                 dlid = ppd->lid;
466                 rlid = slid;
467                 rqpn = ib_get_sqpn(pkt->ohdr);
468                 svc_type = IB_CC_SVCTYPE_UD;
469                 break;
470         case IB_QPT_SMI:
471         case IB_QPT_GSI:
472                 rlid = slid;
473                 rqpn = ib_get_sqpn(pkt->ohdr);
474                 svc_type = IB_CC_SVCTYPE_UD;
475                 break;
476         case IB_QPT_UC:
477                 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
478                 rqpn = qp->remote_qpn;
479                 svc_type = IB_CC_SVCTYPE_UC;
480                 break;
481         case IB_QPT_RC:
482                 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
483                 rqpn = qp->remote_qpn;
484                 svc_type = IB_CC_SVCTYPE_RC;
485                 break;
486         default:
487                 return;
488         }
489
490         bth1 = be32_to_cpu(ohdr->bth[1]);
491         /* Call appropriate CNP handler */
492         if (do_cnp && (bth1 & IB_FECN_SMASK))
493                 hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
494                                               dlid, rlid, sc, grh);
495
496         if (!is_mcast && (bth1 & IB_BECN_SMASK)) {
497                 u32 lqpn = bth1 & RVT_QPN_MASK;
498                 u8 sl = ibp->sc_to_sl[sc];
499
500                 process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
501         }
502
503 }
504
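/*
 * Cursor state for the receive queue prescan below: header queue
 * geometry plus the current head, tail and expected RHF sequence.
 */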
505 struct ps_mdata {
506         struct hfi1_ctxtdata *rcd;
507         u32 rsize;
508         u32 maxcnt;
509         u32 ps_head;
510         u32 ps_tail;
511         u32 ps_seq;
512 };
513
514 static inline void init_ps_mdata(struct ps_mdata *mdata,
515                                  struct hfi1_packet *packet)
516 {
517         struct hfi1_ctxtdata *rcd = packet->rcd;
518
519         mdata->rcd = rcd;
520         mdata->rsize = packet->rsize;
521         mdata->maxcnt = packet->maxcnt;
522         mdata->ps_head = packet->rhqoff;
523
524         if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
525                 mdata->ps_tail = get_rcvhdrtail(rcd);
526                 if (rcd->ctxt == HFI1_CTRL_CTXT)
527                         mdata->ps_seq = rcd->seq_cnt;
528                 else
529                         mdata->ps_seq = 0; /* not used with DMA_RTAIL */
530         } else {
531                 mdata->ps_tail = 0; /* used only with DMA_RTAIL */
532                 mdata->ps_seq = rcd->seq_cnt;
533         }
534 }
535
536 static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
537                           struct hfi1_ctxtdata *rcd)
538 {
539         if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
540                 return mdata->ps_head == mdata->ps_tail;
541         return mdata->ps_seq != rhf_rcv_seq(rhf);
542 }
543
544 static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
545                           struct hfi1_ctxtdata *rcd)
546 {
547         /*
548          * Control context can potentially receive an invalid rhf.
549          * Drop such packets.
550          */
551         if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
552                 return mdata->ps_seq != rhf_rcv_seq(rhf);
553
554         return 0;
555 }
556
557 static inline void update_ps_mdata(struct ps_mdata *mdata,
558                                    struct hfi1_ctxtdata *rcd)
559 {
560         mdata->ps_head += mdata->rsize;
561         if (mdata->ps_head >= mdata->maxcnt)
562                 mdata->ps_head = 0;
563
564         /* Control context must do seq counting */
565         if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
566             (rcd->ctxt == HFI1_CTRL_CTXT)) {
567                 if (++mdata->ps_seq > 13)
568                         mdata->ps_seq = 1;
569         }
570 }
571
572 /*
573  * prescan_rxq - search through the receive queue looking for packets
574  * containing Explicit Congestion Notifications (FECNs, or BECNs).
575  * When an ECN is found, process the Congestion Notification, and toggle
576  * it off.
577  * This is declared as a macro to allow quick checking of the port to avoid
578  * the overhead of a function call if not enabled.
579  */
580 #define prescan_rxq(rcd, packet) \
581         do { \
582                 if (rcd->ppd->cc_prescan) \
583                         __prescan_rxq(packet); \
584         } while (0)
585 static void __prescan_rxq(struct hfi1_packet *packet)
586 {
587         struct hfi1_ctxtdata *rcd = packet->rcd;
588         struct ps_mdata mdata;
589
590         init_ps_mdata(&mdata, packet);
591
592         while (1) {
593                 struct hfi1_devdata *dd = rcd->dd;
594                 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
595                 __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
596                                          dd->rhf_offset;
597                 struct rvt_qp *qp;
598                 struct ib_header *hdr;
599                 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
600                 u64 rhf = rhf_to_cpu(rhf_addr);
601                 u32 etype = rhf_rcv_type(rhf), qpn, bth1;
602                 int is_ecn = 0;
603                 u8 lnh;
604
605                 if (ps_done(&mdata, rhf, rcd))
606                         break;
607
608                 if (ps_skip(&mdata, rhf, rcd))
609                         goto next;
610
611                 if (etype != RHF_RCV_TYPE_IB)
612                         goto next;
613
614                 packet->hdr = hfi1_get_msgheader(dd, rhf_addr);
615                 hdr = packet->hdr;
616                 lnh = ib_get_lnh(hdr);
617
618                 if (lnh == HFI1_LRH_BTH) {
619                         packet->ohdr = &hdr->u.oth;
620                         packet->grh = NULL;
621                 } else if (lnh == HFI1_LRH_GRH) {
622                         packet->ohdr = &hdr->u.l.oth;
623                         packet->grh = &hdr->u.l.grh;
624                 } else {
625                         goto next; /* just in case */
626                 }
627
628                 bth1 = be32_to_cpu(packet->ohdr->bth[1]);
629                 is_ecn = !!(bth1 & (IB_FECN_SMASK | IB_BECN_SMASK));
630
631                 if (!is_ecn)
632                         goto next;
633
634                 qpn = bth1 & RVT_QPN_MASK;
635                 rcu_read_lock();
636                 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);
637
638                 if (!qp) {
639                         rcu_read_unlock();
640                         goto next;
641                 }
642
643                 process_ecn(qp, packet, true);
644                 rcu_read_unlock();
645
646                 /* turn off BECN, FECN */
647                 bth1 &= ~(IB_FECN_SMASK | IB_BECN_SMASK);
648                 packet->ohdr->bth[1] = cpu_to_be32(bth1);
649 next:
650                 update_ps_mdata(&mdata, rcd);
651         }
652 }
653
654 static void process_rcv_qp_work(struct hfi1_packet *packet)
655 {
656         struct rvt_qp *qp, *nqp;
657         struct hfi1_ctxtdata *rcd = packet->rcd;
658
659         /*
660          * Iterate over all QPs waiting to respond.
661          * The list won't change since the IRQ is only run on one CPU.
662          */
663         list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
664                 list_del_init(&qp->rspwait);
665                 if (qp->r_flags & RVT_R_RSP_NAK) {
666                         qp->r_flags &= ~RVT_R_RSP_NAK;
667                         packet->qp = qp;
668                         hfi1_send_rc_ack(packet, 0);
669                 }
670                 if (qp->r_flags & RVT_R_RSP_SEND) {
671                         unsigned long flags;
672
673                         qp->r_flags &= ~RVT_R_RSP_SEND;
674                         spin_lock_irqsave(&qp->s_lock, flags);
675                         if (ib_rvt_state_ops[qp->state] &
676                                         RVT_PROCESS_OR_FLUSH_SEND)
677                                 hfi1_schedule_send(qp);
678                         spin_unlock_irqrestore(&qp->s_lock, flags);
679                 }
680                 rvt_put_qp(qp);
681         }
682 }
683
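/*
 * max_packet_exceeded() - called every MAX_PKT_RECV packets.
 *
 * In thread context, flush the qp_wait_list every MAX_PKT_RECV_THREAD
 * packets and offer to reschedule; in interrupt context, count the
 * event and return RCV_PKT_LIMIT so the caller can defer the rest of
 * the work.
 */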
684 static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
685 {
686         if (thread) {
687                 if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
688                         /* allow deferred processing */
689                         process_rcv_qp_work(packet);
690                 cond_resched();
691                 return RCV_PKT_OK;
692         } else {
693                 this_cpu_inc(*packet->rcd->dd->rcv_limit);
694                 return RCV_PKT_LIMIT;
695         }
696 }
697
698 static inline int check_max_packet(struct hfi1_packet *packet, int thread)
699 {
700         int ret = RCV_PKT_OK;
701
702         if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
703                 ret = max_packet_exceeded(packet, thread);
704         return ret;
705 }
706
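/* Advance past the current packet without passing it to a handler. */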
707 static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
708 {
709         int ret;
710
711         /* Set up for the next packet */
712         packet->rhqoff += packet->rsize;
713         if (packet->rhqoff >= packet->maxcnt)
714                 packet->rhqoff = 0;
715
716         packet->numpkt++;
717         ret = check_max_packet(packet, thread);
718
719         packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
720                                      packet->rcd->dd->rhf_offset;
721         packet->rhf = rhf_to_cpu(packet->rhf_addr);
722
723         return ret;
724 }
725
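/*
 * process_rcv_packet() - dispatch one packet to its type-specific handler.
 *
 * Fills in the packet type, length and eager buffer pointer, prefetches
 * the eager payload, calls the handler indexed by the RHF receive type,
 * and then advances to the next header queue entry.
 */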
726 static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
727 {
728         int ret;
729
730         packet->etype = rhf_rcv_type(packet->rhf);
731
732         /* total length */
733         packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
734         /* retrieve eager buffer details */
735         packet->ebuf = NULL;
736         if (rhf_use_egr_bfr(packet->rhf)) {
737                 packet->etail = rhf_egr_index(packet->rhf);
738                 packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
739                                  &packet->updegr);
740                 /*
741                  * Prefetch the contents of the eager buffer.  It is
742                  * OK to send a negative length to prefetch_range().
743                  * The +2 is the size of the RHF.
744                  */
745                 prefetch_range(packet->ebuf,
746                                packet->tlen - ((packet->rcd->rcvhdrqentsize -
747                                                (rhf_hdrq_offset(packet->rhf)
748                                                 + 2)) * 4));
749         }
750
751         /*
752          * Call a type specific handler for the packet. We
753          * should be able to trust that etype won't be beyond
754          * the range of valid indexes. If so something is really
755          * wrong and we can probably just let things come
756          * crashing down. There is no need to eat another
757          * comparison in this performance critical code.
758          */
759         packet->rcd->dd->rhf_rcv_function_map[packet->etype](packet);
760         packet->numpkt++;
761
762         /* Set up for the next packet */
763         packet->rhqoff += packet->rsize;
764         if (packet->rhqoff >= packet->maxcnt)
765                 packet->rhqoff = 0;
766
767         ret = check_max_packet(packet, thread);
768
769         packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
770                                       packet->rcd->dd->rhf_offset;
771         packet->rhf = rhf_to_cpu(packet->rhf_addr);
772
773         return ret;
774 }
775
776 static inline void process_rcv_update(int last, struct hfi1_packet *packet)
777 {
778         /*
779          * Update head regs etc., every 16 packets, if not last pkt,
780          * to help prevent rcvhdrq overflows, when many packets
781          * are processed and queue is nearly full.
782          * Don't request an interrupt for intermediate updates.
783          */
784         if (!last && !(packet->numpkt & 0xf)) {
785                 update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
786                                packet->etail, 0, 0);
787                 packet->updegr = 0;
788         }
789         packet->grh = NULL;
790 }
791
792 static inline void finish_packet(struct hfi1_packet *packet)
793 {
794         /*
795          * Nothing we need to free for the packet.
796          *
797          * The only thing we need to do is a final update and call for an
798          * interrupt
799          */
800         update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
801                        packet->etail, rcv_intr_dynamic, packet->numpkt);
802 }
803
804 /*
805  * Handle receive interrupts when using the no dma rtail option.
806  */
807 int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
808 {
809         u32 seq;
810         int last = RCV_PKT_OK;
811         struct hfi1_packet packet;
812
813         init_packet(rcd, &packet);
814         seq = rhf_rcv_seq(packet.rhf);
815         if (seq != rcd->seq_cnt) {
816                 last = RCV_PKT_DONE;
817                 goto bail;
818         }
819
820         prescan_rxq(rcd, &packet);
821
822         while (last == RCV_PKT_OK) {
823                 last = process_rcv_packet(&packet, thread);
824                 seq = rhf_rcv_seq(packet.rhf);
825                 if (++rcd->seq_cnt > 13)
826                         rcd->seq_cnt = 1;
827                 if (seq != rcd->seq_cnt)
828                         last = RCV_PKT_DONE;
829                 process_rcv_update(last, &packet);
830         }
831         process_rcv_qp_work(&packet);
832         rcd->head = packet.rhqoff;
833 bail:
834         finish_packet(&packet);
835         return last;
836 }
837
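/*
 * Handle receive interrupts when using the dma rtail option.
 */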
838 int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
839 {
840         u32 hdrqtail;
841         int last = RCV_PKT_OK;
842         struct hfi1_packet packet;
843
844         init_packet(rcd, &packet);
845         hdrqtail = get_rcvhdrtail(rcd);
846         if (packet.rhqoff == hdrqtail) {
847                 last = RCV_PKT_DONE;
848                 goto bail;
849         }
850         smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
851
852         prescan_rxq(rcd, &packet);
853
854         while (last == RCV_PKT_OK) {
855                 last = process_rcv_packet(&packet, thread);
856                 if (packet.rhqoff == hdrqtail)
857                         last = RCV_PKT_DONE;
858                 process_rcv_update(last, &packet);
859         }
860         process_rcv_qp_work(&packet);
861         rcd->head = packet.rhqoff;
862 bail:
863         finish_packet(&packet);
864         return last;
865 }
866
867 static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
868 {
869         struct hfi1_ctxtdata *rcd;
870         u16 i;
871
872         /*
873          * For dynamically allocated kernel contexts (like vnic) switch
874          * interrupt handler only for that context. Otherwise, switch
875          * interrupt handler for all statically allocated kernel contexts.
876          */
877         if (ctxt >= dd->first_dyn_alloc_ctxt) {
878                 rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
879                 if (rcd) {
880                         rcd->do_interrupt =
881                                 &handle_receive_interrupt_nodma_rtail;
882                         hfi1_rcd_put(rcd);
883                 }
884                 return;
885         }
886
887         for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
888                 rcd = hfi1_rcd_get_by_index(dd, i);
889                 if (rcd)
890                         rcd->do_interrupt =
891                                 &handle_receive_interrupt_nodma_rtail;
892                 hfi1_rcd_put(rcd);
893         }
894 }
895
896 static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
897 {
898         struct hfi1_ctxtdata *rcd;
899         u16 i;
900
901         /*
902          * For dynamically allocated kernel contexts (like vnic) switch
903          * interrupt handler only for that context. Otherwise, switch
904          * interrupt handler for all statically allocated kernel contexts.
905          */
906         if (ctxt >= dd->first_dyn_alloc_ctxt) {
907                 rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
908                 if (rcd) {
909                         rcd->do_interrupt =
910                                 &handle_receive_interrupt_dma_rtail;
911                         hfi1_rcd_put(rcd);
912                 }
913                 return;
914         }
915
916         for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
917                 rcd = hfi1_rcd_get_by_index(dd, i);
918                 if (rcd)
919                         rcd->do_interrupt =
920                                 &handle_receive_interrupt_dma_rtail;
921                 hfi1_rcd_put(rcd);
922         }
923 }
924
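/*
 * Switch all statically allocated kernel contexts (and any vnic
 * contexts) back to the slow path interrupt handler; the control
 * context always uses it.
 */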
925 void set_all_slowpath(struct hfi1_devdata *dd)
926 {
927         struct hfi1_ctxtdata *rcd;
928         u16 i;
929
930         /* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
931         for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
932                 rcd = hfi1_rcd_get_by_index(dd, i);
933                 if (!rcd)
934                         continue;
935                 if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
936                         rcd->do_interrupt = &handle_receive_interrupt;
937
938                 hfi1_rcd_put(rcd);
939         }
940 }
941
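/*
 * set_armed_to_active() - queue the Armed->Active software transition.
 *
 * A non-SC15 packet received while the host link state is still Armed
 * means the hardware has already moved to Active, so schedule
 * linkstate_active_work to bring the driver state up to date.
 * Returns 1 when the work was queued and packet processing should stop.
 */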
942 static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
943                                       struct hfi1_packet *packet,
944                                       struct hfi1_devdata *dd)
945 {
946         struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
947         u8 etype = rhf_rcv_type(packet->rhf);
948         u8 sc = SC15_PACKET;
949
950         if (etype == RHF_RCV_TYPE_IB) {
951                 struct ib_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
952                                                            packet->rhf_addr);
953                 sc = hfi1_9B_get_sc5(hdr, packet->rhf);
954         } else if (etype == RHF_RCV_TYPE_BYPASS) {
955                 struct hfi1_16b_header *hdr = hfi1_get_16B_header(
956                                                 packet->rcd->dd,
957                                                 packet->rhf_addr);
958                 sc = hfi1_16B_get_sc(hdr);
959         }
960         if (sc != SC15_PACKET) {
961                 int hwstate = driver_lstate(rcd->ppd);
962
963                 if (hwstate != IB_PORT_ACTIVE) {
964                         dd_dev_info(dd,
965                                     "Unexpected link state %s\n",
966                                     opa_lstate_name(hwstate));
967                         return 0;
968                 }
969
970                 queue_work(rcd->ppd->link_wq, lsaw);
971                 return 1;
972         }
973         return 0;
974 }
975
976 /*
977  * handle_receive_interrupt - receive a packet
978  * @rcd: the context
979  *
980  * Called from interrupt handler for errors or receive interrupt.
981  * This is the slow path interrupt handler.
982  */
983 int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
984 {
985         struct hfi1_devdata *dd = rcd->dd;
986         u32 hdrqtail;
987         int needset, last = RCV_PKT_OK;
988         struct hfi1_packet packet;
989         int skip_pkt = 0;
990
991         /* Control context will always use the slow path interrupt handler */
992         needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;
993
994         init_packet(rcd, &packet);
995
996         if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
997                 u32 seq = rhf_rcv_seq(packet.rhf);
998
999                 if (seq != rcd->seq_cnt) {
1000                         last = RCV_PKT_DONE;
1001                         goto bail;
1002                 }
1003                 hdrqtail = 0;
1004         } else {
1005                 hdrqtail = get_rcvhdrtail(rcd);
1006                 if (packet.rhqoff == hdrqtail) {
1007                         last = RCV_PKT_DONE;
1008                         goto bail;
1009                 }
1010                 smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
1011
1012                 /*
1013                  * Control context can potentially receive an invalid
1014                  * rhf. Drop such packets.
1015                  */
1016                 if (rcd->ctxt == HFI1_CTRL_CTXT) {
1017                         u32 seq = rhf_rcv_seq(packet.rhf);
1018
1019                         if (seq != rcd->seq_cnt)
1020                                 skip_pkt = 1;
1021                 }
1022         }
1023
1024         prescan_rxq(rcd, &packet);
1025
1026         while (last == RCV_PKT_OK) {
1027                 if (unlikely(dd->do_drop &&
1028                              atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
1029                              DROP_PACKET_ON)) {
1030                         dd->do_drop = 0;
1031
1032                         /* On to the next packet */
1033                         packet.rhqoff += packet.rsize;
1034                         packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
1035                                           packet.rhqoff +
1036                                           dd->rhf_offset;
1037                         packet.rhf = rhf_to_cpu(packet.rhf_addr);
1038
1039                 } else if (skip_pkt) {
1040                         last = skip_rcv_packet(&packet, thread);
1041                         skip_pkt = 0;
1042                 } else {
1043                         /* Auto activate link on non-SC15 packet receive */
1044                         if (unlikely(rcd->ppd->host_link_state ==
1045                                      HLS_UP_ARMED) &&
1046                             set_armed_to_active(rcd, &packet, dd))
1047                                 goto bail;
1048                         last = process_rcv_packet(&packet, thread);
1049                 }
1050
1051                 if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
1052                         u32 seq = rhf_rcv_seq(packet.rhf);
1053
1054                         if (++rcd->seq_cnt > 13)
1055                                 rcd->seq_cnt = 1;
1056                         if (seq != rcd->seq_cnt)
1057                                 last = RCV_PKT_DONE;
1058                         if (needset) {
1059                                 dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
1060                                 set_nodma_rtail(dd, rcd->ctxt);
1061                                 needset = 0;
1062                         }
1063                 } else {
1064                         if (packet.rhqoff == hdrqtail)
1065                                 last = RCV_PKT_DONE;
1066                         /*
1067                          * Control context can potentially receive an invalid
1068                          * rhf. Drop such packets.
1069                          */
1070                         if (rcd->ctxt == HFI1_CTRL_CTXT) {
1071                                 u32 seq = rhf_rcv_seq(packet.rhf);
1072
1073                                 if (++rcd->seq_cnt > 13)
1074                                         rcd->seq_cnt = 1;
1075                                 if (!last && (seq != rcd->seq_cnt))
1076                                         skip_pkt = 1;
1077                         }
1078
1079                         if (needset) {
1080                                 dd_dev_info(dd,
1081                                             "Switching to DMA_RTAIL\n");
1082                                 set_dma_rtail(dd, rcd->ctxt);
1083                                 needset = 0;
1084                         }
1085                 }
1086
1087                 process_rcv_update(last, &packet);
1088         }
1089
1090         process_rcv_qp_work(&packet);
1091         rcd->head = packet.rhqoff;
1092
1093 bail:
1094         /*
1095          * Always write head at end, and setup rcv interrupt, even
1096          * if no packets were processed.
1097          */
1098         finish_packet(&packet);
1099         return last;
1100 }
1101
1102 /*
1103  * We may discover in the interrupt that the hardware link state has
1104  * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
1105  * and we need to update the driver's notion of the link state.  We cannot
1106  * run set_link_state from interrupt context, so we queue this function on
1107  * a workqueue.
1108  *
1109  * We delay the regular interrupt processing until after the state changes
1110  * so that the link will be in the correct state by the time any application
1111  * we wake up attempts to send a reply to any message it received.
1112  * (Subsequent receive interrupts may possibly force the wakeup before we
1113  * update the link state.)
1114  *
1115  * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
1116  * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
1117  * so we're safe from use-after-free of the rcd.
1118  */
1119 void receive_interrupt_work(struct work_struct *work)
1120 {
1121         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
1122                                                   linkstate_active_work);
1123         struct hfi1_devdata *dd = ppd->dd;
1124         struct hfi1_ctxtdata *rcd;
1125         u16 i;
1126
1127         /* Received non-SC15 packet implies neighbor_normal */
1128         ppd->neighbor_normal = 1;
1129         set_link_state(ppd, HLS_UP_ACTIVE);
1130
1131         /*
1132          * Interrupt all statically allocated kernel contexts that could
1133          * have had an interrupt during auto activation.
1134          */
1135         for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) {
1136                 rcd = hfi1_rcd_get_by_index(dd, i);
1137                 if (rcd)
1138                         force_recv_intr(rcd);
1139                 hfi1_rcd_put(rcd);
1140         }
1141 }
1142
1143 /*
1144  * Convert a given MTU size to the on-wire MAD packet enumeration.
1145  * Return 'default_if_bad' if the size is invalid.
1146  */
1147 int mtu_to_enum(u32 mtu, int default_if_bad)
1148 {
1149         switch (mtu) {
1150         case     0: return OPA_MTU_0;
1151         case   256: return OPA_MTU_256;
1152         case   512: return OPA_MTU_512;
1153         case  1024: return OPA_MTU_1024;
1154         case  2048: return OPA_MTU_2048;
1155         case  4096: return OPA_MTU_4096;
1156         case  8192: return OPA_MTU_8192;
1157         case 10240: return OPA_MTU_10240;
1158         }
1159         return default_if_bad;
1160 }
1161
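/*
 * Convert an OPA MTU enumeration to its size in bytes.
 * Returns 0xffff for an unrecognized enumeration.
 */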
1162 u16 enum_to_mtu(int mtu)
1163 {
1164         switch (mtu) {
1165         case OPA_MTU_0:     return 0;
1166         case OPA_MTU_256:   return 256;
1167         case OPA_MTU_512:   return 512;
1168         case OPA_MTU_1024:  return 1024;
1169         case OPA_MTU_2048:  return 2048;
1170         case OPA_MTU_4096:  return 4096;
1171         case OPA_MTU_8192:  return 8192;
1172         case OPA_MTU_10240: return 10240;
1173         default: return 0xffff;
1174         }
1175 }
1176
1177 /*
1178  * set_mtu - set the MTU
1179  * @ppd: the per port data
1180  *
1181  * We can handle "any" incoming size; the issue here is whether we
1182  * need to restrict our outgoing size.  We do not deal with what happens
1183  * to programs that are already running when the size changes.
1184  */
1185 int set_mtu(struct hfi1_pportdata *ppd)
1186 {
1187         struct hfi1_devdata *dd = ppd->dd;
1188         int i, drain, ret = 0, is_up = 0;
1189
1190         ppd->ibmtu = 0;
1191         for (i = 0; i < ppd->vls_supported; i++)
1192                 if (ppd->ibmtu < dd->vld[i].mtu)
1193                         ppd->ibmtu = dd->vld[i].mtu;
1194         ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);
1195
1196         mutex_lock(&ppd->hls_lock);
1197         if (ppd->host_link_state == HLS_UP_INIT ||
1198             ppd->host_link_state == HLS_UP_ARMED ||
1199             ppd->host_link_state == HLS_UP_ACTIVE)
1200                 is_up = 1;
1201
1202         drain = !is_ax(dd) && is_up;
1203
1204         if (drain)
1205                 /*
1206                  * MTU is specified per-VL. To ensure that no packet gets
1207                  * stuck (due, e.g., to the MTU for the packet's VL being
1208                  * reduced), empty the per-VL FIFOs before adjusting MTU.
1209                  */
1210                 ret = stop_drain_data_vls(dd);
1211
1212         if (ret) {
1213                 dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
1214                            __func__);
1215                 goto err;
1216         }
1217
1218         hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);
1219
1220         if (drain)
1221                 open_fill_data_vls(dd); /* reopen all VLs */
1222
1223 err:
1224         mutex_unlock(&ppd->hls_lock);
1225
1226         return ret;
1227 }
1228
1229 int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
1230 {
1231         struct hfi1_devdata *dd = ppd->dd;
1232
1233         ppd->lid = lid;
1234         ppd->lmc = lmc;
1235         hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);
1236
1237         dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);
1238
1239         return 0;
1240 }
1241
1242 void shutdown_led_override(struct hfi1_pportdata *ppd)
1243 {
1244         struct hfi1_devdata *dd = ppd->dd;
1245
1246         /*
1247          * This pairs with the memory barrier in hfi1_start_led_override to
1248          * ensure that we read the correct state of LED beaconing represented
1249          * by led_override_timer_active
1250          */
1251         smp_rmb();
1252         if (atomic_read(&ppd->led_override_timer_active)) {
1253                 del_timer_sync(&ppd->led_override_timer);
1254                 atomic_set(&ppd->led_override_timer_active, 0);
1255                 /* Ensure the atomic_set is visible to all CPUs */
1256                 smp_wmb();
1257         }
1258
1259         /* Hand control of the LED to the DC for normal operation */
1260         write_csr(dd, DCC_CFG_LED_CNTRL, 0);
1261 }
1262
1263 static void run_led_override(struct timer_list *t)
1264 {
1265         struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer);
1266         struct hfi1_devdata *dd = ppd->dd;
1267         unsigned long timeout;
1268         int phase_idx;
1269
1270         if (!(dd->flags & HFI1_INITTED))
1271                 return;
1272
1273         phase_idx = ppd->led_override_phase & 1;
1274
1275         setextled(dd, phase_idx);
1276
1277         timeout = ppd->led_override_vals[phase_idx];
1278
1279         /* Set up for next phase */
1280         ppd->led_override_phase = !ppd->led_override_phase;
1281
1282         mod_timer(&ppd->led_override_timer, jiffies + timeout);
1283 }
1284
1285 /*
1286  * To have the LED blink in a particular pattern, provide timeon and timeoff
1287  * in milliseconds.
1288  * To turn off custom blinking and return to normal operation, use
1289  * shutdown_led_override()
1290  */
1291 void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
1292                              unsigned int timeoff)
1293 {
1294         if (!(ppd->dd->flags & HFI1_INITTED))
1295                 return;
1296
1297         /* Convert to jiffies for direct use in timer */
1298         ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
1299         ppd->led_override_vals[1] = msecs_to_jiffies(timeon);
1300
1301         /* Arbitrarily start from LED on phase */
1302         ppd->led_override_phase = 1;
1303
1304         /*
1305          * If the timer has not already been started, do so. Use a "quick"
1306          * timeout so the handler will be called soon to look at our request.
1307          */
1308         if (!timer_pending(&ppd->led_override_timer)) {
1309                 timer_setup(&ppd->led_override_timer, run_led_override, 0);
1310                 ppd->led_override_timer.expires = jiffies + 1;
1311                 add_timer(&ppd->led_override_timer);
1312                 atomic_set(&ppd->led_override_timer_active, 1);
1313                 /* Ensure the atomic_set is visible to all CPUs */
1314                 smp_wmb();
1315         }
1316 }
1317
1318 /**
1319  * hfi1_reset_device - reset the chip if possible
1320  * @unit: the device to reset
1321  *
1322  * Whether or not reset is successful, we attempt to re-initialize the chip
1323  * (that is, much like a driver unload/reload).  We clear the INITTED flag
1324  * so that the various entry points will fail until we reinitialize.  For
1325  * now, we only allow this if no user contexts are open that use chip resources
1326  */
1327 int hfi1_reset_device(int unit)
1328 {
1329         int ret;
1330         struct hfi1_devdata *dd = hfi1_lookup(unit);
1331         struct hfi1_pportdata *ppd;
1332         int pidx;
1333
1334         if (!dd) {
1335                 ret = -ENODEV;
1336                 goto bail;
1337         }
1338
1339         dd_dev_info(dd, "Reset on unit %u requested\n", unit);
1340
1341         if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) {
1342                 dd_dev_info(dd,
1343                             "Invalid unit number %u or not initialized or not present\n",
1344                             unit);
1345                 ret = -ENXIO;
1346                 goto bail;
1347         }
1348
1349         /* If there are any user/vnic contexts, we cannot reset */
1350         mutex_lock(&hfi1_mutex);
1351         if (dd->rcd)
1352                 if (hfi1_stats.sps_ctxts) {
1353                         mutex_unlock(&hfi1_mutex);
1354                         ret = -EBUSY;
1355                         goto bail;
1356                 }
1357         mutex_unlock(&hfi1_mutex);
1358
1359         for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1360                 ppd = dd->pport + pidx;
1361
1362                 shutdown_led_override(ppd);
1363         }
1364         if (dd->flags & HFI1_HAS_SEND_DMA)
1365                 sdma_exit(dd);
1366
1367         hfi1_reset_cpu_counters(dd);
1368
1369         ret = hfi1_init(dd, 1);
1370
1371         if (ret)
1372                 dd_dev_err(dd,
1373                            "Reinitialize unit %u after reset failed with %d\n",
1374                            unit, ret);
1375         else
1376                 dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
1377                             unit);
1378
1379 bail:
1380         return ret;
1381 }
1382
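/* Locate the 9B header in the header queue entry and record its length. */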
1383 static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
1384 {
1385         packet->hdr = (struct hfi1_ib_message_header *)
1386                         hfi1_get_msgheader(packet->rcd->dd,
1387                                            packet->rhf_addr);
1388         packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
1389 }
1390
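/*
 * Sanity checks for an ingress 16B (bypass) packet: LIDs must be
 * non-zero, a unicast non-permissive DLID must match this port,
 * multicast packets may not use SC15, and packets addressed to the
 * permissive DLID must use SC15.
 */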
1391 static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
1392 {
1393         struct hfi1_pportdata *ppd = packet->rcd->ppd;
1394
1395         /* slid and dlid cannot be 0 */
1396         if ((!packet->slid) || (!packet->dlid))
1397                 return -EINVAL;
1398
1399         /* Compare port lid with incoming packet dlid */
1400         if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
1401             (packet->dlid !=
1402                 opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) {
1403                 if (packet->dlid != ppd->lid)
1404                         return -EINVAL;
1405         }
1406
1407         /* No multicast packets with SC15 */
1408         if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF))
1409                 return -EINVAL;
1410
1411         /* Packets with permissive DLID always on SC15 */
1412         if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE),
1413                                          16B)) &&
1414             (packet->sc != 0xF))
1415                 return -EINVAL;
1416
1417         return 0;
1418 }
1419
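/*
 * hfi1_setup_9B_packet() - parse a 9B packet header.
 *
 * Locates the BTH (and GRH, if present), validates the GRH, and caches
 * the commonly used header fields in the hfi1_packet.
 */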
1420 static int hfi1_setup_9B_packet(struct hfi1_packet *packet)
1421 {
1422         struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
1423         struct ib_header *hdr;
1424         u8 lnh;
1425
1426         hfi1_setup_ib_header(packet);
1427         hdr = packet->hdr;
1428
1429         lnh = ib_get_lnh(hdr);
1430         if (lnh == HFI1_LRH_BTH) {
1431                 packet->ohdr = &hdr->u.oth;
1432                 packet->grh = NULL;
1433         } else if (lnh == HFI1_LRH_GRH) {
1434                 u32 vtf;
1435
1436                 packet->ohdr = &hdr->u.l.oth;
1437                 packet->grh = &hdr->u.l.grh;
1438                 if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
1439                         goto drop;
1440                 vtf = be32_to_cpu(packet->grh->version_tclass_flow);
1441                 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
1442                         goto drop;
1443         } else {
1444                 goto drop;
1445         }
1446
1447         /* Query commonly used fields from packet header */
1448         packet->payload = packet->ebuf;
1449         packet->opcode = ib_bth_get_opcode(packet->ohdr);
1450         packet->slid = ib_get_slid(hdr);
1451         packet->dlid = ib_get_dlid(hdr);
1452         if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
1453                      (packet->dlid != be16_to_cpu(IB_LID_PERMISSIVE))))
1454                 packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
1455                                 be16_to_cpu(IB_MULTICAST_LID_BASE);
1456         packet->sl = ib_get_sl(hdr);
1457         packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
1458         packet->pad = ib_bth_get_pad(packet->ohdr);
1459         packet->extra_byte = 0;
1460         packet->pkey = ib_bth_get_pkey(packet->ohdr);
1461         packet->migrated = ib_bth_is_migration(packet->ohdr);
1462
1463         return 0;
1464 drop:
1465         ibp->rvp.n_pkt_drops++;
1466         return -EINVAL;
1467 }
1468
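/*
 * Parse a 16B (bypass) packet: locate the packet headers from the L4
 * field, validate any GRH, cache the commonly used header fields, and
 * run the bypass ingress checks.  Returns 0 on success, or -EINVAL
 * after counting the drop.
 */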
1469 static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
1470 {
1471         /*
1472          * Bypass packets have a different header/payload split
1473          * compared to an IB packet.
1474          * The current split places the first 16 bytes of the actual
1475          * header in the header buffer and the remainder in the
1476          * eager buffer. 16 bytes was chosen because the hfi1 driver
1477          * only supports 16B bypass packets, and that split is enough
1478          * to receive the entire LRH.
1479          */
1480
1481         struct hfi1_ctxtdata *rcd = packet->rcd;
1482         struct hfi1_pportdata *ppd = rcd->ppd;
1483         struct hfi1_ibport *ibp = &ppd->ibport_data;
1484         u8 l4;
1485         u8 grh_len;
1486
1487         packet->hdr = (struct hfi1_16b_header *)
1488                         hfi1_get_16B_header(packet->rcd->dd,
1489                                             packet->rhf_addr);
1490         packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
1491
1492         l4 = hfi1_16B_get_l4(packet->hdr);
1493         if (l4 == OPA_16B_L4_IB_LOCAL) {
1494                 grh_len = 0;
1495                 packet->ohdr = packet->ebuf;
1496                 packet->grh = NULL;
1497         } else if (l4 == OPA_16B_L4_IB_GLOBAL) {
1498                 u32 vtf;
1499
1500                 grh_len = sizeof(struct ib_grh);
1501                 packet->ohdr = packet->ebuf + grh_len;
1502                 packet->grh = packet->ebuf;
1503                 if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
1504                         goto drop;
1505                 vtf = be32_to_cpu(packet->grh->version_tclass_flow);
1506                 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
1507                         goto drop;
1508         } else {
1509                 goto drop;
1510         }
1511
1512         /* Query commonly used fields from packet header */
1513         packet->opcode = ib_bth_get_opcode(packet->ohdr);
1514         /* hdr_len_by_opcode already has an IB LRH factored in */
1515         packet->hlen = hdr_len_by_opcode[packet->opcode] +
1516                 (LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
1517         packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
1518         packet->slid = hfi1_16B_get_slid(packet->hdr);
1519         packet->dlid = hfi1_16B_get_dlid(packet->hdr);
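        /*
         * Extend a 16B-encoded multicast DLID into the full OPA
         * multicast LID range.
         */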
1520         if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
1521                 packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
1522                                 opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR),
1523                                             16B);
1524         packet->sc = hfi1_16B_get_sc(packet->hdr);
1525         packet->sl = ibp->sc_to_sl[packet->sc];
1526         packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
1527         packet->extra_byte = SIZE_OF_LT;
1528         packet->pkey = hfi1_16B_get_pkey(packet->hdr);
1529         packet->migrated = opa_bth_is_migration(packet->ohdr);
1530
1531         if (hfi1_bypass_ingress_pkt_check(packet))
1532                 goto drop;
1533
1534         return 0;
1535 drop:
1536         hfi1_cdbg(PKT, "%s: packet dropped\n", __func__);
1537         ibp->rvp.n_pkt_drops++;
1538         return -EINVAL;
1539 }
1540
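/*
 * Handle a packet whose RHF has error flags set: let rcv_hdrerr()
 * process the header error, then log the decoded RHF error bits for
 * the receive context.
 */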
1541 void handle_eflags(struct hfi1_packet *packet)
1542 {
1543         struct hfi1_ctxtdata *rcd = packet->rcd;
1544         u32 rte = rhf_rcv_type_err(packet->rhf);
1545
1546         rcv_hdrerr(rcd, rcd->ppd, packet);
1547         if (rhf_err_flags(packet->rhf))
1548                 dd_dev_err(rcd->dd,
1549                            "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
1550                            rcd->ctxt, packet->rhf,
1551                            packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
1552                            packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
1553                            packet->rhf & RHF_DC_ERR ? "dc " : "",
1554                            packet->rhf & RHF_TID_ERR ? "tid " : "",
1555                            packet->rhf & RHF_LEN_ERR ? "len " : "",
1556                            packet->rhf & RHF_ECC_ERR ? "ecc " : "",
1557                            packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
1558                            packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
1559                            rte);
1560 }
1561
1562 /*
1563  * The following functions are called by the interrupt handler. They are type
1564  * specific handlers for each packet type.
1565  */
1566 int process_receive_ib(struct hfi1_packet *packet)
1567 {
1568         if (unlikely(hfi1_dbg_fault_packet(packet)))
1569                 return RHF_RCV_CONTINUE;
1570
1571         if (hfi1_setup_9B_packet(packet))
1572                 return RHF_RCV_CONTINUE;
1573
1574         trace_hfi1_rcvhdr(packet);
1575
1576         if (unlikely(rhf_err_flags(packet->rhf))) {
1577                 handle_eflags(packet);
1578                 return RHF_RCV_CONTINUE;
1579         }
1580
1581         hfi1_ib_rcv(packet);
1582         return RHF_RCV_CONTINUE;
1583 }
1584
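/*
 * A packet is a VNIC packet if it arrived on a VNIC receive context
 * (steered there by RSM) or if its 16B L2/L4 fields mark it as an OPA
 * Ethernet (VNIC) packet.
 */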
1585 static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
1586 {
1587         /* Packet received in VNIC context via RSM */
1588         if (packet->rcd->is_vnic)
1589                 return true;
1590
1591         if ((hfi1_16B_get_l2(packet->ebuf) == OPA_16B_L2_TYPE) &&
1592             (hfi1_16B_get_l4(packet->ebuf) == OPA_16B_L4_ETHR))
1593                 return true;
1594
1595         return false;
1596 }
1597
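/*
 * Receive handler for bypass (non-9B) packets: VNIC packets go to the
 * VNIC receive path, 16B packets to the 16B verbs receive path, and
 * anything else is counted and reported as an unsupported L2 type.
 */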
1598 int process_receive_bypass(struct hfi1_packet *packet)
1599 {
1600         struct hfi1_devdata *dd = packet->rcd->dd;
1601
1602         if (hfi1_is_vnic_packet(packet)) {
1603                 hfi1_vnic_bypass_rcv(packet);
1604                 return RHF_RCV_CONTINUE;
1605         }
1606
1607         if (hfi1_setup_bypass_packet(packet))
1608                 return RHF_RCV_CONTINUE;
1609
1610         trace_hfi1_rcvhdr(packet);
1611
1612         if (unlikely(rhf_err_flags(packet->rhf))) {
1613                 handle_eflags(packet);
1614                 return RHF_RCV_CONTINUE;
1615         }
1616
1617         if (hfi1_16B_get_l2(packet->hdr) == OPA_16B_L2_TYPE) {
1618                 hfi1_16B_rcv(packet);
1619         } else {
1620                 dd_dev_err(dd,
1621                            "Bypass packets other than 16B are not supported in normal operation. Dropping\n");
1622                 incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
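                /*
                 * Latch the first two flits of the offending packet for
                 * later error-info reporting, unless an earlier error
                 * is still recorded there.
                 */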
1623                 if (!(dd->err_info_rcvport.status_and_code &
1624                       OPA_EI_STATUS_SMASK)) {
1625                         u64 *flits = packet->ebuf;
1626
1627                         if (flits && !(packet->rhf & RHF_LEN_ERR)) {
1628                                 dd->err_info_rcvport.packet_flit1 = flits[0];
1629                                 dd->err_info_rcvport.packet_flit2 =
1630                                         packet->tlen > sizeof(flits[0]) ?
1631                                         flits[1] : 0;
1632                         }
1633                         dd->err_info_rcvport.status_and_code |=
1634                                 (OPA_EI_STATUS_SMASK | BAD_L2_ERR);
1635                 }
1636         }
1637         return RHF_RCV_CONTINUE;
1638 }
1639
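/*
 * Receive handler for packets the hardware flagged in error.  KDETH
 * HCRC errors may be ignored while fault-injection error suppression
 * is active; everything else has its error flags reported and is
 * dropped.
 */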
1640 int process_receive_error(struct hfi1_packet *packet)
1641 {
1642         /* KHdrHCRCErr -- KDETH packet with a bad HCRC */
1643         if (unlikely(
1644                  hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
1645                  rhf_rcv_type_err(packet->rhf) == 3))
1646                 return RHF_RCV_CONTINUE;
1647
1648         hfi1_setup_ib_header(packet);
1649         handle_eflags(packet);
1650
1651         if (unlikely(rhf_err_flags(packet->rhf)))
1652                 dd_dev_err(packet->rcd->dd,
1653                            "Unhandled error packet received. Dropping.\n");
1654
1655         return RHF_RCV_CONTINUE;
1656 }
1657
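/*
 * There is no kernel handler for expected (TID) KDETH packets; report
 * any RHF errors and drop the packet.
 */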
1658 int kdeth_process_expected(struct hfi1_packet *packet)
1659 {
1660         if (unlikely(hfi1_dbg_fault_packet(packet)))
1661                 return RHF_RCV_CONTINUE;
1662
1663         hfi1_setup_ib_header(packet);
1664         if (unlikely(rhf_err_flags(packet->rhf)))
1665                 handle_eflags(packet);
1666
1667         dd_dev_err(packet->rcd->dd,
1668                    "Unhandled expected packet received. Dropping.\n");
1669         return RHF_RCV_CONTINUE;
1670 }
1671
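/*
 * Likewise, eager KDETH packets have no kernel handler; report any RHF
 * errors and drop the packet.
 */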
1672 int kdeth_process_eager(struct hfi1_packet *packet)
1673 {
1674         hfi1_setup_ib_header(packet);
1675         if (unlikely(rhf_err_flags(packet->rhf)))
1676                 handle_eflags(packet);
1677         if (unlikely(hfi1_dbg_fault_packet(packet)))
1678                 return RHF_RCV_CONTINUE;
1679
1680         dd_dev_err(packet->rcd->dd,
1681                    "Unhandled eager packet received. Dropping.\n");
1682         return RHF_RCV_CONTINUE;
1683 }
1684
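/*
 * Catch-all for RHF receive types that have no dedicated handler.
 */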
1685 int process_receive_invalid(struct hfi1_packet *packet)
1686 {
1687         dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
1688                    rhf_rcv_type(packet->rhf));
1689         return RHF_RCV_CONTINUE;
1690 }
1691
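/*
 * Dump the state of a receive context to a seq_file (used by the
 * debugfs code): print the header queue geometry and head/tail, then
 * walk the queue and print the opcode, QPN and PSN of each 9B entry.
 */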
1692 void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
1693 {
1694         struct hfi1_packet packet;
1695         struct ps_mdata mdata;
1696
1697         seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s head %llu tail %llu\n",
1698                    rcd->ctxt, rcd->rcvhdrq_cnt, rcd->rcvhdrqentsize,
1699                    HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
1700                    "dma_rtail" : "nodma_rtail",
1701                    read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
1702                    RCV_HDR_HEAD_HEAD_MASK,
1703                    read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL));
1704
1705         init_packet(rcd, &packet);
1706         init_ps_mdata(&mdata, &packet);
1707
1708         while (1) {
1709                 struct hfi1_devdata *dd = rcd->dd;
1710                 __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
1711                                          dd->rhf_offset;
1712                 struct ib_header *hdr;
1713                 u64 rhf = rhf_to_cpu(rhf_addr);
1714                 u32 etype = rhf_rcv_type(rhf), qpn;
1715                 u8 opcode;
1716                 u32 psn;
1717                 u8 lnh;
1718
1719                 if (ps_done(&mdata, rhf, rcd))
1720                         break;
1721
1722                 if (ps_skip(&mdata, rhf, rcd))
1723                         goto next;
1724
1725                 if (etype > RHF_RCV_TYPE_IB)
1726                         goto next;
1727
1728                 packet.hdr = hfi1_get_msgheader(dd, rhf_addr);
1729                 hdr = packet.hdr;
1730
1731                 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
1732
1733                 if (lnh == HFI1_LRH_BTH)
1734                         packet.ohdr = &hdr->u.oth;
1735                 else if (lnh == HFI1_LRH_GRH)
1736                         packet.ohdr = &hdr->u.l.oth;
1737                 else
1738                         goto next; /* just in case */
1739
1740                 opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24);
1741                 qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK;
1742                 psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2]));
1743
1744                 seq_printf(s, "\tEnt %u: opcode 0x%x, qpn 0x%x, psn 0x%x\n",
1745                            mdata.ps_head, opcode, qpn, psn);
1746 next:
1747                 update_ps_mdata(&mdata, rcd);
1748         }
1749 }