/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/xarray.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <linux/numa.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

#define NUM_IB_PORTS 1

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this many blocks (in percent of allocated blocks, 0 is off)");
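
/*
 * Example (illustrative only, not from this file): the parameters above
 * are load-time settings, e.g.
 *
 *	modprobe hfi1 num_user_contexts=16 krcvqs=2,2,2 rcvhdrcnt=2048
 *
 * With 0444/S_IRUGO permissions they remain visible, read-only, under
 * /sys/module/hfi1/parameters/ once the module is loaded.
 */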

static inline u64 encode_rcv_header_entry_size(u16 size);

DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
{
	struct hfi1_ctxtdata *rcd;
	int ret;

	/* Control context has to be always 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
	if (ret < 0) {
		dd_dev_err(dd, "Kernel receive context allocation failed\n");
		return ret;
	}

	/*
	 * Set up the kernel context flags here and now because they use
	 * default values for all receive side memories.  User contexts will
	 * be handled as they are created.
	 */
	rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	/* Control context must use DMA_RTAIL */
	if (rcd->ctxt == HFI1_CTRL_CTXT)
		rcd->flags |= HFI1_CAP_DMA_RTAIL;
	rcd->seq_cnt = 1;

	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
	if (!rcd->sc) {
		dd_dev_err(dd, "Kernel send context allocation failed\n");
		return -ENOMEM;
	}
	hfi1_init_ctxt(rcd->sc);

	return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
	u16 i;
	int ret;

	dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		return -ENOMEM;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		ret = hfi1_create_kctxt(dd, dd->pport);
		if (ret)
			goto bail;
	}

	return 0;
bail:
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
		hfi1_free_ctxt(dd->rcd[i]);

	/* All the contexts should be freed, free the array */
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
	kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - When reference is zero clean up.
 * @kref: pointer to an initialized rcd data structure
 *
 */
static void hfi1_rcd_free(struct kref *kref)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd =
		container_of(kref, struct hfi1_ctxtdata, kref);

	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
	rcd->dd->rcd[rcd->ctxt] = NULL;
	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

	hfi1_free_ctxtdata(rcd->dd, rcd);

	kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
	if (rcd)
		return kref_put(&rcd->kref, hfi1_rcd_free);

	return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 *
 * Return : reflect kref_get_unless_zero(), which returns non-zero on
 * increment, otherwise 0.
 */
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
	return kref_get_unless_zero(&rcd->kref);
}
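
/*
 * Typical use of the reference helpers above (sketch, not a new API):
 *
 *	rcd = hfi1_rcd_get_by_index(dd, ctxt);
 *	if (rcd) {
 *		... use rcd ...
 *		hfi1_rcd_put(rcd);
 *	}
 *
 * The "final" put, done via hfi1_free_ctxt(), drops the kref_init()
 * reference and triggers hfi1_rcd_free().
 */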

/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, we are EBUSY.
 *
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata *rcd, u16 *index)
{
	unsigned long flags;
	u16 ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt < dd->num_rcv_contexts) {
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		hfi1_rcd_init(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (ctxt >= dd->num_rcv_contexts)
		return -EBUSY;

	*index = ctxt;

	return 0;
}

/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * rcd array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt)
{
	if (ctxt < dd->num_rcv_contexts)
		return hfi1_rcd_get_by_index(dd, ctxt);

	return NULL;
}

/**
 * hfi1_rcd_get_by_index
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more index, get the protecting spinlock and then increment the
 * kref.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd = NULL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd[ctxt]) {
		rcd = dd->rcd[ctxt];
		if (!hfi1_rcd_get(rcd))
			rcd = NULL;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	return rcd;
}

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **context)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;
		u16 ctxt;
		int ret;

		ret = allocate_rcd_index(dd, rcd, &ctxt);
		if (ret) {
			*context = NULL;
			kfree(rcd);
			return ret;
		}

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		hfi1_exp_tid_group_init(rcd);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;
		rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;

		mutex_init(&rcd->exp_mutex);
		spin_lock_init(&rcd->exp_lock);
		INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
		INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);

		hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous context would have
		 * taken and we have to account for any extra groups assigned
		 * to the static (kernel) or dynamic (vnic/user) contexts.
		 */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;
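
		/*
		 * Worked example (assumed numbers, not hardware values):
		 * with ngroups = 8, group_size = 8 and kctxt_ngroups = 0,
		 * kernel context 2 gets base = 2 * 8 = 16 groups, i.e.
		 * eager_base = 16 * 8 = 128 RcvArray entries into the array.
		 */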

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		rcd->rhf_offset =
			rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
		/*
		 * Simple Eager buffer allocation: we have already pre-allocated
		 * the number of RcvArray entry groups. Each ctxtdata structure
		 * holds the number of groups for that context.
		 *
		 * To follow CSR requirements and maintain cacheline alignment,
		 * make sure all sizes and bases are multiples of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);
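
		/*
		 * Worked example: with rcv_array_groups = 256 and
		 * group_size = 8 (assumed values), max_entries = 2048; the
		 * default rcvarr_split of 25 then yields rcvtids = 512,
		 * already a multiple of the group size.
		 */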

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.buffers),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.rcvtids),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %u\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		/* Applicable only for statically created kernel contexts */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;

			/* Initialize TID flow generations for the context */
			hfi1_kern_init_ctxt_generations(rcd);
		}

		*context = rcd;
		return 0;
	}

bail:
	*context = NULL;
	hfi1_free_ctxt(rcd);
	return -ENOMEM;
}

/**
 * hfi1_free_ctxt - free context after its use
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
	hfi1_rcd_put(rcd);
}

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return a zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	if (size == 32)
		return 4;
	return 0; /* invalid */
}
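
/*
 * Worked example: the module default hdrq_entsize = 32 (128B entries)
 * encodes to 4; init_one() relies on a return of 0 here to reject an
 * unsupported entry size such as 8.
 */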

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits /sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}
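
/*
 * Worked example for the decode above: a CCT entry of cce = 0x8064
 * gives shift = (0x8064 & 0xc000) >> 14 = 2 and mult = 0x0064 = 100,
 * so the reload value becomes (max_pkt_time >> 2) * 100 before being
 * masked and shifted into the CSR field.
 */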

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}
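
/*
 * Timing example: with ccti_timer = 100 the timer above re-arms every
 * 1024 * 100 ns = 102.4 us, matching the 1.024 usec unit noted in the
 * comment.
 */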

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */
	ppd->prev_link_width = LINK_WIDTH_DEFAULT;
	/*
	 * There are C_VL_COUNT number of PortVLXmitWait counters.
	 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
	 */
	for (i = 0; i < C_VL_COUNT + 1; i++) {
		ppd->port_vl_xmit_wait_last[i] = 0;
		ppd->vl_xmit_flit_cnt[i] = 0;
	}

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

	if (loopback) {
		dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
			   !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:
	dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;
	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u64 rcvmask;
	u16 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		if (HFI1_CAP_IS_KSET(TID_RDMA))
			rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		sc_enable(rcd->sc);
		hfi1_rcd_put(rcd);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
				    "hfi%d_%d",
				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
				    WQ_MEM_RECLAIM,
				    HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
				    dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
		if (!ppd->link_wq) {
			/*
			 * Make the link workqueue single-threaded to enforce
			 * serialization.
			 */
			ppd->link_wq =
				alloc_workqueue(
				    "hfi_link_%d_%d",
				    WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
				    1, /* max_active */
				    dd->unit, pidx);
			if (!ppd->link_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * enable_general_intr() - Enable the IRQs that will be handled by the
 * general interrupt handler.
 * @dd: valid devdata
 *
 */
static void enable_general_intr(struct hfi1_devdata *dd)
{
	set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
	set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
	set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
	set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
	set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
	set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
	set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned long len;
	u16 i;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;
	dd->process_vnic_dma_send = hfi1_vnic_send_dma;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
							 sizeof(u64),
							 &dd->rcvhdrtail_dummy_dma,
							 GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (!lastfail)
			lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
		/* enable IRQ */
		hfi1_rcd_put(rcd);
	}

	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		enable_general_intr(dd);
		init_qsfp_int(dd);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	return xa_load(&hfi1_dev_table, unit);
}
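
/*
 * Example: unit numbers come from xa_alloc_irq() in hfi1_alloc_devdata(),
 * so hfi1_lookup(0) returns the devdata of the first allocated unit, or
 * NULL if that unit was never allocated.
 */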

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ctxtdata *rcd;
	unsigned pidx;
	int i;

	if (dd->flags & HFI1_SHUTDOWN)
		return;
	dd->flags |= HFI1_SHUTDOWN;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask and clean up interrupts */
	set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
	msix_clean_up_interrupts(dd);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++) {
			rcd = hfi1_rcd_get_by_index(dd, i);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
			hfi1_rcd_put(rcd);
		}
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	sdma_exit(dd);
}

/*
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u32 e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);
	rcd->egrbufs.rcvtids = NULL;

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].dma)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].dma);
	}
	kfree(rcd->egrbufs.buffers);
	rcd->egrbufs.alloced = 0;
	rcd->egrbufs.buffers = NULL;

	sc_free(rcd->sc);
	rcd->sc = NULL;

	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);

	rcd->subctxt_uregbase = NULL;
	rcd->subctxt_rcvegrbuf = NULL;
	rcd->subctxt_rcvhdr_base = NULL;
	rcd->opstats = NULL;
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_dev_table lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}

/**
 * hfi1_clean_devdata - cleans up per-unit data structure
 * @dd: pointer to a valid devdata structure
 *
 * It cleans up all data structures set up by hfi1_alloc_devdata().
 */
static void hfi1_clean_devdata(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	unsigned long flags;

	xa_lock_irqsave(&hfi1_dev_table, flags);
	__xa_erase(&hfi1_dev_table, dd->unit);
	ad = release_asic_data(dd);
	xa_unlock_irqrestore(&hfi1_dev_table, flags);

	finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	free_percpu(dd->tx_opstats);
	dd->int_counter = NULL;
	dd->rcv_limit = NULL;
	dd->send_schedule = NULL;
	dd->tx_opstats = NULL;
	kfree(dd->comp_vect);
	dd->comp_vect = NULL;
	sdma_clean(dd, dd->num_sdma);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
	struct hfi1_devdata *dd =
		container_of(kobj, struct hfi1_devdata, kobj);

	hfi1_clean_devdata(dd);
}

static struct kobj_type hfi1_devdata_type = {
	.release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	kobject_put(&dd->kobj);
}

/**
 * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
 * @pdev: Valid PCI device
 * @extra: How many bytes to alloc past the default
 *
 * Must be done via verbs allocator, because the verbs cleanup process
 * both does cleanup and free of the data structure.
 * "extra" is for chip-specific data.
 */
static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
					       size_t extra)
{
	struct hfi1_devdata *dd;
	int ret, nports;

	/* extra is * number of ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);
	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);
	dd->node = NUMA_NO_NODE;

	ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
			   GFP_KERNEL);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);

	/*
	 * Initialize all locks for the device. This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	mutex_init(&dd->dc8051_lock);
	init_waitqueue_head(&dd->event_queue);
	spin_lock_init(&dd->irq_src_lock);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
	if (!dd->tx_opstats) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
	if (!dd->comp_vect) {
		ret = -ENOMEM;
		goto bail;
	}

	kobject_init(&dd->kobj, &hfi1_devdata_type);
	return dd;

bail:
	hfi1_clean_devdata(dd);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
static void shutdown_one(struct pci_dev *);

#define DRIVER_LOAD_MSG		"Intel " DRIVER_NAME " loaded: "
#define PFX			DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.shutdown = shutdown_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * sanitize receive interrupt count, time must wait until after
	 * the hardware type is known
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	ret = opfn_init();
	if (ret < 0) {
		pr_err("Failed to allocate opfn_wq");
		goto bail_dev;
	}

	hfi1_compute_tid_rdma_flow_wt();
	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	hfi1_dbg_init();
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_dbg_exit();
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	opfn_exit();
	node_affinity_destroy_all();
	hfi1_dbg_exit();

	WARN_ON(!xa_empty(&hfi1_dev_table));
	dispose_firmware();	/* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state_protected(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			kfree_rcu(cc_state, rcu);
	}

	free_credit_return(dd);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 */
	for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];

		if (rcd) {
			hfi1_free_ctxt_rcv_groups(rcd);
			hfi1_free_ctxt(rcd);
		}
	}

	kfree(dd->rcd);
	dd->rcd = NULL;

	free_pio_map(dd);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);
	hfi1_comp_vectors_clean_up(dd);
	hfi1_dev_affinity_clean_up(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
{
	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		dd_dev_err(dd, "Receive header queue count too small\n");
		return -EINVAL;
	}

	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
		dd_dev_err(dd,
			   "Receive header queue count cannot be greater than %u\n",
			   HFI1_MAX_HDRQ_EGRBUF_CNT);
		return -EINVAL;
	}

	if (thecnt % HDRQ_INCREMENT) {
		dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
			   thecnt, HDRQ_INCREMENT);
		return -EINVAL;
	}

	return 0;
}
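
/*
 * Worked example: the default rcvhdrcnt of 2048 lies within
 * (HFI1_MIN_HDRQ_EGRBUF_CNT, HFI1_MAX_HDRQ_EGRBUF_CNT] and is a
 * multiple of HDRQ_INCREMENT, so it passes all three checks; a value
 * such as 2050 would fail the divisibility check (assuming an
 * increment larger than 2).
 */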

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate dev ids */
	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
	      ent->device == PCI_DEVICE_ID_INTEL1)) {
		dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
			ent->device);
		ret = -ENODEV;
		goto bail;
	}

	/* Allocate the dd so we can get to work */
	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
				sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		goto bail;
	}

	/* Validate some global module parameters */
	ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt);
	if (ret)
		goto bail;

	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
			   hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/* The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		dd_dev_info(dd, "Eager buffer size %u\n",
			    eager_buffer_size);
	} else {
		dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(dd);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	ret = hfi1_init_dd(dd);
	if (ret)
		goto clean_bail; /* error already printed */

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	/* setup vnic */
	hfi1_vnic_setup(dd);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  this should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		msix_clean_up_interrupts(dd);
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
			if (ppd->link_wq) {
				destroy_workqueue(ppd->link_wq);
				ppd->link_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		hfi1_vnic_cleanup(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail; /* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}

static void wait_for_clients(struct hfi1_devdata *dd)
{
	/*
	 * Remove the device init value and complete the device if there
	 * are no clients, or wait for active clients to finish.
	 */
	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	wait_for_completion(&dd->user_comp);
}

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);

	/* remove the /dev hfi1 interface */
	hfi1_device_remove(dd);

	/* wait for existing user space clients to finish */
	wait_for_clients(dd);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/* cleanup vnic */
	hfi1_vnic_cleanup(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	postinit_cleanup(dd);
}

static void shutdown_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	shutdown_device(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		gfp_t gfp_flags;

		amt = rcvhdrq_size(rcd);

		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			gfp_flags = GFP_KERNEL;
		else
			gfp_flags = GFP_USER;
		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
						  &rcd->rcvhdrq_dma,
						  gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
		    HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
								    PAGE_SIZE,
								    &rcd->rcvhdrqtailaddr_dma,
								    gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
		}
	}

	/*
	 * These values are per-context:
	 *	RcvHdrCnt
	 *	RcvHdrEntSize
	 *	RcvHdrSize
	 */
	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
			& RCV_HDR_CNT_CNT_MASK)
		<< RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK)
		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);

	/*
	 * Program dummy tail address for every receive context
	 * before enabling any receive context
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
			dd->rcvhdrtail_dummy_dma);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_dma);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}
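
/*
 * Worked example for the CSR programming above: with the defaults
 * rcvhdrcnt = 2048 and hdrq_entsize = 32, RCV_HDR_CNT is written with
 * 2048 >> HDRQ_SIZE_SHIFT and RCV_HDR_ENT_SIZE with
 * encode_rcv_header_entry_size(32) = 4.
 */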

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user
 * contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0;
	gfp_t gfp_flags;
	u16 order, idx = 0;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value. Here, we check against the
	 * MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffers sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));
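
	/*
	 * Worked example: for egrbufs.size = 1MB this picks
	 * max(round_mtu, rounddown_pow_of_two(1MB / 8)) = 128KB,
	 * assuming round_mtu is below 128KB.
	 */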

	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_alloc_coherent(&dd->pcidev->dev,
					   rcd->egrbufs.rcvtid_size,
					   &rcd->egrbufs.buffers[idx].dma,
					   gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
				rcd->egrbufs.buffers[idx].dma;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 *   - we are already using the lowest acceptable size
			 *   - we are using one-pkt-per-egr-buffer (this implies
			 *     that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				ret = -ENOMEM;
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed, don't
			 * fail everything but continue with the next lower
			 * size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}

			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].dma =
					rcd->egrbufs.buffers[j].dma + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].dma + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].dma +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced,
		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

	/*
	 * Set the context's rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
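	/*
	 * Worked example: with 512 allocated entries the threshold becomes
	 * rounddown_pow_of_two(256) = 256, keeping head updates on a
	 * power-of-two stride.
	 */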
	/*
	 * Compute the expected RcvArray entry base. This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail_rcvegrbuf_phys;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].dma, order);
		cond_resched();
	}

	return 0;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].dma);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].dma = 0;
		rcd->egrbufs.buffers[idx].len = 0;