1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2017 - 2021 Intel Corporation */
/* Per-register MMIO offset table for gen-2 (icrdma) hardware, indexed by the
 * IRDMA register enum; consumed by icrdma_init_hw() to fill dev->hw_regs.
 * NOTE(review): only part of the initializer is visible here -- the remaining
 * IRDMA_MAX_REGS entries are elsewhere in the file.
 */
7 static u32 icrdma_regs[IRDMA_MAX_REGS] = {
19 ICRDMA_DB_ADDR_OFFSET,
/* Gen-2 bit-field mask table, indexed by the IRDMA mask enum; copied into
 * dev->hw_masks by icrdma_init_hw() so common code can stay HW-revision
 * agnostic. NOTE(review): initializer appears partially elided in this view.
 */
34 static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
35 ICRDMA_CCQPSTATUS_CCQP_DONE,
36 ICRDMA_CCQPSTATUS_CCQP_ERR,
37 ICRDMA_CQPSQ_STAG_PDID,
38 ICRDMA_CQPSQ_CQ_CEQID,
40 ICRDMA_COMMIT_FPM_CQCNT,
/* Gen-2 bit-field shift table; entries parallel icrdma_masks[] one-to-one
 * (same enum index order) and are copied into dev->hw_shifts by
 * icrdma_init_hw(). Keep the two tables in sync when adding fields.
 */
43 static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
44 ICRDMA_CCQPSTATUS_CCQP_DONE_S,
45 ICRDMA_CCQPSTATUS_CCQP_ERR_S,
46 ICRDMA_CQPSQ_STAG_PDID_S,
47 ICRDMA_CQPSQ_CQ_CEQID_S,
48 ICRDMA_CQPSQ_CQ_CQID_S,
49 ICRDMA_COMMIT_FPM_CQCNT_S,
53 * icrdma_ena_irq - Enable interrupt
54 * @dev: pointer to the device structure
/* NOTE(review): @idx is the MSI-X vector index -- kernel-doc line for it is
 * not visible in this view; confirm it exists in the full comment. */
57 static void icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
/* Apply interrupt throttling for CEQ vectors only: the AEQ's own vector
 * (dev->aeq->msix_idx) is left at the default interval. The register takes
 * the interval in 2-usec units, hence the >> 1 of the usec value. */
62 if (dev->ceq_itr && dev->aeq->msix_idx != idx)
63 interval = dev->ceq_itr >> 1; /* 2 usec units */
/* ITR index 0, programmed interval, enable the vector, and clear any
 * pending-bit-array state in one write. */
64 val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0) |
65 FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) |
66 FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) |
67 FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1);
69 if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
70 writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
/* Gen-1 register array is offset by one vector relative to idx (presumably
 * the else branch of the check above -- the else keyword is not visible in
 * this view; confirm against the full source). */
72 writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
76 * icrdma_disable_irq - Disable interrupt
77 * @dev: pointer to the device structure
/* NOTE(review): @idx is the MSI-X vector index; writing 0 to GLINT_DYN_CTL
 * clears INTENA, masking the vector. Mirrors the idx vs idx-1 addressing
 * split in icrdma_ena_irq() for gen-2 vs gen-1 hardware. */
80 static void icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
82 if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
83 writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
/* Gen-1 path (else branch not visible in this view -- confirm). */
85 writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
89 * icrdma_cfg_ceq- Configure CEQ interrupt
90 * @dev: pointer to the device structure
91 * @ceq_id: Completion Event Queue ID
/* NOTE(review): @idx (MSI-X vector to route this CEQ to) lacks a visible
 * kernel-doc line here -- confirm it exists in the full comment. */
93 * @enable: True to enable, False disables
95 static void icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
/* Route the CEQ's cause to MSI-X vector idx and gate it with @enable.
 * ITR_INDX is set to 3, which on this hardware family is presumably the
 * "no ITR" setting (no throttling) -- confirm against the register spec. */
100 reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) |
101 FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) |
102 FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 3);
104 writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
/* IRQ operations for gen-2 (icrdma) hardware, installed into dev->irq_ops by
 * icrdma_init_hw(). AEQ configuration reuses the common irdma_cfg_aeq();
 * CEQ config and vector enable/disable use the icrdma_* variants above. */
107 static const struct irdma_irq_ops icrdma_irq_ops = {
108 .irdma_cfg_aeq = irdma_cfg_aeq,
109 .irdma_cfg_ceq = icrdma_cfg_ceq,
110 .irdma_dis_irq = icrdma_disable_irq,
111 .irdma_en_irq = icrdma_ena_irq,
/* icrdma_init_hw - populate @dev with gen-2 (icrdma) hardware parameters:
 * register addresses, field shifts/masks, doorbell pointers, IRQ ops, and
 * capability attributes. NOTE(review): the function body appears to continue
 * past this view (closing brace not visible). */
114 void icrdma_init_hw(struct irdma_sc_dev *dev)
/* Translate each register's offset into an ioremapped address. */
119 for (i = 0; i < IRDMA_MAX_REGS; ++i) {
120 hw_addr = dev->hw->hw_addr;
/* The doorbell entry is special-cased (handling elided in this view --
 * presumably it is an absolute offset not based at hw_addr; confirm). */
122 if (i == IRDMA_DB_ADDR_OFFSET)
125 dev->hw_regs[i] = (u32 __iomem *)(hw_addr + icrdma_regs[i]);
127 dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
128 dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID;
/* Copy the gen-2 shift/mask tables so common code is revision-agnostic. */
130 for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
131 dev->hw_shifts[i] = icrdma_shifts[i];
133 for (i = 0; i < IRDMA_MAX_MASKS; ++i)
134 dev->hw_masks[i] = icrdma_masks[i];
/* Cache frequently used doorbell registers as direct pointers. */
136 dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
137 dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
138 dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
139 dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
140 dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
141 dev->irq_ops = &icrdma_irq_ops;
/* Gen-2 capabilities: 4K/2M/1G page sizes and icrdma limits. */
142 dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
143 dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
144 dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
145 dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
147 dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
148 dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
149 IRDMA_FEATURE_CQ_RESIZE;