drivers/infiniband/hw/irdma/icrdma_hw.c
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2017 - 2021 Intel Corporation */
#include "osdep.h"
#include "type.h"
#include "icrdma_hw.h"

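/*
 * PF register offsets for icrdma (E810-family) hardware, indexed by the
 * generic IRDMA_* register enum; icrdma_init_hw() adds each offset to
 * the mapped BAR to populate dev->hw_regs[].
 */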
static u32 icrdma_regs[IRDMA_MAX_REGS] = {
        PFPE_CQPTAIL,
        PFPE_CQPDB,
        PFPE_CCQPSTATUS,
        PFPE_CCQPHIGH,
        PFPE_CCQPLOW,
        PFPE_CQARM,
        PFPE_CQACK,
        PFPE_AEQALLOC,
        PFPE_CQPERRCODES,
        PFPE_WQEALLOC,
        GLINT_DYN_CTL(0),
        ICRDMA_DB_ADDR_OFFSET,

        GLPCI_LBARCTRL,
        GLPE_CPUSTATUS0,
        GLPE_CPUSTATUS1,
        GLPE_CPUSTATUS2,
        PFINT_AEQCTL,
        GLINT_CEQCTL(0),
        VSIQF_PE_CTL1(0),
        PFHMC_PDINV,
        GLHMC_VFPDINV(0),
        GLPE_CRITERR,
        GLINT_RATE(0),
};

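/*
 * Field masks and their corresponding bit positions for this hardware
 * generation; icrdma_init_hw() copies them into dev->hw_masks[] and
 * dev->hw_shifts[] for use by the common irdma code.
 */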
static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
        ICRDMA_CCQPSTATUS_CCQP_DONE,
        ICRDMA_CCQPSTATUS_CCQP_ERR,
        ICRDMA_CQPSQ_STAG_PDID,
        ICRDMA_CQPSQ_CQ_CEQID,
        ICRDMA_CQPSQ_CQ_CQID,
        ICRDMA_COMMIT_FPM_CQCNT,
};

static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
        ICRDMA_CCQPSTATUS_CCQP_DONE_S,
        ICRDMA_CCQPSTATUS_CCQP_ERR_S,
        ICRDMA_CQPSQ_STAG_PDID_S,
        ICRDMA_CQPSQ_CQ_CEQID_S,
        ICRDMA_CQPSQ_CQ_CQID_S,
        ICRDMA_COMMIT_FPM_CQCNT_S,
};

/**
 * icrdma_ena_irq - Enable interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 */
static void icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
{
        u32 val;
        u32 interval = 0;

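        /*
         * Apply interrupt moderation only to CEQ vectors; the AEQ
         * vector is left with a zero interval.
         */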
        if (dev->ceq_itr && dev->aeq->msix_idx != idx)
                interval = dev->ceq_itr >> 1; /* 2 usec units */
        val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0) |
              FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) |
              FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) |
              FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1);

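        /*
         * Newer generations index GLINT_DYN_CTL directly by MSI-X
         * vector; GEN_1 appears to map its first RDMA vector to
         * register 0, hence idx - 1 (a best-guess reading of the
         * GEN_1 register layout, not confirmed here).
         */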
        if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
                writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
        else
                writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
}

/**
 * icrdma_disable_irq - Disable interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 */
static void icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
{
        if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
                writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
        else
                writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
}

/**
 * icrdma_cfg_ceq - Configure CEQ interrupt
 * @dev: pointer to the device structure
 * @ceq_id: Completion Event Queue ID
 * @idx: vector index
 * @enable: true to enable, false to disable
 */
static void icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
                           bool enable)
{
        u32 reg_val;

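        /*
         * An ITR index of 3 is taken here to mean "no ITR" for this
         * hardware family (an assumption based on E800-series
         * conventions), i.e. the CEQ interrupt is not rate limited.
         */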
        reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) |
                  FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) |
                  FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 3);

        writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
}

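/*
 * IRQ ops published to the common irdma core. The generic
 * irdma_cfg_aeq helper is reused; the remaining callbacks are
 * icrdma-specific.
 */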
static const struct irdma_irq_ops icrdma_irq_ops = {
        .irdma_cfg_aeq = irdma_cfg_aeq,
        .irdma_cfg_ceq = icrdma_cfg_ceq,
        .irdma_dis_irq = icrdma_disable_irq,
        .irdma_en_irq = icrdma_ena_irq,
};

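/**
 * icrdma_init_hw - Initialize icrdma register maps and device attributes
 * @dev: pointer to the device structure
 */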
void icrdma_init_hw(struct irdma_sc_dev *dev)
{
        int i;
        u8 __iomem *hw_addr;

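        /*
         * Turn the register offsets into mapped addresses. The doorbell
         * entry is special-cased: with hw_addr set to NULL, the stored
         * value is the bare offset rather than an ioremapped address.
         */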
        for (i = 0; i < IRDMA_MAX_REGS; ++i) {
                hw_addr = dev->hw->hw_addr;

                if (i == IRDMA_DB_ADDR_OFFSET)
                        hw_addr = NULL;

                dev->hw_regs[i] = (u32 __iomem *)(hw_addr + icrdma_regs[i]);
        }
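        /* FPM ID range available to virtual functions on this device. */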
        dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
        dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID;

        for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
                dev->hw_shifts[i] = icrdma_shifts[i];

        for (i = 0; i < IRDMA_MAX_MASKS; ++i)
                dev->hw_masks[i] = icrdma_masks[i];

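        /* Cache the frequently used doorbell registers on the device. */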
        dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
        dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
        dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
        dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
        dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
        dev->irq_ops = &icrdma_irq_ops;
        dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
        dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
        dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
        dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;

        dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
        dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
                                                IRDMA_FEATURE_CQ_RESIZE;
}