1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
4 #include <linux/acpi.h>
6 #include <linux/bitmap.h>
7 #include <linux/dma-mapping.h>
10 #include <linux/irqreturn.h>
11 #include <linux/log2.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/seq_file.h>
14 #include <linux/slab.h>
15 #include <linux/uacce.h>
16 #include <linux/uaccess.h>
17 #include <uapi/misc/uacce/hisi_qm.h>
18 #include <linux/hisi_acc_qm.h>
20 /* eq/aeq irq enable */
21 #define QM_VF_AEQ_INT_SOURCE 0x0
22 #define QM_VF_AEQ_INT_MASK 0x4
23 #define QM_VF_EQ_INT_SOURCE 0x8
24 #define QM_VF_EQ_INT_MASK 0xc
26 #define QM_IRQ_VECTOR_MASK GENMASK(15, 0)
27 #define QM_IRQ_TYPE_MASK GENMASK(15, 0)
28 #define QM_IRQ_TYPE_SHIFT 16
29 #define QM_ABN_IRQ_TYPE_MASK GENMASK(7, 0)
32 #define QM_MB_PING_ALL_VFS 0xffff
33 #define QM_MB_CMD_DATA_SHIFT 32
34 #define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
35 #define QM_MB_STATUS_MASK GENMASK(12, 9)
38 #define QM_SQ_HOP_NUM_SHIFT 0
39 #define QM_SQ_PAGE_SIZE_SHIFT 4
40 #define QM_SQ_BUF_SIZE_SHIFT 8
41 #define QM_SQ_SQE_SIZE_SHIFT 12
42 #define QM_SQ_PRIORITY_SHIFT 0
43 #define QM_SQ_ORDERS_SHIFT 4
44 #define QM_SQ_TYPE_SHIFT 8
45 #define QM_QC_PASID_ENABLE 0x1
46 #define QM_QC_PASID_ENABLE_SHIFT 7
48 #define QM_SQ_TYPE_MASK GENMASK(3, 0)
49 #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
52 #define QM_CQ_HOP_NUM_SHIFT 0
53 #define QM_CQ_PAGE_SIZE_SHIFT 4
54 #define QM_CQ_BUF_SIZE_SHIFT 8
55 #define QM_CQ_CQE_SIZE_SHIFT 12
56 #define QM_CQ_PHASE_SHIFT 0
57 #define QM_CQ_FLAG_SHIFT 1
59 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
60 #define QM_QC_CQE_SIZE 4
61 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
64 #define QM_EQE_AEQE_SIZE (2UL << 12)
65 #define QM_EQC_PHASE_SHIFT 16
67 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
68 #define QM_EQE_CQN_MASK GENMASK(15, 0)
70 #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
71 #define QM_AEQE_TYPE_SHIFT 17
72 #define QM_AEQE_CQN_MASK GENMASK(15, 0)
73 #define QM_CQ_OVERFLOW 0
74 #define QM_EQ_OVERFLOW 1
75 #define QM_CQE_ERROR 2
77 #define QM_XQ_DEPTH_SHIFT 16
78 #define QM_XQ_DEPTH_MASK GENMASK(15, 0)
80 #define QM_DOORBELL_CMD_SQ 0
81 #define QM_DOORBELL_CMD_CQ 1
82 #define QM_DOORBELL_CMD_EQ 2
83 #define QM_DOORBELL_CMD_AEQ 3
85 #define QM_DOORBELL_BASE_V1 0x340
86 #define QM_DB_CMD_SHIFT_V1 16
87 #define QM_DB_INDEX_SHIFT_V1 32
88 #define QM_DB_PRIORITY_SHIFT_V1 48
89 #define QM_PAGE_SIZE 0x0034
90 #define QM_QP_DB_INTERVAL 0x10000
92 #define QM_MEM_START_INIT 0x100040
93 #define QM_MEM_INIT_DONE 0x100044
94 #define QM_VFT_CFG_RDY 0x10006c
95 #define QM_VFT_CFG_OP_WR 0x100058
96 #define QM_VFT_CFG_TYPE 0x10005c
97 #define QM_SQC_VFT 0x0
98 #define QM_CQC_VFT 0x1
99 #define QM_VFT_CFG 0x100060
100 #define QM_VFT_CFG_OP_ENABLE 0x100054
101 #define QM_PM_CTRL 0x100148
102 #define QM_IDLE_DISABLE BIT(9)
104 #define QM_VFT_CFG_DATA_L 0x100064
105 #define QM_VFT_CFG_DATA_H 0x100068
106 #define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
107 #define QM_SQC_VFT_SQC_SIZE (5ULL << 12)
108 #define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16)
109 #define QM_SQC_VFT_START_SQN_SHIFT 28
110 #define QM_SQC_VFT_VALID (1ULL << 44)
111 #define QM_SQC_VFT_SQN_SHIFT 45
112 #define QM_CQC_VFT_BUF_SIZE (7ULL << 8)
113 #define QM_CQC_VFT_SQC_SIZE (5ULL << 12)
114 #define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16)
115 #define QM_CQC_VFT_VALID (1ULL << 28)
117 #define QM_SQC_VFT_BASE_SHIFT_V2 28
118 #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
119 #define QM_SQC_VFT_NUM_SHIFT_V2 45
120 #define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
122 #define QM_DFX_CNT_CLR_CE 0x100118
124 #define QM_ABNORMAL_INT_SOURCE 0x100000
125 #define QM_ABNORMAL_INT_MASK 0x100004
126 #define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
127 #define QM_ABNORMAL_INT_STATUS 0x100008
128 #define QM_ABNORMAL_INT_SET 0x10000c
129 #define QM_ABNORMAL_INF00 0x100010
130 #define QM_FIFO_OVERFLOW_TYPE 0xc0
131 #define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
132 #define QM_FIFO_OVERFLOW_VF 0x3f
133 #define QM_ABNORMAL_INF01 0x100014
134 #define QM_DB_TIMEOUT_TYPE 0xc0
135 #define QM_DB_TIMEOUT_TYPE_SHIFT 6
136 #define QM_DB_TIMEOUT_VF 0x3f
137 #define QM_RAS_CE_ENABLE 0x1000ec
138 #define QM_RAS_FE_ENABLE 0x1000f0
139 #define QM_RAS_NFE_ENABLE 0x1000f4
140 #define QM_RAS_CE_THRESHOLD 0x1000f8
141 #define QM_RAS_CE_TIMES_PER_IRQ 1
142 #define QM_OOO_SHUTDOWN_SEL 0x1040f8
143 #define QM_ECC_MBIT BIT(2)
144 #define QM_DB_TIMEOUT BIT(10)
145 #define QM_OF_FIFO_OF BIT(11)
147 #define QM_RESET_WAIT_TIMEOUT 400
148 #define QM_PEH_VENDOR_ID 0x1000d8
149 #define ACC_VENDOR_ID_VALUE 0x5a5a
150 #define QM_PEH_DFX_INFO0 0x1000fc
151 #define QM_PEH_DFX_INFO1 0x100100
152 #define QM_PEH_DFX_MASK (BIT(0) | BIT(2))
153 #define QM_PEH_MSI_FINISH_MASK GENMASK(19, 16)
154 #define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
155 #define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
156 #define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
157 #define ACC_MASTER_TRANS_RETURN_RW 3
158 #define ACC_MASTER_TRANS_RETURN 0x300150
159 #define ACC_MASTER_GLOBAL_CTRL 0x300000
160 #define ACC_AM_CFG_PORT_WR_EN 0x30001c
161 #define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
162 #define ACC_AM_ROB_ECC_INT_STS 0x300104
163 #define ACC_ROB_ECC_ERR_MULTPL BIT(1)
164 #define QM_MSI_CAP_ENABLE BIT(16)
166 /* interfunction communication */
167 #define QM_IFC_READY_STATUS 0x100128
168 #define QM_IFC_C_STS_M 0x10012C
169 #define QM_IFC_INT_SET_P 0x100130
170 #define QM_IFC_INT_CFG 0x100134
171 #define QM_IFC_INT_SOURCE_P 0x100138
172 #define QM_IFC_INT_SOURCE_V 0x0020
173 #define QM_IFC_INT_MASK 0x0024
174 #define QM_IFC_INT_STATUS 0x0028
175 #define QM_IFC_INT_SET_V 0x002C
176 #define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
177 #define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
178 #define QM_IFC_INT_SOURCE_MASK BIT(0)
179 #define QM_IFC_INT_DISABLE BIT(0)
180 #define QM_IFC_INT_STATUS_MASK BIT(0)
181 #define QM_IFC_INT_SET_MASK BIT(0)
182 #define QM_WAIT_DST_ACK 10
183 #define QM_MAX_PF_WAIT_COUNT 10
184 #define QM_MAX_VF_WAIT_COUNT 40
185 #define QM_VF_RESET_WAIT_US 20000
186 #define QM_VF_RESET_WAIT_CNT 3000
187 #define QM_VF_RESET_WAIT_TIMEOUT_US \
188 (QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
190 #define QM_DFX_MB_CNT_VF 0x104010
191 #define QM_DFX_DB_CNT_VF 0x104020
192 #define QM_DFX_SQE_CNT_VF_SQN 0x104030
193 #define QM_DFX_CQE_CNT_VF_CQN 0x104040
194 #define QM_DFX_QN_SHIFT 16
195 #define CURRENT_FUN_MASK GENMASK(5, 0)
196 #define CURRENT_Q_MASK GENMASK(31, 16)
198 #define POLL_PERIOD 10
199 #define POLL_TIMEOUT 1000
200 #define WAIT_PERIOD_US_MAX 200
201 #define WAIT_PERIOD_US_MIN 100
202 #define MAX_WAIT_COUNTS 1000
203 #define QM_CACHE_WB_START 0x204
204 #define QM_CACHE_WB_DONE 0x208
205 #define QM_FUNC_CAPS_REG 0x3100
206 #define QM_CAPBILITY_VERSION GENMASK(7, 0)
210 #define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
211 #define QMC_ALIGN(sz) ALIGN(sz, 32)
213 #define QM_DBG_READ_LEN 256
214 #define QM_DBG_WRITE_LEN 1024
215 #define QM_DBG_TMP_BUF_LEN 22
216 #define QM_PCI_COMMAND_INVALID ~0
217 #define QM_RESET_STOP_TX_OFFSET 1
218 #define QM_RESET_STOP_RX_OFFSET 2
220 #define WAIT_PERIOD 20
221 #define REMOVE_WAIT_DELAY 10
222 #define QM_SQE_ADDR_MASK GENMASK(7, 0)
224 #define QM_DRIVER_REMOVING 0
225 #define QM_RST_SCHED 1
226 #define QM_RESETTING 2
227 #define QM_QOS_PARAM_NUM 2
228 #define QM_QOS_VAL_NUM 1
229 #define QM_QOS_BDF_PARAM_NUM 4
230 #define QM_QOS_MAX_VAL 1000
231 #define QM_QOS_RATE 100
232 #define QM_QOS_EXPAND_RATE 1000
233 #define QM_SHAPER_CIR_B_MASK GENMASK(7, 0)
234 #define QM_SHAPER_CIR_U_MASK GENMASK(10, 8)
235 #define QM_SHAPER_CIR_S_MASK GENMASK(14, 11)
236 #define QM_SHAPER_FACTOR_CIR_U_SHIFT 8
237 #define QM_SHAPER_FACTOR_CIR_S_SHIFT 11
238 #define QM_SHAPER_FACTOR_CBS_B_SHIFT 15
239 #define QM_SHAPER_FACTOR_CBS_S_SHIFT 19
240 #define QM_SHAPER_CBS_B 1
241 #define QM_SHAPER_CBS_S 16
242 #define QM_SHAPER_VFT_OFFSET 6
243 #define WAIT_FOR_QOS_VF 100
244 #define QM_QOS_MIN_ERROR_RATE 5
245 #define QM_QOS_TYPICAL_NUM 8
246 #define QM_SHAPER_MIN_CBS_S 8
247 #define QM_QOS_TICK 0x300U
248 #define QM_QOS_DIVISOR_CLK 0x1f40U
249 #define QM_QOS_MAX_CIR_B 200
250 #define QM_QOS_MIN_CIR_B 100
251 #define QM_QOS_MAX_CIR_U 6
252 #define QM_QOS_MAX_CIR_S 11
253 #define QM_QOS_VAL_MAX_LEN 32
254 #define QM_DFX_BASE 0x0100000
255 #define QM_DFX_STATE1 0x0104000
256 #define QM_DFX_STATE2 0x01040C8
257 #define QM_DFX_COMMON 0x0000
258 #define QM_DFX_BASE_LEN 0x5A
259 #define QM_DFX_STATE1_LEN 0x2E
260 #define QM_DFX_STATE2_LEN 0x11
261 #define QM_DFX_COMMON_LEN 0xC3
262 #define QM_DFX_REGS_LEN 4UL
263 #define QM_AUTOSUSPEND_DELAY 3000
265 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
266 (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
267 ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
268 ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
269 ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
271 #define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
272 ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
274 #define QM_MK_SQC_W13(priority, orders, alg_type) \
275 (((priority) << QM_SQ_PRIORITY_SHIFT) | \
276 ((orders) << QM_SQ_ORDERS_SHIFT) | \
277 (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))
279 #define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
280 (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
281 ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
282 ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
283 ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
285 #define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
286 ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
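/*
 * Illustrative sanity check (not part of the driver): worked instances of
 * the V2 context dword layout. For a 128-byte SQE and a depth of 1024,
 * QM_MK_SQC_DW3_V2(128, 1024) = 1023 | (ilog2(128) << 12) = 0x73ff, and
 * QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, 1024) = 1023 | (4 << 12) = 0x43ff.
 */
static inline void qm_example_check_xqc_dw3(void)
{
	BUILD_BUG_ON(QM_MK_SQC_DW3_V2(128, 1024) != 0x73ff);
	BUILD_BUG_ON(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, 1024) != 0x43ff);
}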
288 #define INIT_QC_COMMON(qc, base, pasid) do { \
291 (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \
292 (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \
296 (qc)->pasid = cpu_to_le16(pasid); \
307 enum acc_err_result {
319 QM_PF_FLR_PREPARE = 0x01,
331 QM_TOTAL_QP_NUM_CAP = 0x0,
338 QM_PF2VF_IRQ_TYPE_CAP,
343 static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
344 {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
345 {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
346 {QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
347 {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
348 {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
351 static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
352 {QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
355 static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
356 {QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
359 static const struct hisi_qm_cap_info qm_basic_info[] = {
360 {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
361 {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
362 {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800},
363 {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
364 {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
365 {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
366 {QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003},
367 {QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002},
368 {QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4},
369 {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
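/*
 * Each entry above is {type, offset, shift, mask, v1_val, v2_val, v3_val}:
 * when the hardware exposes capability registers, the value is read from
 * @offset and extracted with @shift/@mask; otherwise the per-version
 * default is used. Illustrative decode: the QM_EQ_IRQ_TYPE_CAP value
 * 0x10000 splits with QM_IRQ_VECTOR_MASK/QM_IRQ_TYPE_MASK into IRQ
 * vector 0 (bits 15:0) and type 1 (bits 31:16).
 */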
454 struct hisi_qm_resource {
457 struct list_head list;
460 struct hisi_qm_hw_ops {
461 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
462 void (*qm_db)(struct hisi_qm *qm, u16 qn,
463 u8 cmd, u16 index, u8 priority);
464 int (*debug_init)(struct hisi_qm *qm);
465 void (*hw_error_init)(struct hisi_qm *qm);
466 void (*hw_error_uninit)(struct hisi_qm *qm);
467 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
468 int (*set_msi)(struct hisi_qm *qm, bool set);
476 static struct qm_dfx_item qm_dfx_files[] = {
477 {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
478 {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
479 {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
480 {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
481 {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
484 static const char * const qm_debug_file_name[] = {
485 [CURRENT_QM] = "current_qm",
486 [CURRENT_Q] = "current_q",
487 [CLEAR_ENABLE] = "clear_enable",
490 struct hisi_qm_hw_error {
495 static const struct hisi_qm_hw_error qm_hw_error[] = {
496 { .int_msk = BIT(0), .msg = "qm_axi_rresp" },
497 { .int_msk = BIT(1), .msg = "qm_axi_bresp" },
498 { .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
499 { .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
500 { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
501 { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
502 { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
503 { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
504 { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
505 { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
506 { .int_msk = BIT(10), .msg = "qm_db_timeout" },
507 { .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
508 { .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
509 { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
510 { .int_msk = BIT(14), .msg = "qm_flr_timeout" },
514 /* define the QM's dfx regs region and region length */
515 static struct dfx_diff_registers qm_diff_regs[] = {
517 .reg_offset = QM_DFX_BASE,
518 .reg_len = QM_DFX_BASE_LEN,
520 .reg_offset = QM_DFX_STATE1,
521 .reg_len = QM_DFX_STATE1_LEN,
523 .reg_offset = QM_DFX_STATE2,
524 .reg_len = QM_DFX_STATE2_LEN,
526 .reg_offset = QM_DFX_COMMON,
527 .reg_len = QM_DFX_COMMON_LEN,
531 static const char * const qm_db_timeout[] = {
532 "sq", "cq", "eq", "aeq",
535 static const char * const qm_fifo_overflow[] = {
539 static const char * const qm_s[] = {
540 "init", "start", "close", "stop",
543 static const char * const qp_s[] = {
544 "none", "init", "start", "stop", "close",
547 struct qm_typical_qos_table {
553 /* the qos step is 100 */
554 static struct qm_typical_qos_table shaper_cir_s[] = {
562 static struct qm_typical_qos_table shaper_cbs_s[] = {
572 static void qm_irqs_unregister(struct hisi_qm *qm);
574 static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
576 enum qm_state curr = atomic_read(&qm->status.flags);
581 if (new == QM_START || new == QM_CLOSE)
589 if (new == QM_CLOSE || new == QM_START)
596 dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
597 qm_s[curr], qm_s[new]);
600 dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
601 qm_s[curr], qm_s[new]);
606 static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
609 enum qm_state qm_curr = atomic_read(&qm->status.flags);
610 enum qp_state qp_curr = 0;
614 qp_curr = atomic_read(&qp->qp_status.flags);
618 if (qm_curr == QM_START || qm_curr == QM_INIT)
622 if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
623 (qm_curr == QM_START && qp_curr == QP_STOP))
627 if ((qm_curr == QM_START && qp_curr == QP_START) ||
628 (qp_curr == QP_INIT))
632 if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
633 (qm_curr == QM_START && qp_curr == QP_STOP) ||
634 (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
635 (qm_curr == QM_STOP && qp_curr == QP_INIT))
642 dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
643 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
646 dev_warn(&qm->pdev->dev,
647 "Can not change qp state from %s to %s in QM %s\n",
648 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
653 static u32 qm_get_hw_error_status(struct hisi_qm *qm)
655 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
658 static u32 qm_get_dev_err_status(struct hisi_qm *qm)
660 return qm->err_ini->get_dev_hw_err_status(qm);
663 /* Check whether the error causes the master OOO to be blocked */
664 static bool qm_check_dev_error(struct hisi_qm *qm)
668 if (qm->fun_type == QM_HW_VF)
671 val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
672 dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
674 return val || dev_val;
677 static int qm_wait_reset_finish(struct hisi_qm *qm)
681 /* All reset requests need to be queued for processing */
682 while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
684 if (delay > QM_RESET_WAIT_TIMEOUT)
691 static int qm_reset_prepare_ready(struct hisi_qm *qm)
693 struct pci_dev *pdev = qm->pdev;
694 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
697 * On Kunpeng920, the PF and VFs on the host do not support
698 * resetting at the same time.
700 if (qm->ver < QM_HW_V3)
701 return qm_wait_reset_finish(pf_qm);
703 return qm_wait_reset_finish(qm);
706 static void qm_reset_bit_clear(struct hisi_qm *qm)
708 struct pci_dev *pdev = qm->pdev;
709 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
711 if (qm->ver < QM_HW_V3)
712 clear_bit(QM_RESETTING, &pf_qm->misc_ctl);
714 clear_bit(QM_RESETTING, &qm->misc_ctl);
717 static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
718 u64 base, u16 queue, bool op)
720 mailbox->w0 = cpu_to_le16((cmd) |
721 ((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
722 (0x1 << QM_MB_BUSY_SHIFT));
723 mailbox->queue_num = cpu_to_le16(queue);
724 mailbox->base_l = cpu_to_le32(lower_32_bits(base));
725 mailbox->base_h = cpu_to_le32(upper_32_bits(base));
729 /* Return 0 if the mailbox is ready, -ETIMEDOUT on hardware timeout */
730 int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
734 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
735 val, !((val >> QM_MB_BUSY_SHIFT) &
736 0x1), POLL_PERIOD, POLL_TIMEOUT);
738 EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);
740 /* 128 bits must be written to the hardware at one time to trigger a mailbox */
741 static void qm_mb_write(struct hisi_qm *qm, const void *src)
743 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
744 unsigned long tmp0 = 0, tmp1 = 0;
746 if (!IS_ENABLED(CONFIG_ARM64)) {
747 memcpy_toio(fun_base, src, 16);
752 asm volatile("ldp %0, %1, %3\n"
757 "+Q" (*((char __iomem *)fun_base))
758 : "Q" (*((char *)src))
762 static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
767 if (unlikely(hisi_qm_wait_mb_ready(qm))) {
768 dev_err(&qm->pdev->dev, "QM mailbox is busy, cannot start a new operation!\n");
773 qm_mb_write(qm, mailbox);
775 if (unlikely(hisi_qm_wait_mb_ready(qm))) {
776 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
781 val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
782 if (val & QM_MB_STATUS_MASK) {
783 dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
791 atomic64_inc(&qm->debug.dfx.mb_err_cnt);
795 int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
798 struct qm_mailbox mailbox;
801 dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
802 queue, cmd, (unsigned long long)dma_addr);
804 qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
806 mutex_lock(&qm->mailbox_lock);
807 ret = qm_mb_nolock(qm, &mailbox);
808 mutex_unlock(&qm->mailbox_lock);
812 EXPORT_SYMBOL_GPL(hisi_qm_mb);
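/*
 * Usage sketch: qm_dump_sqc_raw() below issues a read-direction mailbox
 * to fetch a queue's SQC into a DMA buffer:
 *
 *	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
 */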
814 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
818 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
819 ((u64)index << QM_DB_INDEX_SHIFT_V1) |
820 ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);
822 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
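/*
 * Worked example (illustrative): ringing the CQ doorbell for qn = 1,
 * index = 32, priority = 0 on V1 hardware writes
 * 1 | (1ULL << 16) | (32ULL << 32) = 0x0000002000010001
 * to QM_DOORBELL_BASE_V1.
 */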
825 static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
827 void __iomem *io_base = qm->io_base;
831 if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
832 io_base = qm->db_io_base + (u64)qn * qm->db_interval +
833 QM_DOORBELL_SQ_CQ_BASE_V2;
835 io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;
837 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
838 ((u64)randata << QM_DB_RAND_SHIFT_V2) |
839 ((u64)index << QM_DB_INDEX_SHIFT_V2) |
840 ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
842 writeq(doorbell, io_base);
845 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
847 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
850 qm->ops->qm_db(qm, qn, cmd, index, priority);
853 static void qm_disable_clock_gate(struct hisi_qm *qm)
857 /* If the QM enables clock gating on Kunpeng930, QoS will be inaccurate. */
858 if (qm->ver < QM_HW_V3)
861 val = readl(qm->io_base + QM_PM_CTRL);
862 val |= QM_IDLE_DISABLE;
863 writel(val, qm->io_base + QM_PM_CTRL);
866 static int qm_dev_mem_reset(struct hisi_qm *qm)
870 writel(0x1, qm->io_base + QM_MEM_START_INIT);
871 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
872 val & BIT(0), POLL_PERIOD,
877 * hisi_qm_get_hw_info() - Get device information.
878 * @qm: The qm from which to get the information.
879 * @info_table: Array for storing device information.
880 * @index: Index in info_table.
881 * @is_read: Whether to read from the register; 0 means the register read is not supported.
883 * This function returns device information the caller needs.
885 u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
886 const struct hisi_qm_cap_info *info_table,
887 u32 index, bool is_read)
893 return info_table[index].v1_val;
895 return info_table[index].v2_val;
898 return info_table[index].v3_val;
900 val = readl(qm->io_base + info_table[index].offset);
901 return (val >> info_table[index].shift) & info_table[index].mask;
904 EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
906 static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
907 u16 *high_bits, enum qm_basic_type type)
911 depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
912 *low_bits = depth & QM_XQ_DEPTH_MASK;
913 *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
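/*
 * Example (illustrative): the QM_XEQ_DEPTH_CAP default 0x4000800 splits
 * into low_bits = 0x800 (EQ depth 2048) and high_bits = 0x400 (AEQ depth
 * 1024).
 */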
916 static u32 qm_get_irq_num(struct hisi_qm *qm)
918 if (qm->fun_type == QM_HW_PF)
919 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);
921 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
924 static int qm_pm_get_sync(struct hisi_qm *qm)
926 struct device *dev = &qm->pdev->dev;
929 if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
932 ret = pm_runtime_resume_and_get(dev);
934 dev_err(dev, "failed to get_sync(%d).\n", ret);
941 static void qm_pm_put_sync(struct hisi_qm *qm)
943 struct device *dev = &qm->pdev->dev;
945 if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
948 pm_runtime_mark_last_busy(dev);
949 pm_runtime_put_autosuspend(dev);
952 static void qm_cq_head_update(struct hisi_qp *qp)
954 if (qp->qp_status.cq_head == qp->cq_depth - 1) {
955 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
956 qp->qp_status.cq_head = 0;
958 qp->qp_status.cq_head++;
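/*
 * Phase-bit convention (illustrative): with cq_depth = 4, cq_head walks
 * 0, 1, 2, 3 while cqc_phase stays constant; on the wrap back to 0 the
 * expected phase flips, so stale CQEs from the previous lap (whose phase
 * bit no longer matches) are not consumed by qm_poll_req_cb() below.
 */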
962 static void qm_poll_req_cb(struct hisi_qp *qp)
964 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
965 struct hisi_qm *qm = qp->qm;
967 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
969 qp->req_cb(qp, qp->sqe + qm->sqe_size *
970 le16_to_cpu(cqe->sq_head));
971 qm_cq_head_update(qp);
972 cqe = qp->cqe + qp->qp_status.cq_head;
973 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
974 qp->qp_status.cq_head, 0);
975 atomic_dec(&qp->qp_status.used);
979 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
982 static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
984 struct hisi_qm *qm = poll_data->qm;
985 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
986 u16 eq_depth = qm->eq_depth;
990 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
991 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
992 poll_data->qp_finish_id[eqe_num] = cqn;
995 if (qm->status.eq_head == eq_depth - 1) {
996 qm->status.eqc_phase = !qm->status.eqc_phase;
998 qm->status.eq_head = 0;
1001 qm->status.eq_head++;
1004 if (eqe_num == (eq_depth >> 1) - 1)
1008 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
1013 static void qm_work_process(struct work_struct *work)
1015 struct hisi_qm_poll_data *poll_data =
1016 container_of(work, struct hisi_qm_poll_data, work);
1017 struct hisi_qm *qm = poll_data->qm;
1021 /* Get qp id of completed tasks and re-enable the interrupt. */
1022 eqe_num = qm_get_complete_eqe_num(poll_data);
1023 for (i = eqe_num - 1; i >= 0; i--) {
1024 qp = &qm->qp_array[poll_data->qp_finish_id[i]];
1025 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
1033 if (likely(qp->req_cb))
1038 static bool do_qm_irq(struct hisi_qm *qm)
1040 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
1041 struct hisi_qm_poll_data *poll_data;
1044 if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
1047 if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
1048 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
1049 poll_data = &qm->poll_data[cqn];
1050 queue_work(qm->wq, &poll_data->work);
1058 static irqreturn_t qm_irq(int irq, void *data)
1060 struct hisi_qm *qm = data;
1063 ret = do_qm_irq(qm);
1067 atomic64_inc(&qm->debug.dfx.err_irq_cnt);
1068 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
1073 static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
1075 struct hisi_qm *qm = data;
1078 val = readl(qm->io_base + QM_IFC_INT_STATUS);
1079 val &= QM_IFC_INT_STATUS_MASK;
1083 schedule_work(&qm->cmd_process);
1088 static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
1092 if (qp->is_in_kernel)
1095 addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
1098 /* make sure setup is completed */
1102 static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
1104 struct hisi_qp *qp = &qm->qp_array[qp_id];
1106 qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
1107 hisi_qm_stop_qp(qp);
1108 qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
1111 static void qm_reset_function(struct hisi_qm *qm)
1113 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
1114 struct device *dev = &qm->pdev->dev;
1117 if (qm_check_dev_error(pf_qm))
1120 ret = qm_reset_prepare_ready(qm);
1122 dev_err(dev, "reset function not ready\n");
1126 ret = hisi_qm_stop(qm, QM_FLR);
1128 dev_err(dev, "failed to stop qm when reset function\n");
1132 ret = hisi_qm_start(qm);
1134 dev_err(dev, "failed to start qm when reset function\n");
1137 qm_reset_bit_clear(qm);
1140 static irqreturn_t qm_aeq_thread(int irq, void *data)
1142 struct hisi_qm *qm = data;
1143 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
1144 u16 aeq_depth = qm->aeq_depth;
1147 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
1148 type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
1149 qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
1152 case QM_EQ_OVERFLOW:
1153 dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
1154 qm_reset_function(qm);
1156 case QM_CQ_OVERFLOW:
1157 dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
1161 qm_disable_qp(qm, qp_id);
1164 dev_err(&qm->pdev->dev, "unknown error type %u\n",
1169 if (qm->status.aeq_head == aeq_depth - 1) {
1170 qm->status.aeqc_phase = !qm->status.aeqc_phase;
1172 qm->status.aeq_head = 0;
1175 qm->status.aeq_head++;
1179 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
1184 static irqreturn_t qm_aeq_irq(int irq, void *data)
1186 struct hisi_qm *qm = data;
1188 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
1189 if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
1192 return IRQ_WAKE_THREAD;
1195 static void qm_init_qp_status(struct hisi_qp *qp)
1197 struct hisi_qp_status *qp_status = &qp->qp_status;
1199 qp_status->sq_tail = 0;
1200 qp_status->cq_head = 0;
1201 qp_status->cqc_phase = true;
1202 atomic_set(&qp_status->used, 0);
1205 static void qm_init_prefetch(struct hisi_qm *qm)
1207 struct device *dev = &qm->pdev->dev;
1208 u32 page_type = 0x0;
1210 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
1213 switch (PAGE_SIZE) {
1224 dev_err(dev, "system page size %lu is not supported, default set to 4KB",
1228 writel(page_type, qm->io_base + QM_PAGE_SIZE);
1232 * acc_shaper_para_calc() - Get the IR value from the QoS formula; the
1233 * return value is the calculated IR.
1235 * IR is in Mbps: ir = 100 means IR = 100 Mbps, ir = 10000 means IR = 10 Gbps.
1237 *		IR_b * (2 ^ IR_u) * 8000
1238 * IR(Mbps) = -------------------------
1239 *		  Tick * (2 ^ IR_s)
1241 static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
1243 return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
1244 (QM_QOS_TICK * (1 << cir_s));
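/*
 * Worked example (illustrative): cir_b = 100, cir_u = 0, cir_s = 3 yields
 * (100 * 0x1f40 * 1) / (0x300 * 8) = 800000 / 6144 = 130 after integer
 * division.
 */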
1247 static u32 acc_shaper_calc_cbs_s(u32 ir)
1249 int table_size = ARRAY_SIZE(shaper_cbs_s);
1252 for (i = 0; i < table_size; i++) {
1253 if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
1254 return shaper_cbs_s[i].val;
1257 return QM_SHAPER_MIN_CBS_S;
1260 static u32 acc_shaper_calc_cir_s(u32 ir)
1262 int table_size = ARRAY_SIZE(shaper_cir_s);
1265 for (i = 0; i < table_size; i++) {
1266 if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
1267 return shaper_cir_s[i].val;
1273 static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
1275 u32 cir_b, cir_u, cir_s, ir_calc;
1278 factor->cbs_s = acc_shaper_calc_cbs_s(ir);
1279 cir_s = acc_shaper_calc_cir_s(ir);
1281 for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
1282 for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
1283 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
1285 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
1286 if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
1287 factor->cir_b = cir_b;
1288 factor->cir_u = cir_u;
1289 factor->cir_s = cir_s;
1298 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
1299 u32 number, struct qm_shaper_factor *factor)
1306 if (qm->ver == QM_HW_V1) {
1307 tmp = QM_SQC_VFT_BUF_SIZE |
1308 QM_SQC_VFT_SQC_SIZE |
1309 QM_SQC_VFT_INDEX_NUMBER |
1311 (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
1313 tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
1315 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
1319 if (qm->ver == QM_HW_V1) {
1320 tmp = QM_CQC_VFT_BUF_SIZE |
1321 QM_CQC_VFT_SQC_SIZE |
1322 QM_CQC_VFT_INDEX_NUMBER |
1325 tmp = QM_CQC_VFT_VALID;
1330 tmp = factor->cir_b |
1331 (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
1332 (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
1333 (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
1334 (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
1340 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
1341 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
1344 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
1345 u32 fun_num, u32 base, u32 number)
1347 struct qm_shaper_factor *factor = NULL;
1351 if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
1352 factor = &qm->factor[fun_num];
1354 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1355 val & BIT(0), POLL_PERIOD,
1360 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
1361 writel(type, qm->io_base + QM_VFT_CFG_TYPE);
1362 if (type == SHAPER_VFT)
1363 fun_num |= base << QM_SHAPER_VFT_OFFSET;
1365 writel(fun_num, qm->io_base + QM_VFT_CFG);
1367 qm_vft_data_cfg(qm, type, base, number, factor);
1369 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
1370 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
1372 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1373 val & BIT(0), POLL_PERIOD,
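/*
 * VFT programming sequence (summary of the above): poll QM_VFT_CFG_RDY,
 * select the write op and VFT type, point QM_VFT_CFG at the function,
 * stage the 64-bit payload via qm_vft_data_cfg() into DATA_L/DATA_H,
 * clear the ready flag, set QM_VFT_CFG_OP_ENABLE, and poll for ready
 * again to confirm completion.
 */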
1377 static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
1379 u32 qos = qm->factor[fun_num].func_qos;
1382 ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
1384 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
1387 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
1388 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
1389 /* The same base queue number is reused for the different alg types */
1390 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
1398 /* The config should be conducted after qm_dev_mem_reset() */
1399 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
1404 for (i = SQC_VFT; i <= CQC_VFT; i++) {
1405 ret = qm_set_vft_common(qm, i, fun_num, base, number);
1410 /* init default shaper qos val */
1411 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
1412 ret = qm_shaper_init_vft(qm, fun_num);
1419 for (i = SQC_VFT; i <= CQC_VFT; i++)
1420 qm_set_vft_common(qm, i, fun_num, 0, 0);
1425 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
1430 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
1434 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
1435 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
1436 *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
1437 *number = (QM_SQC_VFT_NUM_MASK_V2 &
1438 (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
1443 static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
1445 u32 remain_q_num, vfq_num;
1446 u32 num_vfs = qm->vfs_num;
1448 vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
1449 if (vfq_num >= qm->max_qp_num)
1450 return qm->max_qp_num;
1452 remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
1453 if (vfq_num + remain_q_num <= qm->max_qp_num)
1454 return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
1457 * If vfq_num + remain_q_num > max_qp_num, the last remain_q_num
1458 * VFs each get one extra queue.
1460 return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
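/*
 * Worked example (illustrative): with 100 queues left over and 8 VFs,
 * vfq_num = 12 and remain_q_num = 4. If max_qp_num >= 16, the last VF
 * gets 12 + 4 = 16 queues and the others get 12; otherwise VFs 5-8 get
 * 13 queues each and VFs 1-4 get 12.
 */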
1463 static struct hisi_qm *file_to_qm(struct debugfs_file *file)
1465 struct qm_debug *debug = file->debug;
1467 return container_of(debug, struct hisi_qm, debug);
1470 static u32 current_q_read(struct hisi_qm *qm)
1472 return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
1475 static int current_q_write(struct hisi_qm *qm, u32 val)
1479 if (val >= qm->debug.curr_qm_qp_num)
1482 tmp = val << QM_DFX_QN_SHIFT |
1483 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
1484 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
1486 tmp = val << QM_DFX_QN_SHIFT |
1487 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
1488 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
1493 static u32 clear_enable_read(struct hisi_qm *qm)
1495 return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
1498 /* rd_clr_ctrl: 1 enables read-clear, 0 disables it */
1499 static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
1501 if (rd_clr_ctrl > 1)
1504 writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
1509 static u32 current_qm_read(struct hisi_qm *qm)
1511 return readl(qm->io_base + QM_DFX_MB_CNT_VF);
1514 static int current_qm_write(struct hisi_qm *qm, u32 val)
1518 if (val > qm->vfs_num)
1521 /* Calculate curr_qm_qp_num from the PF or VF device ID and store it */
1523 qm->debug.curr_qm_qp_num = qm->qp_num;
1525 qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
1527 writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
1528 writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
1531 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
1532 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
1535 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
1536 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
1541 static ssize_t qm_debug_read(struct file *filp, char __user *buf,
1542 size_t count, loff_t *pos)
1544 struct debugfs_file *file = filp->private_data;
1545 enum qm_debug_file index = file->index;
1546 struct hisi_qm *qm = file_to_qm(file);
1547 char tbuf[QM_DBG_TMP_BUF_LEN];
1551 ret = hisi_qm_get_dfx_access(qm);
1555 mutex_lock(&file->lock);
1558 val = current_qm_read(qm);
1561 val = current_q_read(qm);
1564 val = clear_enable_read(qm);
1569 mutex_unlock(&file->lock);
1571 hisi_qm_put_dfx_access(qm);
1572 ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
1573 return simple_read_from_buffer(buf, count, pos, tbuf, ret);
1576 mutex_unlock(&file->lock);
1577 hisi_qm_put_dfx_access(qm);
1581 static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
1582 size_t count, loff_t *pos)
1584 struct debugfs_file *file = filp->private_data;
1585 enum qm_debug_file index = file->index;
1586 struct hisi_qm *qm = file_to_qm(file);
1588 char tbuf[QM_DBG_TMP_BUF_LEN];
1594 if (count >= QM_DBG_TMP_BUF_LEN)
1597 len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
1603 if (kstrtoul(tbuf, 0, &val))
1606 ret = hisi_qm_get_dfx_access(qm);
1610 mutex_lock(&file->lock);
1613 ret = current_qm_write(qm, val);
1616 ret = current_q_write(qm, val);
1619 ret = clear_enable_write(qm, val);
1624 mutex_unlock(&file->lock);
1626 hisi_qm_put_dfx_access(qm);
1634 static const struct file_operations qm_debug_fops = {
1635 .owner = THIS_MODULE,
1636 .open = simple_open,
1637 .read = qm_debug_read,
1638 .write = qm_debug_write,
1641 #define CNT_CYC_REGS_NUM 10
1642 static const struct debugfs_reg32 qm_dfx_regs[] = {
1643 /* XXX_CNT are read-clear registers */
1644 {"QM_ECC_1BIT_CNT ", 0x104000ull},
1645 {"QM_ECC_MBIT_CNT ", 0x104008ull},
1646 {"QM_DFX_MB_CNT ", 0x104018ull},
1647 {"QM_DFX_DB_CNT ", 0x104028ull},
1648 {"QM_DFX_SQE_CNT ", 0x104038ull},
1649 {"QM_DFX_CQE_CNT ", 0x104048ull},
1650 {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
1651 {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
1652 {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
1653 {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
1654 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
1655 {"QM_ECC_1BIT_INF ", 0x104004ull},
1656 {"QM_ECC_MBIT_INF ", 0x10400cull},
1657 {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
1658 {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
1659 {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
1660 {"QM_DFX_FF_ST0 ", 0x1040c8ull},
1661 {"QM_DFX_FF_ST1 ", 0x1040ccull},
1662 {"QM_DFX_FF_ST2 ", 0x1040d0ull},
1663 {"QM_DFX_FF_ST3 ", 0x1040d4ull},
1664 {"QM_DFX_FF_ST4 ", 0x1040d8ull},
1665 {"QM_DFX_FF_ST5 ", 0x1040dcull},
1666 {"QM_DFX_FF_ST6 ", 0x1040e0ull},
1667 {"QM_IN_IDLE_ST ", 0x1040e4ull},
1670 static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
1671 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
1675 * hisi_qm_regs_dump() - Dump registers' values.
1676 * @s: debugfs file handle.
1677 * @regset: accelerator registers information.
1679 * Dump accelerator registers.
1681 void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
1683 struct pci_dev *pdev = to_pci_dev(regset->dev);
1684 struct hisi_qm *qm = pci_get_drvdata(pdev);
1685 const struct debugfs_reg32 *regs = regset->regs;
1686 int regs_len = regset->nregs;
1690 ret = hisi_qm_get_dfx_access(qm);
1694 for (i = 0; i < regs_len; i++) {
1695 val = readl(regset->base + regs[i].offset);
1696 seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
1699 hisi_qm_put_dfx_access(qm);
1701 EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
1703 static int qm_regs_show(struct seq_file *s, void *unused)
1705 struct hisi_qm *qm = s->private;
1706 struct debugfs_regset32 regset;
1708 if (qm->fun_type == QM_HW_PF) {
1709 regset.regs = qm_dfx_regs;
1710 regset.nregs = ARRAY_SIZE(qm_dfx_regs);
1712 regset.regs = qm_vf_dfx_regs;
1713 regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
1716 regset.base = qm->io_base;
1717 regset.dev = &qm->pdev->dev;
1719 hisi_qm_regs_dump(s, &regset);
1724 DEFINE_SHOW_ATTRIBUTE(qm_regs);
1726 static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
1727 const struct dfx_diff_registers *cregs, int reg_len)
1729 struct dfx_diff_registers *diff_regs;
1733 diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL);
1735 return ERR_PTR(-ENOMEM);
1737 for (i = 0; i < reg_len; i++) {
1738 if (!cregs[i].reg_len)
1741 diff_regs[i].reg_offset = cregs[i].reg_offset;
1742 diff_regs[i].reg_len = cregs[i].reg_len;
1743 diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len,
1745 if (!diff_regs[i].regs)
1748 for (j = 0; j < diff_regs[i].reg_len; j++) {
1749 base_offset = diff_regs[i].reg_offset +
1750 j * QM_DFX_REGS_LEN;
1751 diff_regs[i].regs[j] = readl(qm->io_base + base_offset);
1760 kfree(diff_regs[i].regs);
1763 return ERR_PTR(-ENOMEM);
1766 static void dfx_regs_uninit(struct hisi_qm *qm,
1767 struct dfx_diff_registers *dregs, int reg_len)
1771 /* Set the pointer to NULL to prevent a double free */
1772 for (i = 0; i < reg_len; i++) {
1773 kfree(dregs[i].regs);
1774 dregs[i].regs = NULL;
1781 * hisi_qm_diff_regs_init() - Allocate memory for registers.
1782 * @qm: device qm handle.
1783 * @dregs: diff registers handle.
1784 * @reg_len: diff registers region length.
1786 int hisi_qm_diff_regs_init(struct hisi_qm *qm,
1787 struct dfx_diff_registers *dregs, int reg_len)
1789 if (!qm || !dregs || reg_len <= 0)
1792 if (qm->fun_type != QM_HW_PF)
1795 qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs,
1796 ARRAY_SIZE(qm_diff_regs));
1797 if (IS_ERR(qm->debug.qm_diff_regs))
1798 return PTR_ERR(qm->debug.qm_diff_regs);
1800 qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
1801 if (IS_ERR(qm->debug.acc_diff_regs)) {
1802 dfx_regs_uninit(qm, qm->debug.qm_diff_regs,
1803 ARRAY_SIZE(qm_diff_regs));
1804 return PTR_ERR(qm->debug.acc_diff_regs);
1809 EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_init);
1812 * hisi_qm_diff_regs_uninit() - Free memory for registers.
1813 * @qm: device qm handle.
1814 * @reg_len: diff registers region length.
1816 void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len)
1818 if (!qm || reg_len <= 0 || qm->fun_type != QM_HW_PF)
1821 dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
1822 dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
1824 EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_uninit);
1827 * hisi_qm_acc_diff_regs_dump() - Dump registers' values.
1828 * @qm: device qm handle.
1829 * @s: Debugfs file handle.
1830 * @dregs: diff registers handle.
1831 * @regs_len: diff registers region length.
1833 void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
1834 struct dfx_diff_registers *dregs, int regs_len)
1836 u32 j, val, base_offset;
1839 if (!qm || !s || !dregs || regs_len <= 0)
1842 ret = hisi_qm_get_dfx_access(qm);
1846 down_read(&qm->qps_lock);
1847 for (i = 0; i < regs_len; i++) {
1848 if (!dregs[i].reg_len)
1851 for (j = 0; j < dregs[i].reg_len; j++) {
1852 base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN;
1853 val = readl(qm->io_base + base_offset);
1854 if (val != dregs[i].regs[j])
1855 seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n",
1856 base_offset, dregs[i].regs[j], val);
1859 up_read(&qm->qps_lock);
1861 hisi_qm_put_dfx_access(qm);
1863 EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump);
1865 static int qm_diff_regs_show(struct seq_file *s, void *unused)
1867 struct hisi_qm *qm = s->private;
1869 hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs,
1870 ARRAY_SIZE(qm_diff_regs));
1874 DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
1876 static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
1877 size_t count, loff_t *pos)
1879 char buf[QM_DBG_READ_LEN];
1882 len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
1883 "Echo 'help' into the cmd file to get usage information");
1885 return simple_read_from_buffer(buffer, count, pos, buf, len);
1888 static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
1889 dma_addr_t *dma_addr)
1891 struct device *dev = &qm->pdev->dev;
1894 ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
1896 return ERR_PTR(-ENOMEM);
1898 *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
1899 if (dma_mapping_error(dev, *dma_addr)) {
1900 dev_err(dev, "DMA mapping error!\n");
1902 return ERR_PTR(-ENOMEM);
1908 static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
1909 const void *ctx_addr, dma_addr_t *dma_addr)
1911 struct device *dev = &qm->pdev->dev;
1913 dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
1917 static void dump_show(struct hisi_qm *qm, void *info,
1918 unsigned int info_size, char *info_name)
1920 struct device *dev = &qm->pdev->dev;
1921 u8 *info_curr = info;
1923 #define BYTE_PER_DW 4
1925 dev_info(dev, "%s DUMP\n", info_name);
1926 for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
1927 pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
1928 *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
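/*
 * Output sketch (illustrative): for little-endian bytes 78 56 34 12 at
 * the start of @info, the first line printed is "DW0: 1234 5678".
 */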
1932 static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1934 return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
1937 static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1939 return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
1942 static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
1944 struct device *dev = &qm->pdev->dev;
1945 struct qm_sqc *sqc, *sqc_curr;
1953 ret = kstrtou32(s, 0, &qp_id);
1954 if (ret || qp_id >= qm->qp_num) {
1955 dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
1959 sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
1961 return PTR_ERR(sqc);
1963 ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
1965 down_read(&qm->qps_lock);
1967 sqc_curr = qm->sqc + qp_id;
1969 dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
1971 up_read(&qm->qps_lock);
1976 dump_show(qm, sqc, sizeof(*sqc), "SQC");
1979 qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
1983 static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
1985 struct device *dev = &qm->pdev->dev;
1986 struct qm_cqc *cqc, *cqc_curr;
1994 ret = kstrtou32(s, 0, &qp_id);
1995 if (ret || qp_id >= qm->qp_num) {
1996 dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
2000 cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
2002 return PTR_ERR(cqc);
2004 ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
2006 down_read(&qm->qps_lock);
2008 cqc_curr = qm->cqc + qp_id;
2010 dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
2012 up_read(&qm->qps_lock);
2017 dump_show(qm, cqc, sizeof(*cqc), "CQC");
2020 qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
2024 static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
2025 int cmd, char *name)
2027 struct device *dev = &qm->pdev->dev;
2028 dma_addr_t xeqc_dma;
2032 if (strsep(&s, " ")) {
2033 dev_err(dev, "Please do not input extra characters!\n");
2037 xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
2039 return PTR_ERR(xeqc);
2041 ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
2045 dump_show(qm, xeqc, size, name);
2048 qm_ctx_free(qm, size, xeqc, &xeqc_dma);
2052 static int q_dump_param_parse(struct hisi_qm *qm, char *s,
2053 u32 *e_id, u32 *q_id, u16 q_depth)
2055 struct device *dev = &qm->pdev->dev;
2056 unsigned int qp_num = qm->qp_num;
2060 presult = strsep(&s, " ");
2062 dev_err(dev, "Please input qp number!\n");
2066 ret = kstrtou32(presult, 0, q_id);
2067 if (ret || *q_id >= qp_num) {
2068 dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
2072 presult = strsep(&s, " ");
2074 dev_err(dev, "Please input sqe number!\n");
2078 ret = kstrtou32(presult, 0, e_id);
2079 if (ret || *e_id >= q_depth) {
2080 dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
2084 if (strsep(&s, " ")) {
2085 dev_err(dev, "Please do not input extra characters!\n");
2092 static int qm_sq_dump(struct hisi_qm *qm, char *s)
2094 u16 sq_depth = qm->qp_array->sq_depth;
2095 void *sqe, *sqe_curr;
2100 ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
2104 sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
2108 qp = &qm->qp_array[qp_id];
2109 memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
2110 sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
2111 memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
2112 qm->debug.sqe_mask_len);
2114 dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
2121 static int qm_cq_dump(struct hisi_qm *qm, char *s)
2123 struct qm_cqe *cqe_curr;
2128 ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
2132 qp = &qm->qp_array[qp_id];
2133 cqe_curr = qp->cqe + cqe_id;
2134 dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
2139 static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
2140 size_t size, char *name)
2142 struct device *dev = &qm->pdev->dev;
2150 ret = kstrtou32(s, 0, &xeqe_id);
2154 if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) {
2155 dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1);
2157 } else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) {
2158 dev_err(dev, "Please input aeqe num (0-%u)", qm->aeq_depth - 1);
2162 down_read(&qm->qps_lock);
2164 if (qm->eqe && !strcmp(name, "EQE")) {
2165 xeqe = qm->eqe + xeqe_id;
2166 } else if (qm->aeqe && !strcmp(name, "AEQE")) {
2167 xeqe = qm->aeqe + xeqe_id;
2173 dump_show(qm, xeqe, size, name);
2176 up_read(&qm->qps_lock);
2180 static int qm_dbg_help(struct hisi_qm *qm, char *s)
2182 struct device *dev = &qm->pdev->dev;
2184 if (strsep(&s, " ")) {
2185 dev_err(dev, "Please do not input extra characters!\n");
2189 dev_info(dev, "available commands:\n");
2190 dev_info(dev, "sqc <num>\n");
2191 dev_info(dev, "cqc <num>\n");
2192 dev_info(dev, "eqc\n");
2193 dev_info(dev, "aeqc\n");
2194 dev_info(dev, "sq <num> <e>\n");
2195 dev_info(dev, "cq <num> <e>\n");
2196 dev_info(dev, "eq <e>\n");
2197 dev_info(dev, "aeq <e>\n");
2202 static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
2204 struct device *dev = &qm->pdev->dev;
2205 char *presult, *s, *s_tmp;
2208 s = kstrdup(cmd_buf, GFP_KERNEL);
2213 presult = strsep(&s, " ");
2216 goto err_buffer_free;
2219 if (!strcmp(presult, "sqc"))
2220 ret = qm_sqc_dump(qm, s);
2221 else if (!strcmp(presult, "cqc"))
2222 ret = qm_cqc_dump(qm, s);
2223 else if (!strcmp(presult, "eqc"))
2224 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
2225 QM_MB_CMD_EQC, "EQC");
2226 else if (!strcmp(presult, "aeqc"))
2227 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
2228 QM_MB_CMD_AEQC, "AEQC");
2229 else if (!strcmp(presult, "sq"))
2230 ret = qm_sq_dump(qm, s);
2231 else if (!strcmp(presult, "cq"))
2232 ret = qm_cq_dump(qm, s);
2233 else if (!strcmp(presult, "eq"))
2234 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
2235 else if (!strcmp(presult, "aeq"))
2236 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
2237 else if (!strcmp(presult, "help"))
2238 ret = qm_dbg_help(qm, s);
2243 dev_info(dev, "Please echo help\n");
2251 static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
2252 size_t count, loff_t *pos)
2254 struct hisi_qm *qm = filp->private_data;
2255 char *cmd_buf, *cmd_buf_tmp;
2261 ret = hisi_qm_get_dfx_access(qm);
2265 /* Check whether the instance is being reset. */
2266 if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
2268 goto put_dfx_access;
2271 if (count > QM_DBG_WRITE_LEN) {
2273 goto put_dfx_access;
2276 cmd_buf = memdup_user_nul(buffer, count);
2277 if (IS_ERR(cmd_buf)) {
2278 ret = PTR_ERR(cmd_buf);
2279 goto put_dfx_access;
2282 cmd_buf_tmp = strchr(cmd_buf, '\n');
2284 *cmd_buf_tmp = '\0';
2285 count = cmd_buf_tmp - cmd_buf + 1;
2288 ret = qm_cmd_write_dump(qm, cmd_buf);
2291 goto put_dfx_access;
2299 hisi_qm_put_dfx_access(qm);
2303 static const struct file_operations qm_cmd_fops = {
2304 .owner = THIS_MODULE,
2305 .open = simple_open,
2306 .read = qm_cmd_read,
2307 .write = qm_cmd_write,
2310 static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
2311 enum qm_debug_file index)
2313 struct debugfs_file *file = qm->debug.files + index;
2315 debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
2318 file->index = index;
2319 mutex_init(&file->lock);
2320 file->debug = &qm->debug;
2323 static void qm_hw_error_init_v1(struct hisi_qm *qm)
2325 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
2328 static void qm_hw_error_cfg(struct hisi_qm *qm)
2330 struct hisi_qm_err_info *err_info = &qm->err_info;
2332 qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
2333 /* clear QM hw residual error source */
2334 writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
2336 /* configure error type */
2337 writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
2338 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
2339 writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
2340 writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
2343 static void qm_hw_error_init_v2(struct hisi_qm *qm)
2347 qm_hw_error_cfg(qm);
2349 irq_unmask = ~qm->error_mask;
2350 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
2351 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
2354 static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
2356 u32 irq_mask = qm->error_mask;
2358 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
2359 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
2362 static void qm_hw_error_init_v3(struct hisi_qm *qm)
2366 qm_hw_error_cfg(qm);
2368 /* Enable shutting down the master OOO when a hardware error occurs */
2369 writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
2371 irq_unmask = ~qm->error_mask;
2372 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
2373 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
2376 static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
2378 u32 irq_mask = qm->error_mask;
2380 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
2381 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
2383 /* Disable shutting down the master OOO when a hardware error occurs */
2384 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
2387 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
2389 const struct hisi_qm_hw_error *err;
2390 struct device *dev = &qm->pdev->dev;
2391 u32 reg_val, type, vf_num;
2394 for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
2395 err = &qm_hw_error[i];
2396 if (!(err->int_msk & error_status))
2399 dev_err(dev, "%s [error status=0x%x] found\n",
2400 err->msg, err->int_msk);
2402 if (err->int_msk & QM_DB_TIMEOUT) {
2403 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
2404 type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
2405 QM_DB_TIMEOUT_TYPE_SHIFT;
2406 vf_num = reg_val & QM_DB_TIMEOUT_VF;
2407 dev_err(dev, "qm %s doorbell timeout in function %u\n",
2408 qm_db_timeout[type], vf_num);
2409 } else if (err->int_msk & QM_OF_FIFO_OF) {
2410 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
2411 type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
2412 QM_FIFO_OVERFLOW_TYPE_SHIFT;
2413 vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
2415 if (type < ARRAY_SIZE(qm_fifo_overflow))
2416 dev_err(dev, "qm %s fifo overflow in function %u\n",
2417 qm_fifo_overflow[type], vf_num);
2419 dev_err(dev, "unknown error type\n");
2424 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
2426 u32 error_status, tmp;
2429 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
2430 error_status = qm->error_mask & tmp;
2433 if (error_status & QM_ECC_MBIT)
2434 qm->err_status.is_qm_ecc_mbit = true;
2436 qm_log_hw_error(qm, error_status);
2437 if (error_status & qm->err_info.qm_reset_mask)
2438 return ACC_ERR_NEED_RESET;
2440 writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
2441 writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
2444 return ACC_ERR_RECOVERED;
2447 static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
2449 struct qm_mailbox mailbox;
2452 qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
2453 mutex_lock(&qm->mailbox_lock);
2454 ret = qm_mb_nolock(qm, &mailbox);
2458 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
2459 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
2462 mutex_unlock(&qm->mailbox_lock);
2466 static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
2470 if (qm->fun_type == QM_HW_PF)
2471 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);
2473 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
2474 val |= QM_IFC_INT_SOURCE_MASK;
2475 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
2478 static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
2480 struct device *dev = &qm->pdev->dev;
2485 ret = qm_get_mb_cmd(qm, &msg, vf_id);
2487 dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
2491 cmd = msg & QM_MB_CMD_DATA_MASK;
2493 case QM_VF_PREPARE_FAIL:
2494 dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
2496 case QM_VF_START_FAIL:
2497 dev_err(dev, "failed to start VF(%u)!\n", vf_id);
2499 case QM_VF_PREPARE_DONE:
2500 case QM_VF_START_DONE:
2503 dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
2508 static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
2510 struct device *dev = &qm->pdev->dev;
2511 u32 vfs_num = qm->vfs_num;
2517 if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
2521 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
2522 /* Break once all VFs have sent their commands to the PF */
2523 if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
2526 if (++cnt > QM_MAX_PF_WAIT_COUNT) {
2531 msleep(QM_WAIT_DST_ACK);
2534 /* The PF checks each VF's message */
2535 for (i = 1; i <= vfs_num; i++) {
2537 qm_handle_vf_msg(qm, i);
2539 dev_err(dev, "VF(%u) not ping PF!\n", i);
2542 /* The PF clears the interrupt to ack the VFs */
2543 qm_clear_cmd_interrupt(qm, val);
2548 static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
2552 val = readl(qm->io_base + QM_IFC_INT_CFG);
2553 val &= ~QM_IFC_SEND_ALL_VFS;
2555 writel(val, qm->io_base + QM_IFC_INT_CFG);
2557 val = readl(qm->io_base + QM_IFC_INT_SET_P);
2558 val |= QM_IFC_INT_SET_MASK;
2559 writel(val, qm->io_base + QM_IFC_INT_SET_P);
2562 static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
2566 val = readl(qm->io_base + QM_IFC_INT_SET_V);
2567 val |= QM_IFC_INT_SET_MASK;
2568 writel(val, qm->io_base + QM_IFC_INT_SET_V);
2571 static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
2573 struct device *dev = &qm->pdev->dev;
2574 struct qm_mailbox mailbox;
2579 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
2580 mutex_lock(&qm->mailbox_lock);
2581 ret = qm_mb_nolock(qm, &mailbox);
2583 dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
2587 qm_trigger_vf_interrupt(qm, fun_num);
2589 msleep(QM_WAIT_DST_ACK);
2590 val = readq(qm->io_base + QM_IFC_READY_STATUS);
2591 /* If the VF responded, the PF notified it successfully. */
2592 if (!(val & BIT(fun_num)))
2595 if (++cnt > QM_MAX_PF_WAIT_COUNT) {
2596 dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
2603 mutex_unlock(&qm->mailbox_lock);
2607 static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
2609 struct device *dev = &qm->pdev->dev;
2610 u32 vfs_num = qm->vfs_num;
2611 struct qm_mailbox mailbox;
2617 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
2618 mutex_lock(&qm->mailbox_lock);
2619 /* PF sends command to all VFs by mailbox */
2620 ret = qm_mb_nolock(qm, &mailbox);
2622 dev_err(dev, "failed to send command to VFs!\n");
2623 mutex_unlock(&qm->mailbox_lock);
2627 qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
2629 msleep(QM_WAIT_DST_ACK);
2630 val = readq(qm->io_base + QM_IFC_READY_STATUS);
2631 /* If all VFs acked, the PF notified them successfully. */
2632 if (!(val & GENMASK(vfs_num, 1))) {
2633 mutex_unlock(&qm->mailbox_lock);
2637 if (++cnt > QM_MAX_PF_WAIT_COUNT)
2641 mutex_unlock(&qm->mailbox_lock);
2643 /* Check which VF timed out responding. */
2644 for (i = 1; i <= vfs_num; i++) {
2646 dev_err(dev, "failed to get response from VF(%u)!\n", i);
2652 static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
2654 struct qm_mailbox mailbox;
2659 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
2660 mutex_lock(&qm->mailbox_lock);
2661 ret = qm_mb_nolock(qm, &mailbox);
2663 dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
2667 qm_trigger_pf_interrupt(qm);
2668 /* Waiting for PF response */
2670 msleep(QM_WAIT_DST_ACK);
2671 val = readl(qm->io_base + QM_IFC_INT_SET_V);
2672 if (!(val & QM_IFC_INT_STATUS_MASK))
2675 if (++cnt > QM_MAX_VF_WAIT_COUNT) {
2682 mutex_unlock(&qm->mailbox_lock);
2686 static int qm_stop_qp(struct hisi_qp *qp)
2688 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
2691 static int qm_set_msi(struct hisi_qm *qm, bool set)
2693 struct pci_dev *pdev = qm->pdev;
2696 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
2699 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
2700 ACC_PEH_MSI_DISABLE);
2701 if (qm->err_status.is_qm_ecc_mbit ||
2702 qm->err_status.is_dev_ecc_mbit)
2706 if (readl(qm->io_base + QM_PEH_DFX_INFO0))
2713 static void qm_wait_msi_finish(struct hisi_qm *qm)
2715 struct pci_dev *pdev = qm->pdev;
2722 pci_read_config_dword(pdev, pdev->msi_cap +
2723 PCI_MSI_PENDING_64, &cmd);
2727 if (++cnt > MAX_WAIT_COUNTS) {
2728 pci_warn(pdev, "failed to empty MSI PENDING!\n");
2735 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
2736 val, !(val & QM_PEH_DFX_MASK),
2737 POLL_PERIOD, POLL_TIMEOUT);
2739 pci_warn(pdev, "failed to empty PEH MSI!\n");
2741 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
2742 val, !(val & QM_PEH_MSI_FINISH_MASK),
2743 POLL_PERIOD, POLL_TIMEOUT);
2745 pci_warn(pdev, "failed to finish MSI operation!\n");
2748 static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
2750 struct pci_dev *pdev = qm->pdev;
2751 int ret = -ETIMEDOUT;
2754 pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
2756 cmd |= QM_MSI_CAP_ENABLE;
2758 cmd &= ~QM_MSI_CAP_ENABLE;
2760 pci_write_config_dword(pdev, pdev->msi_cap, cmd);
2762 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
2763 pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
2764 if (cmd & QM_MSI_CAP_ENABLE)
2770 udelay(WAIT_PERIOD_US_MIN);
2771 qm_wait_msi_finish(qm);
2778 static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
2780 .hw_error_init = qm_hw_error_init_v1,
2781 .set_msi = qm_set_msi,
2784 static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
2785 .get_vft = qm_get_vft_v2,
2787 .hw_error_init = qm_hw_error_init_v2,
2788 .hw_error_uninit = qm_hw_error_uninit_v2,
2789 .hw_error_handle = qm_hw_error_handle_v2,
2790 .set_msi = qm_set_msi,
2793 static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
2794 .get_vft = qm_get_vft_v2,
2796 .hw_error_init = qm_hw_error_init_v3,
2797 .hw_error_uninit = qm_hw_error_uninit_v3,
2798 .hw_error_handle = qm_hw_error_handle_v2,
2799 .set_msi = qm_set_msi_v3,
2802 static void *qm_get_avail_sqe(struct hisi_qp *qp)
2804 struct hisi_qp_status *qp_status = &qp->qp_status;
2805 u16 sq_tail = qp_status->sq_tail;
2807 if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
2810 return qp->sqe + sq_tail * qp->qm->sqe_size;
2813 static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
2817 /* Use last 64 bits of DUS to reset status. */
2818 addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
2822 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
2824 struct device *dev = &qm->pdev->dev;
2828 if (!qm_qp_avail_state(qm, NULL, QP_INIT))
2829 return ERR_PTR(-EPERM);
2831 if (qm->qp_in_used == qm->qp_num) {
2832 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
2834 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
2835 return ERR_PTR(-EBUSY);
2838 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
2840 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
2842 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
2843 return ERR_PTR(-EBUSY);
2846 qp = &qm->qp_array[qp_id];
2847 hisi_qm_unset_hw_reset(qp);
2848 memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);
2850 qp->event_cb = NULL;
2853 qp->alg_type = alg_type;
2854 qp->is_in_kernel = true;
2856 atomic_set(&qp->qp_status.flags, QP_INIT);
2862 * hisi_qm_create_qp() - Create a queue pair from qm.
2863 * @qm: The qm we create a qp from.
2864 * @alg_type: Accelerator specific algorithm type in sqc.
2866 * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating
2867 * qp memory fails.
2869 static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
2874 ret = qm_pm_get_sync(qm);
2876 return ERR_PTR(ret);
2878 down_write(&qm->qps_lock);
2879 qp = qm_create_qp_nolock(qm, alg_type);
2880 up_write(&qm->qps_lock);
2889 * hisi_qm_release_qp() - Release a qp back to its qm.
2890 * @qp: The qp we want to release.
2892 * This function releases the resource of a qp.
2894 static void hisi_qm_release_qp(struct hisi_qp *qp)
2896 struct hisi_qm *qm = qp->qm;
2898 down_write(&qm->qps_lock);
2900 if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
2901 up_write(&qm->qps_lock);
2906 idr_remove(&qm->qp_idr, qp->qp_id);
2908 up_write(&qm->qps_lock);
2913 static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2915 struct hisi_qm *qm = qp->qm;
2916 struct device *dev = &qm->pdev->dev;
2917 enum qm_hw_ver ver = qm->ver;
2922 sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
2926 INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
2927 if (ver == QM_HW_V1) {
2928 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
2929 sqc->w8 = cpu_to_le16(qp->sq_depth - 1);
2931 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
2932 sqc->w8 = 0; /* rand_qc */
2934 sqc->cq_num = cpu_to_le16(qp_id);
2935 sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
2937 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2938 sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
2939 QM_QC_PASID_ENABLE_SHIFT);
2941 sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
2943 if (dma_mapping_error(dev, sqc_dma)) {
2948 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
2949 dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
2955 static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2957 struct hisi_qm *qm = qp->qm;
2958 struct device *dev = &qm->pdev->dev;
2959 enum qm_hw_ver ver = qm->ver;
2964 cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
2968 INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
2969 if (ver == QM_HW_V1) {
2970 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
2972 cqc->w8 = cpu_to_le16(qp->cq_depth - 1);
2974 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
2975 cqc->w8 = 0; /* rand_qc */
2977 cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
2979 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2980 cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
2982 cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
2984 if (dma_mapping_error(dev, cqc_dma)) {
2989 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
2990 dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
2996 static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
3000 qm_init_qp_status(qp);
3002 ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
3006 return qm_cq_ctx_cfg(qp, qp_id, pasid);
3009 static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
3011 struct hisi_qm *qm = qp->qm;
3012 struct device *dev = &qm->pdev->dev;
3013 int qp_id = qp->qp_id;
3017 if (!qm_qp_avail_state(qm, qp, QP_START))
3020 ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
3024 atomic_set(&qp->qp_status.flags, QP_START);
3025 dev_dbg(dev, "queue %d started\n", qp_id);
3031 * hisi_qm_start_qp() - Start a qp into running.
3032 * @qp: The qp we want to start to run.
3033 * @arg: Accelerator specific argument.
3035 * After this function, the qp can receive requests from user. Return 0 if
3036 * successful, -EBUSY if failed.
3038 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
3040 struct hisi_qm *qm = qp->qm;
3043 down_write(&qm->qps_lock);
3044 ret = qm_start_qp_nolock(qp, arg);
3045 up_write(&qm->qps_lock);
3049 EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
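/*
 * Illustrative usage sketch (not part of the driver): a kernel consumer
 * typically obtains qps via hisi_qm_alloc_qps_node() (below) and then starts
 * each one with hisi_qm_start_qp(). A minimal example, assuming a single qp
 * and no PASID (arg 0):
 *
 *	struct hisi_qp *qp = qps[0];
 *	int ret;
 *
 *	ret = hisi_qm_start_qp(qp, 0);
 *	if (ret < 0)
 *		pr_err("failed to start qp: %d\n", ret);
 */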
3052 * qp_stop_fail_cb() - call request cb.
3053 * @qp: the qp which failed to stop.
3055 * The callback function should be called whether the task completed or not.
3057 static void qp_stop_fail_cb(struct hisi_qp *qp)
3059 int qp_used = atomic_read(&qp->qp_status.used);
3060 u16 cur_tail = qp->qp_status.sq_tail;
3061 u16 sq_depth = qp->sq_depth;
3062 u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth;
3063 struct hisi_qm *qm = qp->qm;
3067 for (i = 0; i < qp_used; i++) {
3068 pos = (i + cur_head) % sq_depth;
3069 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
3070 atomic_dec(&qp->qp_status.used);
3075 * qm_drain_qp() - Drain a qp.
3076 * @qp: The qp we want to drain.
3078 * Determine whether the queue is cleared by judging the tail pointers of
3079 * sq and cq.
3081 static int qm_drain_qp(struct hisi_qp *qp)
3083 size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
3084 struct hisi_qm *qm = qp->qm;
3085 struct device *dev = &qm->pdev->dev;
3088 dma_addr_t dma_addr;
3092 /* No need to check if master OOO is blocked. */
3093 if (qm_check_dev_error(qm))
3096 /* Kunpeng930 supports draining the qp by device */
3097 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
3098 ret = qm_stop_qp(qp);
3100 dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
3104 addr = qm_ctx_alloc(qm, size, &dma_addr);
3106 dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
3111 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
3113 dev_err_ratelimited(dev, "Failed to dump sqc!\n");
3118 ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
3121 dev_err_ratelimited(dev, "Failed to dump cqc!\n");
3124 cqc = addr + sizeof(struct qm_sqc);
3126 if ((sqc->tail == cqc->tail) &&
3127 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
3130 if (i == MAX_WAIT_COUNTS) {
3131 dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
3136 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
3139 qm_ctx_free(qm, size, addr, &dma_addr);
3144 static int qm_stop_qp_nolock(struct hisi_qp *qp)
3146 struct device *dev = &qp->qm->pdev->dev;
3150 * It is allowed to stop and release a qp during reset. If the qp is
3151 * stopped during reset but is still to be released afterwards, the
3152 * is_resetting flag should be cleared so that this qp will not
3153 * be restarted after the reset.
3155 if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
3156 qp->is_resetting = false;
3160 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
3163 atomic_set(&qp->qp_status.flags, QP_STOP);
3165 ret = qm_drain_qp(qp);
3167 dev_err(dev, "Failed to drain out data for stopping!\n");
3170 flush_workqueue(qp->qm->wq);
3171 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
3172 qp_stop_fail_cb(qp);
3174 dev_dbg(dev, "stop queue %u!", qp->qp_id);
3180 * hisi_qm_stop_qp() - Stop a qp in qm.
3181 * @qp: The qp we want to stop.
3183 * This function is reverse of hisi_qm_start_qp. Return 0 if successful.
3185 int hisi_qm_stop_qp(struct hisi_qp *qp)
3189 down_write(&qp->qm->qps_lock);
3190 ret = qm_stop_qp_nolock(qp);
3191 up_write(&qp->qm->qps_lock);
3195 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
3198 * hisi_qp_send() - Queue up a task in the hardware queue.
3199 * @qp: The qp in which to put the message.
3200 * @msg: The message.
3202 * This function will return -EBUSY if the qp is currently full, and -EAGAIN
3203 * if the qm the qp belongs to is resetting.
3205 * Note: This function may run concurrently with qm_irq_thread and ACC reset.
3206 * It has no race with qm_irq_thread. However, an ACC reset may happen during
3207 * hisi_qp_send; for performance we hold no lock here. This can make the
3208 * current qm_db fail, or the sent sqe may never be received. The QM
3209 * sync/async receive functions should handle the error sqe, and the ACC
3210 * reset-done function should clear the used sqes to 0.
3212 int hisi_qp_send(struct hisi_qp *qp, const void *msg)
3214 struct hisi_qp_status *qp_status = &qp->qp_status;
3215 u16 sq_tail = qp_status->sq_tail;
3216 u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
3217 void *sqe = qm_get_avail_sqe(qp);
3219 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
3220 atomic_read(&qp->qm->status.flags) == QM_STOP ||
3221 qp->is_resetting)) {
3222 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
3229 memcpy(sqe, msg, qp->qm->sqe_size);
3231 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
3232 atomic_inc(&qp->qp_status.used);
3233 qp_status->sq_tail = sq_tail_next;
3237 EXPORT_SYMBOL_GPL(hisi_qp_send);
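/*
 * Illustrative usage sketch (not part of the driver): callers submit a
 * pre-built, accelerator-specific sqe and may retry on -EBUSY, since the
 * send path is lockless by design. The sqe type and the retry policy here
 * are assumptions:
 *
 *	do {
 *		ret = hisi_qp_send(qp, &sqe);
 *	} while (ret == -EBUSY);
 *	if (ret == -EAGAIN)
 *		return ret;	// qm is resetting, drop or re-queue the request
 */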
3239 static void hisi_qm_cache_wb(struct hisi_qm *qm)
3243 if (qm->ver == QM_HW_V1)
3246 writel(0x1, qm->io_base + QM_CACHE_WB_START);
3247 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
3248 val, val & BIT(0), POLL_PERIOD,
3250 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
3253 static void qm_qp_event_notifier(struct hisi_qp *qp)
3255 wake_up_interruptible(&qp->uacce_q->wait);
3258 /* This function returns the number of free qps in the qm. */
3259 static int hisi_qm_get_available_instances(struct uacce_device *uacce)
3261 struct hisi_qm *qm = uacce->priv;
3264 down_read(&qm->qps_lock);
3265 ret = qm->qp_num - qm->qp_in_used;
3266 up_read(&qm->qps_lock);
3271 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
3275 for (i = 0; i < qm->qp_num; i++)
3276 qm_set_qp_disable(&qm->qp_array[i], offset);
3279 static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
3281 struct uacce_queue *q)
3283 struct hisi_qm *qm = uacce->priv;
3287 qp = hisi_qm_create_qp(qm, alg_type);
3294 qp->event_cb = qm_qp_event_notifier;
3296 qp->is_in_kernel = false;
3301 static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
3303 struct hisi_qp *qp = q->priv;
3305 hisi_qm_release_qp(qp);
3308 /* map sq/cq/doorbell to user space */
3309 static int hisi_qm_uacce_mmap(struct uacce_queue *q,
3310 struct vm_area_struct *vma,
3311 struct uacce_qfile_region *qfr)
3313 struct hisi_qp *qp = q->priv;
3314 struct hisi_qm *qm = qp->qm;
3315 resource_size_t phys_base = qm->db_phys_base +
3316 qp->qp_id * qm->db_interval;
3317 size_t sz = vma->vm_end - vma->vm_start;
3318 struct pci_dev *pdev = qm->pdev;
3319 struct device *dev = &pdev->dev;
3320 unsigned long vm_pgoff;
3323 switch (qfr->type) {
3324 case UACCE_QFRT_MMIO:
3325 if (qm->ver == QM_HW_V1) {
3326 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
3328 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
3329 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
3330 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
3333 if (sz > qm->db_interval)
3337 vma->vm_flags |= VM_IO;
3339 return remap_pfn_range(vma, vma->vm_start,
3340 phys_base >> PAGE_SHIFT,
3341 sz, pgprot_noncached(vma->vm_page_prot));
3342 case UACCE_QFRT_DUS:
3343 if (sz != qp->qdma.size)
3347 * dma_mmap_coherent() requires vm_pgoff to be 0;
3348 * restore vm_pgoff to its initial value for mmap()
3350 vm_pgoff = vma->vm_pgoff;
3352 ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
3354 vma->vm_pgoff = vm_pgoff;
3362 static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
3364 struct hisi_qp *qp = q->priv;
3366 return hisi_qm_start_qp(qp, qp->pasid);
3369 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
3371 hisi_qm_stop_qp(q->priv);
3374 static int hisi_qm_is_q_updated(struct uacce_queue *q)
3376 struct hisi_qp *qp = q->priv;
3377 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
3380 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
3381 /* make sure to read data from memory */
3383 qm_cq_head_update(qp);
3384 cqe = qp->cqe + qp->qp_status.cq_head;
3391 static void qm_set_sqctype(struct uacce_queue *q, u16 type)
3393 struct hisi_qm *qm = q->uacce->priv;
3394 struct hisi_qp *qp = q->priv;
3396 down_write(&qm->qps_lock);
3397 qp->alg_type = type;
3398 up_write(&qm->qps_lock);
3401 static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
3404 struct hisi_qp *qp = q->priv;
3405 struct hisi_qp_info qp_info;
3406 struct hisi_qp_ctx qp_ctx;
3408 if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
3409 if (copy_from_user(&qp_ctx, (void __user *)arg,
3410 sizeof(struct hisi_qp_ctx)))
3413 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
3416 qm_set_sqctype(q, qp_ctx.qc_type);
3417 qp_ctx.id = qp->qp_id;
3419 if (copy_to_user((void __user *)arg, &qp_ctx,
3420 sizeof(struct hisi_qp_ctx)))
3424 } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) {
3425 if (copy_from_user(&qp_info, (void __user *)arg,
3426 sizeof(struct hisi_qp_info)))
3429 qp_info.sqe_size = qp->qm->sqe_size;
3430 qp_info.sq_depth = qp->sq_depth;
3431 qp_info.cq_depth = qp->cq_depth;
3433 if (copy_to_user((void __user *)arg, &qp_info,
3434 sizeof(struct hisi_qp_info)))
3443 static const struct uacce_ops uacce_qm_ops = {
3444 .get_available_instances = hisi_qm_get_available_instances,
3445 .get_queue = hisi_qm_uacce_get_queue,
3446 .put_queue = hisi_qm_uacce_put_queue,
3447 .start_queue = hisi_qm_uacce_start_queue,
3448 .stop_queue = hisi_qm_uacce_stop_queue,
3449 .mmap = hisi_qm_uacce_mmap,
3450 .ioctl = hisi_qm_uacce_ioctl,
3451 .is_q_updated = hisi_qm_is_q_updated,
3454 static int qm_alloc_uacce(struct hisi_qm *qm)
3456 struct pci_dev *pdev = qm->pdev;
3457 struct uacce_device *uacce;
3458 unsigned long mmio_page_nr;
3459 unsigned long dus_page_nr;
3460 u16 sq_depth, cq_depth;
3461 struct uacce_interface interface = {
3462 .flags = UACCE_DEV_SVA,
3463 .ops = &uacce_qm_ops,
3467 ret = strscpy(interface.name, dev_driver_string(&pdev->dev),
3468 sizeof(interface.name));
3470 return -ENAMETOOLONG;
3472 uacce = uacce_alloc(&pdev->dev, &interface);
3474 return PTR_ERR(uacce);
3476 if (uacce->flags & UACCE_DEV_SVA) {
3479 /* only consider sva case */
3480 uacce_remove(uacce);
3485 uacce->is_vf = pdev->is_virtfn;
3488 if (qm->ver == QM_HW_V1)
3489 uacce->api_ver = HISI_QM_API_VER_BASE;
3490 else if (qm->ver == QM_HW_V2)
3491 uacce->api_ver = HISI_QM_API_VER2_BASE;
3493 uacce->api_ver = HISI_QM_API_VER3_BASE;
3495 if (qm->ver == QM_HW_V1)
3496 mmio_page_nr = QM_DOORBELL_PAGE_NR;
3497 else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
3498 mmio_page_nr = QM_DOORBELL_PAGE_NR +
3499 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
3501 mmio_page_nr = qm->db_interval / PAGE_SIZE;
3503 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
3505 /* Add one more page for device or qp status */
3506 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
3507 sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
3510 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
3511 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
3519 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
3520 * there is a user on the QM, return failure without doing anything.
3521 * @qm: The qm to be frozen.
3523 * This function freezes the QM, then we can do SR-IOV disabling.
3525 static int qm_frozen(struct hisi_qm *qm)
3527 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
3530 down_write(&qm->qps_lock);
3532 if (!qm->qp_in_used) {
3533 qm->qp_in_used = qm->qp_num;
3534 up_write(&qm->qps_lock);
3535 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
3539 up_write(&qm->qps_lock);
3544 static int qm_try_frozen_vfs(struct pci_dev *pdev,
3545 struct hisi_qm_list *qm_list)
3547 struct hisi_qm *qm, *vf_qm;
3548 struct pci_dev *dev;
3551 if (!qm_list || !pdev)
3554 /* Try to freeze all the VFs before disabling SR-IOV */
3555 mutex_lock(&qm_list->lock);
3556 list_for_each_entry(qm, &qm_list->list, list) {
3560 if (pci_physfn(dev) == pdev) {
3561 vf_qm = pci_get_drvdata(dev);
3562 ret = qm_frozen(vf_qm);
3569 mutex_unlock(&qm_list->lock);
3575 * hisi_qm_wait_task_finish() - Wait until the task is finished
3576 * when removing the driver.
3577 * @qm: The qm needed to wait for the task to finish.
3578 * @qm_list: The list of all available devices.
3580 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
3582 while (qm_frozen(qm) ||
3583 ((qm->fun_type == QM_HW_PF) &&
3584 qm_try_frozen_vfs(qm->pdev, qm_list))) {
3585 msleep(WAIT_PERIOD);
3588 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
3589 test_bit(QM_RESETTING, &qm->misc_ctl))
3590 msleep(WAIT_PERIOD);
3592 udelay(REMOVE_WAIT_DELAY);
3594 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
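/*
 * Illustrative remove-path sketch (not part of the driver), assuming an
 * accelerator driver keeps a hisi_qm_list of its functions (xxx_devices
 * below is a placeholder), as the hisi accelerator drivers do:
 *
 *	static void xxx_remove(struct pci_dev *pdev)
 *	{
 *		struct hisi_qm *qm = pci_get_drvdata(pdev);
 *
 *		hisi_qm_wait_task_finish(qm, &xxx_devices);
 *		hisi_qm_stop(qm, QM_NORMAL);
 *		hisi_qm_uninit(qm);
 *	}
 */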
3596 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
3598 struct device *dev = &qm->pdev->dev;
3599 struct qm_dma *qdma;
3602 for (i = num - 1; i >= 0; i--) {
3603 qdma = &qm->qp_array[i].qdma;
3604 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
3605 kfree(qm->poll_data[i].qp_finish_id);
3608 kfree(qm->poll_data);
3609 kfree(qm->qp_array);
3612 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
3613 u16 sq_depth, u16 cq_depth)
3615 struct device *dev = &qm->pdev->dev;
3616 size_t off = qm->sqe_size * sq_depth;
3620 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16),
3622 if (!qm->poll_data[id].qp_finish_id)
3625 qp = &qm->qp_array[id];
3626 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
3629 goto err_free_qp_finish_id;
3631 qp->sqe = qp->qdma.va;
3632 qp->sqe_dma = qp->qdma.dma;
3633 qp->cqe = qp->qdma.va + off;
3634 qp->cqe_dma = qp->qdma.dma + off;
3635 qp->qdma.size = dma_size;
3636 qp->sq_depth = sq_depth;
3637 qp->cq_depth = cq_depth;
3643 err_free_qp_finish_id:
3644 kfree(qm->poll_data[id].qp_finish_id);
3648 static void hisi_qm_pre_init(struct hisi_qm *qm)
3650 struct pci_dev *pdev = qm->pdev;
3652 if (qm->ver == QM_HW_V1)
3653 qm->ops = &qm_hw_ops_v1;
3654 else if (qm->ver == QM_HW_V2)
3655 qm->ops = &qm_hw_ops_v2;
3657 qm->ops = &qm_hw_ops_v3;
3659 pci_set_drvdata(pdev, qm);
3660 mutex_init(&qm->mailbox_lock);
3661 init_rwsem(&qm->qps_lock);
3663 qm->misc_ctl = false;
3664 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
3665 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
3666 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
3670 static void qm_cmd_uninit(struct hisi_qm *qm)
3674 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
3677 val = readl(qm->io_base + QM_IFC_INT_MASK);
3678 val |= QM_IFC_INT_DISABLE;
3679 writel(val, qm->io_base + QM_IFC_INT_MASK);
3682 static void qm_cmd_init(struct hisi_qm *qm)
3686 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
3689 /* Clear communication interrupt source */
3690 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR);
3692 /* Enable pf to vf communication reg. */
3693 val = readl(qm->io_base + QM_IFC_INT_MASK);
3694 val &= ~QM_IFC_INT_DISABLE;
3695 writel(val, qm->io_base + QM_IFC_INT_MASK);
3698 static void qm_put_pci_res(struct hisi_qm *qm)
3700 struct pci_dev *pdev = qm->pdev;
3702 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
3703 iounmap(qm->db_io_base);
3705 iounmap(qm->io_base);
3706 pci_release_mem_regions(pdev);
3709 static void hisi_qm_pci_uninit(struct hisi_qm *qm)
3711 struct pci_dev *pdev = qm->pdev;
3713 pci_free_irq_vectors(pdev);
3715 pci_disable_device(pdev);
3718 static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
3720 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF)
3721 writel(state, qm->io_base + QM_VF_STATE);
3724 static void qm_last_regs_uninit(struct hisi_qm *qm)
3726 struct qm_debug *debug = &qm->debug;
3728 if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
3731 kfree(debug->qm_last_words);
3732 debug->qm_last_words = NULL;
3735 static void hisi_qm_uninit_work(struct hisi_qm *qm)
3737 destroy_workqueue(qm->wq);
3740 static void hisi_qm_memory_uninit(struct hisi_qm *qm)
3742 struct device *dev = &qm->pdev->dev;
3744 hisi_qp_memory_uninit(qm, qm->qp_num);
3746 hisi_qm_cache_wb(qm);
3747 dma_free_coherent(dev, qm->qdma.size,
3748 qm->qdma.va, qm->qdma.dma);
3751 idr_destroy(&qm->qp_idr);
3753 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
3758 * hisi_qm_uninit() - Uninitialize qm.
3759 * @qm: The qm to be uninitialized.
3761 * This function uninits qm related device resources.
3763 void hisi_qm_uninit(struct hisi_qm *qm)
3765 qm_last_regs_uninit(qm);
3768 hisi_qm_uninit_work(qm);
3769 down_write(&qm->qps_lock);
3771 if (!qm_avail_state(qm, QM_CLOSE)) {
3772 up_write(&qm->qps_lock);
3776 hisi_qm_memory_uninit(qm);
3777 hisi_qm_set_state(qm, QM_NOT_READY);
3778 up_write(&qm->qps_lock);
3780 qm_irqs_unregister(qm);
3781 hisi_qm_pci_uninit(qm);
3783 uacce_remove(qm->uacce);
3787 EXPORT_SYMBOL_GPL(hisi_qm_uninit);
3790 * hisi_qm_get_vft() - Get vft from a qm.
3791 * @qm: The qm we want to get its vft.
3792 * @base: The base number of queue in vft.
3793 * @number: The number of queues in vft.
3795 * We can allocate multiple queues to a qm by configuring the virtual function
3796 * table. We get the related configuration with this function. Normally, we call
3797 * this function in the VF driver to get the queue information.
3799 * qm hw v1 does not support this interface.
3801 static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
3803 if (!base || !number)
3806 if (!qm->ops->get_vft) {
3807 dev_err(&qm->pdev->dev, "Don't support vft read!\n");
3811 return qm->ops->get_vft(qm, base, number);
3815 * hisi_qm_set_vft() - Set vft to a qm.
3816 * @qm: The qm we want to set its vft.
3817 * @fun_num: The function number.
3818 * @base: The base number of queue in vft.
3819 * @number: The number of queues in vft.
3821 * This function is always called in the PF driver; it is used to assign queues
3822 * among the PF and VFs.
3824 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
3825 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
3826 * (VF function number 0x2)
3828 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
3831 u32 max_q_num = qm->ctrl_qp_num;
3833 if (base >= max_q_num || number > max_q_num ||
3834 (base + number) > max_q_num)
3837 return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
3840 static void qm_init_eq_aeq_status(struct hisi_qm *qm)
3842 struct hisi_qm_status *status = &qm->status;
3844 status->eq_head = 0;
3845 status->aeq_head = 0;
3846 status->eqc_phase = true;
3847 status->aeqc_phase = true;
3850 static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm)
3852 /* Clear eq/aeq interrupt source */
3853 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
3854 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
3856 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
3857 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
3860 static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
3862 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
3863 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
3866 static int qm_eq_ctx_cfg(struct hisi_qm *qm)
3868 struct device *dev = &qm->pdev->dev;
3873 eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
3877 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
3878 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
3879 if (qm->ver == QM_HW_V1)
3880 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
3881 eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
3883 eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
3885 if (dma_mapping_error(dev, eqc_dma)) {
3890 ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
3891 dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
3897 static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
3899 struct device *dev = &qm->pdev->dev;
3900 struct qm_aeqc *aeqc;
3901 dma_addr_t aeqc_dma;
3904 aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
3908 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
3909 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
3910 aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
3912 aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
3914 if (dma_mapping_error(dev, aeqc_dma)) {
3919 ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
3920 dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
3926 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
3928 struct device *dev = &qm->pdev->dev;
3931 qm_init_eq_aeq_status(qm);
3933 ret = qm_eq_ctx_cfg(qm);
3935 dev_err(dev, "Set eqc failed!\n");
3939 return qm_aeq_ctx_cfg(qm);
3942 static int __hisi_qm_start(struct hisi_qm *qm)
3946 WARN_ON(!qm->qdma.va);
3948 if (qm->fun_type == QM_HW_PF) {
3949 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
3954 ret = qm_eq_aeq_ctx_cfg(qm);
3958 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
3962 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
3966 qm_init_prefetch(qm);
3967 qm_enable_eq_aeq_interrupts(qm);
3973 * hisi_qm_start() - start qm
3974 * @qm: The qm to be started.
3976 * This function starts a qm, then we can allocate qp from this qm.
3978 int hisi_qm_start(struct hisi_qm *qm)
3980 struct device *dev = &qm->pdev->dev;
3983 down_write(&qm->qps_lock);
3985 if (!qm_avail_state(qm, QM_START)) {
3986 up_write(&qm->qps_lock);
3990 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
3993 dev_err(dev, "qp_num should not be 0\n");
3998 ret = __hisi_qm_start(qm);
4000 atomic_set(&qm->status.flags, QM_START);
4002 hisi_qm_set_state(qm, QM_READY);
4004 up_write(&qm->qps_lock);
4007 EXPORT_SYMBOL_GPL(hisi_qm_start);
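/*
 * Illustrative probe-time sketch (not part of the driver): after the usual
 * hisi_qm_init() has set up PCI and memory resources, hisi_qm_start() brings
 * the function up. The error label is an assumption:
 *
 *	ret = hisi_qm_init(qm);
 *	if (ret)
 *		return ret;
 *	ret = hisi_qm_start(qm);
 *	if (ret)
 *		goto err_qm_uninit;
 */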
4009 static int qm_restart(struct hisi_qm *qm)
4011 struct device *dev = &qm->pdev->dev;
4015 ret = hisi_qm_start(qm);
4019 down_write(&qm->qps_lock);
4020 for (i = 0; i < qm->qp_num; i++) {
4021 qp = &qm->qp_array[i];
4022 if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
4023 qp->is_resetting == true) {
4024 ret = qm_start_qp_nolock(qp, 0);
4026 dev_err(dev, "Failed to start qp%d!\n", i);
4028 up_write(&qm->qps_lock);
4031 qp->is_resetting = false;
4034 up_write(&qm->qps_lock);
4039 /* Stop started qps in reset flow */
4040 static int qm_stop_started_qp(struct hisi_qm *qm)
4042 struct device *dev = &qm->pdev->dev;
4046 for (i = 0; i < qm->qp_num; i++) {
4047 qp = &qm->qp_array[i];
4048 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
4049 qp->is_resetting = true;
4050 ret = qm_stop_qp_nolock(qp);
4052 dev_err(dev, "Failed to stop qp%d!\n", i);
4063 * qm_clear_queues() - Clear all queues memory in a qm.
4064 * @qm: The qm in which the queues will be cleared.
4066 * This function clears all queues memory in a qm. Reset of accelerator can
4067 * use this to clear queues.
4069 static void qm_clear_queues(struct hisi_qm *qm)
4074 for (i = 0; i < qm->qp_num; i++) {
4075 qp = &qm->qp_array[i];
4076 if (qp->is_in_kernel && qp->is_resetting)
4077 memset(qp->qdma.va, 0, qp->qdma.size);
4080 memset(qm->qdma.va, 0, qm->qdma.size);
4084 * hisi_qm_stop() - Stop a qm.
4085 * @qm: The qm which will be stopped.
4086 * @r: The reason to stop qm.
4088 * This function stops the qm and its qps; afterwards the qm cannot accept requests.
4089 * Related resources are not released in this state; we can use hisi_qm_start
4090 * to let qm start again.
4092 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
4094 struct device *dev = &qm->pdev->dev;
4097 down_write(&qm->qps_lock);
4099 qm->status.stop_reason = r;
4100 if (!qm_avail_state(qm, QM_STOP)) {
4105 if (qm->status.stop_reason == QM_SOFT_RESET ||
4106 qm->status.stop_reason == QM_FLR) {
4107 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
4108 ret = qm_stop_started_qp(qm);
4110 dev_err(dev, "Failed to stop started qp!\n");
4113 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
4116 qm_disable_eq_aeq_interrupts(qm);
4117 if (qm->fun_type == QM_HW_PF) {
4118 ret = hisi_qm_set_vft(qm, 0, 0, 0);
4120 dev_err(dev, "Failed to set vft!\n");
4126 qm_clear_queues(qm);
4127 atomic_set(&qm->status.flags, QM_STOP);
4130 up_write(&qm->qps_lock);
4133 EXPORT_SYMBOL_GPL(hisi_qm_stop);
4135 static ssize_t qm_status_read(struct file *filp, char __user *buffer,
4136 size_t count, loff_t *pos)
4138 struct hisi_qm *qm = filp->private_data;
4139 char buf[QM_DBG_READ_LEN];
4142 val = atomic_read(&qm->status.flags);
4143 len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
4145 return simple_read_from_buffer(buffer, count, pos, buf, len);
4148 static const struct file_operations qm_status_fops = {
4149 .owner = THIS_MODULE,
4150 .open = simple_open,
4151 .read = qm_status_read,
4154 static int qm_debugfs_atomic64_set(void *data, u64 val)
4159 atomic64_set((atomic64_t *)data, 0);
4164 static int qm_debugfs_atomic64_get(void *data, u64 *val)
4166 *val = atomic64_read((atomic64_t *)data);
4171 DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
4172 qm_debugfs_atomic64_set, "%llu\n");
4174 static void qm_hw_error_init(struct hisi_qm *qm)
4176 if (!qm->ops->hw_error_init) {
4177 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
4181 qm->ops->hw_error_init(qm);
4184 static void qm_hw_error_uninit(struct hisi_qm *qm)
4186 if (!qm->ops->hw_error_uninit) {
4187 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
4191 qm->ops->hw_error_uninit(qm);
4194 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
4196 if (!qm->ops->hw_error_handle) {
4197 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
4198 return ACC_ERR_NONE;
4201 return qm->ops->hw_error_handle(qm);
4205 * hisi_qm_dev_err_init() - Initialize device error configuration.
4206 * @qm: The qm for which we want to do error initialization.
4208 * Initialize QM and device error related configuration.
4210 void hisi_qm_dev_err_init(struct hisi_qm *qm)
4212 if (qm->fun_type == QM_HW_VF)
4215 qm_hw_error_init(qm);
4217 if (!qm->err_ini->hw_err_enable) {
4218 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
4221 qm->err_ini->hw_err_enable(qm);
4223 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
4226 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
4227 * @qm: The qm for which we want to do error uninitialization.
4229 * Uninitialize QM and device error related configuration.
4231 void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
4233 if (qm->fun_type == QM_HW_VF)
4236 qm_hw_error_uninit(qm);
4238 if (!qm->err_ini->hw_err_disable) {
4239 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
4242 qm->err_ini->hw_err_disable(qm);
4244 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
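/*
 * Illustrative sketch (not part of the driver): a device driver supplies the
 * err_ini hooks consumed by the two functions above. The field names follow
 * struct hisi_qm_err_ini in hisi_acc_qm.h; the xxx_* callbacks are
 * placeholder assumptions:
 *
 *	static const struct hisi_qm_err_ini xxx_err_ini = {
 *		.hw_init		 = xxx_set_user_domain_and_cache,
 *		.hw_err_enable		 = xxx_hw_error_enable,
 *		.hw_err_disable		 = xxx_hw_error_disable,
 *		.get_dev_hw_err_status	 = xxx_get_hw_err_status,
 *		.clear_dev_hw_err_status = xxx_clear_hw_err_status,
 *		.log_dev_hw_err		 = xxx_log_hw_error,
 *	};
 */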
4247 * hisi_qm_free_qps() - free multiple queue pairs.
4248 * @qps: The queue pairs need to be freed.
4249 * @qp_num: The num of queue pairs.
4251 void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
4255 if (!qps || qp_num <= 0)
4258 for (i = qp_num - 1; i >= 0; i--)
4259 hisi_qm_release_qp(qps[i]);
4261 EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
4263 static void free_list(struct list_head *head)
4265 struct hisi_qm_resource *res, *tmp;
4267 list_for_each_entry_safe(res, tmp, head, list) {
4268 list_del(&res->list);
4273 static int hisi_qm_sort_devices(int node, struct list_head *head,
4274 struct hisi_qm_list *qm_list)
4276 struct hisi_qm_resource *res, *tmp;
4278 struct list_head *n;
4282 list_for_each_entry(qm, &qm_list->list, list) {
4283 dev = &qm->pdev->dev;
4285 if (IS_ENABLED(CONFIG_NUMA)) {
4286 dev_node = dev_to_node(dev);
4291 res = kzalloc(sizeof(*res), GFP_KERNEL);
4296 res->distance = node_distance(dev_node, node);
4298 list_for_each_entry(tmp, head, list) {
4299 if (res->distance < tmp->distance) {
4304 list_add_tail(&res->list, n);
4311 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
4312 * @qm_list: The list of all available devices.
4313 * @qp_num: The number of queue pairs to be created.
4314 * @alg_type: The algorithm type.
4315 * @node: The numa node.
4316 * @qps: The queue pairs to be created.
4318 * This function will sort all available devices according to numa distance,
4319 * then try to create all queue pairs on one device; if no device meets
4320 * the requirements, it will return an error.
4322 int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
4323 u8 alg_type, int node, struct hisi_qp **qps)
4325 struct hisi_qm_resource *tmp;
4330 if (!qps || !qm_list || qp_num <= 0)
4333 mutex_lock(&qm_list->lock);
4334 if (hisi_qm_sort_devices(node, &head, qm_list)) {
4335 mutex_unlock(&qm_list->lock);
4339 list_for_each_entry(tmp, &head, list) {
4340 for (i = 0; i < qp_num; i++) {
4341 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
4342 if (IS_ERR(qps[i])) {
4343 hisi_qm_free_qps(qps, i);
4354 mutex_unlock(&qm_list->lock);
4356 pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
4357 node, alg_type, qp_num);
4363 EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
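/*
 * Illustrative usage sketch (not part of the driver): allocate a batch of
 * qps close to the caller's NUMA node and release them with
 * hisi_qm_free_qps() on failure or teardown. QP_NUM, the qm_list name and
 * the alg type are assumptions:
 *
 *	struct hisi_qp *qps[QP_NUM];
 *	int ret;
 *
 *	ret = hisi_qm_alloc_qps_node(&xxx_devices, QP_NUM, 0,
 *				     numa_node_id(), qps);
 *	if (ret)
 *		return ret;
 *	...
 *	hisi_qm_free_qps(qps, QP_NUM);
 */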
4365 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
4367 u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j;
4368 u32 max_qp_num = qm->max_qp_num;
4369 u32 q_base = qm->qp_num;
4375 vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
4377 /* If vfs_q_num is less than num_vfs, return error. */
4378 if (vfs_q_num < num_vfs)
4381 q_num = vfs_q_num / num_vfs;
4382 remain_q_num = vfs_q_num % num_vfs;
4384 for (i = num_vfs; i > 0; i--) {
4386 * If q_num + remain_q_num > max_qp_num for the last vf, divide the
4387 * remaining queues equally among the vfs.
4389 if (i == num_vfs && q_num + remain_q_num <= max_qp_num) {
4390 act_q_num = q_num + remain_q_num;
4392 } else if (remain_q_num > 0) {
4393 act_q_num = q_num + 1;
4399 act_q_num = min_t(int, act_q_num, max_qp_num);
4400 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
4402 for (j = num_vfs; j > i; j--)
4403 hisi_qm_set_vft(qm, j, 0, 0);
4406 q_base += act_q_num;
4412 static int qm_clear_vft_config(struct hisi_qm *qm)
4417 for (i = 1; i <= qm->vfs_num; i++) {
4418 ret = hisi_qm_set_vft(qm, i, 0, 0);
4427 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
4429 struct device *dev = &qm->pdev->dev;
4430 u32 ir = qos * QM_QOS_RATE;
4431 int ret, total_vfs, i;
4433 total_vfs = pci_sriov_get_totalvfs(qm->pdev);
4434 if (fun_index > total_vfs)
4437 qm->factor[fun_index].func_qos = qos;
4439 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
4441 dev_err(dev, "failed to calculate shaper parameter!\n");
4445 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
4446 /* The base queue number is reused for the different alg types */
4447 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
4449 dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
4457 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
4459 u64 cir_u = 0, cir_b = 0, cir_s = 0;
4460 u64 shaper_vft, ir_calc, ir;
4465 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
4466 val & BIT(0), POLL_PERIOD,
4471 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
4472 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE);
4473 writel(fun_index, qm->io_base + QM_VFT_CFG);
4475 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
4476 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
4478 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
4479 val & BIT(0), POLL_PERIOD,
4484 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
4485 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32);
4487 cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK;
4488 cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK;
4489 cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT;
4491 cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK;
4492 cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT;
4494 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
4496 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE;
4498 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
4499 if (error_rate > QM_QOS_MIN_ERROR_RATE) {
4500 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate);
4507 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
4509 struct device *dev = &qm->pdev->dev;
4514 qos = qm_get_shaper_vft_qos(qm, fun_num);
4516 dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num);
4520 mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
4521 ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
4523 dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
4526 static int qm_vf_read_qos(struct hisi_qm *qm)
4531 /* reset mailbox qos val */
4534 /* vf ping pf to get function qos */
4535 ret = qm_ping_pf(qm, QM_VF_GET_QOS);
4537 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
4542 msleep(QM_WAIT_DST_ACK);
4546 if (++cnt > QM_MAX_VF_WAIT_COUNT) {
4547 pci_err(qm->pdev, "PF ping VF timeout!\n");
4555 static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
4556 size_t count, loff_t *pos)
4558 struct hisi_qm *qm = filp->private_data;
4559 char tbuf[QM_DBG_READ_LEN];
4563 ret = hisi_qm_get_dfx_access(qm);
4567 /* Mailbox and reset cannot be operated at the same time */
4568 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
4569 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
4571 goto err_put_dfx_access;
4574 if (qm->fun_type == QM_HW_PF) {
4575 ir = qm_get_shaper_vft_qos(qm, 0);
4577 ret = qm_vf_read_qos(qm);
4579 goto err_get_status;
4583 qos_val = ir / QM_QOS_RATE;
4584 ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val);
4586 ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
4589 clear_bit(QM_RESETTING, &qm->misc_ctl);
4591 hisi_qm_put_dfx_access(qm);
4595 static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
4597 int buflen = strlen(buf);
4600 for (i = 0; i < buflen; i++) {
4601 if (!isdigit(buf[i]))
4605 ret = sscanf(buf, "%lu", val);
4606 if (ret != QM_QOS_VAL_NUM)
4612 static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
4614 unsigned int *fun_index)
4616 char tbuf_bdf[QM_DBG_READ_LEN] = {0};
4617 char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
4618 u32 tmp1, device, function;
4621 ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf);
4622 if (ret != QM_QOS_PARAM_NUM)
4625 ret = qm_qos_value_init(val_buf, val);
4626 if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) {
4627 pci_err(qm->pdev, "invalid qos value, please set 1~1000!\n");
4631 ret = sscanf(tbuf_bdf, "%u:%x:%u.%u", &tmp1, &bus, &device, &function);
4632 if (ret != QM_QOS_BDF_PARAM_NUM) {
4633 pci_err(qm->pdev, "invalid pci bdf value!\n");
4637 *fun_index = PCI_DEVFN(device, function);
4642 static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
4643 size_t count, loff_t *pos)
4645 struct hisi_qm *qm = filp->private_data;
4646 char tbuf[QM_DBG_READ_LEN];
4647 unsigned int fun_index;
4651 if (qm->fun_type == QM_HW_VF)
4657 if (count >= QM_DBG_READ_LEN)
4660 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count);
4665 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index);
4669 /* Mailbox and reset cannot be operated at the same time */
4670 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
4671 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
4675 ret = qm_pm_get_sync(qm);
4678 goto err_get_status;
4681 ret = qm_func_shaper_enable(qm, fun_index, val);
4683 pci_err(qm->pdev, "failed to enable function shaper!\n");
4688 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n",
4695 clear_bit(QM_RESETTING, &qm->misc_ctl);
4699 static const struct file_operations qm_algqos_fops = {
4700 .owner = THIS_MODULE,
4701 .open = simple_open,
4702 .read = qm_algqos_read,
4703 .write = qm_algqos_write,
4707 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
4708 * @qm: The qm for which we want to add debugfs files.
4710 * Create function qos debugfs files; the VF pings the PF to get the function qos.
4712 static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
4714 if (qm->fun_type == QM_HW_PF)
4715 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
4716 qm, &qm_algqos_fops);
4717 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
4718 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
4719 qm, &qm_algqos_fops);
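/*
 * Illustrative debugfs usage (not part of the driver): on the PF, writing
 * "<bdf> <val>" to alg_qos sets the shaper for the function with that BDF;
 * qm_get_qos_value() requires val to be a decimal in 1~1000. The path below
 * is an assumption that depends on the accelerator driver:
 *
 *	echo "0000:35:00.0 100" > /sys/kernel/debug/<acc>/<bdf>/alg_qos
 *	cat /sys/kernel/debug/<acc>/<bdf>/alg_qos
 */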
4723 * hisi_qm_debug_init() - Initialize qm related debugfs files.
4724 * @qm: The qm for which we want to add debugfs files.
4726 * Create qm related debugfs files.
4728 void hisi_qm_debug_init(struct hisi_qm *qm)
4730 struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
4731 struct qm_dfx *dfx = &qm->debug.dfx;
4732 struct dentry *qm_d;
4736 qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
4737 qm->debug.qm_d = qm_d;
4739 /* only show this in PF */
4740 if (qm->fun_type == QM_HW_PF) {
4741 qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
4742 for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
4743 qm_create_debugfs_file(qm, qm->debug.qm_d, i);
4747 debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
4748 qm, &qm_diff_regs_fops);
4750 debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
4752 debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
4754 debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
4756 for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
4757 data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
4758 debugfs_create_file(qm_dfx_files[i].name,
4765 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
4766 hisi_qm_set_algqos_init(qm);
4768 EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
4771 * hisi_qm_debug_regs_clear() - clear qm debug related registers.
4772 * @qm: The qm for which we want to clear its debug registers.
4774 void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
4776 const struct debugfs_reg32 *regs;
4779 /* clear current_qm */
4780 writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
4781 writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
4783 /* clear current_q */
4784 writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
4785 writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
4788 * these registers are read-and-clear, so clear them after
4789 * reading them.
4791 writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
4794 for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
4795 readl(qm->io_base + regs->offset);
4799 /* clear clear_enable */
4800 writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
4802 EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
4804 static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
4808 for (i = 1; i <= total_func; i++)
4809 qm->factor[i].func_qos = QM_QOS_MAX_VAL;
4813 * hisi_qm_sriov_enable() - enable virtual functions
4814 * @pdev: the PCIe device
4815 * @max_vfs: the number of virtual functions to enable
4817 * Returns the number of enabled VFs. If there are VFs enabled already, or
4818 * max_vfs is more than the number the device can enable, returns failure.
4821 int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
4823 struct hisi_qm *qm = pci_get_drvdata(pdev);
4824 int pre_existing_vfs, num_vfs, total_vfs, ret;
4826 ret = qm_pm_get_sync(qm);
4830 total_vfs = pci_sriov_get_totalvfs(pdev);
4831 pre_existing_vfs = pci_num_vf(pdev);
4832 if (pre_existing_vfs) {
4833 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
4838 if (max_vfs > total_vfs) {
4839 pci_err(pdev, "%d VFs are more than total VFs %d!\n", max_vfs, total_vfs);
4846 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
4847 hisi_qm_init_vf_qos(qm, num_vfs);
4849 ret = qm_vf_q_assign(qm, num_vfs);
4851 pci_err(pdev, "Can't assign queues for VF!\n");
4855 qm->vfs_num = num_vfs;
4857 ret = pci_enable_sriov(pdev, num_vfs);
4859 pci_err(pdev, "Can't enable VF!\n");
4860 qm_clear_vft_config(qm);
4864 pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
4872 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
4875 * hisi_qm_sriov_disable - disable virtual functions
4876 * @pdev: the PCI device.
4877 * @is_frozen: true when all the VFs are frozen.
4879 * Return failure if there are VFs assigned already or a VF is in use.
4881 int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
4883 struct hisi_qm *qm = pci_get_drvdata(pdev);
4886 if (pci_vfs_assigned(pdev)) {
4887 pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
4891 /* While a VF is in use, SR-IOV cannot be disabled. */
4892 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
4893 pci_err(pdev, "Task is using its VF!\n");
4897 pci_disable_sriov(pdev);
4899 ret = qm_clear_vft_config(qm);
4907 EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
4910 * hisi_qm_sriov_configure - configure the number of VFs
4911 * @pdev: The PCI device
4912 * @num_vfs: The number of VFs need enabled
4914 * Enable SR-IOV according to num_vfs, 0 means disable.
4916 int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
4919 return hisi_qm_sriov_disable(pdev, false);
4921 return hisi_qm_sriov_enable(pdev, num_vfs);
4923 EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
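/*
 * Illustrative wiring sketch (not part of the driver): accelerator drivers
 * hook this directly into their pci_driver so that writing
 * /sys/bus/pci/devices/<bdf>/sriov_numvfs enables or disables VFs:
 *
 *	static struct pci_driver xxx_pci_driver = {
 *		...
 *		.sriov_configure = hisi_qm_sriov_configure,
 *	};
 */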
4925 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
4929 if (!qm->err_ini->get_dev_hw_err_status) {
4930 dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
4931 return ACC_ERR_NONE;
4934 /* get device hardware error status */
4935 err_sts = qm->err_ini->get_dev_hw_err_status(qm);
4937 if (err_sts & qm->err_info.ecc_2bits_mask)
4938 qm->err_status.is_dev_ecc_mbit = true;
4940 if (qm->err_ini->log_dev_hw_err)
4941 qm->err_ini->log_dev_hw_err(qm, err_sts);
4943 if (err_sts & qm->err_info.dev_reset_mask)
4944 return ACC_ERR_NEED_RESET;
4946 if (qm->err_ini->clear_dev_hw_err_status)
4947 qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
4950 return ACC_ERR_RECOVERED;
4953 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
4955 enum acc_err_result qm_ret, dev_ret;
4958 qm_ret = qm_hw_error_handle(qm);
4960 /* log device error */
4961 dev_ret = qm_dev_err_handle(qm);
4963 return (qm_ret == ACC_ERR_NEED_RESET ||
4964 dev_ret == ACC_ERR_NEED_RESET) ?
4965 ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
4969 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
4970 * @pdev: The PCI device which need report error.
4971 * @state: The connectivity between CPU and device.
4973 * We register this function into the PCIe AER handlers. It will report device
4974 * or qm hardware error status when an error occurs.
4976 pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
4977 pci_channel_state_t state)
4979 struct hisi_qm *qm = pci_get_drvdata(pdev);
4980 enum acc_err_result ret;
4982 if (pdev->is_virtfn)
4983 return PCI_ERS_RESULT_NONE;
4985 pci_info(pdev, "PCI error detected, state(=%u)!!\n", state);
4986 if (state == pci_channel_io_perm_failure)
4987 return PCI_ERS_RESULT_DISCONNECT;
4989 ret = qm_process_dev_error(qm);
4990 if (ret == ACC_ERR_NEED_RESET)
4991 return PCI_ERS_RESULT_NEED_RESET;
4993 return PCI_ERS_RESULT_RECOVERED;
4995 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
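/*
 * Illustrative wiring sketch (not part of the driver): this callback is
 * registered through the PCIe AER error handlers; the hisi accelerator
 * drivers pair it with the QM reset hooks:
 *
 *	static const struct pci_error_handlers xxx_err_handler = {
 *		.error_detected	= hisi_qm_dev_err_detected,
 *		...
 *	};
 */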
4997 static int qm_check_req_recv(struct hisi_qm *qm)
4999 struct pci_dev *pdev = qm->pdev;
5003 if (qm->ver >= QM_HW_V3)
5006 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
5007 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
5008 (val == ACC_VENDOR_ID_VALUE),
5009 POLL_PERIOD, POLL_TIMEOUT);
5011 dev_err(&pdev->dev, "Fails to read QM reg!\n");
5015 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
5016 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
5017 (val == PCI_VENDOR_ID_HUAWEI),
5018 POLL_PERIOD, POLL_TIMEOUT);
5020 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n");
5025 static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
5027 struct pci_dev *pdev = qm->pdev;
5031 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
5033 cmd |= PCI_COMMAND_MEMORY;
5035 cmd &= ~PCI_COMMAND_MEMORY;
5037 pci_write_config_word(pdev, PCI_COMMAND, cmd);
5038 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
5039 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
5040 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
5049 static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
5051 struct pci_dev *pdev = qm->pdev;
5056 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
5057 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
5059 sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
5061 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
5062 pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
5064 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
5065 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
5066 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
5067 ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
5076 static int qm_vf_reset_prepare(struct hisi_qm *qm,
5077 enum qm_stop_reason stop_reason)
5079 struct hisi_qm_list *qm_list = qm->qm_list;
5080 struct pci_dev *pdev = qm->pdev;
5081 struct pci_dev *virtfn;
5082 struct hisi_qm *vf_qm;
5085 mutex_lock(&qm_list->lock);
5086 list_for_each_entry(vf_qm, &qm_list->list, list) {
5087 virtfn = vf_qm->pdev;
5091 if (pci_physfn(virtfn) == pdev) {
5092 /* save VFs PCIE BAR configuration */
5093 pci_save_state(virtfn);
5095 ret = hisi_qm_stop(vf_qm, stop_reason);
5102 mutex_unlock(&qm_list->lock);
5106 static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
5107 enum qm_stop_reason stop_reason)
5109 struct pci_dev *pdev = qm->pdev;
5115 /* Kunpeng930 supports notifying VFs to stop before PF reset */
5116 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
5117 ret = qm_ping_all_vfs(qm, cmd);
5119 pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
5121 ret = qm_vf_reset_prepare(qm, stop_reason);
5123 pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret);
5129 static int qm_controller_reset_prepare(struct hisi_qm *qm)
5131 struct pci_dev *pdev = qm->pdev;
5134 ret = qm_reset_prepare_ready(qm);
5136 pci_err(pdev, "Controller reset not ready!\n");
5140 /* PF obtains the information of VF by querying the register. */
5143 /* Whether or not the VFs stop successfully, the soft reset continues. */
5144 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET);
5146 pci_err(pdev, "failed to stop vfs by pf in soft reset.\n");
5148 ret = hisi_qm_stop(qm, QM_SOFT_RESET);
5150 pci_err(pdev, "Fails to stop QM!\n");
5151 qm_reset_bit_clear(qm);
5155 ret = qm_wait_vf_prepare_finish(qm);
5157 pci_err(pdev, "failed to stop by vfs in soft reset!\n");
5159 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
5164 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
5168 /* Kunpeng930 hardware automatically closes master ooo when NFE occurs */
5169 if (qm->ver >= QM_HW_V3)
5172 if (!qm->err_status.is_dev_ecc_mbit &&
5173 qm->err_status.is_qm_ecc_mbit &&
5174 qm->err_ini->close_axi_master_ooo) {
5176 qm->err_ini->close_axi_master_ooo(qm);
5178 } else if (qm->err_status.is_dev_ecc_mbit &&
5179 !qm->err_status.is_qm_ecc_mbit &&
5180 !qm->err_ini->close_axi_master_ooo) {
5182 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
5183 writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
5184 qm->io_base + QM_RAS_NFE_ENABLE);
5185 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
5189 static int qm_soft_reset(struct hisi_qm *qm)
5191 struct pci_dev *pdev = qm->pdev;
5195 /* Ensure all doorbells and mailboxes are received by QM */
5196 ret = qm_check_req_recv(qm);
5201 ret = qm_set_vf_mse(qm, false);
5203 pci_err(pdev, "Fails to disable vf MSE bit.\n");
5208 ret = qm->ops->set_msi(qm, false);
5210 pci_err(pdev, "Fails to disable PEH MSI bit.\n");
5214 qm_dev_ecc_mbit_handle(qm);
5216 /* OOO register set and check */
5217 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
5218 qm->io_base + ACC_MASTER_GLOBAL_CTRL);
5220 /* If the bus is locked, reset the chip */
5221 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
5223 (val == ACC_MASTER_TRANS_RETURN_RW),
5224 POLL_PERIOD, POLL_TIMEOUT);
5226 pci_emerg(pdev, "Bus lock! Please reset system.\n");
5230 if (qm->err_ini->close_sva_prefetch)
5231 qm->err_ini->close_sva_prefetch(qm);
5233 ret = qm_set_pf_mse(qm, false);
5235 pci_err(pdev, "Fails to disable pf MSE bit.\n");
5239 /* The reset related sub-control registers are not in PCI BAR */
5240 if (ACPI_HANDLE(&pdev->dev)) {
5241 unsigned long long value = 0;
5244 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
5245 qm->err_info.acpi_rst,
5247 if (ACPI_FAILURE(s)) {
5248 pci_err(pdev, "NO controller reset method!\n");
5253 pci_err(pdev, "Reset step %llu failed!\n", value);
5257 pci_err(pdev, "No reset method!\n");
static int qm_vf_reset_done(struct hisi_qm *qm)
{
	struct hisi_qm_list *qm_list = qm->qm_list;
	struct pci_dev *pdev = qm->pdev;
	struct pci_dev *virtfn;
	struct hisi_qm *vf_qm;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	list_for_each_entry(vf_qm, &qm_list->list, list) {
		virtfn = vf_qm->pdev;
		if (virtfn == pdev)
			continue;

		if (pci_physfn(virtfn) == pdev) {
			/* enable VFs PCIE BAR configuration */
			pci_restore_state(virtfn);

			ret = qm_restart(vf_qm);
			if (ret)
				goto restart_fail;
		}
	}

restart_fail:
	mutex_unlock(&qm_list->lock);
	return ret;
}

static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	if (!qm->vfs_num)
		return 0;

	ret = qm_vf_q_assign(qm, qm->vfs_num);
	if (ret) {
		pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret);
		return ret;
	}

	/* Kunpeng930 supports notifying VFs to start after PF reset. */
	if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
		ret = qm_ping_all_vfs(qm, cmd);
		if (ret)
			pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
	} else {
		ret = qm_vf_reset_done(qm);
		if (ret)
			pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret);
	}

	return ret;
}

static int qm_dev_hw_init(struct hisi_qm *qm)
{
	return qm->err_ini->hw_init(qm);
}

static void qm_restart_prepare(struct hisi_qm *qm)
{
	u32 value;

	if (qm->err_ini->open_sva_prefetch)
		qm->err_ini->open_sva_prefetch(qm);

	if (qm->ver >= QM_HW_V3)
		return;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

	/* temporarily close the OOO port used for PEH to write out MSI */
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	writel(value & ~qm->err_info.msi_wr_port,
	       qm->io_base + ACC_AM_CFG_PORT_WR_EN);

	/* clear the device ECC 2-bit error source if present */
	value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
	if (value && qm->err_ini->clear_dev_hw_err_status)
		qm->err_ini->clear_dev_hw_err_status(qm, value);

	/* clear QM ecc mbit error source */
	writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);

	/* clear AM Reorder Buffer ecc mbit source */
	writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
}

static void qm_restart_done(struct hisi_qm *qm)
{
	u32 value;

	if (qm->ver >= QM_HW_V3)
		goto clear_flags;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

	/* open the OOO port for PEH to write out MSI */
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	value |= qm->err_info.msi_wr_port;
	writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);

clear_flags:
	qm->err_status.is_qm_ecc_mbit = false;
	qm->err_status.is_dev_ecc_mbit = false;
}

static int qm_controller_reset_done(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "Fails to enable PEH MSI bit!\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "Fails to enable pf MSE bit!\n");
		return ret;
	}

	if (qm->vfs_num) {
		ret = qm_set_vf_mse(qm, true);
		if (ret) {
			pci_err(pdev, "Fails to enable vf MSE bit!\n");
			return ret;
		}
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init device\n");
		return ret;
	}

	qm_restart_prepare(qm);
	hisi_qm_dev_err_init(qm);
	if (qm->err_ini->open_axi_master_ooo)
		qm->err_ini->open_axi_master_ooo(qm);

	ret = qm_dev_mem_reset(qm);
	if (ret) {
		pci_err(pdev, "failed to reset device memory\n");
		return ret;
	}

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM!\n");
		return ret;
	}

	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
	if (ret)
		pci_err(pdev, "failed to start vfs by pf in soft reset.\n");

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to start by vfs in soft reset!\n");

	qm_cmd_init(qm);
	qm_restart_done(qm);

	qm_reset_bit_clear(qm);

	return 0;
}

static void qm_show_last_dfx_regs(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	u32 val;
	int i;

	if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
		return;

	for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) {
		val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset);
		if (debug->qm_last_words[i] != val)
			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
				 qm_dfx_regs[i].name, debug->qm_last_words[i], val);
	}
}

static int qm_controller_reset(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	pci_info(pdev, "Controller resetting...\n");

	ret = qm_controller_reset_prepare(qm);
	if (ret) {
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
		return ret;
	}

	qm_show_last_dfx_regs(qm);
	if (qm->err_ini->show_last_dfx_regs)
		qm->err_ini->show_last_dfx_regs(qm);

	ret = qm_soft_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		qm_reset_bit_clear(qm);
		return ret;
	}

	ret = qm_controller_reset_done(qm);
	if (ret) {
		qm_reset_bit_clear(qm);
		return ret;
	}

	pci_info(pdev, "Controller reset complete\n");

	return 0;
}

/**
 * hisi_qm_dev_slot_reset() - slot reset
 * @pdev: the PCIe device
 *
 * This function offers the QM-related PCIe device reset interface. Drivers
 * that use the QM can use this function as the slot_reset callback in their
 * struct pci_error_handlers.
 */
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_RECOVERED;

	/* reset pcie device controller */
	ret = qm_controller_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);

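/*
 * Illustrative sketch (not part of this driver): an accelerator driver built
 * on the QM typically plugs the exported reset hooks into its PCI error
 * handlers. The struct name below is hypothetical; the callbacks are the
 * ones exported by this file.
 *
 *	static const struct pci_error_handlers xxx_err_handler = {
 *		.error_detected	= hisi_qm_dev_err_detected,
 *		.slot_reset	= hisi_qm_dev_slot_reset,
 *		.reset_prepare	= hisi_qm_reset_prepare,
 *		.reset_done	= hisi_qm_reset_done,
 *	};
 */
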
void hisi_qm_reset_prepare(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	u32 delay = 0;
	int ret;

	hisi_qm_dev_err_uninit(pf_qm);

	/*
	 * Check whether there is an ECC mbit error; if one has occurred, we
	 * need to wait for the soft reset to fix it.
	 */
	while (qm_check_dev_error(pf_qm)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return;
	}

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		pci_err(pdev, "FLR not ready!\n");
		return;
	}

	/* PF obtains the information of VF by querying the register. */
	if (qm->fun_type == QM_HW_PF)
		qm_cmd_uninit(qm);

	ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR);
	if (ret)
		pci_err(pdev, "failed to stop vfs by pf in FLR.\n");

	ret = hisi_qm_stop(qm, QM_FLR);
	if (ret) {
		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
		return;
	}

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to stop by vfs in FLR!\n");

	pci_info(pdev, "FLR resetting...\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);

static bool qm_flr_reset_complete(struct pci_dev *pdev)
{
	struct pci_dev *pf_pdev = pci_physfn(pdev);
	struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
	u32 id;

	pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
	if (id == QM_PCI_COMMAND_INVALID) {
		pci_err(pdev, "Device can not be used!\n");
		return false;
	}

	return true;
}

void hisi_qm_reset_done(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = qm_dev_hw_init(qm);
		if (ret) {
			pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
			goto flr_done;
		}
	}

	hisi_qm_dev_err_init(pf_qm);

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
		goto flr_done;
	}

	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
	if (ret)
		pci_err(pdev, "failed to start vfs by pf in FLR.\n");

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to start by vfs in FLR!\n");

flr_done:
	if (qm->fun_type == QM_HW_PF)
		qm_cmd_init(qm);

	if (qm_flr_reset_complete(pdev))
		pci_info(pdev, "FLR reset complete\n");

	qm_reset_bit_clear(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);

static irqreturn_t qm_abnormal_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	enum acc_err_result ret;

	atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
	ret = qm_process_dev_error(qm);
	if (ret == ACC_ERR_NEED_RESET &&
	    !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
	    !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
		schedule_work(&qm->rst_work);

	return IRQ_HANDLED;
}

/**
 * hisi_qm_dev_shutdown() - Shutdown device.
 * @pdev: The device to be shut down.
 *
 * This function stops the qm when the OS shuts down or reboots.
 */
void hisi_qm_dev_shutdown(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret)
		dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);

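/*
 * Illustrative sketch (driver name hypothetical): hisi_qm_dev_shutdown() is
 * meant to be wired up as the .shutdown callback of a driver's pci_driver:
 *
 *	static struct pci_driver xxx_pci_driver = {
 *		.name	  = "xxx",
 *		.shutdown = hisi_qm_dev_shutdown,
 *	};
 */
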
static void hisi_qm_controller_reset(struct work_struct *rst_work)
{
	struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
	int ret;

	ret = qm_pm_get_sync(qm);
	if (ret < 0) {
		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
		return;
	}

	/* reset pcie device controller */
	ret = qm_controller_reset(qm);
	if (ret)
		dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);

	qm_pm_put_sync(qm);
}

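/*
 * The helpers below implement the VF side of the PF <-> VF reset handshake:
 * the VF stops its queues and acks the PF over the mailbox, waits for the PF
 * to signal that the reset has finished, then restarts and acks again.
 */
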
static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
				   enum qm_stop_reason stop_reason)
{
	enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		dev_err(&pdev->dev, "reset prepare not ready!\n");
		atomic_set(&qm->status.flags, QM_STOP);
		cmd = QM_VF_PREPARE_FAIL;
		goto err_prepare;
	}

	ret = hisi_qm_stop(qm, stop_reason);
	if (ret) {
		dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret);
		atomic_set(&qm->status.flags, QM_STOP);
		cmd = QM_VF_PREPARE_FAIL;
		goto err_prepare;
	} else {
		goto out;
	}

err_prepare:
	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
out:
	pci_save_state(pdev);
	ret = qm_ping_pf(qm, cmd);
	if (ret)
		dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
}

static void qm_pf_reset_vf_done(struct hisi_qm *qm)
{
	enum qm_mb_cmd cmd = QM_VF_START_DONE;
	struct pci_dev *pdev = qm->pdev;
	int ret;

	pci_restore_state(pdev);
	ret = hisi_qm_start(qm);
	if (ret) {
		dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret);
		cmd = QM_VF_START_FAIL;
	}

	ret = qm_ping_pf(qm, cmd);
	if (ret)
		dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");

	qm_reset_bit_clear(qm);
}

static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 val, cmd;
	u64 msg;
	int ret;

	/* Wait for the reset to finish */
	ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val,
					 val == BIT(0), QM_VF_RESET_WAIT_US,
					 QM_VF_RESET_WAIT_TIMEOUT_US);
	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(dev, "couldn't get reset done status from PF, timeout!\n");
		return -ETIMEDOUT;
	}

	/*
	 * Whether or not the message is fetched successfully,
	 * the VF must ack the PF by clearing the interrupt.
	 */
	ret = qm_get_mb_cmd(qm, &msg, 0);
	qm_clear_cmd_interrupt(qm, 0);
	if (ret) {
		dev_err(dev, "failed to get msg from PF in reset done!\n");
		return ret;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	if (cmd != QM_PF_RESET_DONE) {
		dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static void qm_pf_reset_vf_process(struct hisi_qm *qm,
				   enum qm_stop_reason stop_reason)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	dev_info(dev, "device reset start...\n");

	/* The message is obtained by querying the register during resetting */
	qm_cmd_uninit(qm);
	qm_pf_reset_vf_prepare(qm, stop_reason);

	ret = qm_wait_pf_reset_finish(qm);
	if (ret)
		goto err_get_status;

	qm_pf_reset_vf_done(qm);
	qm_cmd_init(qm);

	dev_info(dev, "device reset done.\n");

	return;

err_get_status:
	qm_cmd_init(qm);
	qm_reset_bit_clear(qm);
}

static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
{
	struct device *dev = &qm->pdev->dev;
	u32 cmd;
	u64 msg;
	int ret;

	/*
	 * Get the msg from the source by sending a mailbox. Whether or not the
	 * message is fetched successfully, the destination must ack the source
	 * by clearing the interrupt.
	 */
	ret = qm_get_mb_cmd(qm, &msg, fun_num);
	qm_clear_cmd_interrupt(qm, BIT(fun_num));
	if (ret) {
		dev_err(dev, "failed to get msg from source!\n");
		return;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	switch (cmd) {
	case QM_PF_FLR_PREPARE:
		qm_pf_reset_vf_process(qm, QM_FLR);
		break;
	case QM_PF_SRST_PREPARE:
		qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
		break;
	case QM_VF_GET_QOS:
		qm_vf_get_qos(qm, fun_num);
		break;
	case QM_PF_SET_QOS:
		qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
		break;
	default:
		dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
		break;
	}
}

static void qm_cmd_process(struct work_struct *cmd_process)
{
	struct hisi_qm *qm = container_of(cmd_process,
					  struct hisi_qm, cmd_process);
	u32 vfs_num = qm->vfs_num;
	u64 val;
	u32 i;

	if (qm->fun_type == QM_HW_PF) {
		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
		if (!val)
			return;

		for (i = 1; i <= vfs_num; i++) {
			if (test_bit(i, (unsigned long *)&val))
				qm_handle_cmd_msg(qm, i);
		}

		return;
	}

	qm_handle_cmd_msg(qm, 0);
}

/**
 * hisi_qm_alg_register() - Register algs to crypto and add qm to qm_list.
 * @qm: The qm to add.
 * @qm_list: The qm list.
 *
 * This function adds the qm to the qm list, and registers the algorithms to
 * crypto if this qm is the first one in the list.
 */
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	struct device *dev = &qm->pdev->dev;
	int flag = 0;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	if (list_empty(&qm_list->list))
		flag = 1;
	list_add_tail(&qm->list, &qm_list->list);
	mutex_unlock(&qm_list->lock);

	if (qm->ver <= QM_HW_V2 && qm->use_sva) {
		dev_info(dev, "HW V2 cannot use uacce sva mode and hardware crypto algs at the same time.\n");
		return 0;
	}

	if (flag) {
		ret = qm_list->register_to_crypto(qm);
		if (ret) {
			mutex_lock(&qm_list->lock);
			list_del(&qm->list);
			mutex_unlock(&qm_list->lock);
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_register);

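/*
 * Illustrative sketch: an accelerator driver calls this near the end of its
 * probe path, after the qm has been started. The list head and error label
 * below are hypothetical.
 *
 *	ret = hisi_qm_alg_register(qm, &xxx_devices);
 *	if (ret) {
 *		pci_err(pdev, "failed to register algs!\n");
 *		goto err_qm_stop;
 *	}
 */
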
/**
 * hisi_qm_alg_unregister() - Unregister algs from crypto and delete qm from
 * qm list.
 * @qm: The qm to delete.
 * @qm_list: The qm list.
 *
 * This function deletes the qm from the qm list, and unregisters the
 * algorithms from crypto when the qm list becomes empty.
 */
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	mutex_lock(&qm_list->lock);
	list_del(&qm->list);
	mutex_unlock(&qm_list->lock);

	if (qm->ver <= QM_HW_V2 && qm->use_sva)
		return;

	if (list_empty(&qm_list->list))
		qm_list->unregister_from_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);

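/*
 * Illustrative sketch: the remove path mirrors probe; the algs are
 * unregistered before the qm is stopped (list head hypothetical):
 *
 *	hisi_qm_alg_unregister(qm, &xxx_devices);
 *	hisi_qm_stop(qm, QM_NORMAL);
 */
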
static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	if (qm->fun_type == QM_HW_VF)
		return;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_abnormal_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	if (qm->fun_type == QM_HW_VF)
		return 0;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);

	return ret;
}

static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret);

	return ret;
}

static void qm_unregister_aeq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_aeq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq,
				   qm_aeq_thread, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request aeq irq, ret = %d", ret);

	return ret;
}

static void qm_unregister_eq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_eq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_irq, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);

	return ret;
}

static void qm_irqs_unregister(struct hisi_qm *qm)
{
	qm_unregister_mb_cmd_irq(qm);
	qm_unregister_abnormal_irq(qm);
	qm_unregister_aeq_irq(qm);
	qm_unregister_eq_irq(qm);
}

static int qm_irqs_register(struct hisi_qm *qm)
{
	int ret;

	ret = qm_register_eq_irq(qm);
	if (ret)
		return ret;

	ret = qm_register_aeq_irq(qm);
	if (ret)
		goto free_eq_irq;

	ret = qm_register_abnormal_irq(qm);
	if (ret)
		goto free_aeq_irq;

	ret = qm_register_mb_cmd_irq(qm);
	if (ret)
		goto free_abnormal_irq;

	return 0;

free_abnormal_irq:
	qm_unregister_abnormal_irq(qm);
free_aeq_irq:
	qm_unregister_aeq_irq(qm);
free_eq_irq:
	qm_unregister_eq_irq(qm);
	return ret;
}

static int qm_get_qp_num(struct hisi_qm *qm)
{
	bool is_db_isolation;

	/* The VF's qp_num is assigned by the PF in v2, and the VF can get qp_num from the VFT. */
	if (qm->fun_type == QM_HW_VF) {
		if (qm->ver != QM_HW_V1)
			/* v2 starts to support getting the VFT by mailbox */
			return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);

		return 0;
	}

	is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
	qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
	qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
					     QM_FUNC_MAX_QP_CAP, is_db_isolation);

	/* check if the qp number is valid */
	if (qm->qp_num > qm->max_qp_num) {
		dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
			qm->qp_num, qm->max_qp_num);
		return -EINVAL;
	}

	return 0;
}

static void qm_get_hw_caps(struct hisi_qm *qm)
{
	const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
						  qm_cap_info_pf : qm_cap_info_vf;
	u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
					      ARRAY_SIZE(qm_cap_info_vf);
	u32 val, i;

	/* The doorbell isolation register is an independent register. */
	val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
	if (val)
		set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);

	if (qm->ver >= QM_HW_V3) {
		val = readl(qm->io_base + QM_FUNC_CAPS_REG);
		qm->cap_ver = val & QM_CAPBILITY_VERSION;
	}

	/* Get the capabilities common to PF and VF */
	for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) {
		val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
		if (val)
			set_bit(qm_cap_info_comm[i].type, &qm->caps);
	}

	/* Get the capabilities that differ between PF and VF */
	for (i = 0; i < size; i++) {
		val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
		if (val)
			set_bit(cap_info[i].type, &qm->caps);
	}
}

static int qm_get_pci_res(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	ret = pci_request_mem_regions(pdev, qm->dev_name);
	if (ret < 0) {
		dev_err(dev, "Failed to request mem regions!\n");
		return ret;
	}

	qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
	qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
	if (!qm->io_base) {
		ret = -EIO;
		goto err_request_mem_regions;
	}

	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
		qm->db_interval = QM_QP_DB_INTERVAL;
		qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
		qm->db_io_base = ioremap(qm->db_phys_base,
					 pci_resource_len(pdev, PCI_BAR_4));
		if (!qm->db_io_base) {
			ret = -EIO;
			goto err_ioremap;
		}
	} else {
		qm->db_phys_base = qm->phys_base;
		qm->db_io_base = qm->io_base;
		qm->db_interval = 0;
	}

	ret = qm_get_qp_num(qm);
	if (ret)
		goto err_db_ioremap;

	return 0;

err_db_ioremap:
	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
		iounmap(qm->db_io_base);
err_ioremap:
	iounmap(qm->io_base);
err_request_mem_regions:
	pci_release_mem_regions(pdev);
	return ret;
}

static int hisi_qm_pci_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	unsigned int num_vec;
	int ret;

	ret = pci_enable_device_mem(pdev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable device mem!\n");
		return ret;
	}

	ret = qm_get_pci_res(qm);
	if (ret)
		goto err_disable_pcidev;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0)
		goto err_get_pci_res;
	pci_set_master(pdev);

	num_vec = qm_get_irq_num(qm);
	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(dev, "Failed to enable MSI vectors!\n");
		goto err_get_pci_res;
	}

	return 0;

err_get_pci_res:
	qm_put_pci_res(qm);
err_disable_pcidev:
	pci_disable_device(pdev);
	return ret;
}

static int hisi_qm_init_work(struct hisi_qm *qm)
{
	int i;

	for (i = 0; i < qm->qp_num; i++)
		INIT_WORK(&qm->poll_data[i].work, qm_work_process);

	if (qm->fun_type == QM_HW_PF)
		INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);

	if (qm->ver > QM_HW_V2)
		INIT_WORK(&qm->cmd_process, qm_cmd_process);

	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "failed to alloc workqueue!\n");
		return -ENOMEM;
	}

	return 0;
}

static int hisi_qp_alloc_memory(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u16 sq_depth, cq_depth;
	size_t qp_dma_size;
	int i, ret;

	qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
	if (!qm->qp_array)
		return -ENOMEM;

	qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL);
	if (!qm->poll_data) {
		kfree(qm->qp_array);
		return -ENOMEM;
	}

	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);

	/* one more page for device or qp statuses */
	qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
	qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
	for (i = 0; i < qm->qp_num; i++) {
		qm->poll_data[i].qm = qm;
		ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
		if (ret)
			goto err_init_qp_mem;

		dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
	}

	return 0;

err_init_qp_mem:
	hisi_qp_memory_uninit(qm, i);

	return ret;
}

static int hisi_qm_memory_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret, total_func;
	size_t off = 0;

	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
		total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
		qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
		if (!qm->factor)
			return -ENOMEM;

		/* Only the PF value needs to be initialized */
		qm->factor[0].func_qos = QM_QOS_MAX_VAL;
	}

#define QM_INIT_BUF(qm, type, num) do { \
	(qm)->type = ((qm)->qdma.va + (off)); \
	(qm)->type##_dma = (qm)->qdma.dma + (off); \
	off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
} while (0)

	idr_init(&qm->qp_idr);
	qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
			QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
			QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
			QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
					 GFP_ATOMIC);
	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
	if (!qm->qdma.va) {
		ret = -ENOMEM;
		goto err_destroy_idr;
	}

	QM_INIT_BUF(qm, eqe, qm->eq_depth);
	QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
	QM_INIT_BUF(qm, sqc, qm->qp_num);
	QM_INIT_BUF(qm, cqc, qm->qp_num);

	ret = hisi_qp_alloc_memory(qm);
	if (ret)
		goto err_alloc_qp_array;

	return 0;

err_alloc_qp_array:
	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
err_destroy_idr:
	idr_destroy(&qm->qp_idr);
	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		kfree(qm->factor);

	return ret;
}

static void qm_last_regs_init(struct hisi_qm *qm)
{
	int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	int i;

	if (qm->fun_type == QM_HW_VF)
		return;

	debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int),
				       GFP_KERNEL);
	if (!debug->qm_last_words)
		return;

	for (i = 0; i < dfx_regs_num; i++)
		debug->qm_last_words[i] = readl_relaxed(qm->io_base +
							qm_dfx_regs[i].offset);
}

/**
 * hisi_qm_init() - Initialize the qm.
 * @qm: The qm to initialize.
 *
 * This function initializes the qm; hisi_qm_start() can then be called to
 * put the qm to work.
 */
int hisi_qm_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	hisi_qm_pre_init(qm);

	ret = hisi_qm_pci_init(qm);
	if (ret)
		return ret;

	ret = qm_irqs_register(qm);
	if (ret)
		goto err_pci_init;

	if (qm->fun_type == QM_HW_PF) {
		qm_disable_clock_gate(qm);
		ret = qm_dev_mem_reset(qm);
		if (ret) {
			dev_err(dev, "failed to reset device memory\n");
			goto err_irq_register;
		}
	}

	if (qm->mode == UACCE_MODE_SVA) {
		ret = qm_alloc_uacce(qm);
		if (ret < 0)
			dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
	}

	ret = hisi_qm_memory_init(qm);
	if (ret)
		goto err_alloc_uacce;

	ret = hisi_qm_init_work(qm);
	if (ret)
		goto err_free_qm_memory;

	qm_cmd_init(qm);
	atomic_set(&qm->status.flags, QM_INIT);

	qm_last_regs_init(qm);

	return 0;

err_free_qm_memory:
	hisi_qm_memory_uninit(qm);
err_alloc_uacce:
	if (qm->use_sva) {
		uacce_remove(qm->uacce);
		qm->uacce = NULL;
	}
err_irq_register:
	qm_irqs_unregister(qm);
err_pci_init:
	hisi_qm_pci_uninit(qm);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);

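/*
 * Illustrative sketch of the expected probe-time call order in a driver
 * built on the QM (error handling elided, list head hypothetical):
 *
 *	qm->pdev = pdev;
 *	ret = hisi_qm_init(qm);		// PCI, IRQs, queue memory, workqueues
 *	ret = hisi_qm_start(qm);	// put the qm to work
 *	ret = hisi_qm_alg_register(qm, &xxx_devices);
 *	hisi_qm_pm_init(qm);		// optional: enable runtime PM
 */
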
/**
 * hisi_qm_get_dfx_access() - Try to get dfx access.
 * @qm: pointer to accelerator device.
 *
 * Try to get dfx access so the caller can read debug information.
 *
 * If the device is runtime-suspended, return failure; otherwise
 * bump up the runtime PM usage counter.
 */
int hisi_qm_get_dfx_access(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (pm_runtime_suspended(dev)) {
		dev_info(dev, "can not read/write - device is suspended.\n");
		return -EAGAIN;
	}

	return qm_pm_get_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);

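/*
 * Illustrative sketch: debugfs handlers bracket register reads with the dfx
 * access helpers so a runtime-suspended device is never touched (the
 * register offset below is a placeholder):
 *
 *	ret = hisi_qm_get_dfx_access(qm);
 *	if (ret)
 *		return ret;
 *	val = readl(qm->io_base + XXX_DFX_REG);
 *	hisi_qm_put_dfx_access(qm);
 */
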
/**
 * hisi_qm_put_dfx_access() - Put dfx access.
 * @qm: pointer to accelerator device.
 *
 * Put dfx access, drop the runtime PM usage counter.
 */
void hisi_qm_put_dfx_access(struct hisi_qm *qm)
{
	qm_pm_put_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);

/**
 * hisi_qm_pm_init() - Initialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Function that initializes qm runtime PM.
 */
void hisi_qm_pm_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_init);

/**
 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Function that uninitializes qm runtime PM.
 */
void hisi_qm_pm_uninit(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_get_noresume(dev);
	pm_runtime_dont_use_autosuspend(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);

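/*
 * Illustrative pairing: hisi_qm_pm_init() is called at the end of a driver's
 * probe and hisi_qm_pm_uninit() at the start of its remove, so the runtime
 * PM usage counter stays balanced across the device's lifetime.
 */
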
static int qm_prepare_for_suspend(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	ret = qm->ops->set_msi(qm, false);
	if (ret) {
		pci_err(pdev, "failed to disable MSI before suspending!\n");
		return ret;
	}

	/* shutdown OOO register */
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);

	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
					 val,
					 (val == ACC_MASTER_TRANS_RETURN_RW),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, false);
	if (ret)
		pci_err(pdev, "failed to disable MSE before suspending!\n");

	return ret;
}

static int qm_rebuild_for_resume(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSE after resuming!\n");
		return ret;
	}

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSI after resuming!\n");
		return ret;
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "failed to init device after resuming\n");
		return ret;
	}

	qm_cmd_init(qm);
	hisi_qm_dev_err_init(qm);
	qm_disable_clock_gate(qm);
	ret = qm_dev_mem_reset(qm);
	if (ret)
		pci_err(pdev, "failed to reset device memory\n");

	return ret;
}

/**
 * hisi_qm_suspend() - Runtime suspend of given device.
 * @dev: device to suspend.
 *
 * Function that suspends the device.
 */
int hisi_qm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "entering suspended state\n");

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret) {
		pci_err(pdev, "failed to stop qm(%d)\n", ret);
		return ret;
	}

	ret = qm_prepare_for_suspend(qm);
	if (ret)
		pci_err(pdev, "failed to prepare for suspend(%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_suspend);

/**
 * hisi_qm_resume() - Runtime resume of given device.
 * @dev: device to resume.
 *
 * Function that resumes the device.
 */
int hisi_qm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "resuming from suspend state\n");

	ret = qm_rebuild_for_resume(qm);
	if (ret) {
		pci_err(pdev, "failed to rebuild for resume(%d)\n", ret);
		return ret;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		pci_err(pdev, "failed to start qm(%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_resume);

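/*
 * Illustrative sketch: drivers expose these hooks through runtime PM
 * dev_pm_ops; the struct name below is hypothetical.
 *
 *	static const struct dev_pm_ops xxx_pm_ops = {
 *		SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
 *	};
 */
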
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");