// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include "hpre.h"

#define HPRE_VF_NUM			63
#define HPRE_QUEUE_NUM_V2		1024
#define HPRE_QM_ABNML_INT_MASK		0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT	BIT(0)
#define HPRE_COMM_CNT_CLR_CE		0x0
#define HPRE_CTRL_CNT_CLR_CE		0x301000
#define HPRE_FSM_MAX_CNT		0x301008
#define HPRE_VFG_AXQOS			0x30100c
#define HPRE_VFG_AXCACHE		0x301010
#define HPRE_RDCHN_INI_CFG		0x301014
#define HPRE_AWUSR_FP_CFG		0x301018
#define HPRE_BD_ENDIAN			0x301020
#define HPRE_ECC_BYPASS			0x301024
#define HPRE_RAS_WIDTH_CFG		0x301028
#define HPRE_POISON_BYPASS		0x30102c
#define HPRE_BD_ARUSR_CFG		0x301030
#define HPRE_BD_AWUSR_CFG		0x301034
#define HPRE_TYPES_ENB			0x301038
#define HPRE_DATA_RUSER_CFG		0x30103c
#define HPRE_DATA_WUSER_CFG		0x301040
#define HPRE_INT_MASK			0x301400
#define HPRE_INT_STATUS			0x301800
#define HPRE_CORE_INT_ENABLE		0
#define HPRE_CORE_INT_DISABLE		0x003fffff
#define HPRE_RAS_ECC_1BIT_TH		0x30140c
#define HPRE_RDCHN_INI_ST		0x301a00
#define HPRE_CLSTR_BASE			0x302000
#define HPRE_CORE_EN_OFFSET		0x04
#define HPRE_CORE_INI_CFG_OFFSET	0x20
#define HPRE_CORE_INI_STATUS_OFFSET	0x80
#define HPRE_CORE_HTBT_WARN_OFFSET	0x8c
#define HPRE_CORE_IS_SCHD_OFFSET	0x90

#define HPRE_RAS_CE_ENB			0x301410
#define HPRE_HAC_RAS_CE_ENABLE		0x3f
#define HPRE_RAS_NFE_ENB		0x301414
#define HPRE_HAC_RAS_NFE_ENABLE		0x3fffc0
#define HPRE_RAS_FE_ENB			0x301418
#define HPRE_HAC_RAS_FE_ENABLE		0

#define HPRE_CORE_ENB		(HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
#define HPRE_CORE_INI_CFG	(HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
#define HPRE_CORE_INI_STATUS	(HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
#define HPRE_HAC_ECC1_CNT		0x301a04
#define HPRE_HAC_ECC2_CNT		0x301a08
#define HPRE_HAC_INT_STATUS		0x301800
#define HPRE_HAC_SOURCE_INT		0x301600
#define MASTER_GLOBAL_CTRL_SHUTDOWN	1
#define MASTER_TRANS_RETURN_RW		3
#define HPRE_MASTER_TRANS_RETURN	0x300150
#define HPRE_MASTER_GLOBAL_CTRL		0x300000
#define HPRE_CLSTR_ADDR_INTRVL		0x1000
#define HPRE_CLUSTER_INQURY		0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT	0x104
#define HPRE_TIMEOUT_ABNML_BIT		6
#define HPRE_PASID_EN_BIT		9
#define HPRE_REG_RD_INTVRL_US		10
#define HPRE_REG_RD_TMOUT_US		1000
#define HPRE_DBGFS_VAL_MAX_LEN		20
#define HPRE_PCI_DEVICE_ID		0xa258
#define HPRE_PCI_VF_DEVICE_ID		0xa259
#define HPRE_ADDR(qm, offset)		((qm)->io_base + (offset))
#define HPRE_QM_USR_CFG_MASK		0xfffffffe
#define HPRE_QM_AXI_CFG_MASK		0xffff
#define HPRE_QM_VFG_AX_MASK		0xff
#define HPRE_BD_USR_MASK		0x3
#define HPRE_CLUSTER_CORE_MASK		0xf

#define HPRE_VIA_MSI_DSM		1

static LIST_HEAD(hpre_list);
static DEFINE_MUTEX(hpre_list_lock);
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_VF_DEVICE_ID) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hpre_dev_ids);

struct hpre_hw_error {
	u32 int_msk;
	const char *msg;
};

static const char * const hpre_debug_file_name[] = {
	[HPRE_CURRENT_QM]   = "current_qm",
	[HPRE_CLEAR_ENABLE] = "rdclr_en",
	[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};

static const struct hpre_hw_error hpre_hw_errors[] = {
	{ .int_msk = BIT(0), .msg = "hpre_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "hpre_ecc_2bit_err" },
	{ .int_msk = BIT(2), .msg = "hpre_data_wr_err" },
	{ .int_msk = BIT(3), .msg = "hpre_data_rd_err" },
	{ .int_msk = BIT(4), .msg = "hpre_bd_rd_err" },
	{ .int_msk = BIT(5), .msg = "hpre_ooo_2bit_ecc_err" },
	{ .int_msk = BIT(6), .msg = "hpre_cltr1_htbt_tm_out_err" },
	{ .int_msk = BIT(7), .msg = "hpre_cltr2_htbt_tm_out_err" },
	{ .int_msk = BIT(8), .msg = "hpre_cltr3_htbt_tm_out_err" },
	{ .int_msk = BIT(9), .msg = "hpre_cltr4_htbt_tm_out_err" },
	{ .int_msk = GENMASK(15, 10), .msg = "hpre_ooo_rdrsp_err" },
	{ .int_msk = GENMASK(21, 16), .msg = "hpre_ooo_wrrsp_err" },
	{ /* sentinel */ }
};

static const u64 hpre_cluster_offsets[] = {
	[HPRE_CLUSTER0] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER1] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER2] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER3] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};

static struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
	{"CORES_EN_STATUS     ", HPRE_CORE_EN_OFFSET},
	{"CORES_INI_CFG       ", HPRE_CORE_INI_CFG_OFFSET},
	{"CORES_INI_STATUS    ", HPRE_CORE_INI_STATUS_OFFSET},
	{"CORES_HTBT_WARN     ", HPRE_CORE_HTBT_WARN_OFFSET},
	{"CORES_IS_SCHD       ", HPRE_CORE_IS_SCHD_OFFSET},
};

static struct debugfs_reg32 hpre_com_dfx_regs[] = {
	{"READ_CLR_EN         ", HPRE_CTRL_CNT_CLR_CE},
	{"AXQOS               ", HPRE_VFG_AXQOS},
	{"AWUSR_CFG           ", HPRE_AWUSR_FP_CFG},
	{"QM_ARUSR_MCFG1      ", QM_ARUSER_M_CFG_1},
	{"QM_AWUSR_MCFG1      ", QM_AWUSER_M_CFG_1},
	{"BD_ENDIAN           ", HPRE_BD_ENDIAN},
	{"ECC_CHECK_CTRL      ", HPRE_ECC_BYPASS},
	{"RAS_INT_WIDTH       ", HPRE_RAS_WIDTH_CFG},
	{"POISON_BYPASS       ", HPRE_POISON_BYPASS},
	{"BD_ARUSER           ", HPRE_BD_ARUSR_CFG},
	{"BD_AWUSER           ", HPRE_BD_AWUSR_CFG},
	{"DATA_ARUSER         ", HPRE_DATA_RUSER_CFG},
	{"DATA_AWUSER         ", HPRE_DATA_WUSER_CFG},
	{"INT_STATUS          ", HPRE_INT_STATUS},
};

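/*
 * Validate the hpre_pf_q_num module parameter before it is stored:
 * reject anything outside 1..HPRE_QUEUE_NUM_V2. When no HPRE device is
 * present yet (module parameters are parsed before probe), the HW V2
 * queue limit is assumed.
 */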
static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	struct pci_dev *pdev;
	u32 n, q_num;
	u8 rev_id;
	int ret;

	if (!val)
		return -EINVAL;

	pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL);
	if (!pdev) {
		q_num = HPRE_QUEUE_NUM_V2;
		pr_info("No device found, assuming queue number is %u\n",
			q_num);
	} else {
		rev_id = pdev->revision;
		if (rev_id != QM_HW_V2)
			return -EINVAL;

		q_num = HPRE_QUEUE_NUM_V2;
	}

	ret = kstrtou32(val, 10, &n);
	if (ret != 0 || n == 0 || n > q_num)
		return -EINVAL;

	return param_set_int(val, kp);
}

static const struct kernel_param_ops hpre_pf_q_num_ops = {
	.set = hpre_pf_q_num_set,
	.get = param_get_int,
};

static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444);
MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of HPRE (1-1024)");
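/*
 * Example usage (hypothetical value): reserve 256 queues for the PF
 * when loading the driver:
 *	modprobe hisi_hpre hpre_pf_q_num=256
 */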

static inline void hpre_add_to_list(struct hpre *hpre)
{
	mutex_lock(&hpre_list_lock);
	list_add_tail(&hpre->list, &hpre_list);
	mutex_unlock(&hpre_list_lock);
}

static inline void hpre_remove_from_list(struct hpre *hpre)
{
	mutex_lock(&hpre_list_lock);
	list_del(&hpre->list);
	mutex_unlock(&hpre_list_lock);
}

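/*
 * Return the registered hpre device with the smallest NUMA distance to
 * @node, so callers can keep crypto queues close to the submitting CPU.
 * On non-NUMA kernels the device node is assumed to be 0.
 */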
struct hpre *hpre_find_device(int node)
{
	struct hpre *hpre, *ret = NULL;
	int min_distance = INT_MAX;
	struct device *dev;
	int dev_node = 0;

	mutex_lock(&hpre_list_lock);
	list_for_each_entry(hpre, &hpre_list, list) {
		dev = &hpre->qm.pdev->dev;
#ifdef CONFIG_NUMA
		dev_node = dev->numa_node;
		if (dev_node < 0)
			dev_node = 0;
#endif
		if (node_distance(dev_node, node) < min_distance) {
			ret = hpre;
			min_distance = node_distance(dev_node, node);
		}
	}
	mutex_unlock(&hpre_list_lock);

	return ret;
}

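/*
 * Ask platform firmware, via an ACPI _DSM with the HPRE-specific GUID,
 * to switch the device over to MSI interrupt handling; the returned
 * object carries no payload and is freed immediately.
 */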
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	union acpi_object *obj;
	guid_t guid;

	if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {
		dev_err(dev, "Failed to parse HPRE GUID!\n");
		return -EINVAL;
	}

	/* Switch over to MSI handling due to non-standard PCI implementation */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,
				0, HPRE_VIA_MSI_DSM, NULL);
	if (!obj) {
		dev_err(dev, "ACPI handle failed!\n");
		return -EIO;
	}

	ACPI_FREE(obj);

	return 0;
}

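/*
 * One-time PF hardware initialization: configure the QM user/AXI
 * domains, BD endianness and RAS-related controls, bring up the read
 * channel, enable each cluster's cores and poll until they all report
 * initialized, then apply the firmware _DSM setting.
 */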
static int hpre_set_user_domain_and_cache(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	struct device *dev = &qm->pdev->dev;
	unsigned long offset;
	int ret, i;
	u32 val;

	writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE));
	writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
	writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));

	/* disable FLR triggered by BME (bus master enable) */
	writel(PEH_AXUSER_CFG, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
	writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));

	/* HPRE needs more time, so mask the QM timeout abnormal interrupt */
	val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
	val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
	writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));

	writel(0x1, HPRE_ADDR(qm, HPRE_TYPES_ENB));
	writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE));
	writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN));
	writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK));
	writel(0x0, HPRE_ADDR(qm, HPRE_RAS_ECC_1BIT_TH));
	writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS));
	writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
	writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));

	writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG));
	writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
	writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
	ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val,
					 val & BIT(0),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret) {
		dev_err(dev, "read channel init timed out!\n");
		return -ETIMEDOUT;
	}

	for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
		offset = i * HPRE_CLSTR_ADDR_INTRVL;

		/* initialize the clusters */
		writel(HPRE_CLUSTER_CORE_MASK,
		       HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
		writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
		ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
					HPRE_CORE_INI_STATUS), val,
					((val & HPRE_CLUSTER_CORE_MASK) ==
					HPRE_CLUSTER_CORE_MASK),
					HPRE_REG_RD_INTVRL_US,
					HPRE_REG_RD_TMOUT_US);
		if (ret) {
			dev_err(dev,
				"cluster %d init status timed out!\n", i);
			return -ETIMEDOUT;
		}
	}

	ret = hpre_cfg_by_dsm(qm);
	if (ret)
		dev_err(dev, "acpi_evaluate_dsm err.\n");

	return ret;
}

static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
	unsigned long offset;
	int i;

	/* clear current_qm */
	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);

	/* clear clusterX/cluster_ctrl */
	for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
		offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
		writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
	}

	/* clear rdclr_en */
	writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void hpre_hw_error_disable(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;

	/* disable hpre hw error interrupts */
	writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
}

static void hpre_hw_error_enable(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;

	/* enable hpre hw error interrupts */
	writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
	writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
	writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
	writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
}

static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
{
	struct hpre *hpre = container_of(file->debug, struct hpre, debug);

	return &hpre->qm;
}

static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

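/*
 * Select which function's DFX counters the debugfs files expose:
 * val == 0 picks the PF, val == n picks VF n. curr_qm_qp_num is
 * recomputed to match, since the queues left over from the PF are
 * split evenly across the VFs with the last VF taking the remainder.
 */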
static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	struct hpre_debug *debug = file->debug;
	struct hpre *hpre = container_of(debug, struct hpre, debug);
	u32 num_vfs = hpre->num_vfs;
	u32 vfq_num, tmp;

	if (val > num_vfs)
		return -EINVAL;

	/* calculate curr_qm_qp_num from the PF or VF device ID and store it */
	if (val == 0) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
		if (val == num_vfs) {
			qm->debug.curr_qm_qp_num =
			qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num;
		} else {
			qm->debug.curr_qm_qp_num = vfq_num;
		}
	}

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);

	return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       HPRE_CTRL_CNT_CLR_CE_BIT;
}

static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	return 0;
}

static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE +
			       cluster_index * HPRE_CLSTR_ADDR_INTRVL;

	return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}

static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE + cluster_index *
			       HPRE_CLSTR_ADDR_INTRVL;

	writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);

	return 0;
}

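/*
 * Shared debugfs read path for the current_qm, rdclr_en and clusterX
 * control files: read the value for the file's type under its lock and
 * return it as decimal text.
 */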
static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CURRENT_QM:
		val = hpre_current_qm_read(file);
		break;
	case HPRE_CLEAR_ENABLE:
		val = hpre_clear_enable_read(file);
		break;
	case HPRE_CLUSTER_CTRL:
		val = hpre_cluster_inqry_read(file);
		break;
	default:
		spin_unlock_irq(&file->lock);
		return -EINVAL;
	}
	spin_unlock_irq(&file->lock);
	ret = sprintf(tbuf, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= HPRE_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EINVAL;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CURRENT_QM:
		ret = hpre_current_qm_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HPRE_CLEAR_ENABLE:
		ret = hpre_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HPRE_CLUSTER_CTRL:
		ret = hpre_cluster_inqry_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	return count;

err_input:
	spin_unlock_irq(&file->lock);
	return ret;
}

static const struct file_operations hpre_ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hpre_ctrl_debug_read,
	.write = hpre_ctrl_debug_write,
};

static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
				    enum hpre_ctrl_dbgfs_file type, int indx)
{
	struct dentry *tmp, *file_dir;

	if (dir)
		file_dir = dir;
	else
		file_dir = dbg->debug_root;

	if (type >= HPRE_DEBUG_FILE_NUM)
		return -EINVAL;

	spin_lock_init(&dbg->files[indx].lock);
	dbg->files[indx].debug = dbg;
	dbg->files[indx].type = type;
	dbg->files[indx].index = indx;
	tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
				  dbg->files + indx, &hpre_ctrl_debug_fops);
	if (!tmp)
		return -ENOENT;

	return 0;
}

static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
{
	struct hpre *hpre = container_of(debug, struct hpre, debug);
	struct hisi_qm *qm = &hpre->qm;
	struct device *dev = &qm->pdev->dev;
	struct debugfs_regset32 *regset;
	struct dentry *tmp;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = hpre_com_dfx_regs;
	regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
	regset->base = qm->io_base;

	tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
	if (!tmp)
		return -ENOENT;

	return 0;
}

static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
{
	struct hpre *hpre = container_of(debug, struct hpre, debug);
	struct hisi_qm *qm = &hpre->qm;
	struct device *dev = &qm->pdev->dev;
	char buf[HPRE_DBGFS_VAL_MAX_LEN];
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d, *tmp;
	int i, ret;

	for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
		sprintf(buf, "cluster%d", i);

		tmp_d = debugfs_create_dir(buf, debug->debug_root);
		if (!tmp_d)
			return -ENOENT;

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOMEM;

		regset->regs = hpre_cluster_dfx_regs;
		regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
		regset->base = qm->io_base + hpre_cluster_offsets[i];

		tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
		if (!tmp)
			return -ENOENT;

		ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
					       i + HPRE_CLUSTER_CTRL);
		if (ret)
			return ret;
	}

	return 0;
}

static int hpre_ctrl_debug_init(struct hpre_debug *debug)
{
	int ret;

	ret = hpre_create_debugfs_file(debug, NULL, HPRE_CURRENT_QM,
				       HPRE_CURRENT_QM);
	if (ret)
		return ret;

	ret = hpre_create_debugfs_file(debug, NULL, HPRE_CLEAR_ENABLE,
				       HPRE_CLEAR_ENABLE);
	if (ret)
		return ret;

	ret = hpre_pf_comm_regs_debugfs_init(debug);
	if (ret)
		return ret;

	return hpre_cluster_debugfs_init(debug);
}

static int hpre_debugfs_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	struct device *dev = &qm->pdev->dev;
	struct dentry *dir;
	int ret;

	dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
	if (!dir)
		return -ENOENT;

	qm->debug.debug_root = dir;

	ret = hisi_qm_debug_init(qm);
	if (ret)
		goto failed_to_create;

	if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
		hpre->debug.debug_root = dir;
		ret = hpre_ctrl_debug_init(&hpre->debug);
		if (ret)
			goto failed_to_create;
	}

	return 0;

failed_to_create:
	debugfs_remove_recursive(qm->debug.debug_root);
	return ret;
}

static void hpre_debugfs_exit(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;

	debugfs_remove_recursive(qm->debug.debug_root);
}

static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	enum qm_hw_ver rev_id;

	rev_id = hisi_qm_get_hw_version(pdev);
	if (rev_id < 0)
		return -ENODEV;

	if (rev_id == QM_HW_V1) {
		pci_warn(pdev, "HPRE version 1 is not supported!\n");
		return -EINVAL;
	}

	qm->pdev = pdev;
	qm->ver = rev_id;
	qm->sqe_size = HPRE_SQE_SIZE;
	qm->dev_name = hpre_name;
	qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
		       QM_HW_PF : QM_HW_VF;
	if (pdev->is_physfn) {
		qm->qp_base = HPRE_PF_DEF_Q_BASE;
		qm->qp_num = hpre_pf_q_num;
	}
	qm->use_dma_api = true;

	return 0;
}

static void hpre_hw_err_init(struct hpre *hpre)
{
	hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE, QM_BASE_NFE,
			      0, QM_DB_RANDOM_INVALID);
	hpre_hw_error_enable(hpre);
}

static int hpre_pf_probe_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;

	ret = hpre_set_user_domain_and_cache(hpre);
	if (ret)
		return ret;

	hpre_hw_err_init(hpre);

	return 0;
}

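/*
 * Probe flow: pre-init and init the QM, do PF-specific hardware setup
 * (or, for a HW V2 VF, fetch its queue allocation via mailbox), start
 * the QM, create debugfs entries, then register the crypto algorithms.
 * A debugfs failure is only warned about since the device still works.
 */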
static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_qm *qm;
	struct hpre *hpre;
	int ret;

	hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
	if (!hpre)
		return -ENOMEM;

	pci_set_drvdata(pdev, hpre);

	qm = &hpre->qm;
	ret = hpre_qm_pre_init(qm, pdev);
	if (ret)
		return ret;

	ret = hisi_qm_init(qm);
	if (ret)
		return ret;

	if (pdev->is_physfn) {
		ret = hpre_pf_probe_init(hpre);
		if (ret)
			goto err_with_qm_init;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) {
		/* v2 starts to support getting the VFT by mailbox */
		ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
		if (ret)
			goto err_with_qm_init;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_with_err_init;

	ret = hpre_debugfs_init(hpre);
	if (ret)
		dev_warn(&pdev->dev, "Failed to init debugfs!\n");

	hpre_add_to_list(hpre);

	ret = hpre_algs_register();
	if (ret < 0) {
		hpre_remove_from_list(hpre);
		pci_err(pdev, "Failed to register algorithms to crypto!\n");
		goto err_with_qm_start;
	}

	return 0;

err_with_qm_start:
	hisi_qm_stop(qm);

err_with_err_init:
	if (pdev->is_physfn)
		hpre_hw_error_disable(hpre);

err_with_qm_init:
	hisi_qm_uninit(qm);

	return ret;
}

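/*
 * Split the queues the PF does not use across num_vfs VFs via the VF
 * table. Each VF gets remain_q_num / num_vfs queues and the last VF
 * also takes the remainder: with (hypothetical) 960 spare queues and
 * 63 VFs, VFs 1-62 get 15 queues each and VF 63 gets 30.
 */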
static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs)
{
	struct hisi_qm *qm = &hpre->qm;
	u32 qp_num = qm->qp_num;
	int q_num, remain_q_num, i;
	u32 q_base = qp_num;
	int ret;

	if (!num_vfs)
		return -EINVAL;

	remain_q_num = qm->ctrl_qp_num - qp_num;

	/* If remaining queues are not enough, return error. */
	if (remain_q_num < num_vfs)
		return -EINVAL;

	q_num = remain_q_num / num_vfs;
	for (i = 1; i <= num_vfs; i++) {
		if (i == num_vfs)
			q_num += remain_q_num % num_vfs;
		ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num);
		if (ret)
			return ret;
		q_base += q_num;
	}

	return 0;
}

static int hpre_clear_vft_config(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	u32 num_vfs = hpre->num_vfs;
	int ret;
	u32 i;

	for (i = 1; i <= num_vfs; i++) {
		ret = hisi_qm_set_vft(qm, i, 0, 0);
		if (ret)
			return ret;
	}
	hpre->num_vfs = 0;

	return 0;
}

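/*
 * sriov_configure(num_vfs > 0) path: queue resources must be assigned
 * in the VF table before pci_enable_sriov(); if enabling fails, the
 * VFT assignment is rolled back.
 */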
static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
	struct hpre *hpre = pci_get_drvdata(pdev);
	int pre_existing_vfs, num_vfs, ret;

	pre_existing_vfs = pci_num_vf(pdev);
	if (pre_existing_vfs) {
		pci_err(pdev,
			"Can't enable VF. Please disable pre-enabled VFs!\n");
		return 0;
	}

	num_vfs = min_t(int, max_vfs, HPRE_VF_NUM);
	ret = hpre_vf_q_assign(hpre, num_vfs);
	if (ret) {
		pci_err(pdev, "Can't assign queues for VF!\n");
		return ret;
	}

	hpre->num_vfs = num_vfs;

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret) {
		pci_err(pdev, "Can't enable VF!\n");
		hpre_clear_vft_config(hpre);
		return ret;
	}

	return num_vfs;
}

static int hpre_sriov_disable(struct pci_dev *pdev)
{
	struct hpre *hpre = pci_get_drvdata(pdev);

	if (pci_vfs_assigned(pdev)) {
		pci_err(pdev, "Can't disable VFs while VFs are assigned!\n");
		return -EPERM;
	}

	/* hpre_pci_driver's remove callback will free the VF resources */
	pci_disable_sriov(pdev);

	return hpre_clear_vft_config(hpre);
}

static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs)
		return hpre_sriov_enable(pdev, num_vfs);
	else
		return hpre_sriov_disable(pdev);
}

static void hpre_remove(struct pci_dev *pdev)
{
	struct hpre *hpre = pci_get_drvdata(pdev);
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	hpre_algs_unregister();
	hpre_remove_from_list(hpre);
	if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) {
		ret = hpre_sriov_disable(pdev);
		if (ret) {
			pci_err(pdev, "Failed to disable SR-IOV!\n");
			return;
		}
	}
	if (qm->fun_type == QM_HW_PF) {
		hpre_cnt_regs_clear(qm);
		qm->debug.curr_qm_qp_num = 0;
	}

	hpre_debugfs_exit(hpre);
	hisi_qm_stop(qm);
	if (qm->fun_type == QM_HW_PF)
		hpre_hw_error_disable(hpre);
	hisi_qm_uninit(qm);
}

static void hpre_log_hw_error(struct hpre *hpre, u32 err_sts)
{
	const struct hpre_hw_error *err = hpre_hw_errors;
	struct device *dev = &hpre->qm.pdev->dev;

	while (err->msg) {
		if (err->int_msk & err_sts)
			dev_warn(dev, "%s [error status=0x%x] found\n",
				 err->msg, err->int_msk);
		err++;
	}
}

static pci_ers_result_t hpre_hw_error_handle(struct hpre *hpre)
{
	u32 err_sts;

	/* read error status */
	err_sts = readl(hpre->qm.io_base + HPRE_HAC_INT_STATUS);
	if (err_sts) {
		hpre_log_hw_error(hpre, err_sts);

		/* clear error interrupts */
		writel(err_sts, hpre->qm.io_base + HPRE_HAC_SOURCE_INT);
		return PCI_ERS_RESULT_NEED_RESET;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

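/*
 * Combine the QM and HPRE RAS verdicts: request a reset if either
 * block reports an error that needs one, otherwise report recovered.
 */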
static pci_ers_result_t hpre_process_hw_error(struct pci_dev *pdev)
{
	struct hpre *hpre = pci_get_drvdata(pdev);
	pci_ers_result_t qm_ret, hpre_ret;

	/* log qm error */
	qm_ret = hisi_qm_hw_error_handle(&hpre->qm);

	/* log hpre error */
	hpre_ret = hpre_hw_error_handle(hpre);

	return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
		hpre_ret == PCI_ERS_RESULT_NEED_RESET) ?
		PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
}

static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	pci_info(pdev, "PCI error detected, state = %d!\n", state);
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	return hpre_process_hw_error(pdev);
}

static const struct pci_error_handlers hpre_err_handler = {
	.error_detected		= hpre_error_detected,
};

static struct pci_driver hpre_pci_driver = {
	.name			= hpre_name,
	.id_table		= hpre_dev_ids,
	.probe			= hpre_probe,
	.remove			= hpre_remove,
	.sriov_configure	= hpre_sriov_configure,
	.err_handler		= &hpre_err_handler,
};

static void hpre_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
	if (IS_ERR_OR_NULL(hpre_debugfs_root))
		hpre_debugfs_root = NULL;
}

static void hpre_unregister_debugfs(void)
{
	debugfs_remove_recursive(hpre_debugfs_root);
}

static int __init hpre_init(void)
{
	int ret;

	hpre_register_debugfs();

	ret = pci_register_driver(&hpre_pci_driver);
	if (ret) {
		hpre_unregister_debugfs();
		pr_err("hpre: can't register hisi hpre driver.\n");
	}

	return ret;
}

static void __exit hpre_exit(void)
{
	pci_unregister_driver(&hpre_pci_driver);
	hpre_unregister_debugfs();
}

module_init(hpre_init);
module_exit(hpre_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");