// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"

#define NR_RING_VECTORS 3
#define NPS_CORE_INT_ACTIVE_ENTRY 192

/**
 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
 * @irq: irq number
 * @data: argument
 */
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
        struct bh_data *slc = data;
        union nps_pkt_slc_cnts pkt_slc_cnts;

        pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr);
        /* New packet on SLC output port */
        if (pkt_slc_cnts.s.slc_int)
                tasklet_hi_schedule(&slc->resp_handler);

        return IRQ_HANDLED;
}

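/**
 * clear_nps_core_err_intr - clear NPS core error interrupts
 * @ndev: NITROX device
 */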
static void clear_nps_core_err_intr(struct nitrox_device *ndev)
{
        u64 value;

        /* Write 1 to clear */
        value = nitrox_read_csr(ndev, NPS_CORE_INT);
        nitrox_write_csr(ndev, NPS_CORE_INT, value);

        dev_err_ratelimited(DEV(ndev), "NPS_CORE_INT  0x%016llx\n", value);
}

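/**
 * clear_nps_pkt_err_intr - clear NPS packet error interrupts
 * @ndev: NITROX device
 *
 * Clears solicit (SLC) and input ring errors and re-enables the
 * affected ports/rings.
 */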
static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
{
        union nps_pkt_int pkt_int;
        unsigned long value, offset;
        int i;

        pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
        dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT  0x%016llx\n",
                            pkt_int.value);

        if (pkt_int.s.slc_err) {
                offset = NPS_PKT_SLC_ERR_TYPE;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_SLC_ERR_TYPE  0x%016lx\n", value);

                offset = NPS_PKT_SLC_RERR_LO;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                /* re-enable the solicit ports which had errors */
                for_each_set_bit(i, &value, BITS_PER_LONG)
                        enable_pkt_solicit_port(ndev, i);

                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_SLC_RERR_LO  0x%016lx\n", value);

                offset = NPS_PKT_SLC_RERR_HI;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_SLC_RERR_HI  0x%016lx\n", value);
        }

        if (pkt_int.s.in_err) {
                offset = NPS_PKT_IN_ERR_TYPE;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_IN_ERR_TYPE  0x%016lx\n", value);
                offset = NPS_PKT_IN_RERR_LO;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                /* re-enable the input rings which had errors */
                for_each_set_bit(i, &value, BITS_PER_LONG)
                        enable_pkt_input_ring(ndev, i);

                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_IN_RERR_LO  0x%016lx\n", value);

                offset = NPS_PKT_IN_RERR_HI;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_IN_RERR_HI  0x%016lx\n", value);
        }
}

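/**
 * clear_pom_err_intr - clear POM error interrupts
 * @ndev: NITROX device
 */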
static void clear_pom_err_intr(struct nitrox_device *ndev)
{
        u64 value;

        value = nitrox_read_csr(ndev, POM_INT);
        nitrox_write_csr(ndev, POM_INT, value);
        dev_err_ratelimited(DEV(ndev), "POM_INT  0x%016llx\n", value);
}

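/**
 * clear_pem_err_intr - clear PEM error interrupts
 * @ndev: NITROX device
 */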
static void clear_pem_err_intr(struct nitrox_device *ndev)
{
        u64 value;

        value = nitrox_read_csr(ndev, PEM0_INT);
        nitrox_write_csr(ndev, PEM0_INT, value);
        dev_err_ratelimited(DEV(ndev), "PEM(0)_INT  0x%016llx\n", value);
}

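/**
 * clear_lbc_err_intr - clear LBC error interrupts
 * @ndev: NITROX device
 *
 * Handles DMA read errors, CAM soft errors and prefetch/read data
 * length mismatch errors.
 */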
static void clear_lbc_err_intr(struct nitrox_device *ndev)
{
        union lbc_int lbc_int;
        u64 value, offset;
        int i;

        lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
        dev_err_ratelimited(DEV(ndev), "LBC_INT  0x%016llx\n", lbc_int.value);

        if (lbc_int.s.dma_rd_err) {
                for (i = 0; i < NR_CLUSTERS; i++) {
                        offset = EFL_CORE_VF_ERR_INT0X(i);
                        value = nitrox_read_csr(ndev, offset);
                        nitrox_write_csr(ndev, offset, value);
                        offset = EFL_CORE_VF_ERR_INT1X(i);
                        value = nitrox_read_csr(ndev, offset);
                        nitrox_write_csr(ndev, offset, value);
                }
        }

        if (lbc_int.s.cam_soft_err) {
                dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
                invalidate_lbc(ndev);
        }

        if (lbc_int.s.pref_dat_len_mismatch_err) {
                offset = LBC_PLM_VF1_64_INT;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                offset = LBC_PLM_VF65_128_INT;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
        }

        if (lbc_int.s.rd_dat_len_mismatch_err) {
                offset = LBC_ELM_VF1_64_INT;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                offset = LBC_ELM_VF65_128_INT;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
        }
        nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
}

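/**
 * clear_efl_err_intr - clear EFL core error interrupts
 * @ndev: NITROX device
 */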
static void clear_efl_err_intr(struct nitrox_device *ndev)
{
        int i;

        for (i = 0; i < NR_CLUSTERS; i++) {
                union efl_core_int core_int;
                u64 value, offset;

                offset = EFL_CORE_INTX(i);
                core_int.value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, core_int.value);
                dev_err_ratelimited(DEV(ndev), "EFL_CORE(%d)_INT  0x%016llx\n",
                                    i, core_int.value);
                if (core_int.s.se_err) {
                        offset = EFL_CORE_SE_ERR_INTX(i);
                        value = nitrox_read_csr(ndev, offset);
                        nitrox_write_csr(ndev, offset, value);
                }
        }
}

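/**
 * clear_bmi_err_intr - clear BMI error interrupts
 * @ndev: NITROX device
 */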
static void clear_bmi_err_intr(struct nitrox_device *ndev)
{
        u64 value;

        value = nitrox_read_csr(ndev, BMI_INT);
        nitrox_write_csr(ndev, BMI_INT, value);
        dev_err_ratelimited(DEV(ndev), "BMI_INT  0x%016llx\n", value);
}

/**
 * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts
 * @ndev: NITROX device
 */
static void clear_nps_core_int_active(struct nitrox_device *ndev)
{
        union nps_core_int_active core_int_active;

        core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);

        if (core_int_active.s.nps_core)
                clear_nps_core_err_intr(ndev);

        if (core_int_active.s.nps_pkt)
                clear_nps_pkt_err_intr(ndev);

        if (core_int_active.s.pom)
                clear_pom_err_intr(ndev);

        if (core_int_active.s.pem)
                clear_pem_err_intr(ndev);

        if (core_int_active.s.lbc)
                clear_lbc_err_intr(ndev);

        if (core_int_active.s.efl)
                clear_efl_err_intr(ndev);

        if (core_int_active.s.bmi)
                clear_bmi_err_intr(ndev);

        /* set the resend bit so the ISR is called back if more work is pending */
        core_int_active.s.resend = 1;
        nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value);
}

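/**
 * nps_core_int_isr - IRQ handler for NPS_CORE_INT_ACTIVE
 * @irq: irq number
 * @data: NITROX device pointer
 */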
static irqreturn_t nps_core_int_isr(int irq, void *data)
{
        struct nitrox_device *ndev = data;

        clear_nps_core_int_active(ndev);

        return IRQ_HANDLED;
}

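/**
 * nitrox_enable_msix - allocate and enable PF MSI-X vectors
 * @ndev: NITROX device
 *
 * Return: 0 on success, a negative value on failure.
 */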
static int nitrox_enable_msix(struct nitrox_device *ndev)
{
        struct msix_entry *entries;
        char **names;
        int i, nr_entries, ret;

        /*
         * PF MSI-X vectors
         *
         * Entry 0: NPS PKT ring 0
         * Entry 1: AQMQ ring 0
         * Entry 2: ZQM ring 0
         * Entry 3: NPS PKT ring 1
         * Entry 4: AQMQ ring 1
         * Entry 5: ZQM ring 1
         * ....
         * Entry 192: NPS_CORE_INT_ACTIVE
         */
        nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
        entries = kzalloc_node(nr_entries * sizeof(struct msix_entry),
                               GFP_KERNEL, ndev->node);
        if (!entries)
                return -ENOMEM;

        names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL);
        if (!names) {
                kfree(entries);
                return -ENOMEM;
        }

        /* fill entries */
        for (i = 0; i < (nr_entries - 1); i++)
                entries[i].entry = i;

        entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY;

        for (i = 0; i < nr_entries; i++) {
                *(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
                if (!(*(names + i))) {
                        ret = -ENOMEM;
                        goto msix_fail;
                }
        }
        ndev->msix.entries = entries;
        ndev->msix.names = names;
        ndev->msix.nr_entries = nr_entries;

        ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,
                                    ndev->msix.nr_entries);
        if (ret) {
                dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n",
                        ret);
                goto msix_fail;
        }
        return 0;

msix_fail:
        for (i = 0; i < nr_entries; i++)
                kfree(*(names + i));

        kfree(entries);
        kfree(names);
        return ret;
}

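/**
 * nitrox_cleanup_pkt_slc_bh - disable and free the SLC response tasklets
 * @ndev: NITROX device
 */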
static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev)
{
        int i;

        if (!ndev->bh.slc)
                return;

        for (i = 0; i < ndev->nr_queues; i++) {
                struct bh_data *bh = &ndev->bh.slc[i];

                tasklet_disable(&bh->resp_handler);
                tasklet_kill(&bh->resp_handler);
        }
        kfree(ndev->bh.slc);
        ndev->bh.slc = NULL;
}

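/**
 * nitrox_setup_pkt_slc_bh - setup the SLC response tasklets (bottom halves)
 * @ndev: NITROX device
 *
 * Return: 0 on success, -ENOMEM on failure.
 */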
static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev)
{
        u32 size;
        int i;

        size = ndev->nr_queues * sizeof(struct bh_data);
        ndev->bh.slc = kzalloc(size, GFP_KERNEL);
        if (!ndev->bh.slc)
                return -ENOMEM;

        for (i = 0; i < ndev->nr_queues; i++) {
                struct bh_data *bh = &ndev->bh.slc[i];
                u64 offset;

                offset = NPS_PKT_SLC_CNTSX(i);
                /* precalculate the completion count CSR address */
                bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
                bh->cmdq = &ndev->pkt_cmdqs[i];

                tasklet_init(&bh->resp_handler, pkt_slc_resp_handler,
                             (unsigned long)bh);
        }

        return 0;
}

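/**
 * nitrox_request_irqs - request IRQs for the packet rings and NPS_CORE_INT_ACTIVE
 * @ndev: NITROX device
 *
 * Return: 0 on success, a negative value on failure.
 */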
static int nitrox_request_irqs(struct nitrox_device *ndev)
{
        struct pci_dev *pdev = ndev->pdev;
        struct msix_entry *msix_ent = ndev->msix.entries;
        int nr_ring_vectors, i = 0, ring, cpu, ret;
        char *name;

        /*
         * PF MSI-X vectors
         *
         * Entry 0: NPS PKT ring 0
         * Entry 1: AQMQ ring 0
         * Entry 2: ZQM ring 0
         * Entry 3: NPS PKT ring 1
         * ....
         * Entry 192: NPS_CORE_INT_ACTIVE
         */
        nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;

        /* request irq for pkt ring/ports only */
        while (i < nr_ring_vectors) {
                name = *(ndev->msix.names + i);
                ring = (i / NR_RING_VECTORS);
                snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
                         ndev->idx, ring);

                ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
                                  name, &ndev->bh.slc[ring]);
                if (ret) {
                        dev_err(&pdev->dev, "failed to get irq %d for %s\n",
                                msix_ent[i].vector, name);
                        return ret;
                }
                cpu = ring % num_online_cpus();
                irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));

                set_bit(i, ndev->msix.irqs);
                i += NR_RING_VECTORS;
        }

        /* Request IRQ for NPS_CORE_INT_ACTIVE */
        name = *(ndev->msix.names + i);
        snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
        ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
        if (ret) {
                dev_err(&pdev->dev, "failed to get irq %d for %s\n",
                        msix_ent[i].vector, name);
                return ret;
        }
        set_bit(i, ndev->msix.irqs);

        return 0;
}

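/**
 * nitrox_disable_msix - free the requested IRQs and disable MSI-X
 * @ndev: NITROX device
 */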
static void nitrox_disable_msix(struct nitrox_device *ndev)
{
        struct msix_entry *msix_ent = ndev->msix.entries;
        char **names = ndev->msix.names;
        int i = 0, ring, nr_ring_vectors;

        nr_ring_vectors = ndev->msix.nr_entries - 1;

        /* clear pkt ring irqs */
        while (i < nr_ring_vectors) {
                if (test_and_clear_bit(i, ndev->msix.irqs)) {
                        ring = (i / NR_RING_VECTORS);
                        irq_set_affinity_hint(msix_ent[i].vector, NULL);
                        free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
                }
                i += NR_RING_VECTORS;
        }
        irq_set_affinity_hint(msix_ent[i].vector, NULL);
        free_irq(msix_ent[i].vector, ndev);
        clear_bit(i, ndev->msix.irqs);

        kfree(ndev->msix.entries);
        for (i = 0; i < ndev->msix.nr_entries; i++)
                kfree(*(names + i));

        kfree(names);
        pci_disable_msix(ndev->pdev);
}

/**
 * nitrox_pf_cleanup_isr - Cleanup PF MSI-X and IRQ
 * @ndev: NITROX device
 */
void nitrox_pf_cleanup_isr(struct nitrox_device *ndev)
{
        nitrox_disable_msix(ndev);
        nitrox_cleanup_pkt_slc_bh(ndev);
}

/**
 * nitrox_pf_init_isr - Initialize PF MSI-X vectors and IRQ
 * @ndev: NITROX device
 *
 * Return: 0 on success, a negative value on failure.
 */
int nitrox_pf_init_isr(struct nitrox_device *ndev)
{
        int err;

        err = nitrox_setup_pkt_slc_bh(ndev);
        if (err)
                return err;

        err = nitrox_enable_msix(ndev);
        if (err)
                goto msix_fail;

        err = nitrox_request_irqs(ndev);
        if (err)
                goto irq_fail;

        return 0;

irq_fail:
        nitrox_disable_msix(ndev);
msix_fail:
        nitrox_cleanup_pkt_slc_bh(ndev);
        return err;
}