// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME				"pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED			-1
#define IRQ_TYPE_LEGACY				0
#define IRQ_TYPE_MSI				1
#define IRQ_TYPE_MSIX				2

#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_LEGACY_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c
#define FLAG_USE_DMA				BIT(0)

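/*
 * The offsets above describe the endpoint's test register space.  They are
 * expected to match the layout implemented by the endpoint side test
 * function driver (drivers/pci/endpoint/functions/pci-epf-test.c), which
 * exposes these registers through the BAR selected as test_reg_bar.
 */
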
#define PCI_DEVICE_ID_TI_AM654			0xb00c
#define PCI_DEVICE_ID_TI_J7200			0xb00f
#define PCI_DEVICE_ID_TI_AM64			0xb010
#define PCI_DEVICE_ID_LS1088A			0x80c0
#define PCI_DEVICE_ID_IMX8			0x0808

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	int		irq_type;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}

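/*
 * The endpoint signals completion of a command by setting STATUS_IRQ_RAISED
 * in its status register and raising the interrupt selected through
 * PCI_ENDPOINT_TEST_IRQ_TYPE/IRQ_NUMBER.  The handler below records which
 * vector fired and wakes up the waiter blocked on irq_raised.
 */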
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
	}

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_LEGACY:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (irq_type) {
	case IRQ_TYPE_LEGACY:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	return false;
}

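/*
 * BAR sanity test: fill the BAR with a known pattern and read it back.
 * For the BAR that carries the test registers only the first dword is
 * exercised, so the live register block is not clobbered.
 */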
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != 0xA0A0A0A0)
			return false;
	}

	return true;
}

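/*
 * The interrupt tests program PCI_ENDPOINT_TEST_IRQ_TYPE/IRQ_NUMBER, ask the
 * endpoint to raise the interrupt through the command register and then wait
 * up to one second for the completion signalled by the interrupt handler.
 */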
static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_LEGACY);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_LEGACY_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				      u16 msi_num, bool msix)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix ? COMMAND_RAISE_MSIX_IRQ :
				 COMMAND_RAISE_MSI_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
}

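/*
 * Reject transfer parameters that cannot be handled safely: a zero-sized
 * transfer, and sizes where adding the alignment padding would overflow
 * SIZE_MAX (the buffers below are allocated with size + alignment bytes).
 */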
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}

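/*
 * PCITEST_COPY: fill a source buffer with random data, hand the bus
 * addresses of the source and destination buffers to the endpoint, ask it
 * to copy the data (optionally with its DMA engine when FLAG_USE_DMA is
 * set) and compare the CRC32 of both buffers once the completion interrupt
 * arrives.  The write and read tests further below follow the same pattern
 * with a single buffer and the checksum register.
 */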
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}

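/*
 * Note on naming: PCITEST_WRITE exercises a host-to-endpoint transfer, so
 * the endpoint is asked to perform COMMAND_READ on the buffer the host has
 * just filled and checksummed.  Conversely, PCITEST_READ below asks the
 * endpoint to COMMAND_WRITE into a host buffer, which is then checksummed
 * against the endpoint's PCI_ENDPOINT_TEST_CHECKSUM register.
 */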
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}

static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}

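/*
 * PCITEST_SET_IRQTYPE: switch between legacy, MSI and MSI-X at run time by
 * dropping the currently requested handlers and vectors and allocating the
 * newly requested type.  On failure the device is left with no vectors
 * allocated and irq_type set to IRQ_TYPE_UNDEFINED.
 */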
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}

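/*
 * Minimal user space usage sketch (a sketch only: it assumes the misc device
 * created in probe below, e.g. /dev/pci-endpoint-test.0, and the ioctl
 * encodings from <linux/pcitest.h>; the pcitest tool under tools/pci/ wraps
 * the same calls):
 *
 *	int fd = open("/dev/pci-endpoint-test.0", O_RDWR);
 *	struct pci_endpoint_test_xfer_param param = {
 *		.size = 1024,
 *		.flags = 0,			// or PCITEST_FLAGS_USE_DMA
 *	};
 *
 *	ioctl(fd, PCITEST_SET_IRQTYPE, 1);	// 1 == MSI
 *	ioctl(fd, PCITEST_BAR, 0);		// test BAR0
 *	ioctl(fd, PCITEST_MSI, 1);		// raise MSI vector 1
 *	ioctl(fd, PCITEST_WRITE, &param);	// host -> endpoint
 *	ioctl(fd, PCITEST_READ, &param);	// endpoint -> host
 *	ioctl(fd, PCITEST_COPY, &param);	// endpoint-side copy
 *	close(fd);
 *
 * The test ioctls return the bool result of the helpers above (1 on
 * success, 0 on failure); unknown commands return -EINVAL.
 */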
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);

	reinit_completion(&test->irq_raised);
	test->last_irq = -ENODATA;

	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar > BAR_5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_LEGACY_IRQ:
		ret = pci_endpoint_test_legacy_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_LEGACY;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

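/*
 * Per-device configuration: which BAR the endpoint test function exposes
 * its registers in and what DMA address alignment the endpoint controller
 * requires.  Entries without driver_data fall back to test_reg_bar = BAR_0
 * and no alignment constraint.
 */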
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");