// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME			"pci-endpoint-test"

#define PCI_ENDPOINT_TEST_MAGIC		0x0

#define PCI_ENDPOINT_TEST_COMMAND	0x4
#define COMMAND_RAISE_INTX_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

#define PCI_ENDPOINT_TEST_STATUS	0x8
#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE		0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM	0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE	0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER	0x28

#define PCI_ENDPOINT_TEST_FLAGS		0x2c
#define FLAG_USE_DMA			BIT(0)

#define PCI_ENDPOINT_TEST_CAPS		0x30
#define CAP_UNALIGNED_ACCESS		BIT(0)
#define CAP_MSI				BIT(1)
#define CAP_MSIX			BIT(2)
#define CAP_INTX			BIT(3)

#define PCI_DEVICE_ID_TI_AM654		0xb00c
#define PCI_DEVICE_ID_TI_J7200		0xb00f
#define PCI_DEVICE_ID_TI_AM64		0xb010
#define PCI_DEVICE_ID_TI_J721S2		0xb013
#define PCI_DEVICE_ID_LS1088A		0x80c0
#define PCI_DEVICE_ID_IMX8		0x0808

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1	0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1	0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0	0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1	0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0	0x0031

#define PCI_DEVICE_ID_ROCKCHIP_RK3588	0x3588

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev *pdev;
	void __iomem *base;
	void __iomem *bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int last_irq;
	int num_irqs;
	int irq_type;
	/* mutex to protect the ioctls */
	struct mutex mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	u32 ep_caps;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
	}

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;
}

static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
					       int type)
{
	int irq;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	switch (type) {
	case PCITEST_IRQ_TYPE_INTX:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
		if (irq < 0) {
			dev_err(dev, "Failed to get Legacy interrupt\n");
			return irq;
		}

		break;
	case PCITEST_IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0) {
			dev_err(dev, "Failed to get MSI interrupts\n");
			return irq;
		}

		break;
	case PCITEST_IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0) {
			dev_err(dev, "Failed to get MSI-X interrupts\n");
			return irq;
		}

		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
		return -EINVAL;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return 0;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;

	for (i = 0; i < test->num_irqs; i++)
		free_irq(pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int ret;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		ret = request_irq(pci_irq_vector(pdev, i),
				  pci_endpoint_test_irqhandler, IRQF_SHARED,
				  test->name, test);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	switch (test->irq_type) {
	case PCITEST_IRQ_TYPE_INTX:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case PCITEST_IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case PCITEST_IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	test->num_irqs = i;
	pci_endpoint_test_release_irq(test);

	return ret;
}

static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};

static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
					enum pci_barno barno,
					resource_size_t offset, void *write_buf,
					void *read_buf, int size)
{
	memset(write_buf, bar_test_pattern[barno], size);
	memcpy_toio(test->bar[barno] + offset, write_buf, size);

	memcpy_fromio(read_buf, test->bar[barno] + offset, size);

	return memcmp(write_buf, read_buf, size);
}

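/*
 * PCITEST_BAR: fill the mapped BAR window with the per-BAR byte pattern and
 * read it back, comparing one buf_size chunk at a time. Only the first dword
 * of the test register BAR is exercised so the test registers themselves are
 * not clobbered.
 */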
static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
				 enum pci_barno barno)
{
	resource_size_t bar_size, offset = 0;
	void *write_buf __free(kfree) = NULL;
	void *read_buf __free(kfree) = NULL;
	struct pci_dev *pdev = test->pdev;
	int buf_size;

	bar_size = pci_resource_len(pdev, barno);
	if (!bar_size)
		return -ENODATA;

	if (!test->bar[barno])
		return -ENOMEM;

	if (barno == test->test_reg_bar)
		bar_size = 0x4;

	/*
	 * Allocate a buffer of max size 1MB, and reuse that buffer while
	 * iterating over the whole BAR size (which might be much larger).
	 */
	buf_size = min(SZ_1M, bar_size);

	write_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!write_buf)
		return -ENOMEM;

	read_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!read_buf)
		return -ENOMEM;

	while (offset < bar_size) {
		if (pci_endpoint_test_bar_memcmp(test, barno, offset, write_buf,
						 read_buf, buf_size))
			return -EIO;
		offset += buf_size;
	}

	return 0;
}

static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset)
{
	u32 val;

	/* Keep the BAR pattern in the top byte. */
	val = bar_test_pattern[barno] & 0xff000000;
	/* Store the (partial) offset in the remaining bytes. */
	val |= offset & 0x00ffffff;

	return val;
}

static void pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test,
					     enum pci_barno barno)
{
	struct pci_dev *pdev = test->pdev;
	int j, size;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		writel_relaxed(bar_test_pattern_with_offset(barno, j),
			       test->bar[barno] + j);
}

static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
					   enum pci_barno barno)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int j, size;
	u32 val;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4) {
		u32 expected = bar_test_pattern_with_offset(barno, j);

		val = readl_relaxed(test->bar[barno] + j);
		if (val != expected) {
			dev_err(dev,
				"BAR%d incorrect data at offset: %#x, got: %#x expected: %#x\n",
				barno, j, val, expected);
			return -EIO;
		}
	}

	return 0;
}

static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
{
	enum pci_barno bar;
	int ret;

	/* Write all BARs in order (without reading). */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
		if (test->bar[bar])
			pci_endpoint_test_bars_write_bar(test, bar);

	/*
	 * Read all BARs in order (without writing).
	 * If there is an address translation issue on the EP, writing one BAR
	 * might have overwritten another BAR. Ensure that this is not the case.
	 * (Reading back the BAR directly after writing can not detect this.)
	 */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar]) {
			ret = pci_endpoint_test_bars_read_bar(test, bar);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 PCITEST_IRQ_TYPE_INTX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_INTX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return -ETIMEDOUT;

	return 0;
}

static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				     u16 msi_num, bool msix)
{
	struct pci_dev *pdev = test->pdev;
	u32 val;
	int ret;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix ? PCITEST_IRQ_TYPE_MSIX :
				 PCITEST_IRQ_TYPE_MSI);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix ? COMMAND_RAISE_MSIX_IRQ :
				 COMMAND_RAISE_MSI_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return -ETIMEDOUT;

	ret = pci_irq_vector(pdev, msi_num - 1);
	if (ret < 0)
		return ret;

	if (ret != test->last_irq)
		return -EIO;

	return 0;
}

static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}

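/*
 * PCITEST_COPY: the host supplies a randomised source buffer and an empty
 * destination buffer, asks the endpoint to copy 'size' bytes between them,
 * and then compares the CRC32 of both buffers to verify the transfer.
 */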
static int pci_endpoint_test_copy(struct pci_endpoint_test *test,
				  unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_src_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = -ENOMEM;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_dst_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map destination buffer address\n");
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 != src_crc32)
		ret = -EIO;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);
	return ret;
}

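/*
 * PCITEST_WRITE: a host-to-endpoint transfer. The command is COMMAND_READ
 * because it is named from the endpoint's point of view: the endpoint reads
 * the host buffer and checks it against the CRC32 the host programmed into
 * PCI_ENDPOINT_TEST_CHECKSUM, reporting the result via STATUS_READ_SUCCESS.
 */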
static int pci_endpoint_test_write(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (!(reg & STATUS_READ_SUCCESS))
		ret = -EIO;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);
	return ret;
}

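/*
 * PCITEST_READ: an endpoint-to-host transfer. The command is COMMAND_WRITE
 * because the endpoint writes into the host buffer; the host then computes
 * the CRC32 of what it received and compares it with the value the endpoint
 * stored in PCI_ENDPOINT_TEST_CHECKSUM.
 */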
static int pci_endpoint_test_read(struct pci_endpoint_test *test,
				  unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		return -ENOMEM;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 != pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = -EIO;

err_phys_addr:
	kfree(orig_addr);
	return ret;
}

static int pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	return 0;
}

static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				     int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	if (req_irq_type < PCITEST_IRQ_TYPE_INTX ||
	    req_irq_type > PCITEST_IRQ_TYPE_AUTO) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	if (req_irq_type == PCITEST_IRQ_TYPE_AUTO) {
		if (test->ep_caps & CAP_MSI)
			req_irq_type = PCITEST_IRQ_TYPE_MSI;
		else if (test->ep_caps & CAP_MSIX)
			req_irq_type = PCITEST_IRQ_TYPE_MSIX;
		else if (test->ep_caps & CAP_INTX)
			req_irq_type = PCITEST_IRQ_TYPE_INTX;
		else
			/* fallback to MSI if no caps defined */
			req_irq_type = PCITEST_IRQ_TYPE_MSI;
	}

	if (test->irq_type == req_irq_type)
		return 0;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	ret = pci_endpoint_test_alloc_irq_vectors(test, req_irq_type);
	if (ret)
		return ret;

	ret = pci_endpoint_test_request_irq(test);
	if (ret) {
		pci_endpoint_test_free_irq_vectors(test);
		return ret;
	}

	return 0;
}

static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);

	reinit_completion(&test->irq_raised);
	test->last_irq = -ENODATA;

	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar > BAR_5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_BARS:
		ret = pci_endpoint_test_bars(test);
		break;
	case PCITEST_INTX_IRQ:
		ret = pci_endpoint_test_intx_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = test->irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	test->ep_caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
	dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", test->ep_caps);

	/* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
	if (test->ep_caps & CAP_UNALIGNED_ACCESS)
		test->alignment = 0;
}

static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int ret;
	int id;
	char name[29];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable PCI device\n");
		return ret;
	}

	ret = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (ret) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		ret = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
	if (id < 0) {
		ret = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		ret = -ENOMEM;
		goto err_ida_remove;
	}

	pci_endpoint_test_get_capabilities(test);

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		ret = -ENOMEM;
		goto err_kfree_test_name;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	ret = misc_register(misc_device);
	if (ret) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return ret;
}

static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_free(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
};

static const struct pci_endpoint_test_data rk3588_data = {
	.alignment = SZ_64K,
};

/*
 * If the controller's Vendor/Device ID are programmable, you may be able to
 * use one of the existing entries for testing instead of adding a new one.
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
	  .driver_data = (kernel_ulong_t)&rk3588_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");