Merge branch 'pci/endpoint'
[linux-2.6-block.git] / drivers / pci / endpoint / functions / pci-epf-test.c
CommitLineData
8cfab3cf 1// SPDX-License-Identifier: GPL-2.0
43395d9e 2/*
349e7a85
KVA
3 * Test driver to test endpoint functionality
4 *
5 * Copyright (C) 2017 Texas Instruments
6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
349e7a85
KVA
7 */
8
9#include <linux/crc32.h>
10#include <linux/delay.h>
5ebf3fc5 11#include <linux/dmaengine.h>
349e7a85
KVA
12#include <linux/io.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/pci_ids.h>
16#include <linux/random.h>
17
18#include <linux/pci-epc.h>
19#include <linux/pci-epf.h>
20#include <linux/pci_regs.h>
21
/* IRQ type the host selects via pci_epf_test_reg.irq_type */
#define IRQ_TYPE_INTX			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

/* Commands the host writes to pci_epf_test_reg.command (one bit per op) */
#define COMMAND_RAISE_INTX_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

/* Status bits reported back to the host in pci_epf_test_reg.status */
#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

/* pci_epf_test_reg.flags bit: host requests DMA instead of CPU memcpy */
#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

/* Workqueue on which the 1 ms command-polling handler is (re)queued */
static struct workqueue_struct *kpcitest_workqueue;
48
/* Per-function driver state for the pci-epf-test endpoint function */
struct pci_epf_test {
	/* Kernel virtual address of each BAR's backing space (NULL if unused) */
	void *reg[PCI_STD_NUM_BARS];
	struct pci_epf *epf;
	/* BAR that holds struct pci_epf_test_reg (and MSI-X table, if any) */
	enum pci_barno test_reg_bar;
	/* Offset of the MSI-X table inside test_reg_bar's space */
	size_t msix_table_offset;
	/* Polls reg->command; rescheduled every 1 ms on kpcitest_workqueue */
	struct delayed_work cmd_handler;
	/* tx = MEM_TO_DEV, rx = DEV_TO_MEM; both alias one channel in memcpy mode */
	struct dma_chan *dma_chan_tx;
	struct dma_chan *dma_chan_rx;
	/* Channel/cookie/status of the transfer currently in flight */
	struct dma_chan *transfer_chan;
	dma_cookie_t transfer_cookie;
	enum dma_status transfer_status;
	struct completion transfer_complete;
	bool dma_supported;
	/* true when dedicated slave channels (not generic memcpy) were acquired */
	bool dma_private;
	const struct pci_epc_features *epc_features;
};
65
/*
 * Register layout shared with the host through test_reg_bar. The host-side
 * pci_endpoint_test driver reads/writes these fields directly over the BAR,
 * so the layout is __packed and must not be reordered.
 */
struct pci_epf_test_reg {
	u32 magic;
	u32 command;	/* COMMAND_* bit written by the host */
	u32 status;	/* STATUS_* bits reported to the host */
	u64 src_addr;	/* host-side PCI address of the transfer source */
	u64 dst_addr;	/* host-side PCI address of the transfer destination */
	u32 size;
	u32 checksum;	/* crc32_le over the transferred payload */
	u32 irq_type;	/* IRQ_TYPE_* selected by the host */
	u32 irq_number;	/* 1-based vector number for MSI/MSI-X */
	u32 flags;	/* FLAG_* modifiers, e.g. FLAG_USE_DMA */
} __packed;
78
/* Default configuration-space header; IDs are overridable via configfs */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code	= PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

/* Default size (bytes) for each non-test-reg BAR, indexed by BAR number */
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
349e7a85 87
5ebf3fc5
KVA
/*
 * DMA completion callback: record the final descriptor status and wake the
 * waiter in pci_epf_test_data_transfer(). Only terminal states (complete or
 * error) signal the completion; intermediate states are ignored.
 */
static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;
	struct dma_tx_state state;

	epf_test->transfer_status =
		dmaengine_tx_status(epf_test->transfer_chan,
				    epf_test->transfer_cookie, &state);
	if (epf_test->transfer_status == DMA_COMPLETE ||
	    epf_test->transfer_status == DMA_ERROR)
		complete(&epf_test->transfer_complete);
}
100
/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	/* MEM_TO_DEV uses the tx channel, everything else the rx channel */
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		/* Dedicated slave channel: program the remote bus address */
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		/* Generic memcpy channel: both addresses go in the descriptor */
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	/*
	 * Reinit before submit so a stale completion from a previous transfer
	 * cannot satisfy this wait; the callback needs chan/cookie set first.
	 */
	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	/* Always quiesce the channel, including on the success path */
	dmaengine_terminate_sync(chan);

	return ret;
}
191
8353813c
FL
/* Match criteria handed to dma_request_channel() by the filter callback */
struct epf_dma_filter {
	struct device *dev;	/* DMA device expected to own the channel */
	u32 dma_mask;		/* required direction bit, BIT(DMA_*_TO_*) */
};
196
197static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
198{
199 struct epf_dma_filter *filter = node;
200 struct dma_slave_caps caps;
201
202 memset(&caps, 0, sizeof(caps));
203 dma_get_slave_caps(chan, &caps);
204
205 return chan->device->dev == filter->dev
206 && (filter->dma_mask & caps.directions);
207}
208
5ebf3fc5
KVA
209/**
210 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
211 * @epf_test: the EPF test device that performs data transfer operation
212 *
213 * Function to initialize EPF test DMA channel.
214 */
215static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
216{
217 struct pci_epf *epf = epf_test->epf;
218 struct device *dev = &epf->dev;
8353813c 219 struct epf_dma_filter filter;
5ebf3fc5
KVA
220 struct dma_chan *dma_chan;
221 dma_cap_mask_t mask;
222 int ret;
223
8353813c
FL
224 filter.dev = epf->epc->dev.parent;
225 filter.dma_mask = BIT(DMA_DEV_TO_MEM);
226
227 dma_cap_zero(mask);
228 dma_cap_set(DMA_SLAVE, mask);
229 dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
230 if (!dma_chan) {
231 dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
232 goto fail_back_tx;
233 }
234
235 epf_test->dma_chan_rx = dma_chan;
236
237 filter.dma_mask = BIT(DMA_MEM_TO_DEV);
238 dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
239
240 if (!dma_chan) {
241 dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
242 goto fail_back_rx;
243 }
244
245 epf_test->dma_chan_tx = dma_chan;
246 epf_test->dma_private = true;
247
248 init_completion(&epf_test->transfer_complete);
249
250 return 0;
251
252fail_back_rx:
253 dma_release_channel(epf_test->dma_chan_rx);
254 epf_test->dma_chan_tx = NULL;
255
256fail_back_tx:
5ebf3fc5
KVA
257 dma_cap_zero(mask);
258 dma_cap_set(DMA_MEMCPY, mask);
259
260 dma_chan = dma_request_chan_by_mask(&mask);
261 if (IS_ERR(dma_chan)) {
262 ret = PTR_ERR(dma_chan);
263 if (ret != -EPROBE_DEFER)
264 dev_err(dev, "Failed to get DMA channel\n");
265 return ret;
266 }
267 init_completion(&epf_test->transfer_complete);
268
8353813c 269 epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;
5ebf3fc5
KVA
270
271 return 0;
272}
273
274/**
275 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
9b41d19a 276 * @epf_test: the EPF test device that performs data transfer operation
5ebf3fc5
KVA
277 *
278 * Helper to cleanup EPF test DMA channel.
279 */
280static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
281{
0e86d981
KH
282 if (!epf_test->dma_supported)
283 return;
284
8353813c
FL
285 dma_release_channel(epf_test->dma_chan_tx);
286 if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
287 epf_test->dma_chan_tx = NULL;
288 epf_test->dma_chan_rx = NULL;
289 return;
290 }
291
292 dma_release_channel(epf_test->dma_chan_rx);
293 epf_test->dma_chan_rx = NULL;
294
295 return;
5ebf3fc5
KVA
296}
297
1754dfd2
DLM
/*
 * Log the throughput of a completed transfer. rate is computed as
 * size / elapsed_ns * NSEC_PER_SEC / 1000, i.e. kilobytes per second.
 * NOTE(review): size * NSEC_PER_SEC can overflow u64 for sizes above
 * ~18 GB — presumably fine for this test driver, but worth confirming.
 */
static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
				    const char *op, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts = timespec64_sub(*end, *start);
	u64 rate = 0, ns;

	/* calculate the rate */
	ns = timespec64_to_ns(&ts);
	if (ns)
		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);

	dev_info(&epf_test->epf->dev,
		 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
		 op, size, dma ? "YES" : "NO",
		 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
}
316
2eec4bec
DLM
/*
 * COMMAND_COPY handler: copy reg->size bytes from the host address
 * reg->src_addr to reg->dst_addr, via either memcpy DMA or a CPU bounce
 * buffer, then report STATUS_COPY_{SUCCESS,FAIL} in reg->status. Error
 * labels unwind the map/alloc pairs in reverse acquisition order.
 */
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;

	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_src_addr;
	}

	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_dst_addr;
	}

	ktime_get_ts64(&start);
	if (reg->flags & FLAG_USE_DMA) {
		/* Dedicated slave channels cannot do MEM_TO_MEM copies */
		if (epf_test->dma_private) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 src_phys_addr, reg->size, 0,
						 DMA_MEM_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
	} else {
		void *buf;

		/* CPU path: bounce through RAM since both ends are __iomem */
		buf = kzalloc(reg->size, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto err_map_addr;
		}

		memcpy_fromio(buf, src_addr, reg->size);
		memcpy_toio(dst_addr, buf, reg->size);
		kfree(buf);
	}
	ktime_get_ts64(&end);
	pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end,
				reg->flags & FLAG_USE_DMA);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
	if (!ret)
		reg->status |= STATUS_COPY_SUCCESS;
	else
		reg->status |= STATUS_COPY_FAIL;
}
410
2eec4bec
DLM
/*
 * COMMAND_READ handler: read reg->size bytes from host memory at
 * reg->src_addr into a local buffer (DMA or memcpy_fromio), verify the
 * payload against reg->checksum, and report STATUS_READ_{SUCCESS,FAIL}.
 */
static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	/* DMA mappings must be made against the EPC's parent (the DMA device) */
	struct device *dma_dev = epf->epc->dev.parent;

	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	if (reg->flags & FLAG_USE_DMA) {
		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size,
						 reg->src_addr, DMA_DEV_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end,
				reg->flags & FLAG_USE_DMA);

	/* Verify payload integrity against the checksum the host wrote */
	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	if (!ret)
		reg->status |= STATUS_READ_SUCCESS;
	else
		reg->status |= STATUS_READ_FAIL;
}
495
2eec4bec
DLM
/*
 * COMMAND_WRITE handler: fill a local buffer with random bytes, publish its
 * crc32 in reg->checksum for the host to verify, write the buffer to host
 * memory at reg->dst_addr (DMA or memcpy_toio), and report
 * STATUS_WRITE_{SUCCESS,FAIL}.
 */
static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret;
	void __iomem *dst_addr;
	void *buf;
	phys_addr_t phys_addr;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	/* DMA mappings must be made against the EPC's parent (the DMA device) */
	struct device *dma_dev = epf->epc->dev.parent;

	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	if (reg->flags & FLAG_USE_DMA) {
		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, src_phys_addr)) {
			dev_err(dev, "Failed to map source buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);

		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
						 src_phys_addr, reg->size,
						 reg->dst_addr,
						 DMA_MEM_TO_DEV);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
				 DMA_TO_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_toio(dst_addr, buf, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end,
				reg->flags & FLAG_USE_DMA);

	/*
	 * Wait 1 ms for the write to complete. Without this delay an L3
	 * error is observed in the host system.
	 */
	usleep_range(1000, 2000);

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
	if (!ret)
		reg->status |= STATUS_WRITE_SUCCESS;
	else
		reg->status |= STATUS_WRITE_FAIL;
}
586
5444737e
DLM
/*
 * Raise the host-selected interrupt (INTx/MSI/MSI-X) after marking
 * STATUS_IRQ_RAISED in reg->status. For MSI/MSI-X the 1-based vector in
 * reg->irq_number is validated against the number of configured vectors.
 */
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
				   struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	u32 status = reg->status | STATUS_IRQ_RAISED;
	int count;

	/*
	 * Set the status before raising the IRQ to ensure that the host sees
	 * the updated value when it gets the IRQ.
	 */
	WRITE_ONCE(reg->status, status);

	switch (reg->irq_type) {
	case IRQ_TYPE_INTX:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_INTX, 0);
		break;
	case IRQ_TYPE_MSI:
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
				reg->irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSI, reg->irq_number);
		break;
	case IRQ_TYPE_MSIX:
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
				reg->irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSIX, reg->irq_number);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}
632
/*
 * Command-poll work handler: read and consume the host-written command from
 * the test register BAR, dispatch it, then unconditionally reschedule itself
 * in 1 ms. READ_ONCE/WRITE_ONCE are used because the host mutates these
 * fields concurrently over the BAR.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = READ_ONCE(reg->command);
	if (!command)
		goto reset_handler;

	/* Consume the command and clear stale status before handling it */
	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	switch (command) {
	case COMMAND_RAISE_INTX_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

reset_handler:
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
688
349e7a85
KVA
/*
 * Unbind callback: stop the command poller, release DMA channels, and tear
 * down every BAR that was allocated (clear the hardware BAR, then free its
 * backing space).
 */
static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	int bar;

	cancel_delayed_work(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
				  &epf->bar[bar]);
		pci_epf_free_space(epf, epf_test->reg[bar], bar,
				   PRIMARY_INTERFACE);
	}
}
707
/*
 * Program every allocated BAR into the endpoint controller. Failure to set
 * a data BAR only frees that BAR's space and continues; failure on the test
 * register BAR is fatal since the host cannot drive the test without it.
 */
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, ret;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
				      &epf->bar[bar]);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
			dev_err(dev, "Failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}
733
5e50ee27
VS
734static int pci_epf_test_core_init(struct pci_epf *epf)
735{
83153d9f 736 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
5e50ee27
VS
737 struct pci_epf_header *header = epf->header;
738 const struct pci_epc_features *epc_features;
739 struct pci_epc *epc = epf->epc;
740 struct device *dev = &epf->dev;
a01e7214 741 bool linkup_notifier = false;
5e50ee27
VS
742 bool msix_capable = false;
743 bool msi_capable = true;
744 int ret;
745
53fd3cbe 746 epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
5e50ee27
VS
747 if (epc_features) {
748 msix_capable = epc_features->msix_capable;
749 msi_capable = epc_features->msi_capable;
750 }
751
53fd3cbe
KVA
752 if (epf->vfunc_no <= 1) {
753 ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
754 if (ret) {
755 dev_err(dev, "Configuration header write failed\n");
756 return ret;
757 }
5e50ee27
VS
758 }
759
760 ret = pci_epf_test_set_bar(epf);
761 if (ret)
762 return ret;
763
764 if (msi_capable) {
53fd3cbe
KVA
765 ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
766 epf->msi_interrupts);
5e50ee27
VS
767 if (ret) {
768 dev_err(dev, "MSI configuration failed\n");
769 return ret;
770 }
771 }
772
773 if (msix_capable) {
53fd3cbe
KVA
774 ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
775 epf->msix_interrupts,
83153d9f
KVA
776 epf_test->test_reg_bar,
777 epf_test->msix_table_offset);
5e50ee27
VS
778 if (ret) {
779 dev_err(dev, "MSI-X configuration failed\n");
780 return ret;
781 }
782 }
783
a01e7214
MS
784 linkup_notifier = epc_features->linkup_notifier;
785 if (!linkup_notifier)
786 queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
787
5e50ee27
VS
788 return 0;
789}
790
/*
 * Link-up event callback: the host can now reach the BARs, so start (or
 * restart) the 1 ms command-polling work.
 */
static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}
800
f5edd871
MS
801static const struct pci_epc_event_ops pci_epf_test_event_ops = {
802 .core_init = pci_epf_test_core_init,
803 .link_up = pci_epf_test_link_up,
804};
805
349e7a85
KVA
/*
 * Allocate backing space for each BAR. The test register BAR additionally
 * hosts the MSI-X table and PBA (when MSI-X capable), laid out after the
 * 128-byte-aligned register block. Failure on a data BAR is logged but
 * tolerated (the entry stays NULL); only the register BAR is mandatory.
 */
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	bool msix_capable;
	void *base;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	enum pci_barno bar;
	const struct pci_epc_features *epc_features;
	size_t test_reg_size;

	epc_features = epf_test->epc_features;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	msix_capable = epc_features->msix_capable;
	if (msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocated register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
		/* Skip BARs the controller has reserved or cannot expose */
		bar = pci_epc_get_next_free_bar(epc_features, bar);
		if (bar == NO_BAR)
			break;

		if (bar == test_reg_bar)
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features, PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}
859
/*
 * Bind callback: query controller features, pick the first free BAR for the
 * test registers, allocate BAR space, and try to set up DMA. DMA failure is
 * not fatal — the driver falls back to CPU transfers (dma_supported=false).
 */
static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	return 0;
}
896
3235b994
KVA
897static const struct pci_epf_device_id pci_epf_test_ids[] = {
898 {
899 .name = "pci_epf_test",
900 },
901 {},
902};
903
/*
 * Probe callback: allocate per-function state (devm-managed, freed with the
 * epf device), install the default header and event callbacks, and prepare —
 * but do not queue — the command-poll work.
 */
static int pci_epf_test_probe(struct pci_epf *epf,
			      const struct pci_epf_device_id *id)
{
	struct pci_epf_test *epf_test;
	struct device *dev = &epf->dev;

	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
	if (!epf_test)
		return -ENOMEM;

	epf->header = &test_header;
	epf_test->epf = epf;

	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

	epf->event_ops = &pci_epf_test_event_ops;

	epf_set_drvdata(epf, epf_test);
	return 0;
}
924
/* EPF function operations invoked by the endpoint framework */
static const struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

/* Driver registration record for the endpoint function bus */
static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
937
/*
 * Module init: create the high-priority workqueue used by the command poller
 * (WQ_MEM_RECLAIM since transfers may run under memory pressure), then
 * register the EPF driver. The workqueue is destroyed if registration fails.
 */
static int __init pci_epf_test_init(void)
{
	int ret;

	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		destroy_workqueue(kpcitest_workqueue);
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(pci_epf_test_init);
959
/*
 * Module exit: tear down the workqueue and unregister the driver.
 * NOTE(review): the NULL check looks redundant — module_exit only runs after
 * a successful init, which guarantees kpcitest_workqueue is set. Harmless.
 */
static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);

MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");