1 // SPDX-License-Identifier: GPL-2.0
3 // Copyright (C) 2022 Microchip Technology Inc.
4 // Authors: Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>
5 // Kumaravel Thiagarajan <Kumaravel.Thiagarajan@microchip.com>
8 #include <linux/bitfield.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/iopoll.h>
11 #include <linux/irq.h>
12 #include <linux/module.h>
13 #include <linux/msi.h>
14 #include <linux/pci_regs.h>
15 #include <linux/pci.h>
16 #include <linux/spinlock.h>
17 #include <linux/spi/spi.h>
18 #include <linux/delay.h>
20 #define DRV_NAME "spi-pci1xxxx"
22 #define SYS_FREQ_DEFAULT (62500000)
24 #define PCI1XXXX_SPI_MAX_CLOCK_HZ (30000000)
25 #define PCI1XXXX_SPI_CLK_20MHZ (20000000)
26 #define PCI1XXXX_SPI_CLK_15MHZ (15000000)
27 #define PCI1XXXX_SPI_CLK_12MHZ (12000000)
28 #define PCI1XXXX_SPI_CLK_10MHZ (10000000)
29 #define PCI1XXXX_SPI_MIN_CLOCK_HZ (2000000)
31 #define PCI1XXXX_SPI_BUFFER_SIZE (320)
33 #define SPI_MST_CTL_DEVSEL_MASK (GENMASK(27, 25))
34 #define SPI_MST_CTL_CMD_LEN_MASK (GENMASK(16, 8))
35 #define SPI_MST_CTL_SPEED_MASK (GENMASK(7, 5))
36 #define SPI_MSI_VECTOR_SEL_MASK (GENMASK(4, 4))
38 #define SPI_MST_CTL_FORCE_CE (BIT(4))
39 #define SPI_MST_CTL_MODE_SEL (BIT(2))
40 #define SPI_MST_CTL_GO (BIT(0))
42 #define SPI_PERI_ADDR_BASE (0x160000)
43 #define SPI_SYSTEM_ADDR_BASE (0x2000)
44 #define SPI_MST1_ADDR_BASE (0x800)
46 #define DEV_REV_REG (SPI_SYSTEM_ADDR_BASE + 0x00)
47 #define SPI_SYSLOCK_REG (SPI_SYSTEM_ADDR_BASE + 0xA0)
48 #define SPI_CONFIG_PERI_ENABLE_REG (SPI_SYSTEM_ADDR_BASE + 0x108)
50 #define SPI_PERI_ENBLE_PF_MASK (GENMASK(17, 16))
51 #define DEV_REV_MASK (GENMASK(7, 0))
53 #define SPI_SYSLOCK BIT(4)
57 /* DMA Related Registers */
58 #define SPI_DMA_ADDR_BASE (0x1000)
59 #define SPI_DMA_GLOBAL_WR_ENGINE_EN (SPI_DMA_ADDR_BASE + 0x0C)
60 #define SPI_DMA_WR_DOORBELL_REG (SPI_DMA_ADDR_BASE + 0x10)
61 #define SPI_DMA_GLOBAL_RD_ENGINE_EN (SPI_DMA_ADDR_BASE + 0x2C)
62 #define SPI_DMA_RD_DOORBELL_REG (SPI_DMA_ADDR_BASE + 0x30)
63 #define SPI_DMA_INTR_WR_STS (SPI_DMA_ADDR_BASE + 0x4C)
64 #define SPI_DMA_WR_INT_MASK (SPI_DMA_ADDR_BASE + 0x54)
65 #define SPI_DMA_INTR_WR_CLR (SPI_DMA_ADDR_BASE + 0x58)
66 #define SPI_DMA_ERR_WR_STS (SPI_DMA_ADDR_BASE + 0x5C)
67 #define SPI_DMA_INTR_IMWR_WDONE_LOW (SPI_DMA_ADDR_BASE + 0x60)
68 #define SPI_DMA_INTR_IMWR_WDONE_HIGH (SPI_DMA_ADDR_BASE + 0x64)
69 #define SPI_DMA_INTR_IMWR_WABORT_LOW (SPI_DMA_ADDR_BASE + 0x68)
70 #define SPI_DMA_INTR_IMWR_WABORT_HIGH (SPI_DMA_ADDR_BASE + 0x6C)
71 #define SPI_DMA_INTR_WR_IMWR_DATA (SPI_DMA_ADDR_BASE + 0x70)
72 #define SPI_DMA_INTR_RD_STS (SPI_DMA_ADDR_BASE + 0xA0)
73 #define SPI_DMA_RD_INT_MASK (SPI_DMA_ADDR_BASE + 0xA8)
74 #define SPI_DMA_INTR_RD_CLR (SPI_DMA_ADDR_BASE + 0xAC)
75 #define SPI_DMA_ERR_RD_STS (SPI_DMA_ADDR_BASE + 0xB8)
76 #define SPI_DMA_INTR_IMWR_RDONE_LOW (SPI_DMA_ADDR_BASE + 0xCC)
77 #define SPI_DMA_INTR_IMWR_RDONE_HIGH (SPI_DMA_ADDR_BASE + 0xD0)
78 #define SPI_DMA_INTR_IMWR_RABORT_LOW (SPI_DMA_ADDR_BASE + 0xD4)
79 #define SPI_DMA_INTR_IMWR_RABORT_HIGH (SPI_DMA_ADDR_BASE + 0xD8)
80 #define SPI_DMA_INTR_RD_IMWR_DATA (SPI_DMA_ADDR_BASE + 0xDC)
82 #define SPI_DMA_CH0_WR_BASE (SPI_DMA_ADDR_BASE + 0x200)
83 #define SPI_DMA_CH0_RD_BASE (SPI_DMA_ADDR_BASE + 0x300)
84 #define SPI_DMA_CH1_WR_BASE (SPI_DMA_ADDR_BASE + 0x400)
85 #define SPI_DMA_CH1_RD_BASE (SPI_DMA_ADDR_BASE + 0x500)
87 #define SPI_DMA_CH_CTL1_OFFSET (0x00)
88 #define SPI_DMA_CH_XFER_LEN_OFFSET (0x08)
89 #define SPI_DMA_CH_SAR_LO_OFFSET (0x0C)
90 #define SPI_DMA_CH_SAR_HI_OFFSET (0x10)
91 #define SPI_DMA_CH_DAR_LO_OFFSET (0x14)
92 #define SPI_DMA_CH_DAR_HI_OFFSET (0x18)
94 #define SPI_DMA_CH0_DONE_INT BIT(0)
95 #define SPI_DMA_CH1_DONE_INT BIT(1)
96 #define SPI_DMA_CH0_ABORT_INT BIT(16)
97 #define SPI_DMA_CH1_ABORT_INT BIT(17)
98 #define SPI_DMA_DONE_INT_MASK (SPI_DMA_CH0_DONE_INT | SPI_DMA_CH1_DONE_INT)
99 #define SPI_DMA_ABORT_INT_MASK (SPI_DMA_CH0_ABORT_INT | SPI_DMA_CH1_ABORT_INT)
100 #define DMA_CH_CONTROL_LIE BIT(3)
101 #define DMA_CH_CONTROL_RIE BIT(4)
102 #define DMA_INTR_EN (DMA_CH_CONTROL_RIE | DMA_CH_CONTROL_LIE)
104 /* x refers to SPI Host Controller HW instance id in the below macros - 0 or 1 */
106 #define SPI_MST_CMD_BUF_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x00)
107 #define SPI_MST_RSP_BUF_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x200)
108 #define SPI_MST_CTL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x400)
109 #define SPI_MST_EVENT_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x420)
110 #define SPI_MST_EVENT_MASK_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x424)
111 #define SPI_MST_PAD_CTL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x460)
112 #define SPIALERT_MST_DB_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x464)
113 #define SPIALERT_MST_VAL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x468)
114 #define SPI_PCI_CTRL_REG_OFFSET(x) (((x) * SPI_MST1_ADDR_BASE) + 0x480)
116 #define PCI1XXXX_IRQ_FLAGS (IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE)
117 #define SPI_MAX_DATA_LEN 320
119 #define PCI1XXXX_SPI_TIMEOUT (msecs_to_jiffies(100))
120 #define SYSLOCK_RETRY_CNT (1000)
121 #define SPI_DMA_ENGINE_EN (0x1)
122 #define SPI_DMA_ENGINE_DIS (0x0)
124 #define SPI_INTR BIT(8)
125 #define SPI_FORCE_CE BIT(4)
127 #define SPI_CHIP_SEL_COUNT 7
128 #define VENDOR_ID_MCHP 0x1055
130 #define SPI_SUSPEND_CONFIG 0x101
131 #define SPI_RESUME_CONFIG 0x203
/*
 * Per-HW-instance state for one SPI host controller inside the device.
 * NOTE(review): several fields referenced elsewhere in this file (hw_inst,
 * irq, mode, clkdiv, rx_buf, tx_sgl_len/rx_sgl_len, dma_aborted_rd/wr,
 * prev_val) are elided from this extracted view — confirm against the
 * full source.
 */
133 struct pci1xxxx_spi_internal {
/* true while a transfer is running; polled by the suspend path */
138 bool spi_xfer_in_progress;
/* current position in the TX/RX scatter-gather lists for DMA chaining */
144 struct scatterlist *tx_sgl, *rx_sgl;
/* signalled from the ISR when the current chunk completes */
146 struct completion spi_xfer_done;
147 struct spi_controller *spi_host;
/* back-pointer to the shared per-PCI-function state */
148 struct pci1xxxx_spi *parent;
149 struct spi_transfer *xfer;
/* saved DEV_SEL field (SPI_MST_CTL bits 27:25) — see store_restore_config() */
151 unsigned int dev_sel : 3;
/* saved MSI vector select (SPI_PCI_CTRL bit 4) — see store_restore_config() */
152 unsigned int msi_vector_sel : 1;
/*
 * Shared state for one PCI function: register mappings, the DMA register
 * lock, and the array of per-instance controllers.
 */
156 struct pci1xxxx_spi {
/* number of SPI host instances exposed by this PCI function (from id table) */
158 u8 total_hw_instances;
/* BAR0 mapping: SPI master + system registers */
160 void __iomem *reg_base;
/* BAR2 mapping: DMA engine registers; NULL when operating in PIO mode */
161 void __iomem *dma_offset_bar;
162 /* lock to safely access the DMA registers in isr */
163 spinlock_t dma_reg_lock;
/* flexible array, one entry per HW instance, sized in probe() */
165 struct pci1xxxx_spi_internal *spi_int[] __counted_by(total_hw_instances);
/*
 * driver_data encoding (decoded in probe()):
 *   low nibble  = number of SPI HW instances on this function,
 *   high nibble = starting HW instance id.
 * Subsystem device id selects the pin-mux/strap variant.
 */
168 static const struct pci_device_id pci1xxxx_spi_pci_id_table[] = {
169 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
170 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
171 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
172 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
173 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
174 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
175 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
176 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
177 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
178 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
179 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
180 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
181 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
182 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
183 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
184 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
185 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
186 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
187 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
188 { PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
192 MODULE_DEVICE_TABLE(pci, pci1xxxx_spi_pci_id_table);
/*
 * Attempt to take the HW system lock: write the SYSLOCK bit, then read the
 * register back so the caller can test whether the bit actually stuck
 * (used as the poll step of pci1xxxx_acquire_sys_lock()).
 */
194 static int pci1xxxx_set_sys_lock(struct pci1xxxx_spi *par)
196 writel(SPI_SYSLOCK, par->reg_base + SPI_SYSLOCK_REG);
197 return readl(par->reg_base + SPI_SYSLOCK_REG);
/*
 * Poll until the SYSLOCK bit reads back as set (lock acquired), retrying
 * every 100 us for up to SYSLOCK_RETRY_CNT attempts. Returns 0 on success
 * or the readx_poll_timeout() error (-ETIMEDOUT) on failure.
 */
200 static int pci1xxxx_acquire_sys_lock(struct pci1xxxx_spi *par)
204 return readx_poll_timeout(pci1xxxx_set_sys_lock, par, regval,
205 (regval & SPI_SYSLOCK), 100,
206 SYSLOCK_RETRY_CNT * 100);
/* Drop the HW system lock by clearing the SYSLOCK register. */
209 static void pci1xxxx_release_sys_lock(struct pci1xxxx_spi *par)
211 writel(0x0, par->reg_base + SPI_SYSLOCK_REG);
/*
 * Decide whether this device/config can use the DMA engine.
 * Requirements checked here: silicon rev >= C0, SPI mapped to PF0,
 * MSI interrupts available, BAR2 (DMA registers) mappable, and a
 * 64-bit DMA mask accepted. On any soft failure the driver falls
 * back to PIO mode (warnings below).
 */
214 static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq)
216 struct pci_dev *pdev = spi_bus->dev;
222 * DEV REV Registers is a system register, HW Syslock bit
223 * should be acquired before accessing the register
225 ret = pci1xxxx_acquire_sys_lock(spi_bus);
227 dev_err(&pdev->dev, "Error failed to acquire syslock\n");
231 regval = readl(spi_bus->reg_base + DEV_REV_REG);
232 spi_bus->dev_rev = regval & DEV_REV_MASK;
233 if (spi_bus->dev_rev >= 0xC0) {
234 regval = readl(spi_bus->reg_base +
235 SPI_CONFIG_PERI_ENABLE_REG);
236 pf_num = regval & SPI_PERI_ENBLE_PF_MASK;
/* Release the lock before the capability checks below. */
239 pci1xxxx_release_sys_lock(spi_bus);
242 * DMA is supported only from C0 and SPI can use DMA only if
243 * it is mapped to PF0
245 if (spi_bus->dev_rev < 0xC0 || pf_num)
249 * DMA Supported only with MSI Interrupts
250 * One of the SPI instance's MSI vector address and data
251 * is used for DMA Interrupt
253 if (!irq_get_msi_desc(irq)) {
254 dev_warn(&pdev->dev, "Error MSI Interrupt not supported, will operate in PIO mode\n");
/* BAR2 holds the DMA engine registers. */
258 spi_bus->dma_offset_bar = pcim_iomap(pdev, 2, pci_resource_len(pdev, 2));
259 if (!spi_bus->dma_offset_bar) {
260 dev_warn(&pdev->dev, "Error failed to map dma bar, will operate in PIO mode\n");
264 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
265 dev_warn(&pdev->dev, "Error failed to set DMA mask, will operate in PIO mode\n");
/* Undo the BAR2 mapping so a NULL dma_offset_bar means "PIO only". */
266 pcim_iounmap(pdev, spi_bus->dma_offset_bar);
267 spi_bus->dma_offset_bar = NULL;
/*
 * Enable the DMA read/write engines and program the interrupt-message
 * (IMWR) address/data registers with the cached MSI message of @irq, so
 * DMA done/abort events are delivered as that MSI vector. Sets can_dma
 * on success; pci1xxxx_check_spi_can_dma() decides eligibility first.
 */
274 static int pci1xxxx_spi_dma_init(struct pci1xxxx_spi *spi_bus, int irq)
279 ret = pci1xxxx_check_spi_can_dma(spi_bus, irq);
283 spin_lock_init(&spi_bus->dma_reg_lock);
/* Reuse the MSI address/data already programmed for this vector. */
284 get_cached_msi_msg(irq, &msi);
285 writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
286 writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
287 writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_HIGH);
288 writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_HIGH);
289 writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_HIGH);
290 writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_HIGH);
291 writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_LOW);
292 writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_LOW);
293 writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_LOW);
294 writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_LOW);
295 writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA);
296 writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA);
/* Each DMA segment must fit the controller's 320-byte command buffer. */
297 dma_set_max_seg_size(&spi_bus->dev->dev, PCI1XXXX_SPI_BUFFER_SIZE);
298 spi_bus->can_dma = true;
/*
 * spi_controller->set_cs hook: assert/deassert chip-select by toggling
 * FORCE_CE and programming the DEV_SEL field (bits 27:25) of SPI_MST_CTL
 * with the device's chip-select number.
 */
302 static void pci1xxxx_spi_set_cs(struct spi_device *spi, bool enable)
304 struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi->controller);
305 struct pci1xxxx_spi *par = p->parent;
308 /* Set the DEV_SEL bits of the SPI_MST_CTL_REG */
309 regval = readl(par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
311 regval |= SPI_FORCE_CE;
312 regval &= ~SPI_MST_CTL_DEVSEL_MASK;
/* Shift by 25 places the CS number into SPI_MST_CTL_DEVSEL_MASK (27:25). */
313 regval |= (spi_get_chipselect(spi, 0) << 25);
315 regval &= ~SPI_FORCE_CE;
317 writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
/*
 * Map a requested SCK frequency to the 3-bit clock-divider code written
 * into SPI_MST_CTL_SPEED_MASK. Buckets: >=30 MHz, 20-30, 15-20, 12-15,
 * 10-12, 2-10 MHz. NOTE(review): the return statements for each bucket
 * (and the <2 MHz fallback) are elided from this extracted view.
 */
320 static u8 pci1xxxx_get_clock_div(u32 hz)
324 if (hz >= PCI1XXXX_SPI_MAX_CLOCK_HZ)
326 else if ((hz < PCI1XXXX_SPI_MAX_CLOCK_HZ) && (hz >= PCI1XXXX_SPI_CLK_20MHZ))
328 else if ((hz < PCI1XXXX_SPI_CLK_20MHZ) && (hz >= PCI1XXXX_SPI_CLK_15MHZ))
330 else if ((hz < PCI1XXXX_SPI_CLK_15MHZ) && (hz >= PCI1XXXX_SPI_CLK_12MHZ))
332 else if ((hz < PCI1XXXX_SPI_CLK_12MHZ) && (hz >= PCI1XXXX_SPI_CLK_10MHZ))
334 else if ((hz < PCI1XXXX_SPI_CLK_10MHZ) && (hz >= PCI1XXXX_SPI_MIN_CLOCK_HZ))
/*
 * Program a DMA *read* channel (host memory -> device): source is the
 * host buffer at @dma_addr, destination is this instance's SPI command
 * buffer inside the peripheral address space. Channel 0/1 is chosen by
 * HW instance (selection condition at elided line 347). The doorbell is
 * rung by the caller, not here.
 */
342 static void pci1xxxx_spi_setup_dma_to_io(struct pci1xxxx_spi_internal *p,
343 dma_addr_t dma_addr, u32 len)
348 base = p->parent->dma_offset_bar + SPI_DMA_CH0_RD_BASE;
350 base = p->parent->dma_offset_bar + SPI_DMA_CH1_RD_BASE;
/* Enable local+remote done interrupts for this channel. */
352 writel(DMA_INTR_EN, base + SPI_DMA_CH_CTL1_OFFSET);
353 writel(len, base + SPI_DMA_CH_XFER_LEN_OFFSET);
354 writel(lower_32_bits(dma_addr), base + SPI_DMA_CH_SAR_LO_OFFSET);
355 writel(upper_32_bits(dma_addr), base + SPI_DMA_CH_SAR_HI_OFFSET);
356 /* Updated SPI Command Registers */
357 writel(lower_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_CMD_BUF_OFFSET(p->hw_inst)),
358 base + SPI_DMA_CH_DAR_LO_OFFSET);
359 writel(upper_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_CMD_BUF_OFFSET(p->hw_inst)),
360 base + SPI_DMA_CH_DAR_HI_OFFSET);
/*
 * Program a DMA *write* channel (device -> host memory): source is this
 * instance's SPI response buffer in the peripheral address space,
 * destination is the host buffer at @dma_addr. Mirror image of
 * pci1xxxx_spi_setup_dma_to_io(); channel select condition is elided.
 */
363 static void pci1xxxx_spi_setup_dma_from_io(struct pci1xxxx_spi_internal *p,
364 dma_addr_t dma_addr, u32 len)
369 base = p->parent->dma_offset_bar + SPI_DMA_CH0_WR_BASE;
371 base = p->parent->dma_offset_bar + SPI_DMA_CH1_WR_BASE;
373 writel(DMA_INTR_EN, base + SPI_DMA_CH_CTL1_OFFSET);
374 writel(len, base + SPI_DMA_CH_XFER_LEN_OFFSET);
375 writel(lower_32_bits(dma_addr), base + SPI_DMA_CH_DAR_LO_OFFSET);
376 writel(upper_32_bits(dma_addr), base + SPI_DMA_CH_DAR_HI_OFFSET);
/* Source: the instance's response buffer in peripheral space. */
377 writel(lower_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_RSP_BUF_OFFSET(p->hw_inst)),
378 base + SPI_DMA_CH_SAR_LO_OFFSET);
379 writel(upper_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_RSP_BUF_OFFSET(p->hw_inst)),
380 base + SPI_DMA_CH_SAR_HI_OFFSET);
/*
 * Program SPI_MST_CTL for the next chunk: SPI mode (MODE_SEL set only for
 * SPI_MODE_3), transfer length (CMD_LEN, bits 16:8) and clock divider
 * (SPEED, bits 7:5). Does not start the transfer — see
 * pci1xxxx_start_spi_xfer().
 */
383 static void pci1xxxx_spi_setup(struct pci1xxxx_spi *par, u8 hw_inst, u32 mode,
388 regval = readl(par->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
389 regval &= ~(SPI_MST_CTL_MODE_SEL | SPI_MST_CTL_CMD_LEN_MASK |
390 SPI_MST_CTL_SPEED_MASK);
392 if (mode == SPI_MODE_3)
393 regval |= SPI_MST_CTL_MODE_SEL;
395 regval |= FIELD_PREP(SPI_MST_CTL_CMD_LEN_MASK, len);
396 regval |= FIELD_PREP(SPI_MST_CTL_SPEED_MASK, clkdiv);
397 writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
/* Kick off the transfer programmed by pci1xxxx_spi_setup(): set the GO bit. */
400 static void pci1xxxx_start_spi_xfer(struct pci1xxxx_spi_internal *p, u8 hw_inst)
404 regval = readl(p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
405 regval |= SPI_MST_CTL_GO;
406 writel(regval, p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
/*
 * PIO transfer path: split the transfer into SPI_MAX_DATA_LEN (320-byte)
 * chunks; for each chunk copy TX data into the command buffer with MMIO,
 * start the transfer, wait for the xfer-done interrupt, then copy RX data
 * out of the response buffer. Error/timeout handling lines are elided in
 * this extracted view.
 */
409 static int pci1xxxx_spi_transfer_with_io(struct spi_controller *spi_ctlr,
410 struct spi_device *spi, struct spi_transfer *xfer)
412 struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
413 struct pci1xxxx_spi *par = p->parent;
414 int len, loop_iter, transfer_len;
415 unsigned long bytes_transfered;
416 unsigned long bytes_recvd;
417 unsigned long loop_count;
423 p->spi_xfer_in_progress = true;
425 clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
426 tx_buf = xfer->tx_buf;
427 rx_buf = xfer->rx_buf;
428 transfer_len = xfer->len;
/* Read-then-write-back clears any stale event bits (W1C register). */
429 regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
430 writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
433 bytes_transfered = 0;
/* One extra iteration for the trailing partial chunk, if any. */
435 loop_count = transfer_len / SPI_MAX_DATA_LEN;
436 if (transfer_len % SPI_MAX_DATA_LEN != 0)
439 for (loop_iter = 0; loop_iter < loop_count; loop_iter++) {
440 len = SPI_MAX_DATA_LEN;
441 if ((transfer_len % SPI_MAX_DATA_LEN != 0) &&
442 (loop_iter == loop_count - 1))
443 len = transfer_len % SPI_MAX_DATA_LEN;
445 reinit_completion(&p->spi_xfer_done);
446 memcpy_toio(par->reg_base + SPI_MST_CMD_BUF_OFFSET(p->hw_inst),
447 &tx_buf[bytes_transfered], len);
448 bytes_transfered += len;
449 pci1xxxx_spi_setup(par, p->hw_inst, spi->mode, clkdiv, len);
450 pci1xxxx_start_spi_xfer(p, p->hw_inst);
452 /* Wait for the xfer-done (SPI_INTR) interrupt from the ISR */
453 result = wait_for_completion_timeout(&p->spi_xfer_done,
454 PCI1XXXX_SPI_TIMEOUT);
459 memcpy_fromio(&rx_buf[bytes_recvd], par->reg_base +
460 SPI_MST_RSP_BUF_OFFSET(p->hw_inst), len);
465 p->spi_xfer_in_progress = false;
/*
 * DMA transfer path: program the first TX (and RX, when present)
 * scatter-gather segment, ring the read-channel doorbell to move TX data
 * into the command buffer, and wait for completion. Subsequent segments
 * are chained from the ISR via pci1xxxx_spi_setup_next_dma_transfer().
 * On abort, the affected DMA engine is reset (disable, poll for 0,
 * re-enable) before returning. Several lines (timeout/error branches)
 * are elided in this extracted view.
 */
470 static int pci1xxxx_spi_transfer_with_dma(struct spi_controller *spi_ctlr,
471 struct spi_device *spi,
472 struct spi_transfer *xfer)
474 struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
475 struct pci1xxxx_spi *par = p->parent;
476 dma_addr_t rx_dma_addr = 0;
477 dma_addr_t tx_dma_addr = 0;
481 p->spi_xfer_in_progress = true;
482 p->tx_sgl = xfer->tx_sg.sgl;
483 p->rx_sgl = xfer->rx_sg.sgl;
484 p->rx_buf = xfer->rx_buf;
/* Clear stale event bits (W1C) before starting. */
485 regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
486 writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
/* DMA needs a mapped TX list (controller is SPI_CONTROLLER_MUST_TX). */
488 if (!xfer->tx_buf || !p->tx_sgl) {
494 p->clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
496 p->rx_buf = xfer->rx_buf;
497 regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
498 writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
/* Program the first SG segment on each direction. */
500 tx_dma_addr = sg_dma_address(p->tx_sgl);
501 rx_dma_addr = sg_dma_address(p->rx_sgl);
502 p->tx_sgl_len = sg_dma_len(p->tx_sgl);
503 p->rx_sgl_len = sg_dma_len(p->rx_sgl);
504 pci1xxxx_spi_setup(par, p->hw_inst, p->mode, p->clkdiv, p->tx_sgl_len);
505 pci1xxxx_spi_setup_dma_to_io(p, (tx_dma_addr), p->tx_sgl_len);
507 pci1xxxx_spi_setup_dma_from_io(p, rx_dma_addr, p->rx_sgl_len);
/* Ring the read doorbell: DMA moves TX data into the command buffer. */
508 writel(p->hw_inst, par->dma_offset_bar + SPI_DMA_RD_DOORBELL_REG);
510 reinit_completion(&p->spi_xfer_done);
511 /* Wait for DMA_TERM interrupt */
512 ret = wait_for_completion_timeout(&p->spi_xfer_done, PCI1XXXX_SPI_TIMEOUT);
515 if (p->dma_aborted_rd) {
516 writel(SPI_DMA_ENGINE_DIS,
517 par->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
519 * DMA ENGINE reset takes time if any TLP
520 * completion is in progress; wait
521 * until the DMA engine reset finishes.
523 ret = readl_poll_timeout(par->dma_offset_bar +
524 SPI_DMA_GLOBAL_RD_ENGINE_EN, regval,
525 (regval == 0x0), 0, USEC_PER_MSEC);
530 writel(SPI_DMA_ENGINE_EN,
531 par->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
532 p->dma_aborted_rd = false;
535 if (p->dma_aborted_wr) {
536 writel(SPI_DMA_ENGINE_DIS,
537 par->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
540 * DMA ENGINE reset takes time if any TLP
541 * completion is in progress; wait
542 * until the DMA engine reset finishes.
544 ret = readl_poll_timeout(par->dma_offset_bar +
545 SPI_DMA_GLOBAL_WR_ENGINE_EN, regval,
546 (regval == 0x0), 0, USEC_PER_MSEC);
552 writel(SPI_DMA_ENGINE_EN,
553 par->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
554 p->dma_aborted_wr = false;
562 p->spi_xfer_in_progress = false;
/*
 * spi_controller->transfer_one hook: route to the DMA path when this
 * transfer is DMA-capable and the core has mapped its buffers
 * (cur_msg_mapped), otherwise fall back to PIO.
 */
567 static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
568 struct spi_device *spi, struct spi_transfer *xfer)
570 if (spi_ctlr->can_dma(spi_ctlr, spi, xfer) && spi_ctlr->cur_msg_mapped)
571 return pci1xxxx_spi_transfer_with_dma(spi_ctlr, spi, xfer);
573 return pci1xxxx_spi_transfer_with_io(spi_ctlr, spi, xfer);
/*
 * Interrupt handler for the PIO path. On SPI_INTR (transfer done):
 * if DMA is available and there is RX data, ring the write doorbell to
 * drain the response buffer to host memory; otherwise just complete the
 * waiter. The event register is write-1-to-clear.
 */
576 static irqreturn_t pci1xxxx_spi_isr_io(int irq, void *dev)
578 struct pci1xxxx_spi_internal *p = dev;
579 irqreturn_t spi_int_fired = IRQ_NONE;
582 /* Clear the SPI GO_BIT Interrupt */
583 regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
584 if (regval & SPI_INTR) {
585 /* Clear xfer_done */
586 if (p->parent->can_dma && p->rx_buf)
587 writel(p->hw_inst, p->parent->dma_offset_bar +
588 SPI_DMA_WR_DOORBELL_REG);
590 complete(&p->parent->spi_int[p->hw_inst]->spi_xfer_done);
591 spi_int_fired = IRQ_HANDLED;
/* Write back what we read: W1C acknowledges exactly the seen events. */
593 writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
594 return spi_int_fired;
/*
 * Called from the DMA ISR when a write-channel segment finishes: advance
 * to the next TX/RX scatter-gather segments and program them, or complete
 * the waiter when the list is exhausted. SPI_MST_CTL is reprogrammed only
 * when the segment length changes. The branch structure around the
 * elided lines (602/604/606, 609, 617) determines which path runs.
 */
597 static void pci1xxxx_spi_setup_next_dma_transfer(struct pci1xxxx_spi_internal *p)
599 dma_addr_t tx_dma_addr = 0;
600 dma_addr_t rx_dma_addr = 0;
603 p->tx_sgl = sg_next(p->tx_sgl);
605 p->rx_sgl = sg_next(p->rx_sgl);
607 /* Clear xfer_done */
608 complete(&p->spi_xfer_done);
610 tx_dma_addr = sg_dma_address(p->tx_sgl);
611 prev_len = p->tx_sgl_len;
612 p->tx_sgl_len = sg_dma_len(p->tx_sgl);
/* Only rewrite CMD_LEN when the segment size actually changed. */
613 if (prev_len != p->tx_sgl_len)
614 pci1xxxx_spi_setup(p->parent,
615 p->hw_inst, p->mode, p->clkdiv, p->tx_sgl_len);
616 pci1xxxx_spi_setup_dma_to_io(p, tx_dma_addr, p->tx_sgl_len);
618 rx_dma_addr = sg_dma_address(p->rx_sgl);
619 p->rx_sgl_len = sg_dma_len(p->rx_sgl);
620 pci1xxxx_spi_setup_dma_from_io(p, rx_dma_addr, p->rx_sgl_len);
/* Ring the read doorbell to stage the next TX segment. */
622 writel(p->hw_inst, p->parent->dma_offset_bar + SPI_DMA_RD_DOORBELL_REG);
/*
 * Interrupt handler for the DMA path. Three sources, all acknowledged
 * here (DMA status regs are W1C):
 *  - DMA read done: TX data landed in the command buffer -> start the
 *    SPI transfer for the signalling channel;
 *  - DMA write done: RX data landed in host memory -> chain the next SG
 *    segment via pci1xxxx_spi_setup_next_dma_transfer();
 *  - SPI_INTR: shift finished -> ring the write doorbell to drain RX.
 * Abort bits set dma_aborted_rd/wr for the waiting transfer to clean up.
 * The DMA register section is serialized by dma_reg_lock since both
 * instances' vectors can fire concurrently.
 */
626 static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev)
628 struct pci1xxxx_spi_internal *p = dev;
629 irqreturn_t spi_int_fired = IRQ_NONE;
633 spin_lock_irqsave(&p->parent->dma_reg_lock, flags);
634 /* Clear the DMA RD INT and start spi xfer*/
635 regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_RD_STS);
636 if (regval & SPI_DMA_DONE_INT_MASK) {
637 if (regval & SPI_DMA_CH0_DONE_INT)
638 pci1xxxx_start_spi_xfer(p, SPI0);
639 if (regval & SPI_DMA_CH1_DONE_INT)
640 pci1xxxx_start_spi_xfer(p, SPI1);
641 spi_int_fired = IRQ_HANDLED;
643 if (regval & SPI_DMA_ABORT_INT_MASK) {
644 p->dma_aborted_rd = true;
645 spi_int_fired = IRQ_HANDLED;
647 writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_RD_CLR);
649 /* Clear the DMA WR INT */
650 regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_WR_STS);
651 if (regval & SPI_DMA_DONE_INT_MASK) {
652 if (regval & SPI_DMA_CH0_DONE_INT)
653 pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI0]);
655 if (regval & SPI_DMA_CH1_DONE_INT)
656 pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI1]);
658 spi_int_fired = IRQ_HANDLED;
660 if (regval & SPI_DMA_ABORT_INT_MASK) {
661 p->dma_aborted_wr = true;
662 spi_int_fired = IRQ_HANDLED;
664 writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_WR_CLR);
665 spin_unlock_irqrestore(&p->parent->dma_reg_lock, flags);
667 /* Clear the SPI GO_BIT Interrupt */
668 regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
669 if (regval & SPI_INTR) {
/* Shift done: drain the response buffer to host memory via DMA WR. */
670 writel(p->hw_inst, p->parent->dma_offset_bar + SPI_DMA_WR_DOORBELL_REG);
671 spi_int_fired = IRQ_HANDLED;
673 writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
674 return spi_int_fired;
/*
 * Top-level IRQ handler: dispatch to the DMA or PIO sub-handler based on
 * whether the in-flight transfer (p->xfer) qualifies for DMA.
 */
677 static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
679 struct pci1xxxx_spi_internal *p = dev;
681 if (p->spi_host->can_dma(p->spi_host, NULL, p->xfer))
682 return pci1xxxx_spi_isr_dma(irq, dev);
684 return pci1xxxx_spi_isr_io(irq, dev);
/*
 * spi_controller->can_dma hook.
 * NOTE(review): the return expression is elided from this extracted view;
 * presumably it tests par->can_dma (set in pci1xxxx_spi_dma_init()) and
 * possibly the transfer length — confirm against the full source.
 */
687 static bool pci1xxxx_spi_can_dma(struct spi_controller *host,
688 struct spi_device *spi,
689 struct spi_transfer *xfer)
691 struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(host);
692 struct pci1xxxx_spi *par = p->parent;
/*
 * Probe: decode instance count/start from driver_data, allocate the
 * shared state plus one spi_controller per HW instance, map BAR0, set up
 * MSI vectors and per-instance IRQ handlers, optionally initialize DMA
 * (first iteration only), then register each controller. Uses managed
 * (devm_/pcim_) resources throughout; only pci_request_regions() needs
 * the explicit release on the error path. Some error-handling and
 * first-instance-only lines are elided in this extracted view.
 */
697 static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
699 u8 hw_inst_cnt, iter, start, only_sec_inst;
700 struct pci1xxxx_spi_internal *spi_sub_ptr;
701 struct device *dev = &pdev->dev;
702 struct pci1xxxx_spi *spi_bus;
703 struct spi_controller *spi_host;
/* driver_data: low nibble = instance count, high nibble = start id. */
707 hw_inst_cnt = ent->driver_data & 0x0f;
708 start = (ent->driver_data & 0xf0) >> 4;
714 spi_bus = devm_kzalloc(&pdev->dev,
715 struct_size(spi_bus, spi_int, hw_inst_cnt),
721 spi_bus->total_hw_instances = hw_inst_cnt;
722 pci_set_master(pdev);
724 for (iter = 0; iter < hw_inst_cnt; iter++) {
725 spi_bus->spi_int[iter] = devm_kzalloc(&pdev->dev,
726 sizeof(struct pci1xxxx_spi_internal),
728 if (!spi_bus->spi_int[iter])
730 spi_sub_ptr = spi_bus->spi_int[iter];
731 spi_sub_ptr->spi_host = devm_spi_alloc_host(dev, sizeof(struct spi_controller))(
732 if (!spi_sub_ptr->spi_host)
735 spi_sub_ptr->parent = spi_bus;
736 spi_sub_ptr->spi_xfer_in_progress = false;
/* One-time PCI/BAR/MSI setup, done on the first iteration. */
739 ret = pcim_enable_device(pdev);
743 ret = pci_request_regions(pdev, DRV_NAME);
747 spi_bus->reg_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
748 if (!spi_bus->reg_base) {
/* One MSI vector per HW instance — exact count required. */
753 ret = pci_alloc_irq_vectors(pdev, hw_inst_cnt, hw_inst_cnt,
756 dev_err(&pdev->dev, "Error allocating MSI vectors\n");
760 init_completion(&spi_sub_ptr->spi_xfer_done);
761 /* Initialize Interrupts - SPI_INT */
762 regval = readl(spi_bus->reg_base +
763 SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
765 writel(regval, spi_bus->reg_base +
766 SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
767 spi_sub_ptr->irq = pci_irq_vector(pdev, 0);
769 ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
770 pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
771 pci_name(pdev), spi_sub_ptr);
773 dev_err(&pdev->dev, "Unable to request irq : %d",
/* DMA is optional: -EOPNOTSUPP just means fall back to PIO. */
779 ret = pci1xxxx_spi_dma_init(spi_bus, spi_sub_ptr->irq);
780 if (ret && ret != -EOPNOTSUPP)
783 /* This register is only applicable for 1st instance */
784 regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
790 writel(regval, spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
793 spi_sub_ptr->hw_inst = start++;
/* Per-instance interrupt setup for the 2nd and later instances. */
796 init_completion(&spi_sub_ptr->spi_xfer_done);
797 /* Initialize Interrupts - SPI_INT */
798 regval = readl(spi_bus->reg_base +
799 SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
801 writel(regval, spi_bus->reg_base +
802 SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
803 spi_sub_ptr->irq = pci_irq_vector(pdev, iter);
804 ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
805 pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
806 pci_name(pdev), spi_sub_ptr);
808 dev_err(&pdev->dev, "Unable to request irq : %d",
/* Populate the SPI core hooks/capabilities and register. */
815 spi_host = spi_sub_ptr->spi_host;
816 spi_host->num_chipselect = SPI_CHIP_SEL_COUNT;
817 spi_host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_RX_DUAL |
818 SPI_TX_DUAL | SPI_LOOP;
819 spi_host->can_dma = pci1xxxx_spi_can_dma;
820 spi_host->transfer_one = pci1xxxx_spi_transfer_one;
822 spi_host->set_cs = pci1xxxx_spi_set_cs;
823 spi_host->bits_per_word_mask = SPI_BPW_MASK(8);
824 spi_host->max_speed_hz = PCI1XXXX_SPI_MAX_CLOCK_HZ;
825 spi_host->min_speed_hz = PCI1XXXX_SPI_MIN_CLOCK_HZ;
826 spi_host->flags = SPI_CONTROLLER_MUST_TX;
827 spi_controller_set_devdata(spi_host, spi_sub_ptr);
828 ret = devm_spi_register_controller(dev, spi_host);
832 pci_set_drvdata(pdev, spi_bus);
/* Error path: regions were requested manually, release them here. */
837 pci_release_regions(pdev);
/*
 * Suspend/resume helper: on store (suspend) capture the DEV_SEL field of
 * SPI_MST_CTL and the MSI vector select bit of SPI_PCI_CTRL into
 * prev_val; on restore (resume) write them back. The store/restore
 * branch condition line is elided in this extracted view.
 */
841 static void store_restore_config(struct pci1xxxx_spi *spi_ptr,
842 struct pci1xxxx_spi_internal *spi_sub_ptr,
/* --- store: capture current hardware state --- */
848 regval = readl(spi_ptr->reg_base +
849 SPI_MST_CTL_REG_OFFSET(spi_sub_ptr->hw_inst));
850 regval &= SPI_MST_CTL_DEVSEL_MASK;
851 spi_sub_ptr->prev_val.dev_sel = (regval >> 25) & 7;
852 regval = readl(spi_ptr->reg_base +
853 SPI_PCI_CTRL_REG_OFFSET(spi_sub_ptr->hw_inst));
854 regval &= SPI_MSI_VECTOR_SEL_MASK;
855 spi_sub_ptr->prev_val.msi_vector_sel = (regval >> 4) & 1;
/* --- restore: write the captured state back --- */
857 regval = readl(spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
858 regval &= ~SPI_MST_CTL_DEVSEL_MASK;
859 regval |= (spi_sub_ptr->prev_val.dev_sel << 25);
861 spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
862 writel((spi_sub_ptr->prev_val.msi_vector_sel << 4),
863 spi_ptr->reg_base + SPI_PCI_CTRL_REG_OFFSET(inst));
/*
 * System resume: for each instance, resume the SPI core queue, re-enable
 * the event mask (SPI_RESUME_CONFIG), and restore the register state
 * saved at suspend time.
 */
867 static int pci1xxxx_spi_resume(struct device *dev)
869 struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
870 struct pci1xxxx_spi_internal *spi_sub_ptr;
871 u32 regval = SPI_RESUME_CONFIG;
874 for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
875 spi_sub_ptr = spi_ptr->spi_int[iter];
876 spi_controller_resume(spi_sub_ptr->spi_host);
877 writel(regval, spi_ptr->reg_base +
878 SPI_MST_EVENT_MASK_REG_OFFSET(iter));
880 /* Restore config at resume */
881 store_restore_config(spi_ptr, spi_sub_ptr, iter, 0);
/*
 * System suspend: for each instance, wait for any in-flight transfer to
 * drain, save the register state, suspend the SPI core queue, and mask
 * events (SPI_SUSPEND_CONFIG). The body of the busy-wait loop (line 898)
 * is elided in this extracted view.
 */
887 static int pci1xxxx_spi_suspend(struct device *dev)
889 struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
890 struct pci1xxxx_spi_internal *spi_sub_ptr;
891 u32 reg1 = SPI_SUSPEND_CONFIG;
894 for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
895 spi_sub_ptr = spi_ptr->spi_int[iter];
/* Block until the current transfer finishes before saving state. */
897 while (spi_sub_ptr->spi_xfer_in_progress)
900 /* Store existing config before suspend */
901 store_restore_config(spi_ptr, spi_sub_ptr, iter, 1);
902 spi_controller_suspend(spi_sub_ptr->spi_host);
903 writel(reg1, spi_ptr->reg_base +
904 SPI_MST_EVENT_MASK_REG_OFFSET(iter));
/* Sleep-only PM ops (no runtime PM); compiled out when !CONFIG_PM_SLEEP. */
910 static DEFINE_SIMPLE_DEV_PM_OPS(spi_pm_ops, pci1xxxx_spi_suspend,
911 pci1xxxx_spi_resume);
/* PCI driver glue; .name/.remove lines are elided in this extracted view. */
913 static struct pci_driver pci1xxxx_spi_driver = {
915 .id_table = pci1xxxx_spi_pci_id_table,
916 .probe = pci1xxxx_spi_probe,
918 .pm = pm_sleep_ptr(&spi_pm_ops),
922 module_pci_driver(pci1xxxx_spi_driver);
924 MODULE_DESCRIPTION("Microchip Technology Inc. pci1xxxx SPI bus driver");
925 MODULE_AUTHOR("Tharun Kumar P<tharunkumar.pasumarthi@microchip.com>");
926 MODULE_AUTHOR("Kumaravel Thiagarajan<kumaravel.thiagarajan@microchip.com>");
927 MODULE_LICENSE("GPL v2");