Commit | Line | Data |
---|---|---|
c942fddf | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
f8043872 CB |
2 | /* |
3 | * Driver for Broadcom BCM2835 SPI Controllers | |
4 | * | |
5 | * Copyright (C) 2012 Chris Boot | |
6 | * Copyright (C) 2013 Stephen Warren | |
e34ff011 | 7 | * Copyright (C) 2015 Martin Sperl |
f8043872 CB |
8 | * |
9 | * This driver is inspired by: | |
10 | * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org> | |
11 | * spi-atmel.c, Copyright (C) 2006 Atmel Corporation | |
f8043872 CB |
12 | */ |
13 | ||
14 | #include <linux/clk.h> | |
15 | #include <linux/completion.h> | |
154f7da5 | 16 | #include <linux/debugfs.h> |
f8043872 | 17 | #include <linux/delay.h> |
3ecd37ed MS |
18 | #include <linux/dma-mapping.h> |
19 | #include <linux/dmaengine.h> | |
f8043872 CB |
20 | #include <linux/err.h> |
21 | #include <linux/interrupt.h> | |
22 | #include <linux/io.h> | |
23 | #include <linux/kernel.h> | |
24 | #include <linux/module.h> | |
25 | #include <linux/of.h> | |
3ecd37ed | 26 | #include <linux/of_address.h> |
f8043872 | 27 | #include <linux/of_device.h> |
3bd158c5 LW |
28 | #include <linux/gpio/consumer.h> |
29 | #include <linux/gpio/machine.h> /* FIXME: using chip internals */ | |
30 | #include <linux/gpio/driver.h> /* FIXME: using chip internals */ | |
3ecd37ed | 31 | #include <linux/of_irq.h> |
f8043872 CB |
32 | #include <linux/spi/spi.h> |
33 | ||
34 | /* SPI register offsets */ | |
35 | #define BCM2835_SPI_CS 0x00 | |
36 | #define BCM2835_SPI_FIFO 0x04 | |
37 | #define BCM2835_SPI_CLK 0x08 | |
38 | #define BCM2835_SPI_DLEN 0x0c | |
39 | #define BCM2835_SPI_LTOH 0x10 | |
40 | #define BCM2835_SPI_DC 0x14 | |
41 | ||
42 | /* Bitfields in CS */ | |
43 | #define BCM2835_SPI_CS_LEN_LONG 0x02000000 | |
44 | #define BCM2835_SPI_CS_DMA_LEN 0x01000000 | |
45 | #define BCM2835_SPI_CS_CSPOL2 0x00800000 | |
46 | #define BCM2835_SPI_CS_CSPOL1 0x00400000 | |
47 | #define BCM2835_SPI_CS_CSPOL0 0x00200000 | |
48 | #define BCM2835_SPI_CS_RXF 0x00100000 | |
49 | #define BCM2835_SPI_CS_RXR 0x00080000 | |
50 | #define BCM2835_SPI_CS_TXD 0x00040000 | |
51 | #define BCM2835_SPI_CS_RXD 0x00020000 | |
52 | #define BCM2835_SPI_CS_DONE 0x00010000 | |
53 | #define BCM2835_SPI_CS_LEN 0x00002000 | |
54 | #define BCM2835_SPI_CS_REN 0x00001000 | |
55 | #define BCM2835_SPI_CS_ADCS 0x00000800 | |
56 | #define BCM2835_SPI_CS_INTR 0x00000400 | |
57 | #define BCM2835_SPI_CS_INTD 0x00000200 | |
58 | #define BCM2835_SPI_CS_DMAEN 0x00000100 | |
59 | #define BCM2835_SPI_CS_TA 0x00000080 | |
60 | #define BCM2835_SPI_CS_CSPOL 0x00000040 | |
61 | #define BCM2835_SPI_CS_CLEAR_RX 0x00000020 | |
62 | #define BCM2835_SPI_CS_CLEAR_TX 0x00000010 | |
63 | #define BCM2835_SPI_CS_CPOL 0x00000008 | |
64 | #define BCM2835_SPI_CS_CPHA 0x00000004 | |
65 | #define BCM2835_SPI_CS_CS_10 0x00000002 | |
66 | #define BCM2835_SPI_CS_CS_01 0x00000001 | |
67 | ||
2e0733bc LW |
68 | #define BCM2835_SPI_FIFO_SIZE 64 |
69 | #define BCM2835_SPI_FIFO_SIZE_3_4 48 | |
3ecd37ed | 70 | #define BCM2835_SPI_DMA_MIN_LENGTH 96 |
6935224d MS |
71 | #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ |
72 | | SPI_NO_CS | SPI_3WIRE) | |
f8043872 CB |
73 | |
74 | #define DRV_NAME "spi-bcm2835" | |
75 | ||
/*
 * Polling limit: transfers expected to take at most this many microseconds
 * are executed in busy-polling mode; longer ones use interrupt or DMA mode.
 * Runtime-tunable module parameter (mode 0664).
 */
static unsigned int polling_limit_us = 30;
module_param(polling_limit_us, uint, 0664);
MODULE_PARM_DESC(polling_limit_us,
		 "time in us to run a transfer in polling mode\n");
81 | ||
/**
 * struct bcm2835_spi - BCM2835 SPI controller
 * @regs: base address of register map
 * @clk: core clock, divided to calculate serial clock
 * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
 * @tfr: SPI transfer currently processed
 * @ctlr: SPI controller reverse lookup
 * @tx_buf: pointer whence next transmitted byte is read
 * @rx_buf: pointer where next received byte is written
 * @tx_len: remaining bytes to transmit
 * @rx_len: remaining bytes to receive
 * @tx_prologue: bytes transmitted without DMA if first TX sglist entry's
 *	length is not a multiple of 4 (to overcome hardware limitation)
 * @rx_prologue: bytes received without DMA if first RX sglist entry's
 *	length is not a multiple of 4 (to overcome hardware limitation)
 * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
 * @debugfs_dir: the debugfs directory - needed to remove debugfs when
 *	unloading the module
 * @count_transfer_polling: count of how often polling mode is used
 * @count_transfer_irq: count of how often interrupt mode is used
 * @count_transfer_irq_after_polling: count of how often we fall back to
 *	interrupt mode after starting in polling mode.
 *	These are counted as well in @count_transfer_polling and
 *	@count_transfer_irq
 * @count_transfer_dma: count how often dma mode is used
 * @slv: SPI slave currently selected
 *	(used by bcm2835_spi_dma_tx_done() to write @clear_rx_cs)
 * @tx_dma_active: whether a TX DMA descriptor is in progress
 * @rx_dma_active: whether a RX DMA descriptor is in progress
 *	(used by bcm2835_spi_dma_tx_done() to handle a race)
 * @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
 *	(cyclically copies from zero page to TX FIFO)
 * @fill_tx_addr: bus address of zero page
 */
struct bcm2835_spi {
	void __iomem *regs;
	struct clk *clk;
	int irq;
	struct spi_transfer *tfr;
	struct spi_controller *ctlr;
	const u8 *tx_buf;
	u8 *rx_buf;
	int tx_len;
	int rx_len;
	int tx_prologue;
	int rx_prologue;
	unsigned int tx_spillover;

	struct dentry *debugfs_dir;
	u64 count_transfer_polling;
	u64 count_transfer_irq;
	u64 count_transfer_irq_after_polling;
	u64 count_transfer_dma;

	struct bcm2835_spidev *slv;
	unsigned int tx_dma_active;
	unsigned int rx_dma_active;
	struct dma_async_tx_descriptor *fill_tx_desc;
	dma_addr_t fill_tx_addr;
};
142 | ||
/**
 * struct bcm2835_spidev - BCM2835 SPI slave
 * @prepare_cs: precalculated CS register value for ->prepare_message()
 *	(uses slave-specific clock polarity and phase settings)
 * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
 *	(cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
 * @clear_rx_addr: bus address of @clear_rx_cs
 * @clear_rx_cs: precalculated CS register value to clear RX FIFO
 *	(uses slave-specific clock polarity and phase settings)
 */
struct bcm2835_spidev {
	u32 prepare_cs;
	struct dma_async_tx_descriptor *clear_rx_desc;
	dma_addr_t clear_rx_addr;
	/* cacheline-aligned so the word can be DMA-mapped on its own */
	u32 clear_rx_cs ____cacheline_aligned;
};
159 | ||
#if defined(CONFIG_DEBUG_FS)
/*
 * Create the per-controller debugfs directory ("spi-bcm2835-<dname>")
 * exposing the transfer-mode usage counters as read-only u64 files.
 */
static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
				   const char *dname)
{
	char name[64];
	struct dentry *dir;

	/* get full name */
	snprintf(name, sizeof(name), "spi-bcm2835-%s", dname);

	/* the base directory */
	dir = debugfs_create_dir(name, NULL);
	bs->debugfs_dir = dir;

	/* the counters */
	debugfs_create_u64("count_transfer_polling", 0444, dir,
			   &bs->count_transfer_polling);
	debugfs_create_u64("count_transfer_irq", 0444, dir,
			   &bs->count_transfer_irq);
	debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir,
			   &bs->count_transfer_irq_after_polling);
	debugfs_create_u64("count_transfer_dma", 0444, dir,
			   &bs->count_transfer_dma);
}

/* Tear down the directory created by bcm2835_debugfs_create(). */
static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
{
	debugfs_remove_recursive(bs->debugfs_dir);
	bs->debugfs_dir = NULL;
}
#else
/* No-op stubs for kernels built without CONFIG_DEBUG_FS */
static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
				   const char *dname)
{
}

static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
{
}
#endif /* CONFIG_DEBUG_FS */
200 | ||
/* Read a 32-bit controller register at byte offset @reg. */
static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned int reg)
{
	return readl(bs->regs + reg);
}
205 | ||
/* Write @val to the 32-bit controller register at byte offset @reg. */
static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned int reg, u32 val)
{
	writel(val, bs->regs + reg);
}
210 | ||
4adf3129 | 211 | static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs) |
f8043872 CB |
212 | { |
213 | u8 byte; | |
214 | ||
e34ff011 MS |
215 | while ((bs->rx_len) && |
216 | (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) { | |
f8043872 CB |
217 | byte = bcm2835_rd(bs, BCM2835_SPI_FIFO); |
218 | if (bs->rx_buf) | |
219 | *bs->rx_buf++ = byte; | |
e34ff011 | 220 | bs->rx_len--; |
f8043872 CB |
221 | } |
222 | } | |
223 | ||
4adf3129 | 224 | static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs) |
f8043872 CB |
225 | { |
226 | u8 byte; | |
227 | ||
e34ff011 | 228 | while ((bs->tx_len) && |
4adf3129 | 229 | (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) { |
f8043872 CB |
230 | byte = bs->tx_buf ? *bs->tx_buf++ : 0; |
231 | bcm2835_wr(bs, BCM2835_SPI_FIFO, byte); | |
e34ff011 | 232 | bs->tx_len--; |
f8043872 CB |
233 | } |
234 | } | |
235 | ||
/**
 * bcm2835_rd_fifo_count() - blindly read exactly @count bytes from RX FIFO
 * @bs: BCM2835 SPI controller
 * @count: bytes to read from RX FIFO
 *
 * The caller must ensure that @bs->rx_len is greater than or equal to @count,
 * that the RX FIFO contains at least @count bytes and that the DMA Enable flag
 * in the CS register is set (such that a read from the FIFO register receives
 * 32-bit instead of just 8-bit).  Moreover @bs->rx_buf must not be %NULL.
 */
static inline void bcm2835_rd_fifo_count(struct bcm2835_spi *bs, int count)
{
	u32 val;
	int len;

	bs->rx_len -= count;

	do {
		/* each FIFO read yields up to 4 bytes packed into a u32 */
		val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
		len = min(count, 4);
		memcpy(bs->rx_buf, &val, len);
		bs->rx_buf += len;
		count -= 4;
	} while (count > 0);
}
261 | ||
/**
 * bcm2835_wr_fifo_count() - blindly write exactly @count bytes to TX FIFO
 * @bs: BCM2835 SPI controller
 * @count: bytes to write to TX FIFO
 *
 * The caller must ensure that @bs->tx_len is greater than or equal to @count,
 * that the TX FIFO can accommodate @count bytes and that the DMA Enable flag
 * in the CS register is set (such that a write to the FIFO register transmits
 * 32-bit instead of just 8-bit).
 */
static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
{
	u32 val;
	int len;

	bs->tx_len -= count;

	do {
		if (bs->tx_buf) {
			/* pack up to 4 bytes into one 32-bit FIFO write */
			len = min(count, 4);
			memcpy(&val, bs->tx_buf, len);
			bs->tx_buf += len;
		} else {
			val = 0;
		}
		bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
		count -= 4;
	} while (count > 0);
}
291 | ||
/**
 * bcm2835_wait_tx_fifo_empty() - busy-wait for TX FIFO to empty
 * @bs: BCM2835 SPI controller
 *
 * The caller must ensure that the RX FIFO can accommodate as many bytes
 * as have been written to the TX FIFO:  Transmission is halted once the
 * RX FIFO is full, causing this function to spin forever.
 */
static inline void bcm2835_wait_tx_fifo_empty(struct bcm2835_spi *bs)
{
	/* DONE is set by the hardware when the TX FIFO has drained */
	while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
		cpu_relax();
}
305 | ||
/**
 * bcm2835_rd_fifo_blind() - blindly read up to @count bytes from RX FIFO
 * @bs: BCM2835 SPI controller
 * @count: bytes available for reading in RX FIFO
 */
static inline void bcm2835_rd_fifo_blind(struct bcm2835_spi *bs, int count)
{
	u8 val;

	/* never read more bytes than the transfer still expects */
	count = min(count, bs->rx_len);
	bs->rx_len -= count;

	do {
		val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
		if (bs->rx_buf)
			*bs->rx_buf++ = val;
	} while (--count);
}
324 | ||
/**
 * bcm2835_wr_fifo_blind() - blindly write up to @count bytes to TX FIFO
 * @bs: BCM2835 SPI controller
 * @count: bytes available for writing in TX FIFO
 */
static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
{
	u8 val;

	/* never write more bytes than the transfer still has to send */
	count = min(count, bs->tx_len);
	bs->tx_len -= count;

	do {
		val = bs->tx_buf ? *bs->tx_buf++ : 0;
		bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
	} while (--count);
}
342 | ||
/*
 * Quiesce the SPI block: disable interrupts, DMA and the Transfer Active
 * flag, clear both FIFOs and zero the DLEN register.
 */
static void bcm2835_spi_reset_hw(struct bcm2835_spi *bs)
{
	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);

	/* Disable SPI interrupts and transfer */
	cs &= ~(BCM2835_SPI_CS_INTR |
		BCM2835_SPI_CS_INTD |
		BCM2835_SPI_CS_DMAEN |
		BCM2835_SPI_CS_TA);
	/*
	 * Transmission sometimes breaks unless the DONE bit is written at the
	 * end of every transfer.  The spec says it's a RO bit.  Either the
	 * spec is wrong and the bit is actually of type RW1C, or it's a
	 * hardware erratum.
	 */
	cs |= BCM2835_SPI_CS_DONE;
	/* and reset RX/TX FIFOS */
	cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;

	/* and reset the SPI_HW */
	bcm2835_wr(bs, BCM2835_SPI_CS, cs);
	/* as well as DLEN */
	bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
}
367 | ||
/*
 * Interrupt handler: drain the RX FIFO, refill the TX FIFO and finalize
 * the transfer once all expected bytes have been received.
 */
static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
{
	struct bcm2835_spi *bs = dev_id;
	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);

	/*
	 * An interrupt is signaled either if DONE is set (TX FIFO empty)
	 * or if RXR is set (RX FIFO >= ¾ full).
	 */
	if (cs & BCM2835_SPI_CS_RXF)
		bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
	else if (cs & BCM2835_SPI_CS_RXR)
		bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE_3_4);

	if (bs->tx_len && cs & BCM2835_SPI_CS_DONE)
		bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);

	/* Read as many bytes as possible from FIFO */
	bcm2835_rd_fifo(bs);
	/* Write as many bytes as possible to FIFO */
	bcm2835_wr_fifo(bs);

	if (!bs->rx_len) {
		/* Transfer complete - reset SPI HW */
		bcm2835_spi_reset_hw(bs);
		/* wake up the framework */
		spi_finalize_current_transfer(bs->ctlr);
	}

	return IRQ_HANDLED;
}
399 | ||
/*
 * Start a transfer in interrupt mode: prime the TX FIFO, then enable the
 * controller's interrupts so bcm2835_spi_interrupt() drives the rest.
 * Returns 1 to tell the SPI core to wait for completion.
 */
static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
					struct spi_device *spi,
					struct spi_transfer *tfr,
					u32 cs, bool fifo_empty)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	/* update usage statistics */
	bs->count_transfer_irq++;

	/*
	 * Enable HW block, but with interrupts still disabled.
	 * Otherwise the empty TX FIFO would immediately trigger an interrupt.
	 */
	bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);

	/* fill TX FIFO as much as possible */
	if (fifo_empty)
		bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
	bcm2835_wr_fifo(bs);

	/* enable interrupts */
	cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
	bcm2835_wr(bs, BCM2835_SPI_CS, cs);

	/* signal that we need to wait for completion */
	return 1;
}
428 | ||
/**
 * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
 * @ctlr: SPI master controller
 * @tfr: SPI transfer
 * @bs: BCM2835 SPI controller
 * @cs: CS register
 *
 * A limitation in DMA mode is that the FIFO must be accessed in 4 byte chunks.
 * Only the final write access is permitted to transmit less than 4 bytes, the
 * SPI controller deduces its intended size from the DLEN register.
 *
 * If a TX or RX sglist contains multiple entries, one per page, and the first
 * entry starts in the middle of a page, that first entry's length may not be
 * a multiple of 4.  Subsequent entries are fine because they span an entire
 * page, hence do have a length that's a multiple of 4.
 *
 * This cannot happen with kmalloc'ed buffers (which is what most clients use)
 * because they are contiguous in physical memory and therefore not split on
 * page boundaries by spi_map_buf().  But it *can* happen with vmalloc'ed
 * buffers.
 *
 * The DMA engine is incapable of combining sglist entries into a continuous
 * stream of 4 byte chunks, it treats every entry separately:  A TX entry is
 * rounded up a to a multiple of 4 bytes by transmitting surplus bytes, an RX
 * entry is rounded up by throwing away received bytes.
 *
 * Overcome this limitation by transferring the first few bytes without DMA:
 * E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
 * write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
 * The residue of 1 byte in the RX FIFO is picked up by DMA.  Together with
 * the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
 *
 * Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
 * write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
 * Caution, the additional 4 bytes spill over to the second TX sglist entry
 * if the length of the first is *exactly* 1.
 *
 * At most 6 bytes are written and at most 3 bytes read.  Do we know the
 * transfer has this many bytes?  Yes, see BCM2835_SPI_DMA_MIN_LENGTH.
 *
 * The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
 * by the DMA engine.  Toggling the DMA Enable flag in the CS register switches
 * the width but also garbles the FIFO's contents.  The prologue must therefore
 * be transmitted in 32-bit width to ensure that the following DMA transfer can
 * pick up the residue in the RX FIFO in ungarbled form.
 */
static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
					  struct spi_transfer *tfr,
					  struct bcm2835_spi *bs,
					  u32 cs)
{
	int tx_remaining;

	bs->tfr = tfr;
	bs->tx_prologue = 0;
	bs->rx_prologue = 0;
	bs->tx_spillover = false;

	/* prologue length is the first entry's residue modulo 4 */
	if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0]))
		bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;

	if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) {
		bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;

		if (bs->rx_prologue > bs->tx_prologue) {
			if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) {
				bs->tx_prologue = bs->rx_prologue;
			} else {
				bs->tx_prologue += 4;
				bs->tx_spillover =
					!(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3);
			}
		}
	}

	/* rx_prologue > 0 implies tx_prologue > 0, so check only the latter */
	if (!bs->tx_prologue)
		return;

	/* Write and read RX prologue.  Adjust first entry in RX sglist. */
	if (bs->rx_prologue) {
		bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
						  | BCM2835_SPI_CS_DMAEN);
		bcm2835_wr_fifo_count(bs, bs->rx_prologue);
		bcm2835_wait_tx_fifo_empty(bs);
		bcm2835_rd_fifo_count(bs, bs->rx_prologue);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_RX
						  | BCM2835_SPI_CS_CLEAR_TX
						  | BCM2835_SPI_CS_DONE);

		/* CPU touched the mapped region; hand it back to the device */
		dma_sync_single_for_device(ctlr->dma_rx->device->dev,
					   sg_dma_address(&tfr->rx_sg.sgl[0]),
					   bs->rx_prologue, DMA_FROM_DEVICE);

		sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
		sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
	}

	if (!bs->tx_buf)
		return;

	/*
	 * Write remaining TX prologue.  Adjust first entry in TX sglist.
	 * Also adjust second entry if prologue spills over to it.
	 */
	tx_remaining = bs->tx_prologue - bs->rx_prologue;
	if (tx_remaining) {
		bcm2835_wr(bs, BCM2835_SPI_DLEN, tx_remaining);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
						  | BCM2835_SPI_CS_DMAEN);
		bcm2835_wr_fifo_count(bs, tx_remaining);
		bcm2835_wait_tx_fifo_empty(bs);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX
						  | BCM2835_SPI_CS_DONE);
	}

	if (likely(!bs->tx_spillover)) {
		sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
		sg_dma_len(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
	} else {
		sg_dma_len(&tfr->tx_sg.sgl[0]) = 0;
		sg_dma_address(&tfr->tx_sg.sgl[1]) += 4;
		sg_dma_len(&tfr->tx_sg.sgl[1]) -= 4;
	}
}
555 | ||
/**
 * bcm2835_spi_undo_prologue() - reconstruct original sglist state
 * @bs: BCM2835 SPI controller
 *
 * Undo changes which were made to an SPI transfer's sglist when transmitting
 * the prologue.  This is necessary to ensure the same memory ranges are
 * unmapped that were originally mapped.
 */
static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
{
	struct spi_transfer *tfr = bs->tfr;

	if (!bs->tx_prologue)
		return;

	if (bs->rx_prologue) {
		sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
		sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
	}

	if (!bs->tx_buf)
		goto out;

	if (likely(!bs->tx_spillover)) {
		sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
		sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
	} else {
		sg_dma_len(&tfr->tx_sg.sgl[0]) = bs->tx_prologue - 4;
		sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4;
		sg_dma_len(&tfr->tx_sg.sgl[1]) += 4;
	}
out:
	/* mark prologue as consumed so a second undo is a no-op */
	bs->tx_prologue = 0;
}
590 | ||
8259bf66 LW |
591 | /** |
592 | * bcm2835_spi_dma_rx_done() - callback for DMA RX channel | |
593 | * @data: SPI master controller | |
594 | * | |
595 | * Used for bidirectional and RX-only transfers. | |
596 | */ | |
597 | static void bcm2835_spi_dma_rx_done(void *data) | |
3ecd37ed | 598 | { |
5f336ea5 LW |
599 | struct spi_controller *ctlr = data; |
600 | struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); | |
3ecd37ed | 601 | |
2b8279ae | 602 | /* terminate tx-dma as we do not have an irq for it |
3ecd37ed MS |
603 | * because when the rx dma will terminate and this callback |
604 | * is called the tx-dma must have finished - can't get to this | |
605 | * situation otherwise... | |
606 | */ | |
1513ceee | 607 | dmaengine_terminate_async(ctlr->dma_tx); |
8259bf66 LW |
608 | bs->tx_dma_active = false; |
609 | bs->rx_dma_active = false; | |
1513ceee | 610 | bcm2835_spi_undo_prologue(bs); |
3ecd37ed | 611 | |
2b8279ae | 612 | /* reset fifo and HW */ |
ac4648b5 | 613 | bcm2835_spi_reset_hw(bs); |
3ecd37ed MS |
614 | |
615 | /* and mark as completed */; | |
ccae0b40 | 616 | spi_finalize_current_transfer(ctlr); |
3ecd37ed MS |
617 | } |
618 | ||
/**
 * bcm2835_spi_dma_tx_done() - callback for DMA TX channel
 * @data: SPI master controller
 *
 * Used for TX-only transfers.
 */
static void bcm2835_spi_dma_tx_done(void *data)
{
	struct spi_controller *ctlr = data;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	/* busy-wait for TX FIFO to empty, clearing the RX FIFO meanwhile */
	while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
		bcm2835_wr(bs, BCM2835_SPI_CS, bs->slv->clear_rx_cs);

	bs->tx_dma_active = false;
	/* order the flag update before the cmpxchg below */
	smp_wmb();

	/*
	 * In case of a very short transfer, RX DMA may not have been
	 * issued yet.  The onus is then on bcm2835_spi_transfer_one_dma()
	 * to terminate it immediately after issuing.
	 */
	if (cmpxchg(&bs->rx_dma_active, true, false))
		dmaengine_terminate_async(ctlr->dma_rx);

	bcm2835_spi_undo_prologue(bs);
	bcm2835_spi_reset_hw(bs);
	spi_finalize_current_transfer(ctlr);
}
649 | ||
/**
 * bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
 * @ctlr: SPI master controller
 * @tfr: SPI transfer
 * @bs: BCM2835 SPI controller
 * @slv: BCM2835 SPI slave
 * @is_tx: whether to submit DMA descriptor for TX or RX sglist
 *
 * Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
 * Return 0 on success or a negative error number.
 */
static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
				  struct spi_transfer *tfr,
				  struct bcm2835_spi *bs,
				  struct bcm2835_spidev *slv,
				  bool is_tx)
{
	struct dma_chan *chan;
	struct scatterlist *sgl;
	unsigned int nents;
	enum dma_transfer_direction dir;
	unsigned long flags;

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (is_tx) {
		dir = DMA_MEM_TO_DEV;
		chan = ctlr->dma_tx;
		nents = tfr->tx_sg.nents;
		sgl = tfr->tx_sg.sgl;
		/* TX interrupt only needed when TX signals completion */
		flags = tfr->rx_buf ? 0 : DMA_PREP_INTERRUPT;
	} else {
		dir = DMA_DEV_TO_MEM;
		chan = ctlr->dma_rx;
		nents = tfr->rx_sg.nents;
		sgl = tfr->rx_sg.sgl;
		flags = DMA_PREP_INTERRUPT;
	}
	/* prepare the channel */
	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	/*
	 * Completion is signaled by the RX channel for bidirectional and
	 * RX-only transfers; else by the TX channel for TX-only transfers.
	 */
	if (!is_tx) {
		desc->callback = bcm2835_spi_dma_rx_done;
		desc->callback_param = ctlr;
	} else if (!tfr->rx_buf) {
		desc->callback = bcm2835_spi_dma_tx_done;
		desc->callback_param = ctlr;
		bs->slv = slv;
	}

	/* submit it to DMA-engine */
	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}
712 | ||
8259bf66 LW |
713 | /** |
/**
 * bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
 * @ctlr: SPI master controller
 * @tfr: SPI transfer
 * @slv: BCM2835 SPI slave
 * @cs: CS register
 *
 * For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
 * the TX and RX DMA channel to copy between memory and FIFO register.
 *
 * For *TX-only* transfers (rx_buf is %NULL), copying the RX FIFO's contents to
 * memory is pointless.  However not reading the RX FIFO isn't an option either
 * because transmission is halted once it's full.  As a workaround, cyclically
 * clear the RX FIFO by setting the CLEAR_RX bit in the CS register.
 *
 * The CS register value is precalculated in bcm2835_spi_setup().  Normally
 * this is called only once, on slave registration.  A DMA descriptor to write
 * this value is preallocated in bcm2835_dma_init().  All that's left to do
 * when performing a TX-only transfer is to submit this descriptor to the RX
 * DMA channel.  Latency is thereby minimized.  The descriptor does not
 * generate any interrupts while running.  It must be terminated once the
 * TX DMA channel is done.
 *
 * Clearing the RX FIFO is paced by the DREQ signal.  The signal is asserted
 * when the RX FIFO becomes half full, i.e. 32 bytes.  (Tuneable with the DC
 * register.)  Reading 32 bytes from the RX FIFO would normally require 8 bus
 * accesses, whereas clearing it requires only 1 bus access.  So an 8-fold
 * reduction in bus traffic and thus energy consumption is achieved.
 *
 * For *RX-only* transfers (tx_buf is %NULL), fill the TX FIFO by cyclically
 * copying from the zero page.  The DMA descriptor to do this is preallocated
 * in bcm2835_dma_init().  It must be terminated once the RX DMA channel is
 * done and can then be reused.
 *
 * The BCM2835 DMA driver autodetects when a transaction copies from the zero
 * page and utilizes the DMA controller's ability to synthesize zeroes instead
 * of copying them from memory.  This reduces traffic on the memory bus.  The
 * feature is not available on so-called "lite" channels, but normally TX DMA
 * is backed by a full-featured channel.
 *
 * Zero-filling the TX FIFO is paced by the DREQ signal.  Unfortunately the
 * BCM2835 SPI controller continues to assert DREQ even after the DLEN register
 * has been counted down to zero (hardware erratum).  Thus, when the transfer
 * has finished, the DMA engine zero-fills the TX FIFO until it is half full.
 * (Tuneable with the DC register.)  So up to 9 gratuitous bus accesses are
 * performed at the end of an RX-only transfer.
 *
 * Return: 1 to tell the SPI core to wait for completion, or a negative errno
 * on submission failure (hardware is reset and the prologue undone first).
 */
static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
					struct spi_transfer *tfr,
					struct bcm2835_spidev *slv,
					u32 cs)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	dma_cookie_t cookie;
	int ret;

	/* update usage statistics */
	bs->count_transfer_dma++;

	/*
	 * Transfer first few bytes without DMA if length of first TX or RX
	 * sglist entry is not a multiple of 4 bytes (hardware limitation).
	 */
	bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);

	/* setup tx-DMA: real TX buffer, or the reusable zero-fill descriptor */
	if (bs->tx_buf) {
		ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, true);
	} else {
		cookie = dmaengine_submit(bs->fill_tx_desc);
		ret = dma_submit_error(cookie);
	}
	if (ret)
		goto err_reset_hw;

	/* set the DMA length */
	bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);

	/* start the HW */
	bcm2835_wr(bs, BCM2835_SPI_CS,
		   cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);

	/*
	 * Publish tx_dma_active before issuing TX so the TX completion
	 * callback observes it; paired with the reader below / in callbacks.
	 */
	bs->tx_dma_active = true;
	smp_wmb();

	/* start TX early */
	dma_async_issue_pending(ctlr->dma_tx);

	/* setup rx-DMA late - to run transfers while
	 * mapping of the rx buffers still takes place
	 * this saves 10us or more.
	 */
	if (bs->rx_buf) {
		ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, false);
	} else {
		cookie = dmaengine_submit(slv->clear_rx_desc);
		ret = dma_submit_error(cookie);
	}
	if (ret) {
		/* need to reset on errors */
		dmaengine_terminate_sync(ctlr->dma_tx);
		bs->tx_dma_active = false;
		goto err_reset_hw;
	}

	/* start rx dma late */
	dma_async_issue_pending(ctlr->dma_rx);
	bs->rx_dma_active = true;
	smp_mb();

	/*
	 * In case of a very short TX-only transfer, bcm2835_spi_dma_tx_done()
	 * may run before RX DMA is issued.  Terminate RX DMA if so.
	 */
	if (!bs->rx_buf && !bs->tx_dma_active &&
	    cmpxchg(&bs->rx_dma_active, true, false)) {
		dmaengine_terminate_async(ctlr->dma_rx);
		bcm2835_spi_reset_hw(bs);
	}

	/* wait for wakeup in framework */
	return 1;

err_reset_hw:
	bcm2835_spi_reset_hw(bs);
	bcm2835_spi_undo_prologue(bs);
	return ret;
}
841 | ||
5f336ea5 | 842 | static bool bcm2835_spi_can_dma(struct spi_controller *ctlr, |
3ecd37ed MS |
843 | struct spi_device *spi, |
844 | struct spi_transfer *tfr) | |
845 | { | |
3ecd37ed MS |
846 | /* we start DMA efforts only on bigger transfers */ |
847 | if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH) | |
848 | return false; | |
849 | ||
3ecd37ed MS |
850 | /* return OK */ |
851 | return true; | |
852 | } | |
853 | ||
/*
 * bcm2835_dma_release() - tear down the TX/RX DMA channels.
 *
 * Terminates any in-flight descriptors, frees the preallocated zero-fill
 * descriptor and its zero-page mapping, and releases both channels.  Safe
 * to call when channels were never acquired (pointers are NULL-checked
 * and NULLed afterwards), so it doubles as the error-unwind helper for
 * bcm2835_dma_init().
 */
static void bcm2835_dma_release(struct spi_controller *ctlr,
				struct bcm2835_spi *bs)
{
	if (ctlr->dma_tx) {
		/* must quiesce the channel before freeing its descriptor */
		dmaengine_terminate_sync(ctlr->dma_tx);

		if (bs->fill_tx_desc)
			dmaengine_desc_free(bs->fill_tx_desc);

		if (bs->fill_tx_addr)
			dma_unmap_page_attrs(ctlr->dma_tx->device->dev,
					     bs->fill_tx_addr, sizeof(u32),
					     DMA_TO_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);

		dma_release_channel(ctlr->dma_tx);
		ctlr->dma_tx = NULL;
	}

	if (ctlr->dma_rx) {
		dmaengine_terminate_sync(ctlr->dma_rx);
		dma_release_channel(ctlr->dma_rx);
		ctlr->dma_rx = NULL;
	}
}
879 | ||
/*
 * bcm2835_dma_init() - acquire and configure the TX/RX DMA channels.
 *
 * On success installs ->can_dma so the SPI core routes eligible transfers
 * through DMA.  On any failure other than -EPROBE_DEFER the driver falls
 * back to interrupt mode and 0 is returned; only deferral is propagated.
 */
static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
			    struct bcm2835_spi *bs)
{
	struct dma_slave_config slave_config;
	const __be32 *addr;
	dma_addr_t dma_reg_base;
	int ret;

	/* base address in dma-space */
	addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
	if (!addr) {
		dev_err(dev, "could not get DMA-register address - not using dma mode\n");
		/* Fall back to interrupt mode */
		return 0;
	}
	dma_reg_base = be32_to_cpup(addr);

	/* get tx/rx dma */
	ctlr->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(ctlr->dma_tx)) {
		dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
		ret = PTR_ERR(ctlr->dma_tx);
		ctlr->dma_tx = NULL;
		goto err;
	}
	ctlr->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(ctlr->dma_rx)) {
		dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
		ret = PTR_ERR(ctlr->dma_rx);
		ctlr->dma_rx = NULL;
		goto err_release;
	}

	/*
	 * The TX DMA channel either copies a transfer's TX buffer to the FIFO
	 * or, in case of an RX-only transfer, cyclically copies from the zero
	 * page to the FIFO using a preallocated, reusable descriptor.
	 */
	slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config);
	if (ret)
		goto err_config;

	/* map the zero page once; CPU never writes it, so skip cache sync */
	bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev,
					      ZERO_PAGE(0), 0, sizeof(u32),
					      DMA_TO_DEVICE,
					      DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) {
		dev_err(dev, "cannot map zero page - not using DMA mode\n");
		bs->fill_tx_addr = 0;
		ret = -ENOMEM;
		goto err_release;
	}

	bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx,
						     bs->fill_tx_addr,
						     sizeof(u32), 0,
						     DMA_MEM_TO_DEV, 0);
	if (!bs->fill_tx_desc) {
		dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n");
		ret = -ENOMEM;
		goto err_release;
	}

	ret = dmaengine_desc_set_reuse(bs->fill_tx_desc);
	if (ret) {
		dev_err(dev, "cannot reuse fill_tx_desc - not using DMA mode\n");
		goto err_release;
	}

	/*
	 * The RX DMA channel is used bidirectionally:  It either reads the
	 * RX FIFO or, in case of a TX-only transfer, cyclically writes a
	 * precalculated value to the CS register to clear the RX FIFO.
	 */
	slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_CS);
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
	if (ret)
		goto err_config;

	/* all went well, so set can_dma */
	ctlr->can_dma = bcm2835_spi_can_dma;

	return 0;

err_config:
	dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
		ret);
err_release:
	bcm2835_dma_release(ctlr, bs);
err:
	/*
	 * Only report error for deferred probing, otherwise fall back to
	 * interrupt mode
	 */
	if (ret != -EPROBE_DEFER)
		ret = 0;

	return ret;
}
986 | ||
/*
 * bcm2835_spi_transfer_one_poll() - run a short transfer by busy-polling
 * the FIFOs.
 *
 * Keeps the TX FIFO topped up and drains the RX FIFO until rx_len reaches
 * zero.  If the transfer does not finish within the polling budget, it is
 * handed off to interrupt mode instead of spinning indefinitely.
 *
 * Return: 0 on completion, or the result of the interrupt-mode fallback.
 */
static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
					 struct spi_device *spi,
					 struct spi_transfer *tfr,
					 u32 cs)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	unsigned long timeout;

	/* update usage statistics */
	bs->count_transfer_polling++;

	/* enable HW block without interrupts */
	bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);

	/* fill in the fifo before timeout calculations
	 * if we are interrupted here, then the data is
	 * getting transferred by the HW while we are interrupted
	 */
	bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);

	/* set the timeout to at least 2 jiffies */
	timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;

	/* loop until finished the transfer */
	while (bs->rx_len) {
		/* fill in tx fifo with remaining data */
		bcm2835_wr_fifo(bs);

		/* read from fifo as much as possible */
		bcm2835_rd_fifo(bs);

		/* if there is still data pending to read
		 * then check the timeout
		 */
		if (bs->rx_len && time_after(jiffies, timeout)) {
			dev_dbg_ratelimited(&spi->dev,
					    "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
					    jiffies - timeout,
					    bs->tx_len, bs->rx_len);
			/* fall back to interrupt mode */

			/* update usage statistics */
			bs->count_transfer_irq_after_polling++;

			return bcm2835_spi_transfer_one_irq(ctlr, spi,
							    tfr, cs, false);
		}
	}

	/* Transfer complete - reset SPI HW */
	bcm2835_spi_reset_hw(bs);
	/* and return without waiting for completion */
	return 0;
}
1041 | ||
/*
 * bcm2835_spi_transfer_one() - ->transfer_one() callback for the SPI core.
 *
 * Programs the clock divider, records the transfer buffers/lengths in the
 * driver state, then dispatches to polling, DMA or interrupt mode based on
 * transfer length.
 *
 * Return: 0 on synchronous completion, 1 if completion will be signalled
 * asynchronously, or a negative errno.
 */
static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
				    struct spi_device *spi,
				    struct spi_transfer *tfr)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	struct bcm2835_spidev *slv = spi_get_ctldata(spi);
	unsigned long spi_hz, clk_hz, cdiv;
	unsigned long hz_per_byte, byte_limit;
	u32 cs = slv->prepare_cs;

	/* set clock */
	spi_hz = tfr->speed_hz;
	clk_hz = clk_get_rate(bs->clk);

	if (spi_hz >= clk_hz / 2) {
		cdiv = 2; /* clk_hz/2 is the fastest we can go */
	} else if (spi_hz) {
		/* CDIV must be a multiple of two */
		cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
		cdiv += (cdiv % 2);

		if (cdiv >= 65536)
			cdiv = 0; /* 0 is the slowest we can go */
	} else {
		cdiv = 0; /* 0 is the slowest we can go */
	}
	/* CDIV of 0 means the hardware divides by 65536 */
	tfr->effective_speed_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
	bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);

	/* handle all the 3-wire mode */
	if (spi->mode & SPI_3WIRE && tfr->rx_buf)
		cs |= BCM2835_SPI_CS_REN;

	/* set transmit buffers and length */
	bs->tx_buf = tfr->tx_buf;
	bs->rx_buf = tfr->rx_buf;
	bs->tx_len = tfr->len;
	bs->rx_len = tfr->len;

	/* Calculate the estimated time in us the transfer runs.  Note that
	 * there is 1 idle clocks cycles after each byte getting transferred
	 * so we have 9 cycles/byte.  This is used to find the number of Hz
	 * per byte per polling limit.  E.g., we can transfer 1 byte in 30 us
	 * per 300,000 Hz of bus clock.
	 */
	hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
	byte_limit = hz_per_byte ? tfr->effective_speed_hz / hz_per_byte : 1;

	/* run in polling mode for short transfers */
	if (tfr->len < byte_limit)
		return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs);

	/* run in dma mode if conditions are right
	 * Note that unlike poll or interrupt mode DMA mode does not have
	 * this 1 idle clock cycle pattern but runs the spi clock without gaps
	 */
	if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
		return bcm2835_spi_transfer_one_dma(ctlr, tfr, slv, cs);

	/* run in interrupt-mode */
	return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
}
1104 | ||
/*
 * bcm2835_spi_prepare_message() - ->prepare_message() callback.
 *
 * Splits oversized transfers to fit the 16-bit DLEN register when DMA is
 * available, and programs the slave's precalculated CS value so the clock
 * polarity is correct before chip select is asserted.
 */
static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
				       struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	struct bcm2835_spidev *slv = spi_get_ctldata(spi);
	int ret;

	if (ctlr->can_dma) {
		/*
		 * DMA transfers are limited to 16 bit (0 to 65535 bytes) by
		 * the SPI HW due to DLEN.  Split up transfers (32-bit FIFO
		 * aligned) if the limit is exceeded.
		 */
		ret = spi_split_transfers_maxsize(ctlr, msg, 65532,
						  GFP_KERNEL | GFP_DMA);
		if (ret)
			return ret;
	}

	/*
	 * Set up clock polarity before spi_transfer_one_message() asserts
	 * chip select to avoid a gratuitous clock signal edge.
	 */
	bcm2835_wr(bs, BCM2835_SPI_CS, slv->prepare_cs);

	return 0;
}
1133 | ||
/*
 * bcm2835_spi_handle_err() - ->handle_err() callback: clean up after a
 * failed or aborted message.
 *
 * Synchronously terminates both DMA channels (no-ops if they are NULL or
 * idle), undoes any transfer prologue, and resets the SPI block.
 */
static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
				   struct spi_message *msg)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	/* if an error occurred and we have an active dma, then terminate */
	dmaengine_terminate_sync(ctlr->dma_tx);
	bs->tx_dma_active = false;
	dmaengine_terminate_sync(ctlr->dma_rx);
	bs->rx_dma_active = false;
	bcm2835_spi_undo_prologue(bs);

	/* and reset */
	bcm2835_spi_reset_hw(bs);
}
1149 | ||
a30a555d MS |
1150 | static int chip_match_name(struct gpio_chip *chip, void *data) |
1151 | { | |
1152 | return !strcmp(chip->label, data); | |
1153 | } | |
1154 | ||
ec679bda LW |
1155 | static void bcm2835_spi_cleanup(struct spi_device *spi) |
1156 | { | |
1157 | struct bcm2835_spidev *slv = spi_get_ctldata(spi); | |
1158 | struct spi_controller *ctlr = spi->controller; | |
1159 | ||
1160 | if (slv->clear_rx_desc) | |
1161 | dmaengine_desc_free(slv->clear_rx_desc); | |
1162 | ||
1163 | if (slv->clear_rx_addr) | |
1164 | dma_unmap_single(ctlr->dma_rx->device->dev, | |
1165 | slv->clear_rx_addr, | |
1166 | sizeof(u32), | |
1167 | DMA_TO_DEVICE); | |
1168 | ||
1169 | kfree(slv); | |
1170 | } | |
1171 | ||
/*
 * bcm2835_spi_setup_dma() - allocate the per-slave clear-RX DMA descriptor.
 *
 * Maps slv->clear_rx_cs for device access and prepares a reusable cyclic
 * descriptor that writes it to the CS register (used to drain the RX FIFO
 * during TX-only DMA transfers).  A no-op when DMA is not in use.
 *
 * Partial allocations are left in slv for bcm2835_spi_cleanup() to undo.
 */
static int bcm2835_spi_setup_dma(struct spi_controller *ctlr,
				 struct spi_device *spi,
				 struct bcm2835_spi *bs,
				 struct bcm2835_spidev *slv)
{
	int ret;

	if (!ctlr->dma_rx)
		return 0;

	slv->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
					    &slv->clear_rx_cs,
					    sizeof(u32),
					    DMA_TO_DEVICE);
	if (dma_mapping_error(ctlr->dma_rx->device->dev, slv->clear_rx_addr)) {
		dev_err(&spi->dev, "cannot map clear_rx_cs\n");
		/* zero the handle so cleanup knows nothing is mapped */
		slv->clear_rx_addr = 0;
		return -ENOMEM;
	}

	slv->clear_rx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
						       slv->clear_rx_addr,
						       sizeof(u32), 0,
						       DMA_MEM_TO_DEV, 0);
	if (!slv->clear_rx_desc) {
		dev_err(&spi->dev, "cannot prepare clear_rx_desc\n");
		return -ENOMEM;
	}

	ret = dmaengine_desc_set_reuse(slv->clear_rx_desc);
	if (ret) {
		dev_err(&spi->dev, "cannot reuse clear_rx_desc\n");
		return ret;
	}

	return 0;
}
1209 | ||
/*
 * bcm2835_spi_setup() - ->setup() callback: per-slave initialization.
 *
 * On first invocation allocates the bcm2835_spidev state (cache-aligned so
 * clear_rx_cs can be DMA-mapped safely) and its DMA descriptor.  Every
 * invocation recomputes the precalculated CS register values from the
 * current spi->mode and, if needed, hands the native chip select over to
 * a GPIO descriptor.
 */
static int bcm2835_spi_setup(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	struct bcm2835_spidev *slv = spi_get_ctldata(spi);
	struct gpio_chip *chip;
	int ret;
	u32 cs;

	if (!slv) {
		/* align so clear_rx_cs doesn't share a cache line with CPU data */
		slv = kzalloc(ALIGN(sizeof(*slv), dma_get_cache_alignment()),
			      GFP_KERNEL);
		if (!slv)
			return -ENOMEM;

		spi_set_ctldata(spi, slv);

		ret = bcm2835_spi_setup_dma(ctlr, spi, bs, slv);
		if (ret)
			goto err_cleanup;
	}

	/*
	 * Precalculate SPI slave's CS register value for ->prepare_message():
	 * The driver always uses software-controlled GPIO chip select, hence
	 * set the hardware-controlled native chip select to an invalid value
	 * to prevent it from interfering.
	 */
	cs = BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
	if (spi->mode & SPI_CPOL)
		cs |= BCM2835_SPI_CS_CPOL;
	if (spi->mode & SPI_CPHA)
		cs |= BCM2835_SPI_CS_CPHA;
	slv->prepare_cs = cs;

	/*
	 * Precalculate SPI slave's CS register value to clear RX FIFO
	 * in case of a TX-only DMA transfer.
	 */
	if (ctlr->dma_rx) {
		slv->clear_rx_cs = cs | BCM2835_SPI_CS_TA |
				   BCM2835_SPI_CS_DMAEN |
				   BCM2835_SPI_CS_CLEAR_RX;
		/* flush the updated value to the DMA-mapped buffer */
		dma_sync_single_for_device(ctlr->dma_rx->device->dev,
					   slv->clear_rx_addr,
					   sizeof(u32),
					   DMA_TO_DEVICE);
	}

	/*
	 * sanity checking the native-chipselects
	 */
	if (spi->mode & SPI_NO_CS)
		return 0;
	/*
	 * The SPI core has successfully requested the CS GPIO line from the
	 * device tree, so we are done.
	 */
	if (spi->cs_gpiod)
		return 0;
	if (spi->chip_select > 1) {
		/* error in the case of native CS requested with CS > 1
		 * officially there is a CS2, but it is not documented
		 * which GPIO is connected with that...
		 */
		dev_err(&spi->dev,
			"setup: only two native chip-selects are supported\n");
		ret = -EINVAL;
		goto err_cleanup;
	}

	/*
	 * Translate native CS to GPIO
	 *
	 * FIXME: poking around in the gpiolib internals like this is
	 * not very good practice.  Find a way to locate the real problem
	 * and fix it.  Why is the GPIO descriptor in spi->cs_gpiod
	 * sometimes not assigned correctly?  Erroneous device trees?
	 */

	/* get the gpio chip for the base */
	chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
	if (!chip)
		return 0;

	/* native CS0 is on GPIO 8, CS1 on GPIO 7 */
	spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
						  DRV_NAME,
						  GPIO_LOOKUP_FLAGS_DEFAULT,
						  GPIOD_OUT_LOW);
	if (IS_ERR(spi->cs_gpiod)) {
		ret = PTR_ERR(spi->cs_gpiod);
		goto err_cleanup;
	}

	/* and set up the "mode" and level */
	dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n",
		 spi->chip_select);

	return 0;

err_cleanup:
	bcm2835_spi_cleanup(spi);
	return ret;
}
1314 | ||
1315 | static int bcm2835_spi_probe(struct platform_device *pdev) | |
1316 | { | |
5f336ea5 | 1317 | struct spi_controller *ctlr; |
f8043872 | 1318 | struct bcm2835_spi *bs; |
f8043872 CB |
1319 | int err; |
1320 | ||
ec679bda | 1321 | ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*bs)); |
5f336ea5 | 1322 | if (!ctlr) |
f8043872 | 1323 | return -ENOMEM; |
f8043872 | 1324 | |
5f336ea5 | 1325 | platform_set_drvdata(pdev, ctlr); |
f8043872 | 1326 | |
3bd158c5 | 1327 | ctlr->use_gpio_descriptors = true; |
5f336ea5 LW |
1328 | ctlr->mode_bits = BCM2835_SPI_MODE_BITS; |
1329 | ctlr->bits_per_word_mask = SPI_BPW_MASK(8); | |
13817d46 | 1330 | ctlr->num_chipselect = 3; |
5f336ea5 | 1331 | ctlr->setup = bcm2835_spi_setup; |
ec679bda | 1332 | ctlr->cleanup = bcm2835_spi_cleanup; |
5f336ea5 LW |
1333 | ctlr->transfer_one = bcm2835_spi_transfer_one; |
1334 | ctlr->handle_err = bcm2835_spi_handle_err; | |
1335 | ctlr->prepare_message = bcm2835_spi_prepare_message; | |
1336 | ctlr->dev.of_node = pdev->dev.of_node; | |
f8043872 | 1337 | |
5f336ea5 | 1338 | bs = spi_controller_get_devdata(ctlr); |
afe7e363 | 1339 | bs->ctlr = ctlr; |
f8043872 | 1340 | |
6ba794df | 1341 | bs->regs = devm_platform_ioremap_resource(pdev, 0); |
e1483ac0 LW |
1342 | if (IS_ERR(bs->regs)) |
1343 | return PTR_ERR(bs->regs); | |
f8043872 CB |
1344 | |
1345 | bs->clk = devm_clk_get(&pdev->dev, NULL); | |
e1483ac0 LW |
1346 | if (IS_ERR(bs->clk)) |
1347 | return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk), | |
1348 | "could not get clk\n"); | |
f8043872 | 1349 | |
c6892892 RF |
1350 | ctlr->max_speed_hz = clk_get_rate(bs->clk) / 2; |
1351 | ||
ddf0e1c2 | 1352 | bs->irq = platform_get_irq(pdev, 0); |
e1483ac0 LW |
1353 | if (bs->irq <= 0) |
1354 | return bs->irq ? bs->irq : -ENODEV; | |
f8043872 CB |
1355 | |
1356 | clk_prepare_enable(bs->clk); | |
1357 | ||
6133fed0 PU |
1358 | err = bcm2835_dma_init(ctlr, &pdev->dev, bs); |
1359 | if (err) | |
1360 | goto out_clk_disable; | |
ddf0e1c2 MS |
1361 | |
1362 | /* initialise the hardware with the default polarities */ | |
1363 | bcm2835_wr(bs, BCM2835_SPI_CS, | |
1364 | BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); | |
1365 | ||
d62069c2 | 1366 | err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0, |
afe7e363 | 1367 | dev_name(&pdev->dev), bs); |
f8043872 CB |
1368 | if (err) { |
1369 | dev_err(&pdev->dev, "could not request IRQ: %d\n", err); | |
666224b4 | 1370 | goto out_dma_release; |
f8043872 CB |
1371 | } |
1372 | ||
9dd277ff | 1373 | err = spi_register_controller(ctlr); |
f8043872 | 1374 | if (err) { |
5f336ea5 LW |
1375 | dev_err(&pdev->dev, "could not register SPI controller: %d\n", |
1376 | err); | |
666224b4 | 1377 | goto out_dma_release; |
f8043872 CB |
1378 | } |
1379 | ||
154f7da5 MS |
1380 | bcm2835_debugfs_create(bs, dev_name(&pdev->dev)); |
1381 | ||
f8043872 CB |
1382 | return 0; |
1383 | ||
666224b4 PU |
1384 | out_dma_release: |
1385 | bcm2835_dma_release(ctlr, bs); | |
f8043872 CB |
1386 | out_clk_disable: |
1387 | clk_disable_unprepare(bs->clk); | |
f8043872 CB |
1388 | return err; |
1389 | } | |
1390 | ||
/*
 * bcm2835_spi_remove() - unbind the driver.
 *
 * Unregisters the controller first (quiesces the message queue), then
 * releases DMA, disables the SPI block, and gates the clock.  Always
 * returns 0.
 */
static int bcm2835_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	bcm2835_debugfs_remove(bs);

	spi_unregister_controller(ctlr);

	bcm2835_dma_release(ctlr, bs);

	/* Clear FIFOs, and disable the HW block */
	bcm2835_wr(bs, BCM2835_SPI_CS,
		   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);

	clk_disable_unprepare(bs->clk);

	return 0;
}
1410 | ||
118eb0e5 FF |
1411 | static void bcm2835_spi_shutdown(struct platform_device *pdev) |
1412 | { | |
1413 | int ret; | |
1414 | ||
1415 | ret = bcm2835_spi_remove(pdev); | |
1416 | if (ret) | |
1417 | dev_err(&pdev->dev, "failed to shutdown\n"); | |
1418 | } | |
1419 | ||
f8043872 CB |
1420 | static const struct of_device_id bcm2835_spi_match[] = { |
1421 | { .compatible = "brcm,bcm2835-spi", }, | |
1422 | {} | |
1423 | }; | |
1424 | MODULE_DEVICE_TABLE(of, bcm2835_spi_match); | |
1425 | ||
1426 | static struct platform_driver bcm2835_spi_driver = { | |
1427 | .driver = { | |
1428 | .name = DRV_NAME, | |
f8043872 CB |
1429 | .of_match_table = bcm2835_spi_match, |
1430 | }, | |
1431 | .probe = bcm2835_spi_probe, | |
1432 | .remove = bcm2835_spi_remove, | |
118eb0e5 | 1433 | .shutdown = bcm2835_spi_shutdown, |
f8043872 CB |
1434 | }; |
1435 | module_platform_driver(bcm2835_spi_driver); | |
1436 | ||
1437 | MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835"); | |
1438 | MODULE_AUTHOR("Chris Boot <bootc@bootc.net>"); | |
22bf6cd2 | 1439 | MODULE_LICENSE("GPL"); |