// SPDX-License-Identifier: GPL-2.0
/*
 * SPI bus driver for the Ingenic SoCs
 * Copyright (c) 2017-2021 Artur Rojek <contact@artur-rojek.eu>
 * Copyright (c) 2017-2021 Paul Cercueil <paul@crapouillou.net>
 * Copyright (c) 2022 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include "internals.h"

#define REG_SSIDR	0x0
#define REG_SSICR0	0x4
#define REG_SSICR1	0x8
#define REG_SSISR	0xc
#define REG_SSIGR	0x18

#define REG_SSICR0_TENDIAN_LSB		BIT(19)
#define REG_SSICR0_RENDIAN_LSB		BIT(17)
#define REG_SSICR0_SSIE			BIT(15)
#define REG_SSICR0_LOOP			BIT(10)
#define REG_SSICR0_EACLRUN		BIT(7)
#define REG_SSICR0_FSEL			BIT(6)
#define REG_SSICR0_TFLUSH		BIT(2)
#define REG_SSICR0_RFLUSH		BIT(1)

#define REG_SSICR1_FRMHL_MASK		(BIT(31) | BIT(30))
#define REG_SSICR1_FRMHL		BIT(30)
#define REG_SSICR1_LFST			BIT(25)
#define REG_SSICR1_UNFIN		BIT(23)
#define REG_SSICR1_PHA			BIT(1)
#define REG_SSICR1_POL			BIT(0)

#define REG_SSISR_END			BIT(7)
#define REG_SSISR_BUSY			BIT(6)
#define REG_SSISR_TFF			BIT(5)
#define REG_SSISR_RFE			BIT(4)
#define REG_SSISR_RFHF			BIT(2)
#define REG_SSISR_UNDR			BIT(1)
#define REG_SSISR_OVER			BIT(0)

#define SPI_INGENIC_FIFO_SIZE		128u

struct jz_soc_info {
	u32 bits_per_word_mask;
	struct reg_field flen_field;
	bool has_trendian;

	unsigned int max_speed_hz;
	unsigned int max_native_cs;
};

struct ingenic_spi {
	const struct jz_soc_info *soc_info;
	struct clk *clk;
	struct resource *mem_res;

	struct regmap *map;
	struct regmap_field *flen_field;
};

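/*
 * Poll the status register until the bit(s) selected by @mask read back as
 * @condition, sleeping 100 us between reads and giving up after 10 ms.
 */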
static int spi_ingenic_wait(struct ingenic_spi *priv,
			    unsigned long mask,
			    bool condition)
{
	unsigned int val;

	return regmap_read_poll_timeout(priv->map, REG_SSISR, val,
					!!(val & mask) == condition,
					100, 10000);
}

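/*
 * Chip-select handling: while CS is asserted the transfer is flagged as
 * unfinished (UNFIN) so the controller keeps CS active between FIFO
 * refills.  On deassert, UNFIN and the underrun/overrun flags are cleared
 * and we wait for the END bit.  Both FIFOs are flushed in either case.
 */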
static void spi_ingenic_set_cs(struct spi_device *spi, bool disable)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(spi->controller);

	if (disable) {
		regmap_clear_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
		regmap_clear_bits(priv->map, REG_SSISR,
				  REG_SSISR_UNDR | REG_SSISR_OVER);

		spi_ingenic_wait(priv, REG_SSISR_END, true);
	} else {
		regmap_set_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
	}

	regmap_set_bits(priv->map, REG_SSICR0,
			REG_SSICR0_RFLUSH | REG_SSICR0_TFLUSH);
}

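/*
 * Program the bit clock divider and frame length for one transfer.  The
 * divider is computed as clk_hz / (2 * speed_hz), clamped to 1..256 and
 * written minus one, which implies the hardware divides the input clock by
 * 2 * (SSIGR + 1).  The FLEN field is programmed with bits_per_word - 2.
 */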
static void spi_ingenic_prepare_transfer(struct ingenic_spi *priv,
					 struct spi_device *spi,
					 struct spi_transfer *xfer)
{
	unsigned long clk_hz = clk_get_rate(priv->clk);
	u32 cdiv, speed_hz = xfer->speed_hz ?: spi->max_speed_hz,
	    bits_per_word = xfer->bits_per_word ?: spi->bits_per_word;

	cdiv = clk_hz / (speed_hz * 2);
	cdiv = clamp(cdiv, 1u, 0x100u) - 1;

	regmap_write(priv->map, REG_SSIGR, cdiv);

	regmap_field_write(priv->flen_field, bits_per_word - 2);
}

static void spi_ingenic_finalize_transfer(void *controller)
{
	spi_finalize_current_transfer(controller);
}

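/*
 * Build and submit a DMA descriptor for one direction of a transfer.  The
 * slave address is the data register (SSIDR); the access width and burst
 * size follow the word size (8, 16 or 32 bits).  Only the RX descriptor
 * gets a completion callback, as RX is the last part to finish.
 */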
static struct dma_async_tx_descriptor *
spi_ingenic_prepare_dma(struct spi_controller *ctlr, struct dma_chan *chan,
			struct sg_table *sg, enum dma_transfer_direction dir,
			unsigned int bits)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	struct dma_slave_config cfg = {
		.direction = dir,
		.src_addr = priv->mem_res->start + REG_SSIDR,
		.dst_addr = priv->mem_res->start + REG_SSIDR,
	};
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	if (bits > 16) {
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg.src_maxburst = cfg.dst_maxburst = 4;
	} else if (bits > 8) {
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_maxburst = cfg.dst_maxburst = 2;
	} else {
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		cfg.src_maxburst = cfg.dst_maxburst = 1;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ERR_PTR(ret);

	desc = dmaengine_prep_slave_sg(chan, sg->sgl, sg->nents, dir,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	if (dir == DMA_DEV_TO_MEM) {
		desc->callback = spi_ingenic_finalize_transfer;
		desc->callback_param = ctlr;
	}

	cookie = dmaengine_submit(desc);

	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_desc_free(desc);
		return ERR_PTR(ret);
	}

	return desc;
}

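/*
 * Set up a full-duplex DMA transfer.  The RX descriptor is prepared first
 * so its completion callback is in place before TX is started.  Returning
 * 1 tells the SPI core that the transfer is in flight and will be
 * finalized asynchronously from the RX DMA callback.
 */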
static int spi_ingenic_dma_tx(struct spi_controller *ctlr,
			      struct spi_transfer *xfer, unsigned int bits)
{
	struct dma_async_tx_descriptor *rx_desc, *tx_desc;

	rx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_rx,
					  &xfer->rx_sg, DMA_DEV_TO_MEM, bits);
	if (IS_ERR(rx_desc))
		return PTR_ERR(rx_desc);

	tx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_tx,
					  &xfer->tx_sg, DMA_MEM_TO_DEV, bits);
	if (IS_ERR(tx_desc)) {
		dmaengine_terminate_async(ctlr->dma_rx);
		dmaengine_desc_free(rx_desc);
		return PTR_ERR(tx_desc);
	}

	dma_async_issue_pending(ctlr->dma_rx);
	dma_async_issue_pending(ctlr->dma_tx);

	return 1;
}

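/*
 * PIO transfer helpers, generated for 8-, 16- and 32-bit word sizes.  The
 * TX FIFO is first filled with up to SPI_INGENIC_FIFO_SIZE words; then,
 * for every word drained from the RX FIFO, one more word is pushed so the
 * TX FIFO stays fed without overflowing.
 */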
#define SPI_INGENIC_TX(x)						\
static int spi_ingenic_tx##x(struct ingenic_spi *priv,			\
			     struct spi_transfer *xfer)			\
{									\
	unsigned int count = xfer->len / (x / 8);			\
	unsigned int prefill = min(count, SPI_INGENIC_FIFO_SIZE);	\
	const u##x *tx_buf = xfer->tx_buf;				\
	u##x *rx_buf = xfer->rx_buf;					\
	unsigned int i, val;						\
	int err;							\
									\
	/* Fill up the TX fifo */					\
	for (i = 0; i < prefill; i++) {					\
		val = tx_buf ? tx_buf[i] : 0;				\
									\
		regmap_write(priv->map, REG_SSIDR, val);		\
	}								\
									\
	for (i = 0; i < count; i++) {					\
		err = spi_ingenic_wait(priv, REG_SSISR_RFE, false);	\
		if (err)						\
			return err;					\
									\
		regmap_read(priv->map, REG_SSIDR, &val);		\
		if (rx_buf)						\
			rx_buf[i] = val;				\
									\
		if (i < count - prefill) {				\
			val = tx_buf ? tx_buf[i + prefill] : 0;		\
									\
			regmap_write(priv->map, REG_SSIDR, val);	\
		}							\
	}								\
									\
	return 0;							\
}
SPI_INGENIC_TX(8)
SPI_INGENIC_TX(16)
SPI_INGENIC_TX(32)
#undef SPI_INGENIC_TX

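/*
 * Dispatch one transfer: use DMA when the SPI core has already DMA-mapped
 * the buffers for this transfer, otherwise fall back to PIO with the word
 * size selecting the 8-, 16- or 32-bit helper.
 */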
static int spi_ingenic_transfer_one(struct spi_controller *ctlr,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	unsigned int bits = xfer->bits_per_word ?: spi->bits_per_word;

	spi_ingenic_prepare_transfer(priv, spi, xfer);

	if (spi_xfer_is_dma_mapped(ctlr, spi, xfer))
		return spi_ingenic_dma_tx(ctlr, xfer, bits);

	if (bits > 16)
		return spi_ingenic_tx32(priv, xfer);

	if (bits > 8)
		return spi_ingenic_tx16(priv, xfer);

	return spi_ingenic_tx8(priv, xfer);
}

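/*
 * Translate the SPI mode flags of the message's device into SSICR0/SSICR1
 * bits: clock phase and polarity, loopback, chip-select polarity and,
 * depending on the SoC, LSB-first ordering via either the CR0 endianness
 * bits or the CR1 LFST bit.
 */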
static int spi_ingenic_prepare_message(struct spi_controller *ctlr,
				       struct spi_message *message)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = message->spi;
	unsigned int cs = REG_SSICR1_FRMHL << spi_get_chipselect(spi, 0);
	unsigned int ssicr0_mask = REG_SSICR0_LOOP | REG_SSICR0_FSEL;
	unsigned int ssicr1_mask = REG_SSICR1_PHA | REG_SSICR1_POL | cs;
	unsigned int ssicr0 = 0, ssicr1 = 0;

	if (priv->soc_info->has_trendian) {
		ssicr0_mask |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;

		if (spi->mode & SPI_LSB_FIRST)
			ssicr0 |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;
	} else {
		ssicr1_mask |= REG_SSICR1_LFST;

		if (spi->mode & SPI_LSB_FIRST)
			ssicr1 |= REG_SSICR1_LFST;
	}

	if (spi->mode & SPI_LOOP)
		ssicr0 |= REG_SSICR0_LOOP;
	if (spi_get_chipselect(spi, 0))
		ssicr0 |= REG_SSICR0_FSEL;

	if (spi->mode & SPI_CPHA)
		ssicr1 |= REG_SSICR1_PHA;
	if (spi->mode & SPI_CPOL)
		ssicr1 |= REG_SSICR1_POL;
	if (spi->mode & SPI_CS_HIGH)
		ssicr1 |= cs;

	regmap_update_bits(priv->map, REG_SSICR0, ssicr0_mask, ssicr0);
	regmap_update_bits(priv->map, REG_SSICR1, ssicr1_mask, ssicr1);

	return 0;
}

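/*
 * Bring the controller up for a run of messages: enable the clock, reset
 * CR0 to its EACLRUN default, clear CR1 and the status register, then set
 * the SSI enable bit.  spi_ingenic_unprepare_hardware() reverses this once
 * the message queue drains.
 */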
static int spi_ingenic_prepare_hardware(struct spi_controller *ctlr)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	regmap_write(priv->map, REG_SSICR0, REG_SSICR0_EACLRUN);
	regmap_write(priv->map, REG_SSICR1, 0);
	regmap_write(priv->map, REG_SSISR, 0);
	regmap_set_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);

	return 0;
}

static int spi_ingenic_unprepare_hardware(struct spi_controller *ctlr)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);

	regmap_clear_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);

	clk_disable_unprepare(priv->clk);

	return 0;
}

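/*
 * Allow DMA only when the transfer length fits in max_sg_burst FIFO-sized
 * chunks; a max_sg_burst of 0 means the DMA engine imposes no limit.
 */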
static bool spi_ingenic_can_dma(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(ctlr->dma_tx, &caps);
	if (ret) {
		dev_err(&spi->dev, "Unable to get slave caps: %d\n", ret);
		return false;
	}

	return !caps.max_sg_burst ||
	       xfer->len <= caps.max_sg_burst * SPI_INGENIC_FIFO_SIZE;
}

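/*
 * Request the "tx" and "rx" DMA channels.  A failure is treated as
 * non-fatal by the caller, which falls back to PIO; any channel that was
 * obtained is released by spi_ingenic_release_dma() via a devm action.
 */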
static int spi_ingenic_request_dma(struct spi_controller *ctlr,
				   struct device *dev)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);
	ctlr->dma_tx = chan;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);
	ctlr->dma_rx = chan;

	ctlr->can_dma = spi_ingenic_can_dma;

	return 0;
}

static void spi_ingenic_release_dma(void *data)
{
	struct spi_controller *ctlr = data;

	if (ctlr->dma_tx)
		dma_release_channel(ctlr->dma_tx);
	if (ctlr->dma_rx)
		dma_release_channel(ctlr->dma_rx);
}

static const struct regmap_config spi_ingenic_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = REG_SSIGR,
};

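/*
 * Probe: all resources (controller, clock, MMIO regmap, regmap field, DMA
 * channels) are devm-managed, so no remove callback is needed.  The number
 * of chip selects comes from the optional "num-cs" property, defaulting to
 * the SoC's native chip-select count.
 */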
static int spi_ingenic_probe(struct platform_device *pdev)
{
	const struct jz_soc_info *pdata;
	struct device *dev = &pdev->dev;
	struct spi_controller *ctlr;
	struct ingenic_spi *priv;
	void __iomem *base;
	int num_cs, ret;

	pdata = of_device_get_match_data(dev);
	if (!pdata) {
		dev_err(dev, "Missing platform data.\n");
		return -EINVAL;
	}

	ctlr = devm_spi_alloc_host(dev, sizeof(*priv));
	if (!ctlr) {
		dev_err(dev, "Unable to allocate SPI controller.\n");
		return -ENOMEM;
	}

	priv = spi_controller_get_devdata(ctlr);
	priv->soc_info = pdata;

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		return dev_err_probe(dev, PTR_ERR(priv->clk),
				     "Unable to get clock.\n");
	}

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &priv->mem_res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	priv->map = devm_regmap_init_mmio(dev, base, &spi_ingenic_regmap_config);
	if (IS_ERR(priv->map))
		return PTR_ERR(priv->map);

	priv->flen_field = devm_regmap_field_alloc(dev, priv->map,
						   pdata->flen_field);
	if (IS_ERR(priv->flen_field))
		return PTR_ERR(priv->flen_field);

	if (device_property_read_u32(dev, "num-cs", &num_cs))
		num_cs = pdata->max_native_cs;

	platform_set_drvdata(pdev, ctlr);

	ctlr->prepare_transfer_hardware = spi_ingenic_prepare_hardware;
	ctlr->unprepare_transfer_hardware = spi_ingenic_unprepare_hardware;
	ctlr->prepare_message = spi_ingenic_prepare_message;
	ctlr->set_cs = spi_ingenic_set_cs;
	ctlr->transfer_one = spi_ingenic_transfer_one;
	ctlr->mode_bits = SPI_MODE_3 | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH;
	ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
	ctlr->max_dma_len = SPI_INGENIC_FIFO_SIZE;
	ctlr->bits_per_word_mask = pdata->bits_per_word_mask;
	ctlr->min_speed_hz = 7200;
	ctlr->max_speed_hz = pdata->max_speed_hz;
	ctlr->use_gpio_descriptors = true;
	ctlr->max_native_cs = pdata->max_native_cs;
	ctlr->num_chipselect = num_cs;
	ctlr->dev.of_node = pdev->dev.of_node;

	if (spi_ingenic_request_dma(ctlr, dev))
		dev_warn(dev, "DMA not available.\n");

	ret = devm_add_action_or_reset(dev, spi_ingenic_release_dma, ctlr);
	if (ret) {
		dev_err(dev, "Unable to add action.\n");
		return ret;
	}

	ret = devm_spi_register_controller(dev, ctlr);
	if (ret)
		dev_err(dev, "Unable to register SPI controller.\n");

	return ret;
}

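/*
 * Per-SoC parameters: the JZ4750 has a narrower frame-length field (CR1
 * bits 4..7) and no CR0 TENDIAN/RENDIAN controls, hence at most 17 bits
 * per word and LSB-first handled through the CR1 LFST bit; the later SoCs
 * support 2-32 bits per word and the CR0 endianness bits.
 */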
static const struct jz_soc_info jz4750_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 17),
	.flen_field = REG_FIELD(REG_SSICR1, 4, 7),
	.has_trendian = false,

	.max_speed_hz = 54000000,
	.max_native_cs = 2,
};

static const struct jz_soc_info jz4780_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
	.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
	.has_trendian = true,

	.max_speed_hz = 54000000,
	.max_native_cs = 2,
};

static const struct jz_soc_info x1000_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
	.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
	.has_trendian = true,

	.max_speed_hz = 50000000,
	.max_native_cs = 2,
};

static const struct jz_soc_info x2000_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
	.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
	.has_trendian = true,

	.max_speed_hz = 50000000,
	.max_native_cs = 1,
};

static const struct of_device_id spi_ingenic_of_match[] = {
	{ .compatible = "ingenic,jz4750-spi", .data = &jz4750_soc_info },
	{ .compatible = "ingenic,jz4775-spi", .data = &jz4780_soc_info },
	{ .compatible = "ingenic,jz4780-spi", .data = &jz4780_soc_info },
	{ .compatible = "ingenic,x1000-spi", .data = &x1000_soc_info },
	{ .compatible = "ingenic,x2000-spi", .data = &x2000_soc_info },
	{}
};
MODULE_DEVICE_TABLE(of, spi_ingenic_of_match);

static struct platform_driver spi_ingenic_driver = {
	.driver = {
		.name = "spi-ingenic",
		.of_match_table = spi_ingenic_of_match,
	},
	.probe = spi_ingenic_probe,
};

module_platform_driver(spi_ingenic_driver);
MODULE_DESCRIPTION("SPI bus driver for the Ingenic SoCs");
MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>");
MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
MODULE_LICENSE("GPL");