1 // SPDX-License-Identifier: GPL-2.0-or-later
2 // Copyright (C) IBM Corporation 2020
4 #include <linux/bitfield.h>
5 #include <linux/bits.h>
7 #include <linux/jiffies.h>
8 #include <linux/kernel.h>
9 #include <linux/module.h>
11 #include <linux/spi/spi.h>
/* FSI engine ID for the SPI controller, plus root control register 8 and
 * the mux bits checked by fsi_spi_check_mux() before touching the bus. */
13 #define FSI_ENGID_SPI 0x23
14 #define FSI_MBOX_ROOT_CTRL_8 0x2860
15 #define FSI_MBOX_ROOT_CTRL_8_SPI_MUX 0xf0000000
/* FSI2SPI engine registers: the 64-bit SPI controller registers are
 * reached indirectly through the two 32-bit DATA registers plus a CMD
 * register; bit 31 of a command selects a write. */
17 #define FSI2SPI_DATA0 0x00
18 #define FSI2SPI_DATA1 0x04
19 #define FSI2SPI_CMD 0x08
20 #define FSI2SPI_CMD_WRITE BIT(31)
21 #define FSI2SPI_RESET 0x18
22 #define FSI2SPI_STATUS 0x1c
23 #define FSI2SPI_STATUS_ANY_ERROR BIT(31)
24 #define FSI2SPI_IRQ 0x20
/* Offset of the SPI controller block within the CFAM address space, the
 * init poll timeout, and per-transfer size limits ("restricted"
 * controllers cannot use sequencer loops; see fsi_spi_probe()). */
26 #define SPI_FSI_BASE 0x70000
27 #define SPI_FSI_INIT_TIMEOUT_MS 1000
28 #define SPI_FSI_MAX_XFR_SIZE 2048
29 #define SPI_FSI_MAX_XFR_SIZE_RESTRICTED 8
/* SPI controller register offsets (accessed via FSI2SPI) and fields. */
31 #define SPI_FSI_ERROR 0x0
/* Loop counter config used with the sequencer BRANCH op; N2_* bits are
 * additionally required for looped RX (see fsi_spi_sequence_transfer). */
32 #define SPI_FSI_COUNTER_CFG 0x1
33 #define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32)
34 #define SPI_FSI_COUNTER_CFG_N2_RX BIT_ULL(8)
35 #define SPI_FSI_COUNTER_CFG_N2_TX BIT_ULL(9)
36 #define SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
37 #define SPI_FSI_COUNTER_CFG_N2_RELOAD BIT_ULL(11)
38 #define SPI_FSI_CFG1 0x2
39 #define SPI_FSI_CLOCK_CFG 0x3
40 #define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
41 #define SPI_FSI_CLOCK_CFG_ECC_DISABLE (BIT_ULL(35) | BIT_ULL(33))
42 #define SPI_FSI_CLOCK_CFG_RESET1 (BIT_ULL(36) | BIT_ULL(38))
43 #define SPI_FSI_CLOCK_CFG_RESET2 (BIT_ULL(37) | BIT_ULL(39))
44 #define SPI_FSI_CLOCK_CFG_MODE (BIT_ULL(41) | BIT_ULL(42))
45 #define SPI_FSI_CLOCK_CFG_SCK_RECV_DEL GENMASK_ULL(51, 44)
/* NOTE: SCK_NO_DEL (bit 51) is the top bit of the SCK_RECV_DEL field. */
46 #define SPI_FSI_CLOCK_CFG_SCK_NO_DEL BIT_ULL(51)
47 #define SPI_FSI_CLOCK_CFG_SCK_DIV GENMASK_ULL(63, 52)
48 #define SPI_FSI_MMAP 0x4
49 #define SPI_FSI_DATA_TX 0x5
50 #define SPI_FSI_DATA_RX 0x6
/* Sequencer program register and its one-byte opcodes: select slave,
 * shift out/in up to 15 bytes, and branch back to a prior slot. */
51 #define SPI_FSI_SEQUENCE 0x7
52 #define SPI_FSI_SEQUENCE_STOP 0x00
53 #define SPI_FSI_SEQUENCE_SEL_SLAVE(x) (0x10 | ((x) & 0xf))
54 #define SPI_FSI_SEQUENCE_SHIFT_OUT(x) (0x30 | ((x) & 0xf))
55 #define SPI_FSI_SEQUENCE_SHIFT_IN(x) (0x40 | ((x) & 0xf))
56 #define SPI_FSI_SEQUENCE_COPY_DATA_TX 0xc0
57 #define SPI_FSI_SEQUENCE_BRANCH(x) (0xe0 | ((x) & 0xf))
/* Status register: error summary, sequencer state and TDR/RDR flags. */
58 #define SPI_FSI_STATUS 0x8
59 #define SPI_FSI_STATUS_ERROR \
60 (GENMASK_ULL(31, 21) | GENMASK_ULL(15, 12))
61 #define SPI_FSI_STATUS_SEQ_STATE GENMASK_ULL(55, 48)
62 #define SPI_FSI_STATUS_SEQ_STATE_IDLE BIT_ULL(48)
63 #define SPI_FSI_STATUS_TDR_UNDERRUN BIT_ULL(57)
64 #define SPI_FSI_STATUS_TDR_OVERRUN BIT_ULL(58)
65 #define SPI_FSI_STATUS_TDR_FULL BIT_ULL(59)
66 #define SPI_FSI_STATUS_RDR_UNDERRUN BIT_ULL(61)
67 #define SPI_FSI_STATUS_RDR_OVERRUN BIT_ULL(62)
68 #define SPI_FSI_STATUS_RDR_FULL BIT_ULL(63)
/* Conditions treated as fatal for a transfer (note: TDR_UNDERRUN and
 * RDR_FULL are deliberately excluded — they occur in normal operation). */
69 #define SPI_FSI_STATUS_ANY_ERROR \
70 (SPI_FSI_STATUS_ERROR | \
71 SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \
72 SPI_FSI_STATUS_RDR_OVERRUN)
73 #define SPI_FSI_PORT_CTRL 0x9
/* Per-controller driver state. NOTE(review): the enclosing
 * "struct fsi_spi {" line and its remaining fields (base, restricted,
 * max_xfr_size are referenced elsewhere) are not visible in this
 * listing — confirm against the full source. */
76 struct device *dev; /* SPI controller device */
77 struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
/* Builder for the 8-byte sequencer program; fields are elided here, but
 * fsi_spi_sequence_add() uses ->bit and ->data. */
83 struct fsi_spi_sequence {
/*
 * Verify via FSI mailbox root control register 8 that the bus mux is set
 * so the SPI engine owns the SPI bus before we drive it.
 *
 * NOTE(review): this listing is non-contiguous (the embedded original
 * line numbers skip); declarations, error returns and closing braces are
 * elided here and in the functions below. Comments describe only the
 * visible code.
 */
88 static int fsi_spi_check_mux(struct fsi_device *fsi, struct device *dev)
92 __be32 root_ctrl_8_be;
/* Read the big-endian root control register directly from the slave. */
94 rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8_be,
95 sizeof(root_ctrl_8_be));
99 root_ctrl_8 = be32_to_cpu(root_ctrl_8_be);
100 dev_dbg(dev, "Root control register 8: %08x\n", root_ctrl_8);
/* The mux is ours only if all four mux bits are set. */
101 if ((root_ctrl_8 & FSI_MBOX_ROOT_CTRL_8_SPI_MUX) ==
102 FSI_MBOX_ROOT_CTRL_8_SPI_MUX)
/*
 * Read the FSI2SPI engine status register and report an error (with the
 * raw status logged) when the error summary bit is set.
 */
108 static int fsi_spi_check_status(struct fsi_spi *ctx)
114 rc = fsi_device_read(ctx->fsi, FSI2SPI_STATUS, &sts_be,
119 sts = be32_to_cpu(sts_be);
120 if (sts & FSI2SPI_STATUS_ANY_ERROR) {
121 dev_err(ctx->dev, "Error with FSI2SPI interface: %08x.\n", sts);
/*
 * Read a 64-bit SPI controller register through the FSI2SPI indirection:
 * issue the (base + offset) command, check engine status, then read the
 * two 32-bit big-endian data words and assemble them into *value.
 */
128 static int fsi_spi_read_reg(struct fsi_spi *ctx, u32 offset, u64 *value)
133 u32 cmd = offset + ctx->base;
/* A register address colliding with the write bit cannot be issued. */
137 if (cmd & FSI2SPI_CMD_WRITE)
140 cmd_be = cpu_to_be32(cmd);
141 rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
145 rc = fsi_spi_check_status(ctx);
/* DATA0 supplies the upper 32 bits, DATA1 the lower 32 bits. */
149 rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA0, &data_be,
154 *value |= (u64)be32_to_cpu(data_be) << 32;
156 rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA1, &data_be,
161 *value |= (u64)be32_to_cpu(data_be);
162 dev_dbg(ctx->dev, "Read %02x[%016llx].\n", offset, *value);
/*
 * Write a 64-bit SPI controller register through the FSI2SPI
 * indirection: load DATA0/DATA1 with the big-endian halves, then issue
 * the command with the write bit set and check the engine status.
 */
167 static int fsi_spi_write_reg(struct fsi_spi *ctx, u32 offset, u64 value)
172 u32 cmd = offset + ctx->base;
/* As in fsi_spi_read_reg: reject addresses that alias the write bit. */
174 if (cmd & FSI2SPI_CMD_WRITE)
177 dev_dbg(ctx->dev, "Write %02x[%016llx].\n", offset, value);
179 data_be = cpu_to_be32(upper_32_bits(value));
180 rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA0, &data_be,
185 data_be = cpu_to_be32(lower_32_bits(value));
186 rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA1, &data_be,
/* The command itself triggers the register write on the engine side. */
191 cmd_be = cpu_to_be32(cmd | FSI2SPI_CMD_WRITE);
192 rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
196 return fsi_spi_check_status(ctx);
/*
 * Unpack up to 8 bytes received in the 64-bit RX data register into
 * rx[], most-significant byte first; returns the count copied (the
 * return statement is elided in this listing).
 */
199 static int fsi_spi_data_in(u64 in, u8 *rx, int len)
202 int num_bytes = min(len, 8);
204 for (i = 0; i < num_bytes; ++i)
/* Data occupies the low num_bytes of "in"; rx[0] takes the MSB. */
205 rx[i] = (u8)(in >> (8 * ((num_bytes - 1) - i)));
/*
 * Pack up to 8 bytes of tx[] into the 64-bit TX register image at *out;
 * returns the number of bytes consumed. tx[0] lands at the highest byte
 * address of *out — presumably the most-significant byte on the
 * little-endian hosts this targets, mirroring fsi_spi_data_in()'s
 * MSB-first order (TODO confirm intended endianness).
 */
210 static int fsi_spi_data_out(u64 *out, const u8 *tx, int len)
213 int num_bytes = min(len, 8);
214 u8 *out_bytes = (u8 *)out;
216 /* Unused bytes of the tx data should be 0. */
219 for (i = 0; i < num_bytes; ++i)
220 out_bytes[8 - (i + 1)] = tx[i];
/*
 * Reset the SPI controller: the two-step RESET1/RESET2 writes to the
 * clock config register, then clear the status register.
 */
225 static int fsi_spi_reset(struct fsi_spi *ctx)
229 dev_dbg(ctx->dev, "Resetting SPI controller.\n");
231 rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
232 SPI_FSI_CLOCK_CFG_RESET1);
236 rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
237 SPI_FSI_CLOCK_CFG_RESET2);
241 return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
/*
 * Append one opcode byte to the 8-byte sequence image in seq->data at
 * the current bit position and return the index of the slot just
 * filled, for later use by SPI_FSI_SEQUENCE_BRANCH().
 */
244 static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
247 * Add the next byte of instruction to the 8-byte sequence register.
248 * Then decrement the counter so that the next instruction will go in
249 * the right place. Return the index of the slot we just filled in the
252 seq->data |= (u64)val << seq->bit;
/* NOTE(review): the seq->bit decrement is elided in this listing. */
255 return ((64 - seq->bit) / 8) - 2;
/* Prepare a sequence builder for use. NOTE(review): the body is elided
 * in this listing — presumably it zeroes seq->data and sets the initial
 * bit position consumed by fsi_spi_sequence_add(); confirm against the
 * full source. */
258 static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
/*
 * Encode one spi_transfer as sequencer opcodes: a shift-in or shift-out
 * of up to 8 bytes, optionally repeated via the loop counter plus a
 * branch opcode, then a final shorter shift for the remainder.
 *
 * NOTE(review): if transfer->len were 0, len would be 0 and
 * "transfer->len % len" would divide by zero — confirm callers never
 * submit zero-length transfers.
 */
264 static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
265 struct fsi_spi_sequence *seq,
266 struct spi_transfer *transfer)
/* The sequencer shifts at most 8 bytes per op; loop for longer ones. */
272 u8 len = min(transfer->len, 8U);
273 u8 rem = transfer->len % len;
275 loops = transfer->len / len;
277 if (transfer->tx_buf) {
278 val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
279 idx = fsi_spi_sequence_add(seq, val);
/* Re-encode the remainder as a shift op of the leftover length. */
282 rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
283 } else if (transfer->rx_buf) {
284 val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
285 idx = fsi_spi_sequence_add(seq, val);
288 rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
/* Restricted controllers cannot use loop/branch sequencer features. */
293 if (ctx->restricted && loops > 1) {
295 "Transfer too large; no branches permitted.\n");
/* Program loops-1 extra iterations and branch back to the shift op
 * just added at slot "idx". */
300 u64 cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
302 fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
/* Looped RX additionally needs the N2 counter behavior bits (the
 * implicit-TX bit is checked later in fsi_spi_transfer_data). */
304 if (transfer->rx_buf)
305 cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
306 SPI_FSI_COUNTER_CFG_N2_TX |
307 SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
308 SPI_FSI_COUNTER_CFG_N2_RELOAD;
310 rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
/* No looping required: make sure the counter register is cleared. */
314 fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
318 fsi_spi_sequence_add(seq, rem);
/*
 * Move the transfer's payload through the data registers: for TX, pack
 * up to 8 bytes at a time into DATA_TX and poll until the transmit data
 * register drains; for RX, poll until the receive data register fills
 * and unpack from DATA_RX. Any error bit in the status register causes
 * a controller reset.
 */
323 static int fsi_spi_transfer_data(struct fsi_spi *ctx,
324 struct spi_transfer *transfer)
330 if (transfer->tx_buf) {
334 const u8 *tx = transfer->tx_buf;
336 while (transfer->len > sent) {
337 nb = fsi_spi_data_out(&out, &tx[sent],
338 (int)transfer->len - sent);
340 rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, out);
/* Poll status until TDR is no longer full (or an error shows). */
345 rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
350 if (status & SPI_FSI_STATUS_ANY_ERROR) {
351 rc = fsi_spi_reset(ctx);
357 } while (status & SPI_FSI_STATUS_TDR_FULL);
361 } else if (transfer->rx_buf) {
364 u8 *rx = transfer->rx_buf;
366 rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
/* With the implicit-TX counter mode set, a dummy DATA_TX write is
 * issued first — presumably to start clocking data in; confirm
 * against the hardware workbook. */
370 if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
371 rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
376 while (transfer->len > recv) {
/* Poll status until RDR reports full (or an error shows). */
378 rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
383 if (status & SPI_FSI_STATUS_ANY_ERROR) {
384 rc = fsi_spi_reset(ctx);
390 } while (!(status & SPI_FSI_STATUS_RDR_FULL));
392 rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
396 recv += fsi_spi_data_in(in, &rx[recv],
397 (int)transfer->len - recv);
/*
 * Prepare the controller for a new transfer: wait up to
 * SPI_FSI_INIT_TIMEOUT_MS for the sequencer to go idle — resetting on
 * errors or stale TDR/RDR contents — then bring the clock configuration
 * to the wanted value if any relevant field differs.
 */
404 static int fsi_spi_transfer_init(struct fsi_spi *ctx)
410 u64 clock_cfg = 0ULL;
/* ECC off, no SCK receive delay, divider of 19. */
412 u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
413 SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
414 FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19);
416 end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS);
418 if (time_after(jiffies, end))
421 rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, &status);
425 seq_state = status & SPI_FSI_STATUS_SEQ_STATE;
/* Stale errors or leftover data: reset before starting the transfer. */
427 if (status & (SPI_FSI_STATUS_ANY_ERROR |
428 SPI_FSI_STATUS_TDR_FULL |
429 SPI_FSI_STATUS_RDR_FULL)) {
433 rc = fsi_spi_reset(ctx);
/* Keep polling until the sequencer is idle or fully stopped (0). */
440 } while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));
442 rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
/* Only rewrite the clock config when one of these fields differs. */
446 if ((clock_cfg & (SPI_FSI_CLOCK_CFG_MM_ENABLE |
447 SPI_FSI_CLOCK_CFG_ECC_DISABLE |
448 SPI_FSI_CLOCK_CFG_MODE |
449 SPI_FSI_CLOCK_CFG_SCK_RECV_DEL |
450 SPI_FSI_CLOCK_CFG_SCK_DIV)) != wanted_clock_cfg)
451 rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
/*
 * spi_controller->transfer_one_message hook. For each TX transfer,
 * build a sequencer program (select slave, shift out, optionally a
 * chained shift in from the following RX transfer, deselect), write it
 * to the SEQUENCE register and then move the data.
 */
457 static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
458 struct spi_message *mesg)
/* Sequencer slave numbering appears to be 1-based — hence the +1. */
461 u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
462 struct spi_transfer *transfer;
463 struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
/* Bail out if another master currently owns the SPI bus mux. */
465 rc = fsi_spi_check_mux(ctx->fsi, ctx->dev);
469 list_for_each_entry(transfer, &mesg->transfers, transfer_list) {
470 struct fsi_spi_sequence seq;
471 struct spi_transfer *next = NULL;
473 /* Sequencer must do shift out (tx) first. */
474 if (!transfer->tx_buf ||
475 transfer->len > (ctx->max_xfr_size + 8)) {
480 dev_dbg(ctx->dev, "Start tx of %d bytes.\n", transfer->len);
482 rc = fsi_spi_transfer_init(ctx);
486 fsi_spi_sequence_init(&seq);
487 fsi_spi_sequence_add(&seq, seq_slave);
489 rc = fsi_spi_sequence_transfer(ctx, &seq, transfer);
/* An RX transfer following this TX is chained into the same
 * sequence program. */
493 if (!list_is_last(&transfer->transfer_list,
495 next = list_next_entry(transfer, transfer_list);
497 /* Sequencer can only do shift in (rx) after tx. */
499 if (next->len > ctx->max_xfr_size) {
504 dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
507 rc = fsi_spi_sequence_transfer(ctx, &seq,
/* Deselect the slave at the end of the sequence. */
516 fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SEL_SLAVE(0));
518 rc = fsi_spi_write_reg(ctx, SPI_FSI_SEQUENCE, seq.data);
/* Program written; now move the TX data, then the chained RX. */
522 rc = fsi_spi_transfer_data(ctx, transfer);
527 rc = fsi_spi_transfer_data(ctx, next);
537 spi_finalize_current_message(ctlr);
/* spi_controller->max_transfer_size hook: report this controller's
 * per-transfer limit (2048, or 8 when restricted; set in probe). */
542 static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
544 struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);
546 return ctx->max_xfr_size;
/*
 * FSI device probe: after confirming the SPI mux, register one SPI
 * controller per available child node of the FSI2SPI engine. A failed
 * registration drops that controller but continues with the others;
 * probing fails only when no controller registered at all.
 */
549 static int fsi_spi_probe(struct device *dev)
552 struct device_node *np;
553 int num_controllers_registered = 0;
554 struct fsi_device *fsi = to_fsi_dev(dev);
556 rc = fsi_spi_check_mux(fsi, dev);
560 for_each_available_child_of_node(dev->of_node, np) {
563 struct spi_controller *ctlr;
/* Each child supplies its controller base address via "reg". */
565 if (of_property_read_u32(np, "reg", &base))
568 ctlr = spi_alloc_master(dev, sizeof(*ctx))
574 ctlr->dev.of_node = np;
/* Default to one chip select when the node has no children. */
575 ctlr->num_chipselect = of_get_available_child_count(np) ?: 1;
576 ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
577 ctlr->max_transfer_size = fsi_spi_max_transfer_size;
578 ctlr->transfer_one_message = fsi_spi_transfer_one_message;
580 ctx = spi_controller_get_devdata(ctlr);
581 ctx->dev = &ctlr->dev;
583 ctx->base = base + SPI_FSI_BASE;
/* Restricted controllers: 8-byte transfers, no sequencer loops. */
585 if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
586 ctx->restricted = true;
587 ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
589 ctx->restricted = false;
590 ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
/* Drop our reference if registration fails; keep going. */
593 rc = devm_spi_register_controller(dev, ctlr);
595 spi_controller_put(ctlr);
597 num_controllers_registered++;
600 if (!num_controllers_registered)
/* FSI engine-ID match table: bind to any version of the SPI engine. */
606 static const struct fsi_device_id fsi_spi_ids[] = {
607 { FSI_ENGID_SPI, FSI_VERSION_ANY },
610 MODULE_DEVICE_TABLE(fsi, fsi_spi_ids);
/* Driver registration on the FSI bus; module_fsi_driver() generates the
 * module init/exit boilerplate. */
612 static struct fsi_driver fsi_spi_driver = {
613 .id_table = fsi_spi_ids,
616 .bus = &fsi_bus_type,
617 .probe = fsi_spi_probe,
620 module_fsi_driver(fsi_spi_driver);
622 MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>");
623 MODULE_DESCRIPTION("FSI attached SPI controller");
624 MODULE_LICENSE("GPL");