Merge tag 'bcachefs-2024-05-24' of https://evilpiepirate.org/git/bcachefs
[linux-2.6-block.git] / drivers / spi / spi-qup.c
CommitLineData
ce718dfb 1// SPDX-License-Identifier: GPL-2.0-only
64ff247a
II
2/*
3 * Copyright (c) 2008-2014, The Linux foundation. All rights reserved.
64ff247a
II
4 */
5
6#include <linux/clk.h>
7#include <linux/delay.h>
8#include <linux/err.h>
ecdaa947 9#include <linux/interconnect.h>
64ff247a
II
10#include <linux/interrupt.h>
11#include <linux/io.h>
12#include <linux/list.h>
13#include <linux/module.h>
14#include <linux/of.h>
15#include <linux/platform_device.h>
287fcdaa 16#include <linux/pm_opp.h>
64ff247a
II
17#include <linux/pm_runtime.h>
18#include <linux/spi/spi.h>
612762e8
AG
19#include <linux/dmaengine.h>
20#include <linux/dma-mapping.h>
64ff247a
II
21
22#define QUP_CONFIG 0x0000
23#define QUP_STATE 0x0004
24#define QUP_IO_M_MODES 0x0008
25#define QUP_SW_RESET 0x000c
26#define QUP_OPERATIONAL 0x0018
27#define QUP_ERROR_FLAGS 0x001c
28#define QUP_ERROR_FLAGS_EN 0x0020
29#define QUP_OPERATIONAL_MASK 0x0028
30#define QUP_HW_VERSION 0x0030
31#define QUP_MX_OUTPUT_CNT 0x0100
32#define QUP_OUTPUT_FIFO 0x0110
33#define QUP_MX_WRITE_CNT 0x0150
34#define QUP_MX_INPUT_CNT 0x0200
35#define QUP_MX_READ_CNT 0x0208
36#define QUP_INPUT_FIFO 0x0218
37
38#define SPI_CONFIG 0x0300
39#define SPI_IO_CONTROL 0x0304
40#define SPI_ERROR_FLAGS 0x0308
41#define SPI_ERROR_FLAGS_EN 0x030c
42
43/* QUP_CONFIG fields */
44#define QUP_CONFIG_SPI_MODE (1 << 8)
45#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
46#define QUP_CONFIG_NO_INPUT BIT(7)
47#define QUP_CONFIG_NO_OUTPUT BIT(6)
48#define QUP_CONFIG_N 0x001f
49
50/* QUP_STATE fields */
51#define QUP_STATE_VALID BIT(2)
52#define QUP_STATE_RESET 0
53#define QUP_STATE_RUN 1
54#define QUP_STATE_PAUSE 3
55#define QUP_STATE_MASK 3
56#define QUP_STATE_CLEAR 2
57
58#define QUP_HW_VERSION_2_1_1 0x20010001
59
60/* QUP_IO_M_MODES fields */
61#define QUP_IO_M_PACK_EN BIT(15)
62#define QUP_IO_M_UNPACK_EN BIT(14)
63#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
64#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
65#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
66#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
67
68#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
69#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
70#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
71#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)
72
73#define QUP_IO_M_MODE_FIFO 0
74#define QUP_IO_M_MODE_BLOCK 1
75#define QUP_IO_M_MODE_DMOV 2
76#define QUP_IO_M_MODE_BAM 3
77
78/* QUP_OPERATIONAL fields */
7538726f
VN
79#define QUP_OP_IN_BLOCK_READ_REQ BIT(13)
80#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12)
64ff247a
II
81#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
82#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
83#define QUP_OP_IN_SERVICE_FLAG BIT(9)
84#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
85#define QUP_OP_IN_FIFO_FULL BIT(7)
86#define QUP_OP_OUT_FIFO_FULL BIT(6)
87#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
88#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)
89
90/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
91#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
92#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
93#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
94#define QUP_ERROR_INPUT_OVER_RUN BIT(2)
95
96/* SPI_CONFIG fields */
97#define SPI_CONFIG_HS_MODE BIT(10)
98#define SPI_CONFIG_INPUT_FIRST BIT(9)
99#define SPI_CONFIG_LOOPBACK BIT(8)
100
101/* SPI_IO_CONTROL fields */
102#define SPI_IO_C_FORCE_CS BIT(11)
103#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
104#define SPI_IO_C_MX_CS_MODE BIT(8)
105#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
106#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
107#define SPI_IO_C_CS_SELECT_MASK 0x000c
108#define SPI_IO_C_TRISTATE_CS BIT(1)
109#define SPI_IO_C_NO_TRI_STATE BIT(0)
110
111/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
112#define SPI_ERROR_CLK_OVER_RUN BIT(1)
113#define SPI_ERROR_CLK_UNDER_RUN BIT(0)
114
115#define SPI_NUM_CHIPSELECTS 4
116
5dc47fef 117#define SPI_MAX_XFER (SZ_64K - 64)
612762e8 118
64ff247a
II
/* high speed mode is when bus rate is greater than 26MHz */
120#define SPI_HS_MIN_RATE 26000000
121#define SPI_MAX_RATE 50000000
122
123#define SPI_DELAY_THRESHOLD 1
124#define SPI_DELAY_RETRY 10
125
ecdaa947
SG
126#define SPI_BUS_WIDTH 8
127
64ff247a
II
/* Per-controller driver state for one QUP SPI block. */
struct spi_qup {
	void __iomem *base;		/* mapped QUP register block */
	struct device *dev;
	struct clk *cclk;		/* core clock */
	struct clk *iclk;		/* interface clock */
	struct icc_path *icc_path;	/* interconnect to RAM */
	int irq;
	spinlock_t lock;		/* guards xfer/error/rx_bytes/tx_bytes */

	/* FIFO/block geometry probed from QUP_IO_M_MODES, in bytes */
	int in_fifo_sz;
	int out_fifo_sz;
	int in_blk_sz;
	int out_blk_sz;

	struct spi_transfer *xfer;	/* transfer currently in flight */
	struct completion done;		/* signalled by IRQ/DMA callback */
	int error;			/* first error seen for this xfer */
	int w_size;	/* bytes per SPI word */
	int n_words;	/* words in the current (sub-)transfer */
	int tx_bytes;	/* bytes already pushed to the output FIFO */
	int rx_bytes;	/* bytes already drained from the input FIFO */
	const u8 *tx_buf;
	u8 *rx_buf;
	int qup_v1;	/* nonzero on QUP v1 hardware (register diffs) */

	int mode;	/* QUP_IO_M_MODE_* chosen for the current xfer */
	struct dma_slave_config rx_conf;
	struct dma_slave_config tx_conf;

	u32 bw_speed_hz;	/* speed currently voted on icc_path */
};
159
3b5ea2c9
VN
160static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);
161
7538726f
VN
162static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
163{
164 u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
165
166 return (opflag & flag) != 0;
167}
168
32ecab99
VN
169static inline bool spi_qup_is_dma_xfer(int mode)
170{
171 if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
172 return true;
173
174 return false;
175}
64ff247a 176
5dc47fef
VN
177/* get's the transaction size length */
178static inline unsigned int spi_qup_len(struct spi_qup *controller)
179{
180 return controller->n_words * controller->w_size;
181}
182
64ff247a
II
183static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
184{
185 u32 opstate = readl_relaxed(controller->base + QUP_STATE);
186
187 return opstate & QUP_STATE_VALID;
188}
189
ecdaa947
SG
190static int spi_qup_vote_bw(struct spi_qup *controller, u32 speed_hz)
191{
192 u32 needed_peak_bw;
193 int ret;
194
195 if (controller->bw_speed_hz == speed_hz)
196 return 0;
197
198 needed_peak_bw = Bps_to_icc(speed_hz * SPI_BUS_WIDTH);
199 ret = icc_set_bw(controller->icc_path, 0, needed_peak_bw);
200 if (ret)
201 return ret;
202
203 controller->bw_speed_hz = speed_hz;
204 return 0;
205}
206
64ff247a
II
/*
 * Move the QUP state machine to @state (RESET/RUN/PAUSE).  The state
 * register is polled for QUP_STATE_VALID both before and after the
 * write; returns 0 on success or -EIO if the hardware never settles.
 */
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
	unsigned long loop;
	u32 cur_state;

	/* wait for any in-progress transition to settle */
	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	if (loop)
		dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
			loop, state);

	cur_state = readl_relaxed(controller->base + QUP_STATE);
	/*
	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
	 * of (b10) are required
	 */
	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
	    (state == QUP_STATE_RESET)) {
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		writel_relaxed(cur_state, controller->base + QUP_STATE);
	}

	/* wait for the requested transition to take effect */
	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	return 0;
}
251
/*
 * Drain @num_words words from the input FIFO, unpacking each register
 * word most-significant-byte first into controller->rx_buf and advancing
 * controller->rx_bytes.  A NULL rx_buf still consumes the FIFO words
 * (discarded-rx case).
 */
static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
{
	u8 *rx_buf = controller->rx_buf;
	int i, shift, num_bytes;
	u32 word;

	for (; num_words; num_words--) {

		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

		/* last word of a transfer may carry fewer than w_size bytes */
		num_bytes = min_t(int, spi_qup_len(controller) -
				       controller->rx_bytes,
				       controller->w_size);

		if (!rx_buf) {
			controller->rx_bytes += num_bytes;
			continue;
		}

		for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
			/*
			 * The data format depends on bytes per SPI word:
			 * 4 bytes: 0x12345678
			 * 2 bytes: 0x00001234
			 * 1 byte : 0x00000012
			 */
			shift = BITS_PER_BYTE;
			shift *= (controller->w_size - i - 1);
			rx_buf[controller->rx_bytes] = word >> shift;
		}
	}
}
284
/*
 * Service an input interrupt: drain what the hardware has made available
 * (one block at a time in BLOCK mode, single words in FIFO mode) until
 * the transfer's remaining word count is exhausted or no more data is
 * ready.  *opflags is refreshed on completion so the caller can observe
 * QUP_OP_MAX_INPUT_DONE_FLAG.
 */
static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
{
	u32 remainder, words_per_block, num_words;
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;

	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
				 controller->w_size);
	words_per_block = controller->in_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		if (!remainder)
			goto exit;

		if (is_block_mode) {
			num_words = (remainder > words_per_block) ?
					words_per_block : remainder;
		} else {
			if (!spi_qup_is_flag_set(controller,
						 QUP_OP_IN_FIFO_NOT_EMPTY))
				break;

			num_words = 1;
		}

		/* read up to the maximum transfer size available */
		spi_qup_read_from_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_IN_BLOCK_READ_REQ))
			break;

	} while (remainder);

	/*
	 * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
	 * reads, it has to be cleared again at the very end.  However, be sure
	 * to refresh opflags value because MAX_INPUT_DONE_FLAG may now be
	 * present and this is used to determine if transaction is complete
	 */
exit:
	if (!remainder) {
		*opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
			writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
				       controller->base + QUP_OPERATIONAL);
	}
}
339
/*
 * Pack up to @num_words words from controller->tx_buf into the output
 * FIFO, most-significant byte first within each register word, and
 * advance controller->tx_bytes.  A NULL tx_buf pushes zero words so
 * that rx-only transfers still generate clocks.
 */
static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
{
	const u8 *tx_buf = controller->tx_buf;
	int i, num_bytes;
	u32 word, data;

	for (; num_words; num_words--) {
		word = 0;

		/* last word of a transfer may carry fewer than w_size bytes */
		num_bytes = min_t(int, spi_qup_len(controller) -
				       controller->tx_bytes,
				       controller->w_size);
		if (tx_buf)
			for (i = 0; i < num_bytes; i++) {
				data = tx_buf[controller->tx_bytes + i];
				word |= data << (BITS_PER_BYTE * (3 - i));
			}

		controller->tx_bytes += num_bytes;

		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
	}
}
363
612762e8
AG
364static void spi_qup_dma_done(void *data)
365{
366 struct spi_qup *qup = data;
367
368 complete(&qup->done);
369}
370
/*
 * Service an output interrupt: fill the output FIFO (one block at a time
 * in BLOCK mode, single words in FIFO mode) until the remaining words
 * have been queued or the hardware can accept no more.
 */
static void spi_qup_write(struct spi_qup *controller)
{
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
	u32 remainder, words_per_block, num_words;

	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
				 controller->w_size);
	words_per_block = controller->out_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		/* make sure the interrupt is valid */
		if (!remainder)
			return;

		if (is_block_mode) {
			num_words = (remainder > words_per_block) ?
				words_per_block : remainder;
		} else {
			if (spi_qup_is_flag_set(controller,
						QUP_OP_OUT_FIFO_FULL))
				break;

			num_words = 1;
		}

		spi_qup_write_to_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_OUT_BLOCK_WRITE_REQ))
			break;

	} while (remainder);
}
411
/*
 * Prepare and submit a slave_sg descriptor on the tx (DMA_MEM_TO_DEV)
 * or rx channel.  @callback may be NULL; when set it fires on descriptor
 * completion with the spi_qup as its argument.  Returns 0 on success or
 * a negative errno from descriptor prep/submit.
 */
static int spi_qup_prep_sg(struct spi_controller *host, struct scatterlist *sgl,
			   unsigned int nents, enum dma_transfer_direction dir,
			   dma_async_tx_callback callback)
{
	struct spi_qup *qup = spi_controller_get_devdata(host);
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	if (dir == DMA_MEM_TO_DEV)
		chan = host->dma_tx;
	else
		chan = host->dma_rx;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	/* prep may return NULL or an ERR_PTR depending on the DMA driver */
	if (IS_ERR_OR_NULL(desc))
		return desc ? PTR_ERR(desc) : -EINVAL;

	desc->callback = callback;
	desc->callback_param = qup;

	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}
438
/* Abort any in-flight DMA on the channels used by @xfer (error path). */
static void spi_qup_dma_terminate(struct spi_controller *host,
				  struct spi_transfer *xfer)
{
	if (xfer->tx_buf)
		dmaengine_terminate_all(host->dma_tx);
	if (xfer->rx_buf)
		dmaengine_terminate_all(host->dma_rx);
}
447
5884e17e
VN
/*
 * Walk @sgl accumulating DMA lengths until adding the next entry would
 * exceed @max or wrap the u32 total.  *nents is incremented once per
 * entry consumed; returns the total byte count covered by those entries.
 */
static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
				     u32 *nents)
{
	struct scatterlist *sg;
	u32 total = 0;

	for (sg = sgl; sg; sg = sg_next(sg)) {
		unsigned int len = sg_dma_len(sg);

		/* check for overflow as well as limit */
		if (((total + len) < total) || ((total + len) > max))
			break;

		total += len;
		(*nents)++;
	}

	return total;
}
467
/*
 * Run @xfer via DMA, splitting the scatterlists into SPI_MAX_XFER-sized
 * chunks.  For each chunk: reprogram the QUP, set RUN, queue rx/tx
 * descriptors and wait for the completion raised by spi_qup_dma_done()
 * (rx preferred when both directions are active).  Returns 0 or a
 * negative errno; on timeout the caller tears down DMA.
 */
static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
	struct spi_controller *host = spi->controller;
	struct spi_qup *qup = spi_controller_get_devdata(host);
	struct scatterlist *tx_sgl, *rx_sgl;
	int ret;

	ret = spi_qup_vote_bw(qup, xfer->speed_hz);
	if (ret) {
		dev_err(qup->dev, "fail to vote for ICC bandwidth: %d\n", ret);
		return -EIO;
	}

	/* only one completion callback is needed; prefer the rx side */
	if (xfer->rx_buf)
		rx_done = spi_qup_dma_done;
	else if (xfer->tx_buf)
		tx_done = spi_qup_dma_done;

	rx_sgl = xfer->rx_sg.sgl;
	tx_sgl = xfer->tx_sg.sgl;

	do {
		u32 rx_nents = 0, tx_nents = 0;

		/* size this chunk from whichever sgl(s) remain */
		if (rx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
				SPI_MAX_XFER, &rx_nents) / qup->w_size;
		if (tx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
				SPI_MAX_XFER, &tx_nents) / qup->w_size;
		if (!qup->n_words)
			return -EIO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		/* before issuing the descriptors, set the QUP to run */
		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}
		if (rx_sgl) {
			ret = spi_qup_prep_sg(host, rx_sgl, rx_nents,
					      DMA_DEV_TO_MEM, rx_done);
			if (ret)
				return ret;
			dma_async_issue_pending(host->dma_rx);
		}

		if (tx_sgl) {
			ret = spi_qup_prep_sg(host, tx_sgl, tx_nents,
					      DMA_MEM_TO_DEV, tx_done);
			if (ret)
				return ret;

			dma_async_issue_pending(host->dma_tx);
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		/* advance past the sg entries consumed this iteration */
		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
			;
		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
			;

	} while (rx_sgl || tx_sgl);

	return 0;
}
542
/*
 * Run @xfer in PIO (FIFO or BLOCK mode), splitting it into
 * SPI_MAX_XFER-word chunks.  Each chunk is primed while PAUSEd (FIFO
 * mode pre-fills the output FIFO), then set to RUN; the IRQ handler
 * moves the remaining data and signals qup->done.  Returns 0 or a
 * negative errno (-ETIMEDOUT if the chunk never completes).
 */
static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	struct spi_controller *host = spi->controller;
	struct spi_qup *qup = spi_controller_get_devdata(host);
	int ret, n_words, iterations, offset = 0;

	n_words = qup->n_words;
	iterations = n_words / SPI_MAX_XFER; /* round down */
	qup->rx_buf = xfer->rx_buf;
	qup->tx_buf = xfer->tx_buf;

	do {
		/* full-size chunks first, then the tail remainder */
		if (iterations)
			qup->n_words = SPI_MAX_XFER;
		else
			qup->n_words = n_words % SPI_MAX_XFER;

		if (qup->tx_buf && offset)
			qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;

		if (qup->rx_buf && offset)
			qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;

		/*
		 * if the transaction is small enough, we need
		 * to fallback to FIFO mode
		 */
		if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
			qup->mode = QUP_IO_M_MODE_FIFO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
		if (ret) {
			dev_warn(qup->dev, "cannot set PAUSE state\n");
			return ret;
		}

		/* FIFO mode: pre-load the output FIFO before running */
		if (qup->mode == QUP_IO_M_MODE_FIFO)
			spi_qup_write(qup);

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		offset++;
	} while (iterations--);

	return 0;
}
607
a75e91ba
JRO
608static bool spi_qup_data_pending(struct spi_qup *controller)
609{
610 unsigned int remainder_tx, remainder_rx;
611
612 remainder_tx = DIV_ROUND_UP(spi_qup_len(controller) -
613 controller->tx_bytes, controller->w_size);
614
615 remainder_rx = DIV_ROUND_UP(spi_qup_len(controller) -
616 controller->rx_bytes, controller->w_size);
617
618 return remainder_tx || remainder_rx;
619}
620
64ff247a
II
/*
 * QUP interrupt handler: latch and acknowledge error flags, service the
 * FIFOs for PIO transfers, and complete the transaction when all data
 * has moved or an error occurred.
 */
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	u32 opflags, qup_err, spi_err;
	int error = 0;

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	/* error flags are write-one-to-clear */
	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	/* keep only the first error seen for this transfer */
	spin_lock(&controller->lock);
	if (!controller->error)
		controller->error = error;
	spin_unlock(&controller->lock);

	if (spi_qup_is_dma_xfer(controller->mode)) {
		/* DMA owns the data path; just ack the service flags */
		writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
	} else {
		if (opflags & QUP_OP_IN_SERVICE_FLAG)
			spi_qup_read(controller, &opflags);

		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
			spi_qup_write(controller);

		if (!spi_qup_data_pending(controller))
			complete(&controller->done);
	}

	if (error)
		complete(&controller->done);

	if (opflags & QUP_OP_MAX_INPUT_DONE_FLAG) {
		/* PIO with data still pending: wait for further IRQs */
		if (!spi_qup_is_dma_xfer(controller->mode)) {
			if (spi_qup_data_pending(controller))
				return IRQ_HANDLED;
		}
		complete(&controller->done);
	}

	return IRQ_HANDLED;
}
687
94b9149f
VN
/*
 * set clock freq ... bits per word, determine mode
 *
 * Validates loopback length, sets the core clock rate via OPP, derives
 * w_size/n_words from the transfer, and picks the I/O mode: FIFO for
 * small transfers, BAM when the core reports the transfer is DMA-mapped,
 * otherwise BLOCK.
 */
static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
	int ret;

	/* loopback data is bounced through the FIFO, so it must fit */
	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "too big size for loopback %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = dev_pm_opp_set_rate(controller->dev, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "fail to set frequency %d",
			xfer->speed_hz);
		return -EIO;
	}

	controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
	controller->n_words = xfer->len / controller->w_size;

	if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
		controller->mode = QUP_IO_M_MODE_FIFO;
	else if (spi->controller->can_dma &&
		 spi->controller->can_dma(spi->controller, spi, xfer) &&
		 spi->controller->cur_msg_mapped)
		controller->mode = QUP_IO_M_MODE_BAM;
	else
		controller->mode = QUP_IO_M_MODE_BLOCK;

	return 0;
}
721
/*
 * prep qup for another spi transaction of specific type
 *
 * Resets the per-transfer bookkeeping and programs all QUP/SPI registers
 * (word counts, I/O mode, clock polarity/phase, loopback, HS mode,
 * bits-per-word, IRQ masking) for the mode chosen by spi_qup_io_prep().
 * Returns 0 or -EIO.
 */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
	u32 config, iomode, control;
	unsigned long flags;

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = xfer;
	controller->error = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);


	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	/* which count registers are used depends on the I/O mode */
	switch (controller->mode) {
	case QUP_IO_M_MODE_FIFO:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_READ_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		break;
	case QUP_IO_M_MODE_BAM:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);

		if (!controller->qup_v1) {
			void __iomem *input_cnt;

			input_cnt = controller->base + QUP_MX_INPUT_CNT;
			/*
			 * for DMA transfers, both QUP_MX_INPUT_CNT and
			 * QUP_MX_OUTPUT_CNT must be zero to all cases but one.
			 * That case is a non-balanced transfer when there is
			 * only a rx_buf.
			 */
			if (xfer->tx_buf)
				writel_relaxed(0, input_cnt);
			else
				writel_relaxed(controller->n_words, input_cnt);

			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		}
		break;
	case QUP_IO_M_MODE_BLOCK:
		reinit_completion(&controller->done);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
		break;
	default:
		dev_err(controller->dev, "unknown mode = %d\n",
			controller->mode);
		return -EIO;
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);

	/* pack/unpack is only enabled for DMA-driven modes */
	if (!spi_qup_is_dma_xfer(controller->mode))
		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	else
		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;

	iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	control = readl_relaxed(controller->base + SPI_IO_CONTROL);

	if (spi->mode & SPI_CPOL)
		control |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		control &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(control, controller->base + SPI_IO_CONTROL);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability for spi-clk high rates,
	 * but is invalid in loop back mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;

	/* for unidirectional DMA, disable the unused data path */
	if (spi_qup_is_dma_xfer(controller->mode)) {
		if (!xfer->tx_buf)
			config |= QUP_CONFIG_NO_OUTPUT;
		if (!xfer->rx_buf)
			config |= QUP_CONFIG_NO_INPUT;
	}

	writel_relaxed(config, controller->base + QUP_CONFIG);

	/* only write to OPERATIONAL_MASK when register is present */
	if (!controller->qup_v1) {
		u32 mask = 0;

		/*
		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
		 * status change in BAM mode
		 */

		if (spi_qup_is_dma_xfer(controller->mode))
			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;

		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
	}

	return 0;
}
872
/*
 * Core transfer_one callback: prepare clock/mode, compute a generous
 * timeout, run the transfer in DMA or PIO mode, then reset the QUP and
 * report the first error recorded by the IRQ handler.
 */
static int spi_qup_transfer_one(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_controller_get_devdata(host);
	unsigned long timeout, flags;
	int ret;

	ret = spi_qup_io_prep(spi, xfer);
	if (ret)
		return ret;

	/* ms to clock one max-sized chunk at speed_hz, with 100x headroom */
	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
	timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
				     xfer->len) * 8, timeout);
	timeout = 100 * msecs_to_jiffies(timeout);

	reinit_completion(&controller->done);

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = xfer;
	controller->error = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_is_dma_xfer(controller->mode))
		ret = spi_qup_do_dma(spi, xfer, timeout);
	else
		ret = spi_qup_do_pio(spi, xfer, timeout);

	spi_qup_set_state(controller, QUP_STATE_RESET);
	spin_lock_irqsave(&controller->lock, flags);
	if (!ret)
		ret = controller->error;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (ret && spi_qup_is_dma_xfer(controller->mode))
		spi_qup_dma_terminate(host, xfer);

	return ret;
}
915
/*
 * can_dma callback: DMA is usable only when each present buffer is
 * cache-aligned and its channel exists, v1 hardware gets block-multiple
 * lengths, and the transfer is too big for the FIFO (where PIO wins).
 */
static bool spi_qup_can_dma(struct spi_controller *host, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_controller_get_devdata(host);
	size_t dma_align = dma_get_cache_alignment();
	int n_words;

	if (xfer->rx_buf) {
		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
		    IS_ERR_OR_NULL(host->dma_rx))
			return false;
		/* QUP v1 requires whole input blocks */
		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
			return false;
	}

	if (xfer->tx_buf) {
		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
		    IS_ERR_OR_NULL(host->dma_tx))
			return false;
		/* QUP v1 requires whole output blocks */
		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
			return false;
	}

	/* FIFO-sized transfers are cheaper in PIO */
	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
		return false;

	return true;
}
945
597442ff 946static void spi_qup_release_dma(struct spi_controller *host)
612762e8 947{
597442ff
YY
948 if (!IS_ERR_OR_NULL(host->dma_rx))
949 dma_release_channel(host->dma_rx);
950 if (!IS_ERR_OR_NULL(host->dma_tx))
951 dma_release_channel(host->dma_tx);
612762e8
AG
952}
953
/*
 * Acquire the rx/tx DMA channels and program their slave configs to
 * target the QUP FIFO registers at physical @base.  Returns 0 on
 * success; on failure both channels are released via the goto chain.
 */
static int spi_qup_init_dma(struct spi_controller *host, resource_size_t base)
{
	struct spi_qup *spi = spi_controller_get_devdata(host);
	struct dma_slave_config *rx_conf = &spi->rx_conf,
				*tx_conf = &spi->tx_conf;
	struct device *dev = spi->dev;
	int ret;

	/* allocate dma resources, if available */
	host->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(host->dma_rx))
		return PTR_ERR(host->dma_rx);

	host->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(host->dma_tx)) {
		ret = PTR_ERR(host->dma_tx);
		goto err_tx;
	}

	/* set DMA parameters: device flow control, FIFO-sized bursts */
	rx_conf->direction = DMA_DEV_TO_MEM;
	rx_conf->device_fc = 1;
	rx_conf->src_addr = base + QUP_INPUT_FIFO;
	rx_conf->src_maxburst = spi->in_blk_sz;

	tx_conf->direction = DMA_MEM_TO_DEV;
	tx_conf->device_fc = 1;
	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
	tx_conf->dst_maxburst = spi->out_blk_sz;

	ret = dmaengine_slave_config(host->dma_rx, rx_conf);
	if (ret) {
		dev_err(dev, "failed to configure RX channel\n");
		goto err;
	}

	ret = dmaengine_slave_config(host->dma_tx, tx_conf);
	if (ret) {
		dev_err(dev, "failed to configure TX channel\n");
		goto err;
	}

	return 0;

err:
	dma_release_channel(host->dma_tx);
err_tx:
	dma_release_channel(host->dma_rx);
	return ret;
}
1004
b702b9fb
VN
1005static void spi_qup_set_cs(struct spi_device *spi, bool val)
1006{
1007 struct spi_qup *controller;
1008 u32 spi_ioc;
1009 u32 spi_ioc_orig;
1010
597442ff 1011 controller = spi_controller_get_devdata(spi->controller);
b702b9fb
VN
1012 spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
1013 spi_ioc_orig = spi_ioc;
1014 if (!val)
1015 spi_ioc |= SPI_IO_C_FORCE_CS;
1016 else
1017 spi_ioc &= ~SPI_IO_C_FORCE_CS;
1018
1019 if (spi_ioc != spi_ioc_orig)
1020 writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
1021}
1022
/*
 * Probe a QUP SPI controller: map registers, acquire clocks and the
 * interconnect path, set up optional DMA, size the FIFOs from the
 * QUP_IO_M_MODES ID register, reset the block and register with the SPI
 * core.  Uses runtime PM with autosuspend; all error paths unwind in
 * reverse acquisition order via the labels at the bottom.
 */
static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct icc_path *icc_path;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	icc_path = devm_of_icc_get(dev, NULL);
	if (IS_ERR(icc_path))
		return dev_err_probe(dev, PTR_ERR(icc_path),
				     "failed to get interconnect path\n");

	/* This is optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = devm_pm_opp_set_clkname(dev, "core");
	if (ret)
		return ret;

	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(dev, ret, "invalid OPP table\n");

	host = spi_alloc_host(dev, sizeof(struct spi_qup));
	if (!host) {
		dev_err(dev, "cannot allocate host\n");
		return -ENOMEM;
	}

	/* use num-cs unless not present or out of range */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		host->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		host->num_chipselect = num_cs;

	host->use_gpio_descriptors = true;
	host->max_native_cs = SPI_NUM_CHIPSELECTS;
	host->bus_num = pdev->id;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	host->max_speed_hz = max_freq;
	host->transfer_one = spi_qup_transfer_one;
	host->dev.of_node = pdev->dev.of_node;
	host->auto_runtime_pm = true;
	host->dma_alignment = dma_get_cache_alignment();
	host->max_dma_len = SPI_MAX_XFER;

	platform_set_drvdata(pdev, host);

	controller = spi_controller_get_devdata(host);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->icc_path = icc_path;
	controller->irq = irq;

	/* DMA is optional: only a deferral aborts probe, any other failure
	 * just leaves the controller in PIO-only mode (can_dma unset). */
	ret = spi_qup_init_dma(host, res->start);
	if (ret == -EPROBE_DEFER)
		goto error;
	else if (!ret)
		host->can_dma = spi_qup_can_dma;

	/* match data is 1 for the v1.1.1 QUP, 0/NULL for v2 variants */
	controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);

	/* native CS control via FORCE_CS exists on v2 only */
	if (!controller->qup_v1)
		host->set_cs = spi_qup_set_cs;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		goto error_dma;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		goto error_dma;
	}

	/* read the hardware's block/FIFO geometry from QUP_IO_M_MODES */
	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	/* soft-reset the QUP core before programming it */
	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error_clk;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* if earlier version of the QUP, disable INPUT_OVERRUN */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
			base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error_clk;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_controller(dev, host);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error_clk:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
error_dma:
	spi_qup_release_dma(host);
error:
	spi_controller_put(host);
	return ret;
}
1215
ec833050 1216#ifdef CONFIG_PM
64ff247a
II
1217static int spi_qup_pm_suspend_runtime(struct device *device)
1218{
597442ff
YY
1219 struct spi_controller *host = dev_get_drvdata(device);
1220 struct spi_qup *controller = spi_controller_get_devdata(host);
64ff247a
II
1221 u32 config;
1222
1223 /* Enable clocks auto gaiting */
1224 config = readl(controller->base + QUP_CONFIG);
f0ceb114 1225 config |= QUP_CONFIG_CLOCK_AUTO_GATE;
64ff247a 1226 writel_relaxed(config, controller->base + QUP_CONFIG);
dae1a770
PG
1227
1228 clk_disable_unprepare(controller->cclk);
ecdaa947 1229 spi_qup_vote_bw(controller, 0);
dae1a770
PG
1230 clk_disable_unprepare(controller->iclk);
1231
64ff247a
II
1232 return 0;
1233}
1234
1235static int spi_qup_pm_resume_runtime(struct device *device)
1236{
597442ff
YY
1237 struct spi_controller *host = dev_get_drvdata(device);
1238 struct spi_qup *controller = spi_controller_get_devdata(host);
64ff247a 1239 u32 config;
dae1a770
PG
1240 int ret;
1241
1242 ret = clk_prepare_enable(controller->iclk);
1243 if (ret)
1244 return ret;
1245
1246 ret = clk_prepare_enable(controller->cclk);
494a2276
XQ
1247 if (ret) {
1248 clk_disable_unprepare(controller->iclk);
dae1a770 1249 return ret;
494a2276 1250 }
64ff247a
II
1251
1252 /* Disable clocks auto gaiting */
1253 config = readl_relaxed(controller->base + QUP_CONFIG);
f0ceb114 1254 config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
64ff247a
II
1255 writel_relaxed(config, controller->base + QUP_CONFIG);
1256 return 0;
1257}
ec833050 1258#endif /* CONFIG_PM */
64ff247a
II
1259
1260#ifdef CONFIG_PM_SLEEP
1261static int spi_qup_suspend(struct device *device)
1262{
597442ff
YY
1263 struct spi_controller *host = dev_get_drvdata(device);
1264 struct spi_qup *controller = spi_controller_get_devdata(host);
64ff247a
II
1265 int ret;
1266
136b5cd2
YS
1267 if (pm_runtime_suspended(device)) {
1268 ret = spi_qup_pm_resume_runtime(device);
1269 if (ret)
1270 return ret;
1271 }
597442ff 1272 ret = spi_controller_suspend(host);
64ff247a
II
1273 if (ret)
1274 return ret;
1275
1276 ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1277 if (ret)
1278 return ret;
1279
136b5cd2 1280 clk_disable_unprepare(controller->cclk);
ecdaa947 1281 spi_qup_vote_bw(controller, 0);
136b5cd2 1282 clk_disable_unprepare(controller->iclk);
64ff247a
II
1283 return 0;
1284}
1285
1286static int spi_qup_resume(struct device *device)
1287{
597442ff
YY
1288 struct spi_controller *host = dev_get_drvdata(device);
1289 struct spi_qup *controller = spi_controller_get_devdata(host);
64ff247a
II
1290 int ret;
1291
1292 ret = clk_prepare_enable(controller->iclk);
1293 if (ret)
1294 return ret;
1295
1296 ret = clk_prepare_enable(controller->cclk);
70034320
XQ
1297 if (ret) {
1298 clk_disable_unprepare(controller->iclk);
64ff247a 1299 return ret;
70034320 1300 }
64ff247a
II
1301
1302 ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1303 if (ret)
70034320
XQ
1304 goto disable_clk;
1305
597442ff 1306 ret = spi_controller_resume(host);
70034320
XQ
1307 if (ret)
1308 goto disable_clk;
64ff247a 1309
70034320
XQ
1310 return 0;
1311
1312disable_clk:
1313 clk_disable_unprepare(controller->cclk);
1314 clk_disable_unprepare(controller->iclk);
1315 return ret;
64ff247a
II
1316}
1317#endif /* CONFIG_PM_SLEEP */
1318
dea8e70f 1319static void spi_qup_remove(struct platform_device *pdev)
64ff247a 1320{
597442ff
YY
1321 struct spi_controller *host = dev_get_drvdata(&pdev->dev);
1322 struct spi_qup *controller = spi_controller_get_devdata(host);
64ff247a
II
1323 int ret;
1324
61f49171 1325 ret = pm_runtime_get_sync(&pdev->dev);
64ff247a 1326
61f49171
UKK
1327 if (ret >= 0) {
1328 ret = spi_qup_set_state(controller, QUP_STATE_RESET);
1329 if (ret)
1330 dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
1331 ERR_PTR(ret));
64ff247a 1332
61f49171
UKK
1333 clk_disable_unprepare(controller->cclk);
1334 clk_disable_unprepare(controller->iclk);
1335 } else {
1336 dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n",
1337 ERR_PTR(ret));
1338 }
612762e8 1339
597442ff 1340 spi_qup_release_dma(host);
64ff247a
II
1341
1342 pm_runtime_put_noidle(&pdev->dev);
1343 pm_runtime_disable(&pdev->dev);
64ff247a
II
1344}
1345
/*
 * OF match table: the v1.1.1 entry carries data 1, which probe stores in
 * controller->qup_v1 to select v1-specific register setup; the v2
 * entries carry no data (qup_v1 == 0).
 */
static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

/* System-sleep and runtime-PM callbacks; no runtime-idle hook is used. */
static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};

static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table = spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove_new = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");