// SPDX-License-Identifier: GPL-2.0-only
/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 */

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	return gpiod_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

static int ssi_port_regs_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ssi_port_regs);

static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static void ssi_debug_add_port(struct omap_ssi_port *omap_port,
			       struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	debugfs_create_file_unsafe("divisor", 0644, dir, port,
				   &ssi_sst_div_fops);
}
#endif

static void ssi_process_errqueue(struct work_struct *work)
{
	struct omap_ssi_port *omap_port;
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work);

	list_for_each_safe(head, tmp, &omap_port->errqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		msg->complete(msg);
		list_del(head);
	}
}

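/*
 * Claim a free GDD logical channel for @msg and record the transfer;
 * returns the channel number, or -EBUSY when all channels are in use.
 */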
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}

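/*
 * Program and start a GDD (system DMA) transfer for @msg on logical
 * channel @lch. A runtime PM reference is held until the transfer
 * completes.
 */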
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	/* Hold clocks during the transfer */
	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

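/*
 * Start a PIO transfer: only the matching DATAACCEPT/DATAAVAILABLE
 * interrupt is armed here, the FIFO is then serviced word by word
 * from ssi_pio_complete().
 */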
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_autosuspend(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

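/*
 * Start the transfer at the head of @queue: DMA for scatterlist entries
 * longer than one word, PIO otherwise or when no GDD channel is free.
 */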
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}

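/*
 * Send a break frame (TX) or queue the message until a HW break is
 * detected (RX). Break frames are only supported in FRAME mode.
 */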
static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct list_head *queue;
	int err = 0;

	BUG_ON(!msg);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}

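/* Derive the TX divisor for max_speed from the SSI functional clock rate */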
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d Kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}

static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}

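/*
 * Apply the client TX/RX configuration: both directions are put to sleep
 * while the registers are updated, and the values are shadowed so they
 * can be restored after OFF mode.
 */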
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

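/*
 * Abort all ongoing communication on the port: cancel DMA transfers,
 * flush FIFOs and errors, clear interrupts and drop every queued message.
 */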
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);

	/* stop all ssi communication */
	pinctrl_pm_select_idle_state(omap_port->pdev);
	udelay(1); /* wait for racing frames */

	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_autosuspend(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_autosuspend(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);

	/* Resume SSI communication */
	pinctrl_pm_select_default_state(omap_port->pdev);

	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

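/*
 * Raise the WAKE line from a workqueue: pm_runtime_get_sync() may sleep,
 * so this cannot be done directly from the atomic ssi_start_tx() context.
 */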
static void start_tx_work(struct work_struct *work)
{
	struct omap_ssi_port *omap_port =
				container_of(work, struct omap_ssi_port, work);
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
}

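/*
 * The first ssi_start_tx() reference schedules the wake line work;
 * nested calls only bump the reference count.
 */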
static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	spin_unlock_bh(&omap_port->wk_lock);

	schedule_work(&omap_port->work);

	return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */

	return 0;
}

static void ssi_transfer(struct omap_ssi_port *omap_port,
							struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	pm_runtime_get(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
}

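/*
 * Drop every message queued by @cl and disarm/ack the interrupts that
 * were only serving those messages.
 */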
static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clocks writes, also GDD ones */
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
		SSI_MPU_STATUS_REG(port->num, 0));
}

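/* Cancel all in-flight GDD transfers belonging to @cl */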
static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_put_sync(omap_port->pdev);
		pm_runtime_get(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

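/*
 * Port error handler: cancel all DMA and PIO reads, ack the error and
 * complete the affected read requests with HSI_STATUS_ERROR.
 */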
static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}

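/*
 * Service one DATAACCEPT/DATAAVAILABLE event: move a single 32-bit word
 * through the channel FIFO and complete the message after its last word.
 */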
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock_bh(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock_bh(&omap_port->lock);

			return;
		}

	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_mark_last_busy(omap_port->pdev);
		pm_runtime_put_autosuspend(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock_bh(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

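/*
 * Threaded handler for the port MPU interrupt: dispatches data, break
 * and error events until the enabled status bits are all cleared.
 */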
static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);

	do {
		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		for (ch = 0; ch < omap_port->channels; ch++) {
			if (status_reg & SSI_DATAACCEPT(ch))
				ssi_pio_complete(port, &omap_port->txqueue[ch]);
			if (status_reg & SSI_DATAAVAILABLE(ch))
				ssi_pio_complete(port, &omap_port->rxqueue[ch]);
		}
		if (status_reg & SSI_BREAKDETECTED)
			ssi_break_complete(port);
		if (status_reg & SSI_ERROROCCURED)
			ssi_error(port);

		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		/* TODO: sleep if we retry? */
	} while (status_reg);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return IRQ_HANDLED;
}

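/*
 * Threaded handler for the CAWAKE gpio interrupt: maps wake line edges
 * to HSI start/stop RX events and the matching clock references.
 */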
static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick High-Low-High transition in the line.
		 * In such a case if we have long interrupt latencies,
		 * we can miss the low event or get twice a high event.
		 * This workaround will avoid breaking the clock reference
		 * count when such a situation occurs.
		 */
		if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_get_sync(omap_port->pdev);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
	}

	return IRQ_HANDLED;
}

static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0)
		return err;
	omap_port->irq = err;
	err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
				ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
							omap_port->irq, err);
	return err;
}

static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (!omap_port->wake_gpio) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
	omap_port->wake_irq = cawake_irq;

	err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
		ssi_wake_thread,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
		"SSI cawake", port);
	if (err < 0)
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
			cawake_irq, err);
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}

static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}

static int ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
								mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}

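/*
 * Bind one SSI port sub-device: pick the first free slot in the
 * controller, map the TX/RX register regions, request the port and
 * CAWAKE interrupts and register the clients found in the device tree.
 */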
static int ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct gpio_desc *cawake_gpio = NULL;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
		port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
	if (IS_ERR(cawake_gpio)) {
		err = PTR_ERR(cawake_gpio);
		dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue);
	INIT_WORK(&omap_port->work, start_tx_work);

	/* initialize HSI port */
	port->async	= ssi_async;
	port->setup	= ssi_setup;
	port->flush	= ssi_flush;
	port->start_tx	= ssi_start_tx;
	port->stop_tx	= ssi_stop_tx;
	port->release	= ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_use_autosuspend(omap_port->pdev);
	pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_add_port(omap_port, omap_ssi->dir);
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

	return 0;

error:
	return err;
}

static int ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	cancel_delayed_work_sync(&omap_port->errqueue_work);

	hsi_port_unregister_clients(port);

	port->async	= hsi_dummy_msg;
	port->setup	= hsi_dummy_cl;
	port->flush	= hsi_dummy_cl;
	port->start_tx	= hsi_dummy_cl;
	port->stop_tx	= hsi_dummy_cl;
	port->release	= hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);

	pm_runtime_dont_use_autosuspend(&pd->dev);
	pm_runtime_disable(&pd->dev);

	return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
				omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}

void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
			       struct omap_ssi_port *omap_port)
{
	/* update divisor */
	u32 div = ssi_calculate_div(ssi);
	omap_port->sst.divisor = div;
	ssi_restore_divisor(omap_port);
}

#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
				omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
				omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
		omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS	(&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS	NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

struct platform_driver ssi_port_pdriver = {
	.probe = ssi_port_probe,
	.remove = ssi_port_remove,
	.driver	= {
		.name	= "omap_ssi_port",
		.of_match_table = omap_ssi_port_of_match,
		.pm	= DEV_PM_OPS,
	},
};