drivers/soundwire/intel.c
1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 // Copyright(c) 2015-17 Intel Corporation.
3
4 /*
5  * SoundWire Intel Master Driver
6  */
7
8 #include <linux/acpi.h>
9 #include <linux/debugfs.h>
10 #include <linux/delay.h>
11 #include <linux/io.h>
12 #include <sound/pcm_params.h>
13 #include <linux/pm_runtime.h>
14 #include <sound/soc.h>
15 #include <linux/soundwire/sdw_registers.h>
16 #include <linux/soundwire/sdw.h>
17 #include <linux/soundwire/sdw_intel.h>
18 #include "cadence_master.h"
19 #include "bus.h"
20 #include "intel.h"
21
22
23 enum intel_pdi_type {
24         INTEL_PDI_IN = 0,
25         INTEL_PDI_OUT = 1,
26         INTEL_PDI_BD = 2,
27 };
28
29 #define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
30
31 /*
32  * Read, write helpers for HW registers
33  */
34 static inline int intel_readl(void __iomem *base, int offset)
35 {
36         return readl(base + offset);
37 }
38
39 static inline void intel_writel(void __iomem *base, int offset, int value)
40 {
41         writel(value, base + offset);
42 }
43
44 static inline u16 intel_readw(void __iomem *base, int offset)
45 {
46         return readw(base + offset);
47 }
48
49 static inline void intel_writew(void __iomem *base, int offset, u16 value)
50 {
51         writew(value, base + offset);
52 }
53
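/*
 * Poll a register until the bits selected by 'mask' match 'target'.
 * The wait is bounded to 10 iterations of 50-100us, i.e. roughly 1ms
 * worst case; -EAGAIN is returned on timeout.
 */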
54 static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
55 {
56         int timeout = 10;
57         u32 reg_read;
58
59         do {
60                 reg_read = readl(base + offset);
61                 if ((reg_read & mask) == target)
62                         return 0;
63
64                 timeout--;
65                 usleep_range(50, 100);
66         } while (timeout != 0);
67
68         return -EAGAIN;
69 }
70
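/*
 * Write 'value' to a register, then poll until the bits in 'mask' are
 * cleared (intel_clear_bit) or set (intel_set_bit). Used for
 * self-clearing/acknowledge bits such as SYNCGO and the link CPA status.
 */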
71 static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
72 {
73         writel(value, base + offset);
74         return intel_wait_bit(base, offset, mask, 0);
75 }
76
77 static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
78 {
79         writel(value, base + offset);
80         return intel_wait_bit(base, offset, mask, mask);
81 }
82
83 /*
84  * debugfs
85  */
86 #ifdef CONFIG_DEBUG_FS
87
88 #define RD_BUF (2 * PAGE_SIZE)
89
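/*
 * append one "offset value" line to the debugfs read buffer; 'l' selects
 * a 32-bit (readl) or 16-bit (readw) register access
 */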
90 static ssize_t intel_sprintf(void __iomem *mem, bool l,
91                              char *buf, size_t pos, unsigned int reg)
92 {
93         int value;
94
95         if (l)
96                 value = intel_readl(mem, reg);
97         else
98                 value = intel_readw(mem, reg);
99
100         return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
101 }
102
103 static int intel_reg_show(struct seq_file *s_file, void *data)
104 {
105         struct sdw_intel *sdw = s_file->private;
106         void __iomem *s = sdw->link_res->shim;
107         void __iomem *a = sdw->link_res->alh;
108         char *buf;
109         ssize_t ret;
110         int i, j;
111         unsigned int links, reg;
112
113         buf = kzalloc(RD_BUF, GFP_KERNEL);
114         if (!buf)
115                 return -ENOMEM;
116
117         links = intel_readl(s, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_LCOUNT_MASK;
118
119         ret = scnprintf(buf, RD_BUF, "Register  Value\n");
120         ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
121
122         for (i = 0; i < links; i++) {
123                 reg = SDW_SHIM_LCAP + i * 4;
124                 ret += intel_sprintf(s, true, buf, ret, reg);
125         }
126
127         for (i = 0; i < links; i++) {
128                 ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
129                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
130                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
131                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
132                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
133                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
134                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));
135
136                 ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");
137
138                 /*
139                  * the value 10 is the number of PDIs. We will need a
140                  * cleanup to remove hard-coded Intel configurations
141                  * from cadence_master.c
142                  */
143                 for (j = 0; j < 10; j++) {
144                         ret += intel_sprintf(s, false, buf, ret,
145                                         SDW_SHIM_PCMSYCHM(i, j));
146                         ret += intel_sprintf(s, false, buf, ret,
147                                         SDW_SHIM_PCMSYCHC(i, j));
148                 }
149                 ret += scnprintf(buf + ret, RD_BUF - ret, "\n IOCTL, CTMCTL\n");
150
151                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
152                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
153         }
154
155         ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
156         ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
157         ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);
158
159         ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
160         for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
161                 ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
162
163         seq_printf(s_file, "%s", buf);
164         kfree(buf);
165
166         return 0;
167 }
168 DEFINE_SHOW_ATTRIBUTE(intel_reg);
169
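/*
 * debugfs-only write helpers to override the bus Master/Slave port data
 * modes; writing them taints the kernel since userspace changes the
 * hardware state behind the kernel's back
 */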
170 static int intel_set_m_datamode(void *data, u64 value)
171 {
172         struct sdw_intel *sdw = data;
173         struct sdw_bus *bus = &sdw->cdns.bus;
174
175         if (value > SDW_PORT_DATA_MODE_STATIC_1)
176                 return -EINVAL;
177
178         /* Userspace changed the hardware state behind the kernel's back */
179         add_taint(TAINT_USER, LOCKDEP_STILL_OK);
180
181         bus->params.m_data_mode = value;
182
183         return 0;
184 }
185 DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
186                          intel_set_m_datamode, "%llu\n");
187
188 static int intel_set_s_datamode(void *data, u64 value)
189 {
190         struct sdw_intel *sdw = data;
191         struct sdw_bus *bus = &sdw->cdns.bus;
192
193         if (value > SDW_PORT_DATA_MODE_STATIC_1)
194                 return -EINVAL;
195
196         /* Userspace changed the hardware state behind the kernel's back */
197         add_taint(TAINT_USER, LOCKDEP_STILL_OK);
198
199         bus->params.s_data_mode = value;
200
201         return 0;
202 }
203 DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
204                          intel_set_s_datamode, "%llu\n");
205
206 static void intel_debugfs_init(struct sdw_intel *sdw)
207 {
208         struct dentry *root = sdw->cdns.bus.debugfs;
209
210         if (!root)
211                 return;
212
213         sdw->debugfs = debugfs_create_dir("intel-sdw", root);
214
215         debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
216                             &intel_reg_fops);
217
218         debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
219                             &intel_set_m_datamode_fops);
220
221         debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
222                             &intel_set_s_datamode_fops);
223
224         sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
225 }
226
227 static void intel_debugfs_exit(struct sdw_intel *sdw)
228 {
229         debugfs_remove_recursive(sdw->debugfs);
230 }
231 #else
232 static void intel_debugfs_init(struct sdw_intel *sdw) {}
233 static void intel_debugfs_exit(struct sdw_intel *sdw) {}
234 #endif /* CONFIG_DEBUG_FS */
235
236 /*
237  * shim ops
238  */
239 /* this needs to be called with shim_lock */
240 static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
241 {
242         void __iomem *shim = sdw->link_res->shim;
243         unsigned int link_id = sdw->instance;
244         u16 ioctl;
245
246         /* Switch to MIP from Glue logic */
247         ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
248
249         ioctl &= ~(SDW_SHIM_IOCTL_DOE);
250         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
251         usleep_range(10, 15);
252
253         ioctl &= ~(SDW_SHIM_IOCTL_DO);
254         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
255         usleep_range(10, 15);
256
257         ioctl |= (SDW_SHIM_IOCTL_MIF);
258         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
259         usleep_range(10, 15);
260
261         ioctl &= ~(SDW_SHIM_IOCTL_BKE);
262         ioctl &= ~(SDW_SHIM_IOCTL_COE);
263         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
264         usleep_range(10, 15);
265
266         /* at this point Master IP has full control of the I/Os */
267 }
268
269 /* this needs to be called with shim_lock */
270 static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
271 {
272         unsigned int link_id = sdw->instance;
273         void __iomem *shim = sdw->link_res->shim;
274         u16 ioctl;
275
276         /* Glue logic */
277         ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
278         ioctl |= SDW_SHIM_IOCTL_BKE;
279         ioctl |= SDW_SHIM_IOCTL_COE;
280         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
281         usleep_range(10, 15);
282
283         ioctl &= ~(SDW_SHIM_IOCTL_MIF);
284         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
285         usleep_range(10, 15);
286
287         /* at this point Integration Glue has full control of the I/Os */
288 }
289
290 /* this needs to be called with shim_lock */
291 static void intel_shim_init(struct sdw_intel *sdw)
292 {
293         void __iomem *shim = sdw->link_res->shim;
294         unsigned int link_id = sdw->instance;
295         u16 ioctl = 0, act = 0;
296
297         /* Initialize Shim */
298         ioctl |= SDW_SHIM_IOCTL_BKE;
299         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
300         usleep_range(10, 15);
301
302         ioctl |= SDW_SHIM_IOCTL_WPDD;
303         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
304         usleep_range(10, 15);
305
306         ioctl |= SDW_SHIM_IOCTL_DO;
307         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
308         usleep_range(10, 15);
309
310         ioctl |= SDW_SHIM_IOCTL_DOE;
311         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
312         usleep_range(10, 15);
313
314         intel_shim_glue_to_master_ip(sdw);
315
316         u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
317         act |= SDW_SHIM_CTMCTL_DACTQE;
318         act |= SDW_SHIM_CTMCTL_DODS;
319         intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
320         usleep_range(10, 15);
321 }
322
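/* return a non-zero value if the WAKESTS bit for this link is set */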
323 static int intel_shim_check_wake(struct sdw_intel *sdw)
324 {
325         void __iomem *shim;
326         u16 wake_sts;
327
328         shim = sdw->link_res->shim;
329         wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
330
331         return wake_sts & BIT(sdw->instance);
332 }
333
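/*
 * enable or disable the SHIM wake detection for this link; when disabling,
 * any pending wake status for the link is cleared as well
 */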
334 static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
335 {
336         void __iomem *shim = sdw->link_res->shim;
337         unsigned int link_id = sdw->instance;
338         u16 wake_en, wake_sts;
339
340         mutex_lock(sdw->link_res->shim_lock);
341         wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
342
343         if (wake_enable) {
344                 /* Enable the wakeup */
345                 wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
346                 intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
347         } else {
348                 /* Disable the wake up interrupt */
349                 wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
350                 intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
351
352                 /* Clear wake status */
353                 wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
354                 wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
355                 intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
356         }
357         mutex_unlock(sdw->link_res->shim_lock);
358 }
359
360 static int intel_link_power_up(struct sdw_intel *sdw)
361 {
362         unsigned int link_id = sdw->instance;
363         void __iomem *shim = sdw->link_res->shim;
364         u32 *shim_mask = sdw->link_res->shim_mask;
365         struct sdw_bus *bus = &sdw->cdns.bus;
366         struct sdw_master_prop *prop = &bus->prop;
367         u32 spa_mask, cpa_mask;
368         u32 link_control;
369         int ret = 0;
370         u32 syncprd;
371         u32 sync_reg;
372
373         mutex_lock(sdw->link_res->shim_lock);
374
375         /*
376          * The hardware relies on an internal counter, typically 4kHz,
377          * to generate the SoundWire SSP - which defines a 'safe'
378          * synchronization point between commands and audio transport
379          * and allows for multi link synchronization. The SYNCPRD value
380          * is only dependent on the oscillator clock provided to
381          * the IP, so adjust based on _DSD properties reported in DSDT
382          * tables. The values reported are based on either 24MHz
383          * (CNL/CML) or 38.4 MHz (ICL/TGL+).
384          */
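        /*
         * Note: 24 MHz is an exact multiple of 6 MHz while 38.4 MHz is not,
         * which is what the modulo test below distinguishes.
         */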
385         if (prop->mclk_freq % 6000000)
386                 syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
387         else
388                 syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
389
390         if (!*shim_mask) {
391                 dev_dbg(sdw->cdns.dev, "powering up all links\n");
392
393                 /* we first need to program the SyncPRD/CPU registers */
394                 dev_dbg(sdw->cdns.dev,
395                         "first link up, programming SYNCPRD\n");
396
397                 /* set SyncPRD period */
398                 sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
399                 u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
400
401                 /* Set SyncCPU bit */
402                 sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
403                 intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
404
405                 /* Link power up sequence */
406                 link_control = intel_readl(shim, SDW_SHIM_LCTL);
407
408                 /* only power-up enabled links */
409                 spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
410                 cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
411
412                 link_control |= spa_mask;
413
414                 ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
415                 if (ret < 0) {
416                         dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
417                         goto out;
418                 }
419
420                 /* SyncCPU will change once link is active */
421                 ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
422                                      SDW_SHIM_SYNC_SYNCCPU, 0);
423                 if (ret < 0) {
424                         dev_err(sdw->cdns.dev,
425                                 "Failed to set SHIM_SYNC: %d\n", ret);
426                         goto out;
427                 }
428         }
429
430         *shim_mask |= BIT(link_id);
431
432         sdw->cdns.link_up = true;
433
434         intel_shim_init(sdw);
435
436 out:
437         mutex_unlock(sdw->link_res->shim_lock);
438
439         return ret;
440 }
441
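/*
 * power down this link; when it is the last active link in 'shim_mask',
 * also power down all enabled links by clearing their SPA bits in LCTL
 */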
442 static int intel_link_power_down(struct sdw_intel *sdw)
443 {
444         u32 link_control, spa_mask, cpa_mask;
445         unsigned int link_id = sdw->instance;
446         void __iomem *shim = sdw->link_res->shim;
447         u32 *shim_mask = sdw->link_res->shim_mask;
448         int ret = 0;
449
450         mutex_lock(sdw->link_res->shim_lock);
451
452         if (!(*shim_mask & BIT(link_id)))
453                 dev_err(sdw->cdns.dev,
454                         "%s: Unbalanced power-up/down calls\n", __func__);
455
456         sdw->cdns.link_up = false;
457
458         intel_shim_master_ip_to_glue(sdw);
459
460         *shim_mask &= ~BIT(link_id);
461
462         if (!*shim_mask) {
463
464                 dev_dbg(sdw->cdns.dev, "powering down all links\n");
465
466                 /* Link power down sequence */
467                 link_control = intel_readl(shim, SDW_SHIM_LCTL);
468
469                 /* only power-down enabled links */
470                 spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
471                 cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
472
473                 link_control &= spa_mask;
474
475                 ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
476                 if (ret < 0) {
477                         dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
478
479                         /*
480                          * we leave the sdw->cdns.link_up flag as false since we've disabled
481                          * the link at this point and cannot handle interrupts any longer.
482                          */
483                 }
484         }
485
486         mutex_unlock(sdw->link_res->shim_lock);
487
488         return ret;
489 }
490
491 static void intel_shim_sync_arm(struct sdw_intel *sdw)
492 {
493         void __iomem *shim = sdw->link_res->shim;
494         u32 sync_reg;
495
496         mutex_lock(sdw->link_res->shim_lock);
497
498         /* update SYNC register */
499         sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
500         sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
501         intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
502
503         mutex_unlock(sdw->link_res->shim_lock);
504 }
505
506 static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
507 {
508         void __iomem *shim = sdw->link_res->shim;
509         u32 sync_reg;
510         int ret;
511
512         /* Read SYNC register */
513         sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
514
515         /*
516          * Set SyncGO bit to synchronously trigger a bank switch for
517          * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
518          * the Masters.
519          */
520         sync_reg |= SDW_SHIM_SYNC_SYNCGO;
521
522         ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
523                               SDW_SHIM_SYNC_SYNCGO);
524
525         if (ret < 0)
526                 dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret);
527
528         return ret;
529 }
530
531 static int intel_shim_sync_go(struct sdw_intel *sdw)
532 {
533         int ret;
534
535         mutex_lock(sdw->link_res->shim_lock);
536
537         ret = intel_shim_sync_go_unlocked(sdw);
538
539         mutex_unlock(sdw->link_res->shim_lock);
540
541         return ret;
542 }
543
544 /*
545  * PDI routines
546  */
547 static void intel_pdi_init(struct sdw_intel *sdw,
548                            struct sdw_cdns_stream_config *config)
549 {
550         void __iomem *shim = sdw->link_res->shim;
551         unsigned int link_id = sdw->instance;
552         int pcm_cap;
553
554         /* PCM Stream Capability */
555         pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
556
557         config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
558         config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
559         config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
560
561         dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
562                 config->pcm_bd, config->pcm_in, config->pcm_out);
563 }
564
565 static int
566 intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
567 {
568         void __iomem *shim = sdw->link_res->shim;
569         unsigned int link_id = sdw->instance;
570         int count;
571
572         count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
573
574         /*
575          * WORKAROUND: on all existing Intel controllers, PDI
576          * number 2 reports a channel count of 1 even though it
577          * supports 8 channels. Hard-code the count for PDI
578          * number 2.
579          */
580         if (pdi_num == 2)
581                 count = 7;
582
583         /* zero based values for channel count in register */
584         count++;
585
586         return count;
587 }
588
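/*
 * record the channel capability of each of the 'num_pdi' PDIs and return
 * the total channel count through 'num_ch'
 */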
589 static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
590                                    struct sdw_cdns_pdi *pdi,
591                                    unsigned int num_pdi,
592                                    unsigned int *num_ch)
593 {
594         int i, ch_count = 0;
595
596         for (i = 0; i < num_pdi; i++) {
597                 pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
598                 ch_count += pdi->ch_count;
599                 pdi++;
600         }
601
602         *num_ch = ch_count;
603         return 0;
604 }
605
606 static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
607                                       struct sdw_cdns_streams *stream)
608 {
609         intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
610                                 &stream->num_ch_bd);
611
612         intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
613                                 &stream->num_ch_in);
614
615         intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
616                                 &stream->num_ch_out);
617
618         return 0;
619 }
620
621 static int intel_pdi_ch_update(struct sdw_intel *sdw)
622 {
623         intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
624
625         return 0;
626 }
627
628 static void
629 intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
630 {
631         void __iomem *shim = sdw->link_res->shim;
632         unsigned int link_id = sdw->instance;
633         int pdi_conf = 0;
634
635         /* the Bulk and PCM streams are not contiguous */
636         pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
637         if (pdi->num >= 2)
638                 pdi->intel_alh_id += 2;
639
640         /*
641          * Program stream parameters to stream SHIM register
642          * This is applicable for PCM stream only.
643          */
644         if (pdi->type != SDW_STREAM_PCM)
645                 return;
646
647         if (pdi->dir == SDW_DATA_DIR_RX)
648                 pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
649         else
650                 pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
651
652         u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
653         u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
654         u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
655
656         intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
657 }
658
659 static void
660 intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
661 {
662         void __iomem *alh = sdw->link_res->alh;
663         unsigned int link_id = sdw->instance;
664         unsigned int conf;
665
666         /* the Bulk and PCM streams are not contiguous */
667         pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
668         if (pdi->num >= 2)
669                 pdi->intel_alh_id += 2;
670
671         /* Program Stream config ALH register */
672         conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
673
674         u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
675         u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
676
677         intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
678 }
679
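/*
 * pass the stream direction, hw_params, link and ALH stream IDs to the
 * parent (DSP) driver through the link_res->ops->params_stream() callback;
 * returns -EIO if no callback is registered
 */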
680 static int intel_params_stream(struct sdw_intel *sdw,
681                                int stream,
682                                struct snd_soc_dai *dai,
683                                struct snd_pcm_hw_params *hw_params,
684                                int link_id, int alh_stream_id)
685 {
686         struct sdw_intel_link_res *res = sdw->link_res;
687         struct sdw_intel_stream_params_data params_data;
688
689         params_data.stream = stream; /* direction */
690         params_data.dai = dai;
691         params_data.hw_params = hw_params;
692         params_data.link_id = link_id;
693         params_data.alh_stream_id = alh_stream_id;
694
695         if (res->ops && res->ops->params_stream && res->dev)
696                 return res->ops->params_stream(res->dev,
697                                                &params_data);
698         return -EIO;
699 }
700
701 static int intel_free_stream(struct sdw_intel *sdw,
702                              int stream,
703                              struct snd_soc_dai *dai,
704                              int link_id)
705 {
706         struct sdw_intel_link_res *res = sdw->link_res;
707         struct sdw_intel_stream_free_data free_data;
708
709         free_data.stream = stream; /* direction */
710         free_data.dai = dai;
711         free_data.link_id = link_id;
712
713         if (res->ops && res->ops->free_stream && res->dev)
714                 return res->ops->free_stream(res->dev,
715                                              &free_data);
716
717         return 0;
718 }
719
720 /*
721  * bank switch routines
722  */
723
724 static int intel_pre_bank_switch(struct sdw_intel *sdw)
725 {
726         struct sdw_cdns *cdns = &sdw->cdns;
727         struct sdw_bus *bus = &cdns->bus;
728
729         /* Write to register only for multi-link */
730         if (!bus->multi_link)
731                 return 0;
732
733         intel_shim_sync_arm(sdw);
734
735         return 0;
736 }
737
738 static int intel_post_bank_switch(struct sdw_intel *sdw)
739 {
740         struct sdw_cdns *cdns = &sdw->cdns;
741         struct sdw_bus *bus = &cdns->bus;
742         void __iomem *shim = sdw->link_res->shim;
743         int sync_reg, ret;
744
745         /* Write to register only for multi-link */
746         if (!bus->multi_link)
747                 return 0;
748
749         mutex_lock(sdw->link_res->shim_lock);
750
751         /* Read SYNC register */
752         sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
753
754         /*
755          * The post_bank_switch() op is called from the bus in a loop for
756          * all the Masters in the stream, with the expectation that
757          * we trigger the bank switch only for the first Master in the list
758          * and do nothing for the other Masters.
759          *
760          * So, set the SYNCGO bit only if CMDSYNC bit is set for any Master.
761          */
762         if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) {
763                 ret = 0;
764                 goto unlock;
765         }
766
767         ret = intel_shim_sync_go_unlocked(sdw);
768 unlock:
769         mutex_unlock(sdw->link_res->shim_lock);
770
771         if (ret < 0)
772                 dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
773
774         return ret;
775 }
776
777 /*
778  * DAI routines
779  */
780
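/*
 * hw_params flow: allocate a Cadence PDI, program the SHIM/ALH routing,
 * inform the DSP of the ALH stream ID, then add this Master to the
 * sdw_stream with a single-port configuration.
 */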
781 static int intel_hw_params(struct snd_pcm_substream *substream,
782                            struct snd_pcm_hw_params *params,
783                            struct snd_soc_dai *dai)
784 {
785         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
786         struct sdw_intel *sdw = cdns_to_intel(cdns);
787         struct sdw_cdns_dai_runtime *dai_runtime;
788         struct sdw_cdns_pdi *pdi;
789         struct sdw_stream_config sconfig;
790         struct sdw_port_config *pconfig;
791         int ch, dir;
792         int ret;
793
794         dai_runtime = cdns->dai_runtime_array[dai->id];
795         if (!dai_runtime)
796                 return -EIO;
797
798         ch = params_channels(params);
799         if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
800                 dir = SDW_DATA_DIR_RX;
801         else
802                 dir = SDW_DATA_DIR_TX;
803
804         pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
805
806         if (!pdi) {
807                 ret = -EINVAL;
808                 goto error;
809         }
810
811         /* do run-time configurations for SHIM, ALH and PDI/PORT */
812         intel_pdi_shim_configure(sdw, pdi);
813         intel_pdi_alh_configure(sdw, pdi);
814         sdw_cdns_config_stream(cdns, ch, dir, pdi);
815
816         /* store pdi and hw_params, may be needed in prepare step */
817         dai_runtime->paused = false;
818         dai_runtime->suspended = false;
819         dai_runtime->pdi = pdi;
820         dai_runtime->hw_params = params;
821
822         /* Inform DSP about PDI stream number */
823         ret = intel_params_stream(sdw, substream->stream, dai, params,
824                                   sdw->instance,
825                                   pdi->intel_alh_id);
826         if (ret)
827                 goto error;
828
829         sconfig.direction = dir;
830         sconfig.ch_count = ch;
831         sconfig.frame_rate = params_rate(params);
832         sconfig.type = dai_runtime->stream_type;
833
834         sconfig.bps = snd_pcm_format_width(params_format(params));
835
836         /* Port configuration */
837         pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
838         if (!pconfig) {
839                 ret = -ENOMEM;
840                 goto error;
841         }
842
843         pconfig->num = pdi->num;
844         pconfig->ch_mask = (1 << ch) - 1;
845
846         ret = sdw_stream_add_master(&cdns->bus, &sconfig,
847                                     pconfig, 1, dai_runtime->stream);
848         if (ret)
849                 dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
850
851         kfree(pconfig);
852 error:
853         return ret;
854 }
855
856 static int intel_prepare(struct snd_pcm_substream *substream,
857                          struct snd_soc_dai *dai)
858 {
859         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
860         struct sdw_intel *sdw = cdns_to_intel(cdns);
861         struct sdw_cdns_dai_runtime *dai_runtime;
862         int ch, dir;
863         int ret = 0;
864
865         dai_runtime = cdns->dai_runtime_array[dai->id];
866         if (!dai_runtime) {
867                 dev_err(dai->dev, "failed to get dai runtime in %s\n",
868                         __func__);
869                 return -EIO;
870         }
871
872         if (dai_runtime->suspended) {
873                 dai_runtime->suspended = false;
874
875                 /*
876                  * .prepare() is called after system resume, where we
877                  * need to reinitialize the SHIM/ALH/Cadence IP.
878                  * .prepare() is also called to deal with underflows,
879                  * but in those cases we cannot touch ALH/SHIM
880                  * registers.
881                  */
882
883                 /* configure stream */
884                 ch = params_channels(dai_runtime->hw_params);
885                 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
886                         dir = SDW_DATA_DIR_RX;
887                 else
888                         dir = SDW_DATA_DIR_TX;
889
890                 intel_pdi_shim_configure(sdw, dai_runtime->pdi);
891                 intel_pdi_alh_configure(sdw, dai_runtime->pdi);
892                 sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
893
894                 /* Inform DSP about PDI stream number */
895                 ret = intel_params_stream(sdw, substream->stream, dai,
896                                           dai_runtime->hw_params,
897                                           sdw->instance,
898                                           dai_runtime->pdi->intel_alh_id);
899         }
900
901         return ret;
902 }
903
904 static int
905 intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
906 {
907         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
908         struct sdw_intel *sdw = cdns_to_intel(cdns);
909         struct sdw_cdns_dai_runtime *dai_runtime;
910         int ret;
911
912         dai_runtime = cdns->dai_runtime_array[dai->id];
913         if (!dai_runtime)
914                 return -EIO;
915
916         /*
917          * The sdw stream state will transition to RELEASED when stream->
918          * master_list is empty. So the stream state will transition to
919          * DEPREPARED for the first cpu-dai and to RELEASED for the last
920          * cpu-dai.
921          */
922         ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
923         if (ret < 0) {
924                 dev_err(dai->dev, "remove master from stream %s failed: %d\n",
925                         dai_runtime->stream->name, ret);
926                 return ret;
927         }
928
929         ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
930         if (ret < 0) {
931                 dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
932                 return ret;
933         }
934
935         dai_runtime->hw_params = NULL;
936         dai_runtime->pdi = NULL;
937
938         return 0;
939 }
940
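/*
 * backing implementations for the .set_stream/.get_stream DAI ops: attach
 * an sdw_stream to the DAI via the Cadence helper, or return the stream
 * stored in the dai_runtime array
 */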
941 static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
942                                     void *stream, int direction)
943 {
944         return cdns_set_sdw_stream(dai, stream, direction);
945 }
946
947 static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
948                                   int direction)
949 {
950         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
951         struct sdw_cdns_dai_runtime *dai_runtime;
952
953         dai_runtime = cdns->dai_runtime_array[dai->id];
954         if (!dai_runtime)
955                 return ERR_PTR(-EINVAL);
956
957         return dai_runtime->stream;
958 }
959
960 static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
961 {
962         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
963         struct sdw_intel *sdw = cdns_to_intel(cdns);
964         struct sdw_intel_link_res *res = sdw->link_res;
965         struct sdw_cdns_dai_runtime *dai_runtime;
966         int ret = 0;
967
968         /*
969          * The .trigger callback is used to send required IPC to audio
970          * firmware. The .free_stream callback will still be called
971          * by intel_free_stream() in the TRIGGER_SUSPEND case.
972          */
973         if (res->ops && res->ops->trigger)
974                 res->ops->trigger(dai, cmd, substream->stream);
975
976         dai_runtime = cdns->dai_runtime_array[dai->id];
977         if (!dai_runtime) {
978                 dev_err(dai->dev, "failed to get dai runtime in %s\n",
979                         __func__);
980                 return -EIO;
981         }
982
983         switch (cmd) {
984         case SNDRV_PCM_TRIGGER_SUSPEND:
985
986                 /*
987                  * The .prepare callback is used to deal with xruns and resume operations.
988                  * In the case of xruns, the DMAs and SHIM registers cannot be touched,
989                  * but for resume operations the DMAs and SHIM registers need to be initialized.
990                  * The .trigger callback is used to track the suspend case only.
991                  */
992
993                 dai_runtime->suspended = true;
994
995                 ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
996                 break;
997
998         case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
999                 dai_runtime->paused = true;
1000                 break;
1001         case SNDRV_PCM_TRIGGER_STOP:
1002         case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
1003                 dai_runtime->paused = false;
1004                 break;
1005         default:
1006                 break;
1007         }
1008
1009         return ret;
1010 }
1011
1012 static int intel_component_probe(struct snd_soc_component *component)
1013 {
1014         int ret;
1015
1016         /*
1017          * Make sure the device is pm_runtime_active before initiating
1018          * bus transactions during the card registration.
1019          * We use pm_runtime_resume() here so that we don't need to take
1020          * a reference and release it immediately afterwards.
1021          */
1022         ret = pm_runtime_resume(component->dev);
1023         if (ret < 0 && ret != -EACCES)
1024                 return ret;
1025
1026         return 0;
1027 }
1028
1029 static int intel_component_dais_suspend(struct snd_soc_component *component)
1030 {
1031         struct snd_soc_dai *dai;
1032
1033         /*
1034          * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
1035          * does not throw the TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
1036          * Since the component suspend is called last, we can trap this corner case
1037          * and force the DAIs to release their resources.
1038          */
1039         for_each_component_dais(component, dai) {
1040                 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1041                 struct sdw_intel *sdw = cdns_to_intel(cdns);
1042                 struct sdw_cdns_dai_runtime *dai_runtime;
1043                 int ret;
1044
1045                 dai_runtime = cdns->dai_runtime_array[dai->id];
1046
1047                 if (!dai_runtime)
1048                         continue;
1049
1050                 if (dai_runtime->suspended)
1051                         continue;
1052
1053                 if (dai_runtime->paused) {
1054                         dai_runtime->suspended = true;
1055
1056                         ret = intel_free_stream(sdw, dai_runtime->direction, dai, sdw->instance);
1057                         if (ret < 0)
1058                                 return ret;
1059                 }
1060         }
1061
1062         return 0;
1063 }
1064
1065 static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
1066         .hw_params = intel_hw_params,
1067         .prepare = intel_prepare,
1068         .hw_free = intel_hw_free,
1069         .trigger = intel_trigger,
1070         .set_stream = intel_pcm_set_sdw_stream,
1071         .get_stream = intel_get_sdw_stream,
1072 };
1073
1074 static const struct snd_soc_component_driver dai_component = {
1075         .name                   = "soundwire",
1076         .probe                  = intel_component_probe,
1077         .suspend                = intel_component_dais_suspend,
1078         .legacy_dai_naming      = 1,
1079 };
1080
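/*
 * allocate DAI driver entries for 'num' PDIs of the given type, starting
 * at index 'off'. Playback is exposed for OUT/BD PDIs and capture for
 * IN/BD PDIs; rates/formats are fixed to 48 kHz / S16_LE for now.
 */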
1081 static int intel_create_dai(struct sdw_cdns *cdns,
1082                             struct snd_soc_dai_driver *dais,
1083                             enum intel_pdi_type type,
1084                             u32 num, u32 off, u32 max_ch)
1085 {
1086         int i;
1087
1088         if (num == 0)
1089                 return 0;
1090
1091          /* TODO: Read supported rates/formats from hardware */
1092         for (i = off; i < (off + num); i++) {
1093                 dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
1094                                               "SDW%d Pin%d",
1095                                               cdns->instance, i);
1096                 if (!dais[i].name)
1097                         return -ENOMEM;
1098
1099                 if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
1100                         dais[i].playback.channels_min = 1;
1101                         dais[i].playback.channels_max = max_ch;
1102                         dais[i].playback.rates = SNDRV_PCM_RATE_48000;
1103                         dais[i].playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
1104                 }
1105
1106                 if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
1107                         dais[i].capture.channels_min = 1;
1108                         dais[i].capture.channels_max = max_ch;
1109                         dais[i].capture.rates = SNDRV_PCM_RATE_48000;
1110                         dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
1111                 }
1112
1113                 dais[i].ops = &intel_pcm_dai_ops;
1114         }
1115
1116         return 0;
1117 }
1118
1119 static int intel_register_dai(struct sdw_intel *sdw)
1120 {
1121         struct sdw_cdns_dai_runtime **dai_runtime_array;
1122         struct sdw_cdns_stream_config config;
1123         struct sdw_cdns *cdns = &sdw->cdns;
1124         struct sdw_cdns_streams *stream;
1125         struct snd_soc_dai_driver *dais;
1126         int num_dai, ret, off = 0;
1127
1128         /* Read the PDI config and initialize cadence PDI */
1129         intel_pdi_init(sdw, &config);
1130         ret = sdw_cdns_pdi_init(cdns, config);
1131         if (ret)
1132                 return ret;
1133
1134         intel_pdi_ch_update(sdw);
1135
1136         /* DAIs are created based on total number of PDIs supported */
1137         num_dai = cdns->pcm.num_pdi;
1138
1139         dai_runtime_array = devm_kcalloc(cdns->dev, num_dai,
1140                                          sizeof(struct sdw_cdns_dai_runtime *),
1141                                          GFP_KERNEL);
1142         if (!dai_runtime_array)
1143                 return -ENOMEM;
1144         cdns->dai_runtime_array = dai_runtime_array;
1145
1146         dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
1147         if (!dais)
1148                 return -ENOMEM;
1149
1150         /* Create PCM DAIs */
1151         stream = &cdns->pcm;
1152
1153         ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
1154                                off, stream->num_ch_in);
1155         if (ret)
1156                 return ret;
1157
1158         off += cdns->pcm.num_in;
1159         ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
1160                                off, stream->num_ch_out);
1161         if (ret)
1162                 return ret;
1163
1164         off += cdns->pcm.num_out;
1165         ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
1166                                off, stream->num_ch_bd);
1167         if (ret)
1168                 return ret;
1169
1170         return devm_snd_soc_register_component(cdns->dev, &dai_component,
1171                                                dais, num_dai);
1172 }
1173
1174 static int intel_start_bus(struct sdw_intel *sdw)
1175 {
1176         struct device *dev = sdw->cdns.dev;
1177         struct sdw_cdns *cdns = &sdw->cdns;
1178         struct sdw_bus *bus = &cdns->bus;
1179         int ret;
1180
1181         ret = sdw_cdns_enable_interrupt(cdns, true);
1182         if (ret < 0) {
1183                 dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
1184                 return ret;
1185         }
1186
1187         /*
1188          * follow recommended programming flows to avoid timeouts when
1189          * gsync is enabled
1190          */
1191         if (bus->multi_link)
1192                 intel_shim_sync_arm(sdw);
1193
1194         ret = sdw_cdns_init(cdns);
1195         if (ret < 0) {
1196                 dev_err(dev, "%s: unable to initialize Cadence IP: %d\n", __func__, ret);
1197                 goto err_interrupt;
1198         }
1199
1200         ret = sdw_cdns_exit_reset(cdns);
1201         if (ret < 0) {
1202                 dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
1203                 goto err_interrupt;
1204         }
1205
1206         if (bus->multi_link) {
1207                 ret = intel_shim_sync_go(sdw);
1208                 if (ret < 0) {
1209                         dev_err(dev, "%s: sync go failed: %d\n", __func__, ret);
1210                         goto err_interrupt;
1211                 }
1212         }
1213         sdw_cdns_check_self_clearing_bits(cdns, __func__,
1214                                           true, INTEL_MASTER_RESET_ITERATIONS);
1215
1216         return 0;
1217
1218 err_interrupt:
1219         sdw_cdns_enable_interrupt(cdns, false);
1220         return ret;
1221 }
1222
1223 static int intel_start_bus_after_reset(struct sdw_intel *sdw)
1224 {
1225         struct device *dev = sdw->cdns.dev;
1226         struct sdw_cdns *cdns = &sdw->cdns;
1227         struct sdw_bus *bus = &cdns->bus;
1228         bool clock_stop0;
1229         int status;
1230         int ret;
1231
1232         /*
1233          * An exception condition occurs for the CLK_STOP_BUS_RESET
1234          * case if one or more masters remain active. In this condition,
1235          * all the masters are powered on because they are in the same power
1236          * domain. The Master can preserve its context for clock stop0, so
1237          * there is no need to clear the slave status and reset the bus.
1238          */
1239         clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1240
1241         if (!clock_stop0) {
1242
1243                 /*
1244                  * make sure all Slaves are tagged as UNATTACHED and
1245                  * provide reason for reinitialization
1246                  */
1247
1248                 status = SDW_UNATTACH_REQUEST_MASTER_RESET;
1249                 sdw_clear_slave_status(bus, status);
1250
1251                 ret = sdw_cdns_enable_interrupt(cdns, true);
1252                 if (ret < 0) {
1253                         dev_err(dev, "cannot enable interrupts during resume\n");
1254                         return ret;
1255                 }
1256
1257                 /*
1258                  * follow recommended programming flows to avoid
1259                  * timeouts when gsync is enabled
1260                  */
1261                 if (bus->multi_link)
1262                         intel_shim_sync_arm(sdw);
1263
1264                 /*
1265                  * Re-initialize the IP since it was powered-off
1266                  */
1267                 sdw_cdns_init(&sdw->cdns);
1268
1269         } else {
1270                 ret = sdw_cdns_enable_interrupt(cdns, true);
1271                 if (ret < 0) {
1272                         dev_err(dev, "cannot enable interrupts during resume\n");
1273                         return ret;
1274                 }
1275         }
1276
1277         ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
1278         if (ret < 0) {
1279                 dev_err(dev, "unable to restart clock during resume\n");
1280                 goto err_interrupt;
1281         }
1282
1283         if (!clock_stop0) {
1284                 ret = sdw_cdns_exit_reset(cdns);
1285                 if (ret < 0) {
1286                         dev_err(dev, "unable to exit bus reset sequence during resume\n");
1287                         goto err_interrupt;
1288                 }
1289
1290                 if (bus->multi_link) {
1291                         ret = intel_shim_sync_go(sdw);
1292                         if (ret < 0) {
1293                                 dev_err(sdw->cdns.dev, "sync go failed during resume\n");
1294                                 goto err_interrupt;
1295                         }
1296                 }
1297         }
1298         sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
1299
1300         return 0;
1301
1302 err_interrupt:
1303         sdw_cdns_enable_interrupt(cdns, false);
1304         return ret;
1305 }
1306
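/*
 * report an error if the Cadence IP does not report the clock as stopped,
 * i.e. the configuration at suspend time was invalid
 */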
1307 static void intel_check_clock_stop(struct sdw_intel *sdw)
1308 {
1309         struct device *dev = sdw->cdns.dev;
1310         bool clock_stop0;
1311
1312         clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1313         if (!clock_stop0)
1314                 dev_err(dev, "%s: invalid configuration, clock was not stopped\n", __func__);
1315 }
1316
1317 static int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
1318 {
1319         struct device *dev = sdw->cdns.dev;
1320         struct sdw_cdns *cdns = &sdw->cdns;
1321         int ret;
1322
1323         ret = sdw_cdns_enable_interrupt(cdns, true);
1324         if (ret < 0) {
1325                 dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
1326                 return ret;
1327         }
1328
1329         ret = sdw_cdns_clock_restart(cdns, false);
1330         if (ret < 0) {
1331                 dev_err(dev, "%s: unable to restart clock: %d\n", __func__, ret);
1332                 sdw_cdns_enable_interrupt(cdns, false);
1333                 return ret;
1334         }
1335
1336         sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
1337                                           true, INTEL_MASTER_RESET_ITERATIONS);
1338
1339         return 0;
1340 }
1341
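/*
 * stop the bus: optionally enter clock-stop mode, disable interrupts,
 * power down the link, and arm the SHIM wake detection only if the clock
 * was actually stopped
 */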
1342 static int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop)
1343 {
1344         struct device *dev = sdw->cdns.dev;
1345         struct sdw_cdns *cdns = &sdw->cdns;
1346         bool wake_enable = false;
1347         int ret;
1348
1349         if (clock_stop) {
1350                 ret = sdw_cdns_clock_stop(cdns, true);
1351                 if (ret < 0)
1352                         dev_err(dev, "%s: cannot stop clock: %d\n", __func__, ret);
1353                 else
1354                         wake_enable = true;
1355         }
1356
1357         ret = sdw_cdns_enable_interrupt(cdns, false);
1358         if (ret < 0) {
1359                 dev_err(dev, "%s: cannot disable interrupts: %d\n", __func__, ret);
1360                 return ret;
1361         }
1362
1363         ret = intel_link_power_down(sdw);
1364         if (ret) {
1365                 dev_err(dev, "%s: Link power down failed: %d\n", __func__, ret);
1366                 return ret;
1367         }
1368
1369         intel_shim_wake(sdw, wake_enable);
1370
1371         return 0;
1372 }
1373
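/*
 * hardware operations for 'CNL'-generation Intel controllers, exported in
 * the SOUNDWIRE_INTEL namespace; the table groups the debugfs, DAI
 * registration, power, wake and bank-switch helpers implemented above
 */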
1374 const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops = {
1375         .debugfs_init = intel_debugfs_init,
1376         .debugfs_exit = intel_debugfs_exit,
1377
1378         .register_dai = intel_register_dai,
1379
1380         .check_clock_stop = intel_check_clock_stop,
1381         .start_bus = intel_start_bus,
1382         .start_bus_after_reset = intel_start_bus_after_reset,
1383         .start_bus_after_clock_stop = intel_start_bus_after_clock_stop,
1384         .stop_bus = intel_stop_bus,
1385
1386         .link_power_up = intel_link_power_up,
1387         .link_power_down = intel_link_power_down,
1388
1389         .shim_check_wake = intel_shim_check_wake,
1390         .shim_wake = intel_shim_wake,
1391
1392         .pre_bank_switch = intel_pre_bank_switch,
1393         .post_bank_switch = intel_post_bank_switch,
1394 };
1395 EXPORT_SYMBOL_NS(sdw_intel_cnl_hw_ops, SOUNDWIRE_INTEL);
1396