drivers/soundwire/intel.c
1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 // Copyright(c) 2015-17 Intel Corporation.
3
4 /*
5  * SoundWire Intel Master Driver
6  */
7
8 #include <linux/acpi.h>
9 #include <linux/debugfs.h>
10 #include <linux/delay.h>
11 #include <linux/io.h>
12 #include <sound/pcm_params.h>
13 #include <linux/pm_runtime.h>
14 #include <sound/soc.h>
15 #include <linux/soundwire/sdw_registers.h>
16 #include <linux/soundwire/sdw.h>
17 #include <linux/soundwire/sdw_intel.h>
18 #include "cadence_master.h"
19 #include "bus.h"
20 #include "intel.h"
21
22
23 enum intel_pdi_type {
24         INTEL_PDI_IN = 0,
25         INTEL_PDI_OUT = 1,
26         INTEL_PDI_BD = 2,
27 };
28
29 #define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
30
31 /*
32  * Read, write helpers for HW registers
33  */
34 static inline int intel_readl(void __iomem *base, int offset)
35 {
36         return readl(base + offset);
37 }
38
39 static inline void intel_writel(void __iomem *base, int offset, int value)
40 {
41         writel(value, base + offset);
42 }
43
44 static inline u16 intel_readw(void __iomem *base, int offset)
45 {
46         return readw(base + offset);
47 }
48
49 static inline void intel_writew(void __iomem *base, int offset, u16 value)
50 {
51         writew(value, base + offset);
52 }
53
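/*
 * Polling helpers: intel_wait_bit() polls a register until the bits in
 * 'mask' match 'target', sleeping 50-100us between reads for at most 10
 * iterations (roughly 1ms) and returning -EAGAIN on timeout.
 * intel_set_bit()/intel_clear_bit() write 'value' and then wait for the
 * masked bits to become set or cleared respectively.
 */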
54 static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
55 {
56         int timeout = 10;
57         u32 reg_read;
58
59         do {
60                 reg_read = readl(base + offset);
61                 if ((reg_read & mask) == target)
62                         return 0;
63
64                 timeout--;
65                 usleep_range(50, 100);
66         } while (timeout != 0);
67
68         return -EAGAIN;
69 }
70
71 static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
72 {
73         writel(value, base + offset);
74         return intel_wait_bit(base, offset, mask, 0);
75 }
76
77 static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
78 {
79         writel(value, base + offset);
80         return intel_wait_bit(base, offset, mask, mask);
81 }
82
83 /*
84  * debugfs
85  */
86 #ifdef CONFIG_DEBUG_FS
87
88 #define RD_BUF (2 * PAGE_SIZE)
89
90 static ssize_t intel_sprintf(void __iomem *mem, bool l,
91                              char *buf, size_t pos, unsigned int reg)
92 {
93         int value;
94
95         if (l)
96                 value = intel_readl(mem, reg);
97         else
98                 value = intel_readw(mem, reg);
99
100         return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
101 }
102
103 static int intel_reg_show(struct seq_file *s_file, void *data)
104 {
105         struct sdw_intel *sdw = s_file->private;
106         void __iomem *s = sdw->link_res->shim;
107         void __iomem *a = sdw->link_res->alh;
108         char *buf;
109         ssize_t ret;
110         int i, j;
111         unsigned int links, reg;
112
113         buf = kzalloc(RD_BUF, GFP_KERNEL);
114         if (!buf)
115                 return -ENOMEM;
116
117         links = intel_readl(s, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_LCOUNT_MASK;
118
119         ret = scnprintf(buf, RD_BUF, "Register  Value\n");
120         ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
121
122         for (i = 0; i < links; i++) {
123                 reg = SDW_SHIM_LCAP + i * 4;
124                 ret += intel_sprintf(s, true, buf, ret, reg);
125         }
126
127         for (i = 0; i < links; i++) {
128                 ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
129                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
130                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
131                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
132                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
133                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
134                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));
135
136                 ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");
137
138                 /*
139                  * the value 10 is the number of PDIs. We will need a
140                  * cleanup to remove hard-coded Intel configurations
141                  * from cadence_master.c
142                  */
143                 for (j = 0; j < 10; j++) {
144                         ret += intel_sprintf(s, false, buf, ret,
145                                         SDW_SHIM_PCMSYCHM(i, j));
146                         ret += intel_sprintf(s, false, buf, ret,
147                                         SDW_SHIM_PCMSYCHC(i, j));
148                 }
149                 ret += scnprintf(buf + ret, RD_BUF - ret, "\n IOCTL, CTMCTL\n");
150
151                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
152                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
153         }
154
155         ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
156         ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
157         ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);
158
159         ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
160         for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
161                 ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
162
163         seq_printf(s_file, "%s", buf);
164         kfree(buf);
165
166         return 0;
167 }
168 DEFINE_SHOW_ATTRIBUTE(intel_reg);
169
170 static int intel_set_m_datamode(void *data, u64 value)
171 {
172         struct sdw_intel *sdw = data;
173         struct sdw_bus *bus = &sdw->cdns.bus;
174
175         if (value > SDW_PORT_DATA_MODE_STATIC_1)
176                 return -EINVAL;
177
178         /* Userspace changed the hardware state behind the kernel's back */
179         add_taint(TAINT_USER, LOCKDEP_STILL_OK);
180
181         bus->params.m_data_mode = value;
182
183         return 0;
184 }
185 DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
186                          intel_set_m_datamode, "%llu\n");
187
188 static int intel_set_s_datamode(void *data, u64 value)
189 {
190         struct sdw_intel *sdw = data;
191         struct sdw_bus *bus = &sdw->cdns.bus;
192
193         if (value > SDW_PORT_DATA_MODE_STATIC_1)
194                 return -EINVAL;
195
196         /* Userspace changed the hardware state behind the kernel's back */
197         add_taint(TAINT_USER, LOCKDEP_STILL_OK);
198
199         bus->params.s_data_mode = value;
200
201         return 0;
202 }
203 DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
204                          intel_set_s_datamode, "%llu\n");
205
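/*
 * The files below are created under the per-master debugfs directory
 * provided by the bus. Assuming debugfs is mounted at /sys/kernel/debug,
 * a register dump can typically be read with something like:
 *
 *   cat /sys/kernel/debug/soundwire/master-*/intel-sdw/intel-registers
 *
 * and the port data modes can be overridden for test purposes by writing
 * to intel-m-datamode / intel-s-datamode, which taints the kernel. The
 * exact parent path depends on the bus debugfs layout.
 */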
206 static void intel_debugfs_init(struct sdw_intel *sdw)
207 {
208         struct dentry *root = sdw->cdns.bus.debugfs;
209
210         if (!root)
211                 return;
212
213         sdw->debugfs = debugfs_create_dir("intel-sdw", root);
214
215         debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
216                             &intel_reg_fops);
217
218         debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
219                             &intel_set_m_datamode_fops);
220
221         debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
222                             &intel_set_s_datamode_fops);
223
224         sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
225 }
226
227 static void intel_debugfs_exit(struct sdw_intel *sdw)
228 {
229         debugfs_remove_recursive(sdw->debugfs);
230 }
231 #else
232 static void intel_debugfs_init(struct sdw_intel *sdw) {}
233 static void intel_debugfs_exit(struct sdw_intel *sdw) {}
234 #endif /* CONFIG_DEBUG_FS */
235
236 /*
237  * shim ops
238  */
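/*
 * The two helpers below hand control of the link I/Os back and forth
 * between the SHIM "glue" logic and the Cadence Master IP: the glue owns
 * the pads while a link is powered down, and ownership is transferred to
 * the Master IP from intel_shim_init() once the link is powered up.
 */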
239 /* this needs to be called with shim_lock */
240 static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
241 {
242         void __iomem *shim = sdw->link_res->shim;
243         unsigned int link_id = sdw->instance;
244         u16 ioctl;
245
246         /* Switch to MIP from Glue logic */
247         ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
248
249         ioctl &= ~(SDW_SHIM_IOCTL_DOE);
250         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
251         usleep_range(10, 15);
252
253         ioctl &= ~(SDW_SHIM_IOCTL_DO);
254         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
255         usleep_range(10, 15);
256
257         ioctl |= (SDW_SHIM_IOCTL_MIF);
258         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
259         usleep_range(10, 15);
260
261         ioctl &= ~(SDW_SHIM_IOCTL_BKE);
262         ioctl &= ~(SDW_SHIM_IOCTL_COE);
263         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
264         usleep_range(10, 15);
265
266         /* at this point Master IP has full control of the I/Os */
267 }
268
269 /* this needs to be called with shim_lock */
270 static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
271 {
272         unsigned int link_id = sdw->instance;
273         void __iomem *shim = sdw->link_res->shim;
274         u16 ioctl;
275
276         /* Glue logic */
277         ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
278         ioctl |= SDW_SHIM_IOCTL_BKE;
279         ioctl |= SDW_SHIM_IOCTL_COE;
280         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
281         usleep_range(10, 15);
282
283         ioctl &= ~(SDW_SHIM_IOCTL_MIF);
284         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
285         usleep_range(10, 15);
286
287         /* at this point Integration Glue has full control of the I/Os */
288 }
289
290 /* this needs to be called with shim_lock */
291 static void intel_shim_init(struct sdw_intel *sdw)
292 {
293         void __iomem *shim = sdw->link_res->shim;
294         unsigned int link_id = sdw->instance;
295         u16 ioctl = 0, act = 0;
296
297         /* Initialize Shim */
298         ioctl |= SDW_SHIM_IOCTL_BKE;
299         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
300         usleep_range(10, 15);
301
302         ioctl |= SDW_SHIM_IOCTL_WPDD;
303         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
304         usleep_range(10, 15);
305
306         ioctl |= SDW_SHIM_IOCTL_DO;
307         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
308         usleep_range(10, 15);
309
310         ioctl |= SDW_SHIM_IOCTL_DOE;
311         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
312         usleep_range(10, 15);
313
314         intel_shim_glue_to_master_ip(sdw);
315
316         u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
317         act |= SDW_SHIM_CTMCTL_DACTQE;
318         act |= SDW_SHIM_CTMCTL_DODS;
319         intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
320         usleep_range(10, 15);
321 }
322
323 static int intel_shim_check_wake(struct sdw_intel *sdw)
324 {
325         void __iomem *shim;
326         u16 wake_sts;
327
328         shim = sdw->link_res->shim;
329         wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
330
331         return wake_sts & BIT(sdw->instance);
332 }
333
334 static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
335 {
336         void __iomem *shim = sdw->link_res->shim;
337         unsigned int link_id = sdw->instance;
338         u16 wake_en, wake_sts;
339
340         mutex_lock(sdw->link_res->shim_lock);
341         wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
342
343         if (wake_enable) {
344                 /* Enable the wakeup */
345                 wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
346                 intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
347         } else {
348                 /* Disable the wake up interrupt */
349                 wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
350                 intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
351
352                 /* Clear wake status */
353                 wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
354                 wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
355                 intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
356         }
357         mutex_unlock(sdw->link_res->shim_lock);
358 }
359
360 static int intel_link_power_up(struct sdw_intel *sdw)
361 {
362         unsigned int link_id = sdw->instance;
363         void __iomem *shim = sdw->link_res->shim;
364         u32 *shim_mask = sdw->link_res->shim_mask;
365         struct sdw_bus *bus = &sdw->cdns.bus;
366         struct sdw_master_prop *prop = &bus->prop;
367         u32 spa_mask, cpa_mask;
368         u32 link_control;
369         int ret = 0;
370         u32 syncprd;
371         u32 sync_reg;
372
373         mutex_lock(sdw->link_res->shim_lock);
374
375         /*
376          * The hardware relies on an internal counter, typically 4kHz,
377          * to generate the SoundWire SSP - which defines a 'safe'
378          * synchronization point between commands and audio transport
379          * and allows for multi link synchronization. The SYNCPRD value
380          * is only dependent on the oscillator clock provided to
381          * the IP, so adjust based on _DSD properties reported in DSDT
382          * tables. The values reported are based on either 24MHz
383          * (CNL/CML) or 38.4 MHz (ICL/TGL+).
384          */
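        /*
         * In practice SYNCPRD encodes the number of mclk cycles in one
         * 4 kHz sync period minus one, e.g. 24 MHz / 4 kHz - 1 = 5999 and
         * 38.4 MHz / 4 kHz - 1 = 9599. The modulo-6 MHz test below simply
         * distinguishes the 24 MHz case (evenly divisible) from 38.4 MHz.
         */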
385         if (prop->mclk_freq % 6000000)
386                 syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
387         else
388                 syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
389
390         if (!*shim_mask) {
391                 dev_dbg(sdw->cdns.dev, "powering up all links\n");
392
393                 /* we first need to program the SyncPRD/CPU registers */
394                 dev_dbg(sdw->cdns.dev,
395                         "first link up, programming SYNCPRD\n");
396
397                 /* set SyncPRD period */
398                 sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
399                 u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
400
401                 /* Set SyncCPU bit */
402                 sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
403                 intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
404
405                 /* Link power up sequence */
406                 link_control = intel_readl(shim, SDW_SHIM_LCTL);
407
408                 /* only power-up enabled links */
409                 spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
410                 cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
411
412                 link_control |=  spa_mask;
413
414                 ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
415                 if (ret < 0) {
416                         dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
417                         goto out;
418                 }
419
420                 /* SyncCPU will change once link is active */
421                 ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
422                                      SDW_SHIM_SYNC_SYNCCPU, 0);
423                 if (ret < 0) {
424                         dev_err(sdw->cdns.dev,
425                                 "Failed to set SHIM_SYNC: %d\n", ret);
426                         goto out;
427                 }
428         }
429
430         *shim_mask |= BIT(link_id);
431
432         sdw->cdns.link_up = true;
433
434         intel_shim_init(sdw);
435
436 out:
437         mutex_unlock(sdw->link_res->shim_lock);
438
439         return ret;
440 }
441
442 static int intel_link_power_down(struct sdw_intel *sdw)
443 {
444         u32 link_control, spa_mask, cpa_mask;
445         unsigned int link_id = sdw->instance;
446         void __iomem *shim = sdw->link_res->shim;
447         u32 *shim_mask = sdw->link_res->shim_mask;
448         int ret = 0;
449
450         mutex_lock(sdw->link_res->shim_lock);
451
452         if (!(*shim_mask & BIT(link_id)))
453                 dev_err(sdw->cdns.dev,
454                         "%s: Unbalanced power-up/down calls\n", __func__);
455
456         sdw->cdns.link_up = false;
457
458         intel_shim_master_ip_to_glue(sdw);
459
460         *shim_mask &= ~BIT(link_id);
461
462         if (!*shim_mask) {
463
464                 dev_dbg(sdw->cdns.dev, "powering down all links\n");
465
466                 /* Link power down sequence */
467                 link_control = intel_readl(shim, SDW_SHIM_LCTL);
468
469                 /* only power-down enabled links */
470                 spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
471                 cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
472
473                 link_control &=  spa_mask;
474
475                 ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
476                 if (ret < 0) {
477                         dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
478
479                         /*
480                          * we leave the sdw->cdns.link_up flag as false since we've disabled
481                          * the link at this point and cannot handle interrupts any longer.
482                          */
483                 }
484         }
485
486         mutex_unlock(sdw->link_res->shim_lock);
487
488         return ret;
489 }
490
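/*
 * Multi-link bank switches use an arm/go model: intel_shim_sync_arm() sets
 * the per-link CMDSYNC bit, and a single write of SYNCGO in
 * intel_shim_sync_go_unlocked() then triggers the bank switch on all armed
 * links at once, clearing their CMDSYNC bits.
 */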
491 static void intel_shim_sync_arm(struct sdw_intel *sdw)
492 {
493         void __iomem *shim = sdw->link_res->shim;
494         u32 sync_reg;
495
496         mutex_lock(sdw->link_res->shim_lock);
497
498         /* update SYNC register */
499         sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
500         sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
501         intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
502
503         mutex_unlock(sdw->link_res->shim_lock);
504 }
505
506 static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
507 {
508         void __iomem *shim = sdw->link_res->shim;
509         u32 sync_reg;
510         int ret;
511
512         /* Read SYNC register */
513         sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
514
515         /*
516          * Set the SyncGO bit to synchronously trigger a bank switch for
517          * all the Masters. A write to the SYNCGO bit clears the CMDSYNC bit
518          * for all the Masters.
519          */
520         sync_reg |= SDW_SHIM_SYNC_SYNCGO;
521
522         ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
523                               SDW_SHIM_SYNC_SYNCGO);
524
525         if (ret < 0)
526                 dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret);
527
528         return ret;
529 }
530
531 static int intel_shim_sync_go(struct sdw_intel *sdw)
532 {
533         int ret;
534
535         mutex_lock(sdw->link_res->shim_lock);
536
537         ret = intel_shim_sync_go_unlocked(sdw);
538
539         mutex_unlock(sdw->link_res->shim_lock);
540
541         return ret;
542 }
543
544 /*
545  * PDI routines
546  */
547 static void intel_pdi_init(struct sdw_intel *sdw,
548                            struct sdw_cdns_stream_config *config)
549 {
550         void __iomem *shim = sdw->link_res->shim;
551         unsigned int link_id = sdw->instance;
552         int pcm_cap;
553
554         /* PCM Stream Capability */
555         pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
556
557         config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
558         config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
559         config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
560
561         dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
562                 config->pcm_bd, config->pcm_in, config->pcm_out);
563 }
564
565 static int
566 intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
567 {
568         void __iomem *shim = sdw->link_res->shim;
569         unsigned int link_id = sdw->instance;
570         int count;
571
572         count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
573
574         /*
575          * WORKAROUND: on all existing Intel controllers, PDI
576          * number 2 reports a channel count of 1 even though it
577          * supports 8 channels. Hardcode the count for PDI
578          * number 2.
579          */
580         if (pdi_num == 2)
581                 count = 7;
582
583         /* zero based values for channel count in register */
584         count++;
585
586         return count;
587 }
588
589 static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
590                                    struct sdw_cdns_pdi *pdi,
591                                    unsigned int num_pdi,
592                                    unsigned int *num_ch)
593 {
594         int i, ch_count = 0;
595
596         for (i = 0; i < num_pdi; i++) {
597                 pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
598                 ch_count += pdi->ch_count;
599                 pdi++;
600         }
601
602         *num_ch = ch_count;
603         return 0;
604 }
605
606 static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
607                                       struct sdw_cdns_streams *stream)
608 {
609         intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
610                                 &stream->num_ch_bd);
611
612         intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
613                                 &stream->num_ch_in);
614
615         intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
616                                 &stream->num_ch_out);
617
618         return 0;
619 }
620
621 static int intel_pdi_ch_update(struct sdw_intel *sdw)
622 {
623         intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
624
625         return 0;
626 }
627
628 static void
629 intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
630 {
631         void __iomem *shim = sdw->link_res->shim;
632         unsigned int link_id = sdw->instance;
633         int pdi_conf = 0;
634
635         /* the Bulk and PCM streams are not contiguous */
636         pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
637         if (pdi->num >= 2)
638                 pdi->intel_alh_id += 2;
639
640         /*
641          * Program stream parameters into the stream SHIM register.
642          * This is applicable to PCM streams only.
643          */
644         if (pdi->type != SDW_STREAM_PCM)
645                 return;
646
647         if (pdi->dir == SDW_DATA_DIR_RX)
648                 pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
649         else
650                 pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
651
652         u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
653         u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
654         u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
655
656         intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
657 }
658
659 static void
660 intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
661 {
662         void __iomem *alh = sdw->link_res->alh;
663         unsigned int link_id = sdw->instance;
664         unsigned int conf;
665
666         /* the Bulk and PCM streams are not contiguous */
667         pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
668         if (pdi->num >= 2)
669                 pdi->intel_alh_id += 2;
670
671         /* Program Stream config ALH register */
672         conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
673
674         u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
675         u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
676
677         intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
678 }
679
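/*
 * intel_params_stream() and intel_free_stream() do not touch the hardware
 * directly: they forward the stream/DAI information to the parent through
 * the callbacks registered in link_res->ops (typically implemented by the
 * DSP firmware driver) so that the matching host DMA/gateway can be set up
 * or released.
 */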
680 static int intel_params_stream(struct sdw_intel *sdw,
681                                int stream,
682                                struct snd_soc_dai *dai,
683                                struct snd_pcm_hw_params *hw_params,
684                                int link_id, int alh_stream_id)
685 {
686         struct sdw_intel_link_res *res = sdw->link_res;
687         struct sdw_intel_stream_params_data params_data;
688
689         params_data.stream = stream; /* direction */
690         params_data.dai = dai;
691         params_data.hw_params = hw_params;
692         params_data.link_id = link_id;
693         params_data.alh_stream_id = alh_stream_id;
694
695         if (res->ops && res->ops->params_stream && res->dev)
696                 return res->ops->params_stream(res->dev,
697                                                &params_data);
698         return -EIO;
699 }
700
701 static int intel_free_stream(struct sdw_intel *sdw,
702                              int stream,
703                              struct snd_soc_dai *dai,
704                              int link_id)
705 {
706         struct sdw_intel_link_res *res = sdw->link_res;
707         struct sdw_intel_stream_free_data free_data;
708
709         free_data.stream = stream; /* direction */
710         free_data.dai = dai;
711         free_data.link_id = link_id;
712
713         if (res->ops && res->ops->free_stream && res->dev)
714                 return res->ops->free_stream(res->dev,
715                                              &free_data);
716
717         return 0;
718 }
719
720 /*
721  * bank switch routines
722  */
723
724 static int intel_pre_bank_switch(struct sdw_intel *sdw)
725 {
726         struct sdw_cdns *cdns = &sdw->cdns;
727         struct sdw_bus *bus = &cdns->bus;
728
729         /* Write to register only for multi-link */
730         if (!bus->multi_link)
731                 return 0;
732
733         intel_shim_sync_arm(sdw);
734
735         return 0;
736 }
737
738 static int intel_post_bank_switch(struct sdw_intel *sdw)
739 {
740         struct sdw_cdns *cdns = &sdw->cdns;
741         struct sdw_bus *bus = &cdns->bus;
742         void __iomem *shim = sdw->link_res->shim;
743         int sync_reg, ret;
744
745         /* Write to register only for multi-link */
746         if (!bus->multi_link)
747                 return 0;
748
749         mutex_lock(sdw->link_res->shim_lock);
750
751         /* Read SYNC register */
752         sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
753
754         /*
755          * The post_bank_switch() op is called by the bus in a loop for
756          * all the Masters in the stream, with the expectation that
757          * we trigger the bank switch only for the first Master in the list
758          * and do nothing for the other Masters.
759          *
760          * So, set the SYNCGO bit only if the CMDSYNC bit is set for any Master.
761          */
762         if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) {
763                 ret = 0;
764                 goto unlock;
765         }
766
767         ret = intel_shim_sync_go_unlocked(sdw);
768 unlock:
769         mutex_unlock(sdw->link_res->shim_lock);
770
771         if (ret < 0)
772                 dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
773
774         return ret;
775 }
776
777 /*
778  * DAI routines
779  */
780
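/*
 * Each DAI takes a runtime PM reference on the link in .startup and drops
 * it in .shutdown; -EACCES is tolerated so that the DAIs keep working when
 * runtime PM is disabled for the device.
 */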
781 static int intel_startup(struct snd_pcm_substream *substream,
782                          struct snd_soc_dai *dai)
783 {
784         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
785         int ret;
786
787         ret = pm_runtime_resume_and_get(cdns->dev);
788         if (ret < 0 && ret != -EACCES) {
789                 dev_err_ratelimited(cdns->dev,
790                                     "pm_runtime_resume_and_get failed in %s, ret %d\n",
791                                     __func__, ret);
792                 return ret;
793         }
794         return 0;
795 }
796
797 static int intel_hw_params(struct snd_pcm_substream *substream,
798                            struct snd_pcm_hw_params *params,
799                            struct snd_soc_dai *dai)
800 {
801         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
802         struct sdw_intel *sdw = cdns_to_intel(cdns);
803         struct sdw_cdns_dai_runtime *dai_runtime;
804         struct sdw_cdns_pdi *pdi;
805         struct sdw_stream_config sconfig;
806         struct sdw_port_config *pconfig;
807         int ch, dir;
808         int ret;
809
810         dai_runtime = cdns->dai_runtime_array[dai->id];
811         if (!dai_runtime)
812                 return -EIO;
813
814         ch = params_channels(params);
815         if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
816                 dir = SDW_DATA_DIR_RX;
817         else
818                 dir = SDW_DATA_DIR_TX;
819
820         pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
821
822         if (!pdi) {
823                 ret = -EINVAL;
824                 goto error;
825         }
826
827         /* do run-time configurations for SHIM, ALH and PDI/PORT */
828         intel_pdi_shim_configure(sdw, pdi);
829         intel_pdi_alh_configure(sdw, pdi);
830         sdw_cdns_config_stream(cdns, ch, dir, pdi);
831
832         /* store pdi and hw_params, may be needed in prepare step */
833         dai_runtime->paused = false;
834         dai_runtime->suspended = false;
835         dai_runtime->pdi = pdi;
836         dai_runtime->hw_params = params;
837
838         /* Inform DSP about PDI stream number */
839         ret = intel_params_stream(sdw, substream->stream, dai, params,
840                                   sdw->instance,
841                                   pdi->intel_alh_id);
842         if (ret)
843                 goto error;
844
845         sconfig.direction = dir;
846         sconfig.ch_count = ch;
847         sconfig.frame_rate = params_rate(params);
848         sconfig.type = dai_runtime->stream_type;
849
850         sconfig.bps = snd_pcm_format_width(params_format(params));
851
852         /* Port configuration */
853         pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
854         if (!pconfig) {
855                 ret =  -ENOMEM;
856                 goto error;
857         }
858
859         pconfig->num = pdi->num;
860         pconfig->ch_mask = (1 << ch) - 1;
861
862         ret = sdw_stream_add_master(&cdns->bus, &sconfig,
863                                     pconfig, 1, dai_runtime->stream);
864         if (ret)
865                 dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
866
867         kfree(pconfig);
868 error:
869         return ret;
870 }
871
872 static int intel_prepare(struct snd_pcm_substream *substream,
873                          struct snd_soc_dai *dai)
874 {
875         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
876         struct sdw_intel *sdw = cdns_to_intel(cdns);
877         struct sdw_cdns_dai_runtime *dai_runtime;
878         int ch, dir;
879         int ret = 0;
880
881         dai_runtime = cdns->dai_runtime_array[dai->id];
882         if (!dai_runtime) {
883                 dev_err(dai->dev, "failed to get dai runtime in %s\n",
884                         __func__);
885                 return -EIO;
886         }
887
888         if (dai_runtime->suspended) {
889                 dai_runtime->suspended = false;
890
891                 /*
892                  * .prepare() is called after system resume, where we
893                  * need to reinitialize the SHIM/ALH/Cadence IP.
894                  * .prepare() is also called to deal with underflows,
895                  * but in those cases we cannot touch ALH/SHIM
896                  * registers
897                  */
898
899                 /* configure stream */
900                 ch = params_channels(dai_runtime->hw_params);
901                 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
902                         dir = SDW_DATA_DIR_RX;
903                 else
904                         dir = SDW_DATA_DIR_TX;
905
906                 intel_pdi_shim_configure(sdw, dai_runtime->pdi);
907                 intel_pdi_alh_configure(sdw, dai_runtime->pdi);
908                 sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
909
910                 /* Inform DSP about PDI stream number */
911                 ret = intel_params_stream(sdw, substream->stream, dai,
912                                           dai_runtime->hw_params,
913                                           sdw->instance,
914                                           dai_runtime->pdi->intel_alh_id);
915         }
916
917         return ret;
918 }
919
920 static int
921 intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
922 {
923         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
924         struct sdw_intel *sdw = cdns_to_intel(cdns);
925         struct sdw_cdns_dai_runtime *dai_runtime;
926         int ret;
927
928         dai_runtime = cdns->dai_runtime_array[dai->id];
929         if (!dai_runtime)
930                 return -EIO;
931
932         /*
933          * The sdw stream state will transition to RELEASED when
934          * stream->master_list is empty. So the stream state will transition to
935          * DEPREPARED for the first cpu-dai and to RELEASED for the last
936          * cpu-dai.
937          */
938         ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
939         if (ret < 0) {
940                 dev_err(dai->dev, "remove master from stream %s failed: %d\n",
941                         dai_runtime->stream->name, ret);
942                 return ret;
943         }
944
945         ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
946         if (ret < 0) {
947                 dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
948                 return ret;
949         }
950
951         dai_runtime->hw_params = NULL;
952         dai_runtime->pdi = NULL;
953
954         return 0;
955 }
956
957 static void intel_shutdown(struct snd_pcm_substream *substream,
958                            struct snd_soc_dai *dai)
959 {
960         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
961
962         pm_runtime_mark_last_busy(cdns->dev);
963         pm_runtime_put_autosuspend(cdns->dev);
964 }
965
966 static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
967                                     void *stream, int direction)
968 {
969         return cdns_set_sdw_stream(dai, stream, direction);
970 }
971
972 static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
973                                   int direction)
974 {
975         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
976         struct sdw_cdns_dai_runtime *dai_runtime;
977
978         dai_runtime = cdns->dai_runtime_array[dai->id];
979         if (!dai_runtime)
980                 return ERR_PTR(-EINVAL);
981
982         return dai_runtime->stream;
983 }
984
985 static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
986 {
987         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
988         struct sdw_intel *sdw = cdns_to_intel(cdns);
989         struct sdw_intel_link_res *res = sdw->link_res;
990         struct sdw_cdns_dai_runtime *dai_runtime;
991         int ret = 0;
992
993         /*
994          * The .trigger callback is used to send required IPC to audio
995          * firmware. The .free_stream callback will still be called
996          * by intel_free_stream() in the TRIGGER_SUSPEND case.
997          */
998         if (res->ops && res->ops->trigger)
999                 res->ops->trigger(dai, cmd, substream->stream);
1000
1001         dai_runtime = cdns->dai_runtime_array[dai->id];
1002         if (!dai_runtime) {
1003                 dev_err(dai->dev, "failed to get dai runtime in %s\n",
1004                         __func__);
1005                 return -EIO;
1006         }
1007
1008         switch (cmd) {
1009         case SNDRV_PCM_TRIGGER_SUSPEND:
1010
1011                 /*
1012                  * The .prepare callback is used to deal with xruns and resume operations.
1013                  * In the case of xruns, the DMAs and SHIM registers cannot be touched,
1014                  * but for resume operations the DMAs and SHIM registers need to be initialized.
1015                  * The .trigger callback is used to track the suspend case only.
1016                  */
1017
1018                 dai_runtime->suspended = true;
1019
1020                 ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
1021                 break;
1022
1023         case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
1024                 dai_runtime->paused = true;
1025                 break;
1026         case SNDRV_PCM_TRIGGER_STOP:
1027         case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
1028                 dai_runtime->paused = false;
1029                 break;
1030         default:
1031                 break;
1032         }
1033
1034         return ret;
1035 }
1036
1037 static int intel_component_probe(struct snd_soc_component *component)
1038 {
1039         int ret;
1040
1041         /*
1042          * Make sure the device is pm_runtime_active before initiating
1043          * bus transactions during the card registration.
1044          * We use pm_runtime_resume() here rather than taking a reference
1045          * and releasing it immediately.
1046          */
1047         ret = pm_runtime_resume(component->dev);
1048         if (ret < 0 && ret != -EACCES)
1049                 return ret;
1050
1051         return 0;
1052 }
1053
1054 static int intel_component_dais_suspend(struct snd_soc_component *component)
1055 {
1056         struct snd_soc_dai *dai;
1057
1058         /*
1059          * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
1060          * does not generate a TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
1061          * Since the component suspend is called last, we can trap this corner case
1062          * and force the DAIs to release their resources.
1063          */
1064         for_each_component_dais(component, dai) {
1065                 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1066                 struct sdw_intel *sdw = cdns_to_intel(cdns);
1067                 struct sdw_cdns_dai_runtime *dai_runtime;
1068                 int ret;
1069
1070                 dai_runtime = cdns->dai_runtime_array[dai->id];
1071
1072                 if (!dai_runtime)
1073                         continue;
1074
1075                 if (dai_runtime->suspended)
1076                         continue;
1077
1078                 if (dai_runtime->paused) {
1079                         dai_runtime->suspended = true;
1080
1081                         ret = intel_free_stream(sdw, dai_runtime->direction, dai, sdw->instance);
1082                         if (ret < 0)
1083                                 return ret;
1084                 }
1085         }
1086
1087         return 0;
1088 }
1089
1090 static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
1091         .startup = intel_startup,
1092         .hw_params = intel_hw_params,
1093         .prepare = intel_prepare,
1094         .hw_free = intel_hw_free,
1095         .trigger = intel_trigger,
1096         .shutdown = intel_shutdown,
1097         .set_stream = intel_pcm_set_sdw_stream,
1098         .get_stream = intel_get_sdw_stream,
1099 };
1100
1101 static const struct snd_soc_component_driver dai_component = {
1102         .name                   = "soundwire",
1103         .probe                  = intel_component_probe,
1104         .suspend                = intel_component_dais_suspend,
1105         .legacy_dai_naming      = 1,
1106 };
1107
1108 static int intel_create_dai(struct sdw_cdns *cdns,
1109                             struct snd_soc_dai_driver *dais,
1110                             enum intel_pdi_type type,
1111                             u32 num, u32 off, u32 max_ch)
1112 {
1113         int i;
1114
1115         if (num == 0)
1116                 return 0;
1117
1118          /* TODO: Read supported rates/formats from hardware */
1119         for (i = off; i < (off + num); i++) {
1120                 dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
1121                                               "SDW%d Pin%d",
1122                                               cdns->instance, i);
1123                 if (!dais[i].name)
1124                         return -ENOMEM;
1125
1126                 if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
1127                         dais[i].playback.channels_min = 1;
1128                         dais[i].playback.channels_max = max_ch;
1129                         dais[i].playback.rates = SNDRV_PCM_RATE_48000;
1130                         dais[i].playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
1131                 }
1132
1133                 if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
1134                         dais[i].capture.channels_min = 1;
1135                         dais[i].capture.channels_max = max_ch;
1136                         dais[i].capture.rates = SNDRV_PCM_RATE_48000;
1137                         dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
1138                 }
1139
1140                 dais[i].ops = &intel_pcm_dai_ops;
1141         }
1142
1143         return 0;
1144 }
1145
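/*
 * intel_register_dai() creates one DAI per PCM PDI reported by the SHIM:
 * input PDIs become capture-only DAIs, output PDIs playback-only DAIs, and
 * bidirectional PDIs get both directions. The DAIs are named "SDW%d Pin%d"
 * and registered as a single "soundwire" component.
 */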
1146 static int intel_register_dai(struct sdw_intel *sdw)
1147 {
1148         struct sdw_cdns_dai_runtime **dai_runtime_array;
1149         struct sdw_cdns_stream_config config;
1150         struct sdw_cdns *cdns = &sdw->cdns;
1151         struct sdw_cdns_streams *stream;
1152         struct snd_soc_dai_driver *dais;
1153         int num_dai, ret, off = 0;
1154
1155         /* Read the PDI config and initialize cadence PDI */
1156         intel_pdi_init(sdw, &config);
1157         ret = sdw_cdns_pdi_init(cdns, config);
1158         if (ret)
1159                 return ret;
1160
1161         intel_pdi_ch_update(sdw);
1162
1163         /* DAIs are created based on total number of PDIs supported */
1164         num_dai = cdns->pcm.num_pdi;
1165
1166         dai_runtime_array = devm_kcalloc(cdns->dev, num_dai,
1167                                          sizeof(struct sdw_cdns_dai_runtime *),
1168                                          GFP_KERNEL);
1169         if (!dai_runtime_array)
1170                 return -ENOMEM;
1171         cdns->dai_runtime_array = dai_runtime_array;
1172
1173         dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
1174         if (!dais)
1175                 return -ENOMEM;
1176
1177         /* Create PCM DAIs */
1178         stream = &cdns->pcm;
1179
1180         ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
1181                                off, stream->num_ch_in);
1182         if (ret)
1183                 return ret;
1184
1185         off += cdns->pcm.num_in;
1186         ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
1187                                off, stream->num_ch_out);
1188         if (ret)
1189                 return ret;
1190
1191         off += cdns->pcm.num_out;
1192         ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
1193                                off, stream->num_ch_bd);
1194         if (ret)
1195                 return ret;
1196
1197         return devm_snd_soc_register_component(cdns->dev, &dai_component,
1198                                                dais, num_dai);
1199 }
1200
1201 static int intel_start_bus(struct sdw_intel *sdw)
1202 {
1203         struct device *dev = sdw->cdns.dev;
1204         struct sdw_cdns *cdns = &sdw->cdns;
1205         struct sdw_bus *bus = &cdns->bus;
1206         int ret;
1207
1208         ret = sdw_cdns_enable_interrupt(cdns, true);
1209         if (ret < 0) {
1210                 dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
1211                 return ret;
1212         }
1213
1214         /*
1215          * follow recommended programming flows to avoid timeouts when
1216          * gsync is enabled
1217          */
1218         if (bus->multi_link)
1219                 intel_shim_sync_arm(sdw);
1220
1221         ret = sdw_cdns_init(cdns);
1222         if (ret < 0) {
1223                 dev_err(dev, "%s: unable to initialize Cadence IP: %d\n", __func__, ret);
1224                 goto err_interrupt;
1225         }
1226
1227         ret = sdw_cdns_exit_reset(cdns);
1228         if (ret < 0) {
1229                 dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
1230                 goto err_interrupt;
1231         }
1232
1233         if (bus->multi_link) {
1234                 ret = intel_shim_sync_go(sdw);
1235                 if (ret < 0) {
1236                         dev_err(dev, "%s: sync go failed: %d\n", __func__, ret);
1237                         goto err_interrupt;
1238                 }
1239         }
1240         sdw_cdns_check_self_clearing_bits(cdns, __func__,
1241                                           true, INTEL_MASTER_RESET_ITERATIONS);
1242
1243         return 0;
1244
1245 err_interrupt:
1246         sdw_cdns_enable_interrupt(cdns, false);
1247         return ret;
1248 }
1249
1250 static int intel_start_bus_after_reset(struct sdw_intel *sdw)
1251 {
1252         struct device *dev = sdw->cdns.dev;
1253         struct sdw_cdns *cdns = &sdw->cdns;
1254         struct sdw_bus *bus = &cdns->bus;
1255         bool clock_stop0;
1256         int status;
1257         int ret;
1258
1259         /*
1260          * An exception condition occurs for the CLK_STOP_BUS_RESET
1261          * case if one or more Masters remain active. In this condition,
1262          * all the Masters are powered on because they are in the same power
1263          * domain. A Master can preserve its context for clock stop0, so
1264          * there is no need to clear the Slave status and reset the bus.
1265          */
1266         clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1267
1268         if (!clock_stop0) {
1269
1270                 /*
1271                  * make sure all Slaves are tagged as UNATTACHED and
1272                  * provide reason for reinitialization
1273                  */
1274
1275                 status = SDW_UNATTACH_REQUEST_MASTER_RESET;
1276                 sdw_clear_slave_status(bus, status);
1277
1278                 ret = sdw_cdns_enable_interrupt(cdns, true);
1279                 if (ret < 0) {
1280                         dev_err(dev, "cannot enable interrupts during resume\n");
1281                         return ret;
1282                 }
1283
1284                 /*
1285                  * follow recommended programming flows to avoid
1286                  * timeouts when gsync is enabled
1287                  */
1288                 if (bus->multi_link)
1289                         intel_shim_sync_arm(sdw);
1290
1291                 /*
1292                  * Re-initialize the IP since it was powered-off
1293                  */
1294                 sdw_cdns_init(&sdw->cdns);
1295
1296         } else {
1297                 ret = sdw_cdns_enable_interrupt(cdns, true);
1298                 if (ret < 0) {
1299                         dev_err(dev, "cannot enable interrupts during resume\n");
1300                         return ret;
1301                 }
1302         }
1303
1304         ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
1305         if (ret < 0) {
1306                 dev_err(dev, "unable to restart clock during resume\n");
1307                 goto err_interrupt;
1308         }
1309
1310         if (!clock_stop0) {
1311                 ret = sdw_cdns_exit_reset(cdns);
1312                 if (ret < 0) {
1313                         dev_err(dev, "unable to exit bus reset sequence during resume\n");
1314                         goto err_interrupt;
1315                 }
1316
1317                 if (bus->multi_link) {
1318                         ret = intel_shim_sync_go(sdw);
1319                         if (ret < 0) {
1320                                 dev_err(sdw->cdns.dev, "sync go failed during resume\n");
1321                                 goto err_interrupt;
1322                         }
1323                 }
1324         }
1325         sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
1326
1327         return 0;
1328
1329 err_interrupt:
1330         sdw_cdns_enable_interrupt(cdns, false);
1331         return ret;
1332 }
1333
1334 static void intel_check_clock_stop(struct sdw_intel *sdw)
1335 {
1336         struct device *dev = sdw->cdns.dev;
1337         bool clock_stop0;
1338
1339         clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1340         if (!clock_stop0)
1341                 dev_err(dev, "%s: invalid configuration, clock was not stopped\n", __func__);
1342 }
1343
1344 static int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
1345 {
1346         struct device *dev = sdw->cdns.dev;
1347         struct sdw_cdns *cdns = &sdw->cdns;
1348         int ret;
1349
1350         ret = sdw_cdns_enable_interrupt(cdns, true);
1351         if (ret < 0) {
1352                 dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
1353                 return ret;
1354         }
1355
1356         ret = sdw_cdns_clock_restart(cdns, false);
1357         if (ret < 0) {
1358                 dev_err(dev, "%s: unable to restart clock: %d\n", __func__, ret);
1359                 sdw_cdns_enable_interrupt(cdns, false);
1360                 return ret;
1361         }
1362
1363         sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
1364                                           true, INTEL_MASTER_RESET_ITERATIONS);
1365
1366         return 0;
1367 }
1368
1369 static int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop)
1370 {
1371         struct device *dev = sdw->cdns.dev;
1372         struct sdw_cdns *cdns = &sdw->cdns;
1373         bool wake_enable = false;
1374         int ret;
1375
1376         if (clock_stop) {
1377                 ret = sdw_cdns_clock_stop(cdns, true);
1378                 if (ret < 0)
1379                         dev_err(dev, "%s: cannot stop clock: %d\n", __func__, ret);
1380                 else
1381                         wake_enable = true;
1382         }
1383
1384         ret = sdw_cdns_enable_interrupt(cdns, false);
1385         if (ret < 0) {
1386                 dev_err(dev, "%s: cannot disable interrupts: %d\n", __func__, ret);
1387                 return ret;
1388         }
1389
1390         ret = intel_link_power_down(sdw);
1391         if (ret) {
1392                 dev_err(dev, "%s: Link power down failed: %d\n", __func__, ret);
1393                 return ret;
1394         }
1395
1396         intel_shim_wake(sdw, wake_enable);
1397
1398         return 0;
1399 }
1400
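/*
 * These callbacks are not invoked directly in this file: they are exported
 * for the Intel SoundWire init/auxiliary-device layer, which sequences link
 * power-up, bus start/stop and bank switches for each link it manages.
 */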
1401 const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops = {
1402         .debugfs_init = intel_debugfs_init,
1403         .debugfs_exit = intel_debugfs_exit,
1404
1405         .register_dai = intel_register_dai,
1406
1407         .check_clock_stop = intel_check_clock_stop,
1408         .start_bus = intel_start_bus,
1409         .start_bus_after_reset = intel_start_bus_after_reset,
1410         .start_bus_after_clock_stop = intel_start_bus_after_clock_stop,
1411         .stop_bus = intel_stop_bus,
1412
1413         .link_power_up = intel_link_power_up,
1414         .link_power_down = intel_link_power_down,
1415
1416         .shim_check_wake = intel_shim_check_wake,
1417         .shim_wake = intel_shim_wake,
1418
1419         .pre_bank_switch = intel_pre_bank_switch,
1420         .post_bank_switch = intel_post_bank_switch,
1421 };
1422 EXPORT_SYMBOL_NS(sdw_intel_cnl_hw_ops, SOUNDWIRE_INTEL);
1423