ASoC: Intel: Skylake: Add support for specifying D0i3 configuration
[linux-2.6-block.git] / sound / soc / intel / skylake / skl-topology.c
e4e2d2f4
JK
1/*
2 * skl-topology.c - Implements Platform component ALSA controls/widget
3 * handlers.
4 *
5 * Copyright (C) 2014-2015 Intel Corp
6 * Author: Jeeja KP <jeeja.kp@intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19#include <linux/slab.h>
20#include <linux/types.h>
21#include <linux/firmware.h>
22#include <sound/soc.h>
23#include <sound/soc-topology.h>
6277e832 24#include <uapi/sound/snd_sst_tokens.h>
e4e2d2f4
JK
25#include "skl-sst-dsp.h"
26#include "skl-sst-ipc.h"
27#include "skl-topology.h"
28#include "skl.h"
29#include "skl-tplg-interface.h"
6c5768b3
D
30#include "../common/sst-dsp.h"
31#include "../common/sst-dsp-priv.h"
e4e2d2f4 32
f7590d4f
JK
33#define SKL_CH_FIXUP_MASK (1 << 0)
34#define SKL_RATE_FIXUP_MASK (1 << 1)
35#define SKL_FMT_FIXUP_MASK (1 << 2)
6277e832
SN
36#define SKL_IN_DIR_BIT_MASK BIT(0)
37#define SKL_PIN_COUNT_MASK GENMASK(7, 4)
f7590d4f 38
e4e2d2f4
JK
39/*
40 * The SKL DSP driver models only a few DAPM widget types and ignores the
41 * rest. This helper checks whether the SKL driver handles a given widget type
42 */
43static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
44{
45 switch (w->id) {
46 case snd_soc_dapm_dai_link:
47 case snd_soc_dapm_dai_in:
48 case snd_soc_dapm_aif_in:
49 case snd_soc_dapm_aif_out:
50 case snd_soc_dapm_dai_out:
51 case snd_soc_dapm_switch:
52 return false;
53 default:
54 return true;
55 }
56}
57
58/*
59 * Each pipeline needs memory to be allocated. Check if we have free memory
9ba8ffef 60 * available in the pool.
e4e2d2f4 61 */
9ba8ffef 62static bool skl_is_pipe_mem_avail(struct skl *skl,
e4e2d2f4
JK
63 struct skl_module_cfg *mconfig)
64{
65 struct skl_sst *ctx = skl->skl_sst;
66
67 if (skl->resource.mem + mconfig->pipe->memory_pages >
68 skl->resource.max_mem) {
69 dev_err(ctx->dev,
70 "%s: module_id %d instance %d\n", __func__,
71 mconfig->id.module_id,
72 mconfig->id.instance_id);
73 dev_err(ctx->dev,
74 "exceeds ppl memory available %d mem %d\n",
75 skl->resource.max_mem, skl->resource.mem);
76 return false;
9ba8ffef
D
77 } else {
78 return true;
e4e2d2f4 79 }
9ba8ffef 80}
e4e2d2f4 81
9ba8ffef
D
82/*
83 * Add the mem to the mem pool. This is freed when the pipe is deleted.
84 * Note: the DSP does the actual memory management; we only keep track of the
85 * complete pool
86 */
87static void skl_tplg_alloc_pipe_mem(struct skl *skl,
88 struct skl_module_cfg *mconfig)
89{
e4e2d2f4 90 skl->resource.mem += mconfig->pipe->memory_pages;
e4e2d2f4
JK
91}
92
93/*
94 * A pipeline needs DSP CPU resources for computation, quantified in MCPS
95 * (Million Clocks Per Second) required for the module/pipe
96 *
97 * Each pipeline needs MCPS to be allocated. Check if we have MCPS for this
9ba8ffef 98 * pipe.
e4e2d2f4 99 */
9ba8ffef
D
100
101static bool skl_is_pipe_mcps_avail(struct skl *skl,
e4e2d2f4
JK
102 struct skl_module_cfg *mconfig)
103{
104 struct skl_sst *ctx = skl->skl_sst;
105
106 if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
107 dev_err(ctx->dev,
108 "%s: module_id %d instance %d\n", __func__,
109 mconfig->id.module_id, mconfig->id.instance_id);
110 dev_err(ctx->dev,
7ca42f5a 111 "exceeds ppl mcps available %d > mcps %d\n",
e4e2d2f4
JK
112 skl->resource.max_mcps, skl->resource.mcps);
113 return false;
9ba8ffef
D
114 } else {
115 return true;
e4e2d2f4 116 }
9ba8ffef 117}
e4e2d2f4 118
9ba8ffef
D
119static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
120 struct skl_module_cfg *mconfig)
121{
e4e2d2f4 122 skl->resource.mcps += mconfig->mcps;
e4e2d2f4
JK
123}
124
125/*
126 * Free the mcps when tearing down
127 */
128static void
129skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
130{
131 skl->resource.mcps -= mconfig->mcps;
132}
133
134/*
135 * Free the memory when tearing down
136 */
137static void
138skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
139{
140 skl->resource.mem -= mconfig->pipe->memory_pages;
141}
142
f7590d4f
JK
143
144static void skl_dump_mconfig(struct skl_sst *ctx,
145 struct skl_module_cfg *mcfg)
146{
147 dev_dbg(ctx->dev, "Dumping config\n");
148 dev_dbg(ctx->dev, "Input Format:\n");
4cd9899f
HS
149 dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
150 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
151 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
152 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->in_fmt[0].valid_bit_depth);
f7590d4f 153 dev_dbg(ctx->dev, "Output Format:\n");
4cd9899f
HS
154 dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
155 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
156 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->out_fmt[0].valid_bit_depth);
157 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
f7590d4f
JK
158}
159
ea5a137d
SP
160static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
161{
162 int slot_map = 0xFFFFFFFF;
163 int start_slot = 0;
164 int i;
165
166 for (i = 0; i < chs; i++) {
167 /*
168 * For 2 channels with starting slot as 0, slot map will
169 * look like 0xFFFFFF10.
170 */
171 slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
172 start_slot++;
173 }
174 fmt->ch_map = slot_map;
175}
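/*
 * Worked example (editor's illustration): with chs = 4 the loop above
 * produces slot_map = 0xFFFF3210, i.e. channel n is mapped to slot n and
 * the unused nibbles stay 0xF.
 */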
176
f7590d4f
JK
177static void skl_tplg_update_params(struct skl_module_fmt *fmt,
178 struct skl_pipe_params *params, int fixup)
179{
180 if (fixup & SKL_RATE_FIXUP_MASK)
181 fmt->s_freq = params->s_freq;
ea5a137d 182 if (fixup & SKL_CH_FIXUP_MASK) {
f7590d4f 183 fmt->channels = params->ch;
ea5a137d
SP
184 skl_tplg_update_chmap(fmt, fmt->channels);
185 }
98256f83
JK
186 if (fixup & SKL_FMT_FIXUP_MASK) {
187 fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
188
189 /*
190 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
191 * container so update bit depth accordingly
192 */
193 switch (fmt->valid_bit_depth) {
194 case SKL_DEPTH_16BIT:
195 fmt->bit_depth = fmt->valid_bit_depth;
196 break;
197
198 default:
199 fmt->bit_depth = SKL_DEPTH_32BIT;
200 break;
201 }
202 }
203
f7590d4f
JK
204}
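/*
 * Editor's illustration: for example SNDRV_PCM_FORMAT_S24_LE yields a
 * valid_bit_depth of 24 but a bit_depth (container) of 32, matching the
 * 24-in-32 container rule described above; only a 16 bit valid depth
 * keeps a 16 bit container.
 */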
205
206/*
207 * A pipeline may have modules which impact the pcm parameters, like SRC,
208 * channel converter, format converter.
209 * We need to calculate the output params by applying the 'fixup'.
210 * Topology tells the driver which type of fixup is to be applied by
211 * supplying the fixup mask, so based on that we calculate the output.
212 *
213 * For an FE, the pcm hw_params is the source/target format. The same applies
214 * to a BE when its hw_params is invoked.
215 * Here, based on the FE/BE pipeline and direction, we calculate the input and
216 * output fixups and then apply them to the module
217 */
218static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
219 struct skl_pipe_params *params, bool is_fe)
220{
221 int in_fixup, out_fixup;
222 struct skl_module_fmt *in_fmt, *out_fmt;
223
4cd9899f
HS
224 /* Fixups will be applied to pin 0 only */
225 in_fmt = &m_cfg->in_fmt[0];
226 out_fmt = &m_cfg->out_fmt[0];
f7590d4f
JK
227
228 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
229 if (is_fe) {
230 in_fixup = m_cfg->params_fixup;
231 out_fixup = (~m_cfg->converter) &
232 m_cfg->params_fixup;
233 } else {
234 out_fixup = m_cfg->params_fixup;
235 in_fixup = (~m_cfg->converter) &
236 m_cfg->params_fixup;
237 }
238 } else {
239 if (is_fe) {
240 out_fixup = m_cfg->params_fixup;
241 in_fixup = (~m_cfg->converter) &
242 m_cfg->params_fixup;
243 } else {
244 in_fixup = m_cfg->params_fixup;
245 out_fixup = (~m_cfg->converter) &
246 m_cfg->params_fixup;
247 }
248 }
249
250 skl_tplg_update_params(in_fmt, params, in_fixup);
251 skl_tplg_update_params(out_fmt, params, out_fixup);
252}
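/*
 * Editor's illustration (hypothetical values): for a playback FE module
 * with params_fixup = SKL_CH_FIXUP_MASK | SKL_RATE_FIXUP_MASK and
 * converter = SKL_RATE_FIXUP_MASK, the input format takes both channel and
 * rate from the FE hw_params, while the output format only takes the
 * channel fixup since the module itself converts the rate
 * (out_fixup = ~converter & params_fixup).
 */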
253
254/*
255 * A module needs input and output buffers, which are dependent upon pcm
256 * params, so once we have calculated the params, we need the buffer calculation as
257 * well.
258 */
259static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
260 struct skl_module_cfg *mcfg)
261{
262 int multiplier = 1;
4cd9899f 263 struct skl_module_fmt *in_fmt, *out_fmt;
f0c8e1d9 264 int in_rate, out_rate;
4cd9899f
HS
265
266
267 /* Since fixups are applied to pin 0 only, ibs and obs need to
268 * change for pin 0 only
269 */
270 in_fmt = &mcfg->in_fmt[0];
271 out_fmt = &mcfg->out_fmt[0];
f7590d4f
JK
272
273 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
274 multiplier = 5;
f0c8e1d9
SP
275
276 if (in_fmt->s_freq % 1000)
277 in_rate = (in_fmt->s_freq / 1000) + 1;
278 else
279 in_rate = (in_fmt->s_freq / 1000);
280
281 mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
282 (mcfg->in_fmt->bit_depth >> 3) *
283 multiplier;
284
285 if (mcfg->out_fmt->s_freq % 1000)
286 out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
287 else
288 out_rate = (mcfg->out_fmt->s_freq / 1000);
289
290 mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
291 (mcfg->out_fmt->bit_depth >> 3) *
292 multiplier;
f7590d4f
JK
293}
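/*
 * Editor's illustration: a 48 kHz, 2 channel, 16 bit (16 bit container)
 * input gives ibs = 48 * 2 * 2 = 192 bytes per millisecond; modules of
 * type SKL_MODULE_TYPE_SRCINT additionally scale this by multiplier = 5.
 */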
294
2d1419a3
JK
295static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
296 struct skl_sst *ctx)
297{
298 struct skl_module_cfg *m_cfg = w->priv;
299 int link_type, dir;
300 u32 ch, s_freq, s_fmt;
301 struct nhlt_specific_cfg *cfg;
302 struct skl *skl = get_skl_ctx(ctx->dev);
303
304 /* check if we already have blob */
305 if (m_cfg->formats_config.caps_size > 0)
306 return 0;
307
c7c6c736 308 dev_dbg(ctx->dev, "Applying default cfg blob\n");
2d1419a3
JK
309 switch (m_cfg->dev_type) {
310 case SKL_DEVICE_DMIC:
311 link_type = NHLT_LINK_DMIC;
c7c6c736 312 dir = SNDRV_PCM_STREAM_CAPTURE;
2d1419a3
JK
313 s_freq = m_cfg->in_fmt[0].s_freq;
314 s_fmt = m_cfg->in_fmt[0].bit_depth;
315 ch = m_cfg->in_fmt[0].channels;
316 break;
317
318 case SKL_DEVICE_I2S:
319 link_type = NHLT_LINK_SSP;
320 if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
c7c6c736 321 dir = SNDRV_PCM_STREAM_PLAYBACK;
2d1419a3
JK
322 s_freq = m_cfg->out_fmt[0].s_freq;
323 s_fmt = m_cfg->out_fmt[0].bit_depth;
324 ch = m_cfg->out_fmt[0].channels;
c7c6c736
JK
325 } else {
326 dir = SNDRV_PCM_STREAM_CAPTURE;
327 s_freq = m_cfg->in_fmt[0].s_freq;
328 s_fmt = m_cfg->in_fmt[0].bit_depth;
329 ch = m_cfg->in_fmt[0].channels;
2d1419a3
JK
330 }
331 break;
332
333 default:
334 return -EINVAL;
335 }
336
337 /* update the blob based on virtual bus_id and default params */
338 cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
339 s_fmt, ch, s_freq, dir);
340 if (cfg) {
341 m_cfg->formats_config.caps_size = cfg->size;
342 m_cfg->formats_config.caps = (u32 *) &cfg->caps;
343 } else {
344 dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
345 m_cfg->vbus_id, link_type, dir);
346 dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
347 ch, s_freq, s_fmt);
348 return -EIO;
349 }
350
351 return 0;
352}
353
f7590d4f
JK
354static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
355 struct skl_sst *ctx)
356{
357 struct skl_module_cfg *m_cfg = w->priv;
358 struct skl_pipe_params *params = m_cfg->pipe->p_params;
359 int p_conn_type = m_cfg->pipe->conn_type;
360 bool is_fe;
361
362 if (!m_cfg->params_fixup)
363 return;
364
365 dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
366 w->name);
367
368 skl_dump_mconfig(ctx, m_cfg);
369
370 if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
371 is_fe = true;
372 else
373 is_fe = false;
374
375 skl_tplg_update_params_fixup(m_cfg, params, is_fe);
376 skl_tplg_update_buffer_size(ctx, m_cfg);
377
378 dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
379 w->name);
380
381 skl_dump_mconfig(ctx, m_cfg);
382}
383
abb74003
JK
384/*
385 * Some modules can have multiple params set from user controls, which
386 * need to be set after the module is initialized. If the set_param flag is
387 * set, module params will be set after the module is initialised.
388 */
389static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
390 struct skl_sst *ctx)
391{
392 int i, ret;
393 struct skl_module_cfg *mconfig = w->priv;
394 const struct snd_kcontrol_new *k;
395 struct soc_bytes_ext *sb;
396 struct skl_algo_data *bc;
397 struct skl_specific_cfg *sp_cfg;
398
399 if (mconfig->formats_config.caps_size > 0 &&
4ced1827 400 mconfig->formats_config.set_params == SKL_PARAM_SET) {
abb74003
JK
401 sp_cfg = &mconfig->formats_config;
402 ret = skl_set_module_params(ctx, sp_cfg->caps,
403 sp_cfg->caps_size,
404 sp_cfg->param_id, mconfig);
405 if (ret < 0)
406 return ret;
407 }
408
409 for (i = 0; i < w->num_kcontrols; i++) {
410 k = &w->kcontrol_news[i];
411 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
412 sb = (void *) k->private_value;
413 bc = (struct skl_algo_data *)sb->dobj.private;
414
4ced1827 415 if (bc->set_params == SKL_PARAM_SET) {
abb74003 416 ret = skl_set_module_params(ctx,
0d682104 417 (u32 *)bc->params, bc->size,
abb74003
JK
418 bc->param_id, mconfig);
419 if (ret < 0)
420 return ret;
421 }
422 }
423 }
424
425 return 0;
426}
427
428/*
429 * Some module params can be set from user controls and are required when
430 * the module is initialized. A param required at init time is identified
431 * by the set_param flag being SKL_PARAM_INIT; such a parameter is sent as
432 * part of the module init payload.
433 */
434static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
435{
436 const struct snd_kcontrol_new *k;
437 struct soc_bytes_ext *sb;
438 struct skl_algo_data *bc;
439 struct skl_module_cfg *mconfig = w->priv;
440 int i;
441
442 for (i = 0; i < w->num_kcontrols; i++) {
443 k = &w->kcontrol_news[i];
444 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
445 sb = (struct soc_bytes_ext *)k->private_value;
446 bc = (struct skl_algo_data *)sb->dobj.private;
447
4ced1827 448 if (bc->set_params != SKL_PARAM_INIT)
abb74003
JK
449 continue;
450
451 mconfig->formats_config.caps = (u32 *)&bc->params;
0d682104 452 mconfig->formats_config.caps_size = bc->size;
abb74003
JK
453
454 break;
455 }
456 }
457
458 return 0;
459}
460
e4e2d2f4
JK
461/*
462 * Inside a pipe instance, we can have various modules. These modules need
463 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is done
464 * by the skl_init_module() routine, so invoke that for all modules in a pipeline
465 */
466static int
467skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
468{
469 struct skl_pipe_module *w_module;
470 struct snd_soc_dapm_widget *w;
471 struct skl_module_cfg *mconfig;
472 struct skl_sst *ctx = skl->skl_sst;
473 int ret = 0;
474
475 list_for_each_entry(w_module, &pipe->w_list, node) {
476 w = w_module->w;
477 mconfig = w->priv;
478
b7c50555
VK
479 /* check if module ids are populated */
480 if (mconfig->id.module_id < 0) {
a657ae7e
VK
481 dev_err(skl->skl_sst->dev,
482 "module %pUL id not populated\n",
483 (uuid_le *)mconfig->guid);
484 return -EIO;
b7c50555
VK
485 }
486
e4e2d2f4 487 /* check resource available */
9ba8ffef 488 if (!skl_is_pipe_mcps_avail(skl, mconfig))
e4e2d2f4
JK
489 return -ENOMEM;
490
6c5768b3
D
491 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
492 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
493 mconfig->id.module_id, mconfig->guid);
494 if (ret < 0)
495 return ret;
d643678b
JK
496
497 mconfig->m_state = SKL_MODULE_LOADED;
6c5768b3
D
498 }
499
2d1419a3
JK
500 /* if the BE blob is NULL, update the blob with default values */
501 skl_tplg_update_be_blob(w, ctx);
502
f7590d4f
JK
503 /*
504 * apply fix/conversion to module params based on
505 * FE/BE params
506 */
507 skl_tplg_update_module_params(w, ctx);
ef2a352c
D
508 mconfig->id.pvt_id = skl_get_pvt_id(ctx, mconfig);
509 if (mconfig->id.pvt_id < 0)
510 return ret;
abb74003 511 skl_tplg_set_module_init_data(w);
9939a9c3 512 ret = skl_init_module(ctx, mconfig);
ef2a352c
D
513 if (ret < 0) {
514 skl_put_pvt_id(ctx, mconfig);
e4e2d2f4 515 return ret;
ef2a352c 516 }
260eb73a 517 skl_tplg_alloc_pipe_mcps(skl, mconfig);
abb74003 518 ret = skl_tplg_set_module_params(w, ctx);
e4e2d2f4
JK
519 if (ret < 0)
520 return ret;
521 }
522
523 return 0;
524}
d93f8e55 525
6c5768b3
D
526static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
527 struct skl_pipe *pipe)
528{
b0fab9c6 529 int ret;
6c5768b3
D
530 struct skl_pipe_module *w_module = NULL;
531 struct skl_module_cfg *mconfig = NULL;
532
533 list_for_each_entry(w_module, &pipe->w_list, node) {
534 mconfig = w_module->w->priv;
535
d643678b 536 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
b0fab9c6
D
537 mconfig->m_state > SKL_MODULE_UNINIT) {
538 ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
6c5768b3 539 mconfig->id.module_id);
b0fab9c6
D
540 if (ret < 0)
541 return -EIO;
542 }
ef2a352c 543 skl_put_pvt_id(ctx, mconfig);
6c5768b3
D
544 }
545
546 /* no modules to unload in this path, so return */
547 return 0;
548}
549
d93f8e55
VK
550/*
551 * A mixer module represents a pipeline. So in the Pre-PMU event of the mixer
552 * we need to create the pipeline. We do the following:
553 * - check the resources
554 * - Create the pipeline
555 * - Initialize the modules in pipeline
556 * - finally bind all modules together
557 */
558static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
559 struct skl *skl)
560{
561 int ret;
562 struct skl_module_cfg *mconfig = w->priv;
563 struct skl_pipe_module *w_module;
564 struct skl_pipe *s_pipe = mconfig->pipe;
565 struct skl_module_cfg *src_module = NULL, *dst_module;
566 struct skl_sst *ctx = skl->skl_sst;
567
568 /* check resource available */
9ba8ffef 569 if (!skl_is_pipe_mcps_avail(skl, mconfig))
d93f8e55
VK
570 return -EBUSY;
571
9ba8ffef 572 if (!skl_is_pipe_mem_avail(skl, mconfig))
d93f8e55
VK
573 return -ENOMEM;
574
575 /*
576 * Create a list of modules for pipe.
577 * This list contains modules from source to sink
578 */
579 ret = skl_create_pipeline(ctx, mconfig->pipe);
580 if (ret < 0)
581 return ret;
582
260eb73a
D
583 skl_tplg_alloc_pipe_mem(skl, mconfig);
584 skl_tplg_alloc_pipe_mcps(skl, mconfig);
d93f8e55
VK
585
586 /* Init all pipe modules from source to sink */
587 ret = skl_tplg_init_pipe_modules(skl, s_pipe);
588 if (ret < 0)
589 return ret;
590
591 /* Bind modules from source to sink */
592 list_for_each_entry(w_module, &s_pipe->w_list, node) {
593 dst_module = w_module->w->priv;
594
595 if (src_module == NULL) {
596 src_module = dst_module;
597 continue;
598 }
599
600 ret = skl_bind_modules(ctx, src_module, dst_module);
601 if (ret < 0)
602 return ret;
603
604 src_module = dst_module;
605 }
606
607 return 0;
608}
609
5e8f0ee4
D
610static int skl_fill_sink_instance_id(struct skl_sst *ctx,
611 struct skl_algo_data *alg_data)
612{
613 struct skl_kpb_params *params = (struct skl_kpb_params *)alg_data->params;
614 struct skl_mod_inst_map *inst;
615 int i, pvt_id;
616
617 inst = params->map;
618
619 for (i = 0; i < params->num_modules; i++) {
620 pvt_id = skl_get_pvt_instance_id_map(ctx,
621 inst->mod_id, inst->inst_id);
622 if (pvt_id < 0)
623 return -EINVAL;
624 inst->inst_id = pvt_id;
625 inst++;
626 }
627 return 0;
628}
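/*
 * Editor's note: the bind params handled above are a skl_kpb_params blob,
 * i.e. a module count followed by {module id, instance id} pairs; the loop
 * rewrites each topology instance id with the private id actually
 * allocated by the driver for that module.
 */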
629
cc6a4044
JK
630/*
631 * Some modules require params to be set after the module is bound to
632 * all of its connected pins.
633 *
634 * The module provider initializes set_param flag for such modules and we
635 * send params after binding
636 */
637static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
638 struct skl_module_cfg *mcfg, struct skl_sst *ctx)
639{
640 int i, ret;
641 struct skl_module_cfg *mconfig = w->priv;
642 const struct snd_kcontrol_new *k;
643 struct soc_bytes_ext *sb;
644 struct skl_algo_data *bc;
645 struct skl_specific_cfg *sp_cfg;
646
647 /*
648 * check all out/in pins are in bind state.
649 * if so set the module param
650 */
651 for (i = 0; i < mcfg->max_out_queue; i++) {
652 if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
653 return 0;
654 }
655
656 for (i = 0; i < mcfg->max_in_queue; i++) {
657 if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
658 return 0;
659 }
660
661 if (mconfig->formats_config.caps_size > 0 &&
662 mconfig->formats_config.set_params == SKL_PARAM_BIND) {
663 sp_cfg = &mconfig->formats_config;
664 ret = skl_set_module_params(ctx, sp_cfg->caps,
665 sp_cfg->caps_size,
666 sp_cfg->param_id, mconfig);
667 if (ret < 0)
668 return ret;
669 }
670
671 for (i = 0; i < w->num_kcontrols; i++) {
672 k = &w->kcontrol_news[i];
673 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
674 sb = (void *) k->private_value;
675 bc = (struct skl_algo_data *)sb->dobj.private;
676
677 if (bc->set_params == SKL_PARAM_BIND) {
5e8f0ee4
D
678 if (mconfig->m_type == SKL_MODULE_TYPE_KPB)
679 skl_fill_sink_instance_id(ctx, bc);
cc6a4044
JK
680 ret = skl_set_module_params(ctx,
681 (u32 *)bc->params, bc->max,
682 bc->param_id, mconfig);
683 if (ret < 0)
684 return ret;
685 }
686 }
687 }
688
689 return 0;
690}
691
8724ff17
JK
692static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
693 struct skl *skl,
6bd4cf85 694 struct snd_soc_dapm_widget *src_w,
8724ff17 695 struct skl_module_cfg *src_mconfig)
d93f8e55
VK
696{
697 struct snd_soc_dapm_path *p;
0ed95d76 698 struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
8724ff17 699 struct skl_module_cfg *sink_mconfig;
d93f8e55 700 struct skl_sst *ctx = skl->skl_sst;
8724ff17 701 int ret;
d93f8e55 702
8724ff17 703 snd_soc_dapm_widget_for_each_sink_path(w, p) {
d93f8e55
VK
704 if (!p->connect)
705 continue;
706
707 dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
708 dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
709
0ed95d76 710 next_sink = p->sink;
6bd4cf85
JK
711
712 if (!is_skl_dsp_widget_type(p->sink))
713 return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
714
d93f8e55
VK
715 /*
716 * here we check widgets in the sink pipelines; they can be
717 * of any widget type and we are only interested in the ones
718 * used for SKL, so check that first
719 */
720 if ((p->sink->priv != NULL) &&
721 is_skl_dsp_widget_type(p->sink)) {
722
723 sink = p->sink;
d93f8e55
VK
724 sink_mconfig = sink->priv;
725
cc6a4044
JK
726 if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
727 sink_mconfig->m_state == SKL_MODULE_UNINIT)
728 continue;
729
d93f8e55
VK
730 /* Bind source to sink, mixin is always source */
731 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
732 if (ret)
733 return ret;
734
cc6a4044
JK
735 /* set module params after bind */
736 skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
737 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
738
d93f8e55
VK
739 /* Start sinks pipe first */
740 if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
d1730c3d
JK
741 if (sink_mconfig->pipe->conn_type !=
742 SKL_PIPE_CONN_TYPE_FE)
743 ret = skl_run_pipe(ctx,
744 sink_mconfig->pipe);
d93f8e55
VK
745 if (ret)
746 return ret;
747 }
d93f8e55
VK
748 }
749 }
750
8724ff17 751 if (!sink)
6bd4cf85 752 return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
8724ff17
JK
753
754 return 0;
755}
756
757/*
758 * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA
759 * we need to do the following:
760 * - Bind to sink pipeline
761 * Since the sink pipes can be running and we don't get mixer event on
762 * connect for already running mixer, we need to find the sink pipes
763 * here and bind to them. This way dynamic connect works.
764 * - Start sink pipeline, if not running
765 * - Then run current pipe
766 */
767static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
768 struct skl *skl)
769{
770 struct skl_module_cfg *src_mconfig;
771 struct skl_sst *ctx = skl->skl_sst;
772 int ret = 0;
773
774 src_mconfig = w->priv;
775
776 /*
777 * find which sink it is connected to, bind with the sink,
778 * if sink is not started, start sink pipe first, then start
779 * this pipe
780 */
6bd4cf85 781 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
d93f8e55
VK
782 if (ret)
783 return ret;
784
d93f8e55 785 /* Start source pipe last after starting all sinks */
d1730c3d
JK
786 if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
787 return skl_run_pipe(ctx, src_mconfig->pipe);
d93f8e55
VK
788
789 return 0;
790}
791
8724ff17
JK
792static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
793 struct snd_soc_dapm_widget *w, struct skl *skl)
794{
795 struct snd_soc_dapm_path *p;
796 struct snd_soc_dapm_widget *src_w = NULL;
797 struct skl_sst *ctx = skl->skl_sst;
798
799 snd_soc_dapm_widget_for_each_source_path(w, p) {
800 src_w = p->source;
801 if (!p->connect)
802 continue;
803
804 dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
805 dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);
806
807 /*
808 * here we check widgets in the source pipelines; they can
809 * be of any widget type and we are only interested in the
810 * ones used for SKL, so check that first
811 */
812 if ((p->source->priv != NULL) &&
813 is_skl_dsp_widget_type(p->source)) {
814 return p->source;
815 }
816 }
817
818 if (src_w != NULL)
819 return skl_get_src_dsp_widget(src_w, skl);
820
821 return NULL;
822}
823
d93f8e55
VK
824/*
825 * in the Post-PMU event of the mixer we need to do the following:
826 * - Check if this pipe is running
827 * - if not, then
828 * - bind this pipeline to its source pipeline
829 * if source pipe is already running, this means it is a dynamic
830 * connection and we need to bind only to that pipe
831 * - start this pipeline
832 */
833static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
834 struct skl *skl)
835{
836 int ret = 0;
d93f8e55
VK
837 struct snd_soc_dapm_widget *source, *sink;
838 struct skl_module_cfg *src_mconfig, *sink_mconfig;
839 struct skl_sst *ctx = skl->skl_sst;
840 int src_pipe_started = 0;
841
842 sink = w;
843 sink_mconfig = sink->priv;
844
845 /*
846 * If source pipe is already started, that means source is driving
847 * one more sink before this sink got connected. Since the source is
848 * started, bind this sink to source and start this pipe.
849 */
8724ff17
JK
850 source = skl_get_src_dsp_widget(w, skl);
851 if (source != NULL) {
852 src_mconfig = source->priv;
853 sink_mconfig = sink->priv;
854 src_pipe_started = 1;
d93f8e55
VK
855
856 /*
8724ff17
JK
857 * check the pipe state; if the source pipe is not started,
858 * there is no need to bind or start the pipe
d93f8e55 859 */
8724ff17
JK
860 if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
861 src_pipe_started = 0;
d93f8e55
VK
862 }
863
864 if (src_pipe_started) {
865 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
866 if (ret)
867 return ret;
868
cc6a4044
JK
869 /* set module params after bind */
870 skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
871 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
872
d1730c3d
JK
873 if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
874 ret = skl_run_pipe(ctx, sink_mconfig->pipe);
d93f8e55
VK
875 }
876
877 return ret;
878}
879
880/*
881 * in the Pre-PMD event of the mixer we need to do the following:
882 * - Stop the pipe
883 * - find the source connections and remove that from dapm_path_list
884 * - unbind with source pipelines if still connected
885 */
886static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
887 struct skl *skl)
888{
d93f8e55 889 struct skl_module_cfg *src_mconfig, *sink_mconfig;
ce1b5551 890 int ret = 0, i;
d93f8e55
VK
891 struct skl_sst *ctx = skl->skl_sst;
892
ce1b5551 893 sink_mconfig = w->priv;
d93f8e55
VK
894
895 /* Stop the pipe */
896 ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
897 if (ret)
898 return ret;
899
ce1b5551
JK
900 for (i = 0; i < sink_mconfig->max_in_queue; i++) {
901 if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
902 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
903 if (!src_mconfig)
904 continue;
905 /*
906 * If the pin is still bound here, the pmd for the source
907 * pipe has not occurred and the source is connected to
908 * some other sink, so it is the sink's responsibility
909 * to unbind itself from the source.
910 */
911 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
912 if (ret < 0)
913 return ret;
d93f8e55 914
ce1b5551
JK
915 ret = skl_unbind_modules(ctx,
916 src_mconfig, sink_mconfig);
d93f8e55 917 }
d93f8e55
VK
918 }
919
920 return ret;
921}
922
923/*
924 * in the Post-PMD event of the mixer we need to do the following:
925 * - Free the mcps used
926 * - Free the mem used
927 * - Unbind the modules within the pipeline
928 * - Delete the pipeline (modules are not required to be explicitly
929 * deleted, pipeline delete is enough here)
930 */
931static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
932 struct skl *skl)
933{
934 struct skl_module_cfg *mconfig = w->priv;
935 struct skl_pipe_module *w_module;
936 struct skl_module_cfg *src_module = NULL, *dst_module;
937 struct skl_sst *ctx = skl->skl_sst;
938 struct skl_pipe *s_pipe = mconfig->pipe;
939 int ret = 0;
940
260eb73a
D
941 if (s_pipe->state == SKL_PIPE_INVALID)
942 return -EINVAL;
943
d93f8e55 944 skl_tplg_free_pipe_mcps(skl, mconfig);
65976878 945 skl_tplg_free_pipe_mem(skl, mconfig);
d93f8e55
VK
946
947 list_for_each_entry(w_module, &s_pipe->w_list, node) {
948 dst_module = w_module->w->priv;
949
260eb73a
D
950 if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
951 skl_tplg_free_pipe_mcps(skl, dst_module);
d93f8e55
VK
952 if (src_module == NULL) {
953 src_module = dst_module;
954 continue;
955 }
956
7ca42f5a 957 skl_unbind_modules(ctx, src_module, dst_module);
d93f8e55
VK
958 src_module = dst_module;
959 }
960
961 ret = skl_delete_pipe(ctx, mconfig->pipe);
d93f8e55 962
6c5768b3 963 return skl_tplg_unload_pipe_modules(ctx, s_pipe);
d93f8e55
VK
964}
965
966/*
967 * in the Post-PMD event of PGA we need to do the following:
968 * - Free the mcps used
969 * - Stop the pipeline
970 * - If a source pipe is connected, unbind from the source pipelines
971 */
972static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
973 struct skl *skl)
974{
d93f8e55 975 struct skl_module_cfg *src_mconfig, *sink_mconfig;
ce1b5551 976 int ret = 0, i;
d93f8e55
VK
977 struct skl_sst *ctx = skl->skl_sst;
978
ce1b5551 979 src_mconfig = w->priv;
d93f8e55 980
d93f8e55
VK
981 /* Stop the pipe since this is a mixin module */
982 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
983 if (ret)
984 return ret;
985
ce1b5551
JK
986 for (i = 0; i < src_mconfig->max_out_queue; i++) {
987 if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
988 sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
989 if (!sink_mconfig)
990 continue;
991 /*
992 * This is a connector; if the pin is still bound, the
993 * unbind between source and sink has not happened yet
994 */
ce1b5551
JK
995 ret = skl_unbind_modules(ctx, src_mconfig,
996 sink_mconfig);
d93f8e55
VK
997 }
998 }
999
d93f8e55
VK
1000 return ret;
1001}
1002
1003/*
1004 * In modelling, we assume there will be ONLY one mixer in a pipeline. If
1005 * a mixer is not required then it is treated as a static mixer (aka vmixer)
1006 * with a hard path to the source module,
1007 * so we don't need to check whether the source is started, as the hard path
1008 * creates that dependency between them
1009 */
1010static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w,
1011 struct snd_kcontrol *k, int event)
1012{
1013 struct snd_soc_dapm_context *dapm = w->dapm;
1014 struct skl *skl = get_skl_ctx(dapm->dev);
1015
1016 switch (event) {
1017 case SND_SOC_DAPM_PRE_PMU:
1018 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1019
de1fedf2
JK
1020 case SND_SOC_DAPM_POST_PMU:
1021 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1022
1023 case SND_SOC_DAPM_PRE_PMD:
1024 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1025
d93f8e55
VK
1026 case SND_SOC_DAPM_POST_PMD:
1027 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1028 }
1029
1030 return 0;
1031}
1032
1033/*
1034 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
1035 * second one is required that is created as another pipe entity.
1036 * The mixer is responsible for pipe management and represents a pipeline
1037 * instance
1038 */
1039static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1040 struct snd_kcontrol *k, int event)
1041{
1042 struct snd_soc_dapm_context *dapm = w->dapm;
1043 struct skl *skl = get_skl_ctx(dapm->dev);
1044
1045 switch (event) {
1046 case SND_SOC_DAPM_PRE_PMU:
1047 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1048
1049 case SND_SOC_DAPM_POST_PMU:
1050 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1051
1052 case SND_SOC_DAPM_PRE_PMD:
1053 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1054
1055 case SND_SOC_DAPM_POST_PMD:
1056 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1057 }
1058
1059 return 0;
1060}
1061
1062/*
1063 * In modelling, we assume the rest of the modules in a pipeline are PGAs. But we
1064 * are interested in the last PGA (leaf PGA) in a pipeline, to disconnect from
1065 * the sink while it is running (two FE to one BE, or one FE to two BE
1066 * scenarios)
1067 */
1068static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1069 struct snd_kcontrol *k, int event)
1070
1071{
1072 struct snd_soc_dapm_context *dapm = w->dapm;
1073 struct skl *skl = get_skl_ctx(dapm->dev);
1074
1075 switch (event) {
1076 case SND_SOC_DAPM_PRE_PMU:
1077 return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1078
1079 case SND_SOC_DAPM_POST_PMD:
1080 return skl_tplg_pga_dapm_post_pmd_event(w, skl);
1081 }
1082
1083 return 0;
1084}
cfb0a873 1085
140adfba
JK
1086static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
1087 unsigned int __user *data, unsigned int size)
1088{
1089 struct soc_bytes_ext *sb =
1090 (struct soc_bytes_ext *)kcontrol->private_value;
1091 struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
7d9f2911
OA
1092 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1093 struct skl_module_cfg *mconfig = w->priv;
1094 struct skl *skl = get_skl_ctx(w->dapm->dev);
1095
1096 if (w->power)
1097 skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
0d682104 1098 bc->size, bc->param_id, mconfig);
140adfba 1099
41556f68
VK
1100 /* decrement size for TLV header */
1101 size -= 2 * sizeof(u32);
1102
1103 /* check size as we don't want to send kernel data */
1104 if (size > bc->max)
1105 size = bc->max;
1106
140adfba
JK
1107 if (bc->params) {
1108 if (copy_to_user(data, &bc->param_id, sizeof(u32)))
1109 return -EFAULT;
e8bc3c99 1110 if (copy_to_user(data + 1, &size, sizeof(u32)))
140adfba 1111 return -EFAULT;
e8bc3c99 1112 if (copy_to_user(data + 2, bc->params, size))
140adfba
JK
1113 return -EFAULT;
1114 }
1115
1116 return 0;
1117}
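/*
 * Editor's note on the TLV layout used above: the buffer returned to user
 * space is { u32 param_id, u32 size, payload[size] }, which is why two
 * u32 words are subtracted from the requested size before it is clamped
 * to bc->max.
 */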
1118
1119#define SKL_PARAM_VENDOR_ID 0xff
1120
1121static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
1122 const unsigned int __user *data, unsigned int size)
1123{
1124 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1125 struct skl_module_cfg *mconfig = w->priv;
1126 struct soc_bytes_ext *sb =
1127 (struct soc_bytes_ext *)kcontrol->private_value;
1128 struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
1129 struct skl *skl = get_skl_ctx(w->dapm->dev);
1130
1131 if (ac->params) {
0d682104
D
1132 if (size > ac->max)
1133 return -EINVAL;
1134
1135 ac->size = size;
140adfba
JK
1136 /*
1137 * if the param_id is of type Vendor, firmware expects the actual
1138 * parameter id and size from the control.
1139 */
1140 if (ac->param_id == SKL_PARAM_VENDOR_ID) {
1141 if (copy_from_user(ac->params, data, size))
1142 return -EFAULT;
1143 } else {
1144 if (copy_from_user(ac->params,
65b4bcb8 1145 data + 2, size))
140adfba
JK
1146 return -EFAULT;
1147 }
1148
1149 if (w->power)
1150 return skl_set_module_params(skl->skl_sst,
0d682104 1151 (u32 *)ac->params, ac->size,
140adfba
JK
1152 ac->param_id, mconfig);
1153 }
1154
1155 return 0;
1156}
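/*
 * Editor's note: the set path expects the same TLV framing; for regular
 * params the first two u32 words (param id and size) are skipped and only
 * the payload is copied, while SKL_PARAM_VENDOR_ID controls pass the user
 * buffer through from the start.
 */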
1157
8871dcb9
JK
1158/*
1159 * Fill the dma id for host and link. In case of passthrough
1160 * pipeline, both host and link are part of the same
1161 * pipeline, so we need to copy the link or host dma id based on dev_type
1162 */
1163static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1164 struct skl_pipe_params *params)
1165{
1166 struct skl_pipe *pipe = mcfg->pipe;
1167
1168 if (pipe->passthru) {
1169 switch (mcfg->dev_type) {
1170 case SKL_DEVICE_HDALINK:
1171 pipe->p_params->link_dma_id = params->link_dma_id;
1172 break;
1173
1174 case SKL_DEVICE_HDAHOST:
1175 pipe->p_params->host_dma_id = params->host_dma_id;
1176 break;
1177
1178 default:
1179 break;
1180 }
1181 pipe->p_params->s_fmt = params->s_fmt;
1182 pipe->p_params->ch = params->ch;
1183 pipe->p_params->s_freq = params->s_freq;
1184 pipe->p_params->stream = params->stream;
1185
1186 } else {
1187 memcpy(pipe->p_params, params, sizeof(*params));
1188 }
1189}
1190
cfb0a873
VK
1191/*
1192 * The FE params are passed by hw_params of the DAI.
1193 * On hw_params, the params are stored in Gateway module of the FE and we
1194 * need to calculate the format in DSP module configuration, that
1195 * conversion is done here
1196 */
1197int skl_tplg_update_pipe_params(struct device *dev,
1198 struct skl_module_cfg *mconfig,
1199 struct skl_pipe_params *params)
1200{
cfb0a873
VK
1201 struct skl_module_fmt *format = NULL;
1202
8871dcb9 1203 skl_tplg_fill_dma_id(mconfig, params);
cfb0a873
VK
1204
1205 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
4cd9899f 1206 format = &mconfig->in_fmt[0];
cfb0a873 1207 else
4cd9899f 1208 format = &mconfig->out_fmt[0];
cfb0a873
VK
1209
1210 /* set the hw_params */
1211 format->s_freq = params->s_freq;
1212 format->channels = params->ch;
1213 format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
1214
1215 /*
1216 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
1217 * container so update bit depth accordingly
1218 */
1219 switch (format->valid_bit_depth) {
1220 case SKL_DEPTH_16BIT:
1221 format->bit_depth = format->valid_bit_depth;
1222 break;
1223
1224 case SKL_DEPTH_24BIT:
6654f39e 1225 case SKL_DEPTH_32BIT:
cfb0a873
VK
1226 format->bit_depth = SKL_DEPTH_32BIT;
1227 break;
1228
1229 default:
1230 dev_err(dev, "Invalid bit depth %x for pipe\n",
1231 format->valid_bit_depth);
1232 return -EINVAL;
1233 }
1234
1235 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1236 mconfig->ibs = (format->s_freq / 1000) *
1237 (format->channels) *
1238 (format->bit_depth >> 3);
1239 } else {
1240 mconfig->obs = (format->s_freq / 1000) *
1241 (format->channels) *
1242 (format->bit_depth >> 3);
1243 }
1244
1245 return 0;
1246}
1247
1248/*
1249 * Query the module config for the FE DAI
1250 * This is used to find the hw_params set for that DAI and apply to FE
1251 * pipeline
1252 */
1253struct skl_module_cfg *
1254skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
1255{
1256 struct snd_soc_dapm_widget *w;
1257 struct snd_soc_dapm_path *p = NULL;
1258
1259 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1260 w = dai->playback_widget;
f0900eb2 1261 snd_soc_dapm_widget_for_each_sink_path(w, p) {
cfb0a873 1262 if (p->connect && p->sink->power &&
a28f51db 1263 !is_skl_dsp_widget_type(p->sink))
cfb0a873
VK
1264 continue;
1265
1266 if (p->sink->priv) {
1267 dev_dbg(dai->dev, "set params for %s\n",
1268 p->sink->name);
1269 return p->sink->priv;
1270 }
1271 }
1272 } else {
1273 w = dai->capture_widget;
f0900eb2 1274 snd_soc_dapm_widget_for_each_source_path(w, p) {
cfb0a873 1275 if (p->connect && p->source->power &&
a28f51db 1276 !is_skl_dsp_widget_type(p->source))
cfb0a873
VK
1277 continue;
1278
1279 if (p->source->priv) {
1280 dev_dbg(dai->dev, "set params for %s\n",
1281 p->source->name);
1282 return p->source->priv;
1283 }
1284 }
1285 }
1286
1287 return NULL;
1288}
1289
718a42b5
D
1290static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
1291 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1292{
1293 struct snd_soc_dapm_path *p;
1294 struct skl_module_cfg *mconfig = NULL;
1295
1296 snd_soc_dapm_widget_for_each_source_path(w, p) {
1297 if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
1298 if (p->connect &&
1299 (p->sink->id == snd_soc_dapm_aif_out) &&
1300 p->source->priv) {
1301 mconfig = p->source->priv;
1302 return mconfig;
1303 }
1304 mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
1305 if (mconfig)
1306 return mconfig;
1307 }
1308 }
1309 return mconfig;
1310}
1311
1312static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
1313 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1314{
1315 struct snd_soc_dapm_path *p;
1316 struct skl_module_cfg *mconfig = NULL;
1317
1318 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1319 if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
1320 if (p->connect &&
1321 (p->source->id == snd_soc_dapm_aif_in) &&
1322 p->sink->priv) {
1323 mconfig = p->sink->priv;
1324 return mconfig;
1325 }
1326 mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
1327 if (mconfig)
1328 return mconfig;
1329 }
1330 }
1331 return mconfig;
1332}
1333
1334struct skl_module_cfg *
1335skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
1336{
1337 struct snd_soc_dapm_widget *w;
1338 struct skl_module_cfg *mconfig;
1339
1340 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1341 w = dai->playback_widget;
1342 mconfig = skl_get_mconfig_pb_cpr(dai, w);
1343 } else {
1344 w = dai->capture_widget;
1345 mconfig = skl_get_mconfig_cap_cpr(dai, w);
1346 }
1347 return mconfig;
1348}
1349
cfb0a873
VK
1350static u8 skl_tplg_be_link_type(int dev_type)
1351{
1352 int ret;
1353
1354 switch (dev_type) {
1355 case SKL_DEVICE_BT:
1356 ret = NHLT_LINK_SSP;
1357 break;
1358
1359 case SKL_DEVICE_DMIC:
1360 ret = NHLT_LINK_DMIC;
1361 break;
1362
1363 case SKL_DEVICE_I2S:
1364 ret = NHLT_LINK_SSP;
1365 break;
1366
1367 case SKL_DEVICE_HDALINK:
1368 ret = NHLT_LINK_HDA;
1369 break;
1370
1371 default:
1372 ret = NHLT_LINK_INVALID;
1373 break;
1374 }
1375
1376 return ret;
1377}
1378
1379/*
1380 * Fill the BE gateway parameters
1381 * The BE gateway expects a blob of parameters which are kept in the ACPI
1382 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
1383 * The port can have multiple settings so pick based on the PCM
1384 * parameters
1385 */
1386static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
1387 struct skl_module_cfg *mconfig,
1388 struct skl_pipe_params *params)
1389{
cfb0a873
VK
1390 struct nhlt_specific_cfg *cfg;
1391 struct skl *skl = get_skl_ctx(dai->dev);
1392 int link_type = skl_tplg_be_link_type(mconfig->dev_type);
1393
8871dcb9 1394 skl_tplg_fill_dma_id(mconfig, params);
cfb0a873 1395
b30c275e
JK
1396 if (link_type == NHLT_LINK_HDA)
1397 return 0;
1398
cfb0a873
VK
1399 /* update the blob based on virtual bus_id*/
1400 cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
1401 params->s_fmt, params->ch,
1402 params->s_freq, params->stream);
1403 if (cfg) {
1404 mconfig->formats_config.caps_size = cfg->size;
bc03281a 1405 mconfig->formats_config.caps = (u32 *) &cfg->caps;
cfb0a873
VK
1406 } else {
1407 dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
1408 mconfig->vbus_id, link_type,
1409 params->stream);
1410 dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
1411 params->ch, params->s_freq, params->s_fmt);
1412 return -EINVAL;
1413 }
1414
1415 return 0;
1416}
1417
1418static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
1419 struct snd_soc_dapm_widget *w,
1420 struct skl_pipe_params *params)
1421{
1422 struct snd_soc_dapm_path *p;
4d8adccb 1423 int ret = -EIO;
cfb0a873 1424
f0900eb2 1425 snd_soc_dapm_widget_for_each_source_path(w, p) {
cfb0a873
VK
1426 if (p->connect && is_skl_dsp_widget_type(p->source) &&
1427 p->source->priv) {
1428
9a03cb49
JK
1429 ret = skl_tplg_be_fill_pipe_params(dai,
1430 p->source->priv, params);
1431 if (ret < 0)
1432 return ret;
cfb0a873 1433 } else {
9a03cb49
JK
1434 ret = skl_tplg_be_set_src_pipe_params(dai,
1435 p->source, params);
4d8adccb
SP
1436 if (ret < 0)
1437 return ret;
cfb0a873
VK
1438 }
1439 }
1440
4d8adccb 1441 return ret;
cfb0a873
VK
1442}
1443
1444static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
1445 struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
1446{
1447 struct snd_soc_dapm_path *p = NULL;
4d8adccb 1448 int ret = -EIO;
cfb0a873 1449
f0900eb2 1450 snd_soc_dapm_widget_for_each_sink_path(w, p) {
cfb0a873
VK
1451 if (p->connect && is_skl_dsp_widget_type(p->sink) &&
1452 p->sink->priv) {
1453
9a03cb49
JK
1454 ret = skl_tplg_be_fill_pipe_params(dai,
1455 p->sink->priv, params);
1456 if (ret < 0)
1457 return ret;
cfb0a873 1458 } else {
4d8adccb 1459 ret = skl_tplg_be_set_sink_pipe_params(
cfb0a873 1460 dai, p->sink, params);
4d8adccb
SP
1461 if (ret < 0)
1462 return ret;
cfb0a873
VK
1463 }
1464 }
1465
4d8adccb 1466 return ret;
cfb0a873
VK
1467}
1468
1469/*
1470 * BE hw_params can be source parameters (capture) or sink parameters
1471 * (playback). Based on sink and source we need to either find the source
1472 * list or the sink list and set the pipeline parameters
1473 */
1474int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1475 struct skl_pipe_params *params)
1476{
1477 struct snd_soc_dapm_widget *w;
1478
1479 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1480 w = dai->playback_widget;
1481
1482 return skl_tplg_be_set_src_pipe_params(dai, w, params);
1483
1484 } else {
1485 w = dai->capture_widget;
1486
1487 return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1488 }
1489
1490 return 0;
1491}
3af36706
VK
1492
1493static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1494 {SKL_MIXER_EVENT, skl_tplg_mixer_event},
1495 {SKL_VMIXER_EVENT, skl_tplg_vmixer_event},
1496 {SKL_PGA_EVENT, skl_tplg_pga_event},
1497};
1498
140adfba
JK
1499static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
1500 {SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
1501 skl_tplg_tlv_control_set},
1502};
1503
6277e832
SN
1504static int skl_tplg_fill_pipe_tkn(struct device *dev,
1505 struct skl_pipe *pipe, u32 tkn,
1506 u32 tkn_val)
3af36706 1507{
3af36706 1508
6277e832
SN
1509 switch (tkn) {
1510 case SKL_TKN_U32_PIPE_CONN_TYPE:
1511 pipe->conn_type = tkn_val;
1512 break;
1513
1514 case SKL_TKN_U32_PIPE_PRIORITY:
1515 pipe->pipe_priority = tkn_val;
1516 break;
1517
1518 case SKL_TKN_U32_PIPE_MEM_PGS:
1519 pipe->memory_pages = tkn_val;
1520 break;
1521
8a0cb236
VK
1522 case SKL_TKN_U32_PMODE:
1523 pipe->lp_mode = tkn_val;
1524 break;
1525
6277e832
SN
1526 default:
1527 dev_err(dev, "Token not handled %d\n", tkn);
1528 return -EINVAL;
3af36706 1529 }
6277e832
SN
1530
1531 return 0;
3af36706
VK
1532}
1533
1534/*
6277e832
SN
1535 * Add pipeline by parsing the relevant tokens
1536 * Return an existing pipe if the pipe already exists.
3af36706 1537 */
6277e832
SN
1538static int skl_tplg_add_pipe(struct device *dev,
1539 struct skl_module_cfg *mconfig, struct skl *skl,
1540 struct snd_soc_tplg_vendor_value_elem *tkn_elem)
3af36706
VK
1541{
1542 struct skl_pipeline *ppl;
1543 struct skl_pipe *pipe;
1544 struct skl_pipe_params *params;
1545
1546 list_for_each_entry(ppl, &skl->ppl_list, node) {
6277e832
SN
1547 if (ppl->pipe->ppl_id == tkn_elem->value) {
1548 mconfig->pipe = ppl->pipe;
1549 return EEXIST;
1550 }
3af36706
VK
1551 }
1552
1553 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
1554 if (!ppl)
6277e832 1555 return -ENOMEM;
3af36706
VK
1556
1557 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
1558 if (!pipe)
6277e832 1559 return -ENOMEM;
3af36706
VK
1560
1561 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
1562 if (!params)
6277e832 1563 return -ENOMEM;
3af36706 1564
3af36706 1565 pipe->p_params = params;
6277e832 1566 pipe->ppl_id = tkn_elem->value;
3af36706
VK
1567 INIT_LIST_HEAD(&pipe->w_list);
1568
1569 ppl->pipe = pipe;
1570 list_add(&ppl->node, &skl->ppl_list);
1571
6277e832
SN
1572 mconfig->pipe = pipe;
1573 mconfig->pipe->state = SKL_PIPE_INVALID;
1574
1575 return 0;
1576}
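/*
 * Editor's note: the positive EEXIST returned above (rather than -EEXIST)
 * is deliberate; the token parser treats it as "this pipe was already
 * created by an earlier widget" instead of as an error.
 */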
1577
1578static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
1579 struct skl_module_pin *m_pin,
1580 int pin_index, u32 value)
1581{
1582 switch (tkn) {
1583 case SKL_TKN_U32_PIN_MOD_ID:
1584 m_pin[pin_index].id.module_id = value;
1585 break;
1586
1587 case SKL_TKN_U32_PIN_INST_ID:
1588 m_pin[pin_index].id.instance_id = value;
1589 break;
1590
1591 default:
1592 dev_err(dev, "%d Not a pin token\n", value);
1593 return -EINVAL;
1594 }
1595
1596 return 0;
1597}
1598
1599/*
1600 * Parse for pin config specific tokens to fill up the
1601 * module private data
1602 */
1603static int skl_tplg_fill_pins_info(struct device *dev,
1604 struct skl_module_cfg *mconfig,
1605 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1606 int dir, int pin_count)
1607{
1608 int ret;
1609 struct skl_module_pin *m_pin;
1610
1611 switch (dir) {
1612 case SKL_DIR_IN:
1613 m_pin = mconfig->m_in_pin;
1614 break;
1615
1616 case SKL_DIR_OUT:
1617 m_pin = mconfig->m_out_pin;
1618 break;
1619
1620 default:
ecd286a9 1621 dev_err(dev, "Invalid direction value\n");
6277e832
SN
1622 return -EINVAL;
1623 }
1624
1625 ret = skl_tplg_fill_pin(dev, tkn_elem->token,
1626 m_pin, pin_count, tkn_elem->value);
1627
1628 if (ret < 0)
1629 return ret;
1630
1631 m_pin[pin_count].in_use = false;
1632 m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
1633
1634 return 0;
3af36706
VK
1635}
1636
6277e832
SN
1637/*
1638 * Fill up input/output module config format based
1639 * on the direction
1640 */
1641static int skl_tplg_fill_fmt(struct device *dev,
1642 struct skl_module_cfg *mconfig, u32 tkn,
1643 u32 value, u32 dir, u32 pin_count)
1644{
1645 struct skl_module_fmt *dst_fmt;
1646
1647 switch (dir) {
1648 case SKL_DIR_IN:
1649 dst_fmt = mconfig->in_fmt;
1650 dst_fmt += pin_count;
1651 break;
1652
1653 case SKL_DIR_OUT:
1654 dst_fmt = mconfig->out_fmt;
1655 dst_fmt += pin_count;
1656 break;
1657
1658 default:
ecd286a9 1659 dev_err(dev, "Invalid direction value\n");
6277e832
SN
1660 return -EINVAL;
1661 }
1662
1663 switch (tkn) {
1664 case SKL_TKN_U32_FMT_CH:
1665 dst_fmt->channels = value;
1666 break;
1667
1668 case SKL_TKN_U32_FMT_FREQ:
1669 dst_fmt->s_freq = value;
1670 break;
1671
1672 case SKL_TKN_U32_FMT_BIT_DEPTH:
1673 dst_fmt->bit_depth = value;
1674 break;
1675
1676 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1677 dst_fmt->valid_bit_depth = value;
1678 break;
1679
1680 case SKL_TKN_U32_FMT_CH_CONFIG:
1681 dst_fmt->ch_cfg = value;
1682 break;
1683
1684 case SKL_TKN_U32_FMT_INTERLEAVE:
1685 dst_fmt->interleaving_style = value;
1686 break;
1687
1688 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1689 dst_fmt->sample_type = value;
1690 break;
1691
1692 case SKL_TKN_U32_FMT_CH_MAP:
1693 dst_fmt->ch_map = value;
1694 break;
1695
1696 default:
ecd286a9 1697 dev_err(dev, "Invalid token %d\n", tkn);
6277e832
SN
1698 return -EINVAL;
1699 }
1700
1701 return 0;
1702}
1703
1704static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
1705 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
1706{
1707 if (uuid_tkn->token == SKL_TKN_UUID)
1708 memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
1709 else {
ecd286a9 1710 dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token);
6277e832
SN
1711 return -EINVAL;
1712 }
1713
1714 return 0;
1715}
1716
1717static void skl_tplg_fill_pin_dynamic_val(
1718 struct skl_module_pin *mpin, u32 pin_count, u32 value)
4cd9899f
HS
1719{
1720 int i;
1721
6277e832
SN
1722 for (i = 0; i < pin_count; i++)
1723 mpin[i].is_dynamic = value;
1724}
1725
1726/*
1727 * Parse tokens to fill up the module private data
1728 */
1729static int skl_tplg_get_token(struct device *dev,
1730 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1731 struct skl *skl, struct skl_module_cfg *mconfig)
1732{
1733 int tkn_count = 0;
1734 int ret;
1735 static int is_pipe_exists;
1736 static int pin_index, dir;
1737
1738 if (tkn_elem->token > SKL_TKN_MAX)
1739 return -EINVAL;
1740
1741 switch (tkn_elem->token) {
1742 case SKL_TKN_U8_IN_QUEUE_COUNT:
1743 mconfig->max_in_queue = tkn_elem->value;
1744 mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
1745 sizeof(*mconfig->m_in_pin),
1746 GFP_KERNEL);
1747 if (!mconfig->m_in_pin)
1748 return -ENOMEM;
1749
1750 break;
1751
1752 case SKL_TKN_U8_OUT_QUEUE_COUNT:
1753 mconfig->max_out_queue = tkn_elem->value;
1754 mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
1755 sizeof(*mconfig->m_out_pin),
1756 GFP_KERNEL);
1757
1758 if (!mconfig->m_out_pin)
1759 return -ENOMEM;
1760
1761 break;
1762
1763 case SKL_TKN_U8_DYN_IN_PIN:
1764 if (!mconfig->m_in_pin)
1765 return -ENOMEM;
1766
1767 skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
1768 mconfig->max_in_queue, tkn_elem->value);
1769
1770 break;
1771
1772 case SKL_TKN_U8_DYN_OUT_PIN:
1773 if (!mconfig->m_out_pin)
1774 return -ENOMEM;
1775
1776 skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
1777 mconfig->max_out_queue, tkn_elem->value);
1778
1779 break;
1780
1781 case SKL_TKN_U8_TIME_SLOT:
1782 mconfig->time_slot = tkn_elem->value;
1783 break;
1784
1785 case SKL_TKN_U8_CORE_ID:
1786 mconfig->core_id = tkn_elem->value;
1787
1788 case SKL_TKN_U8_MOD_TYPE:
1789 mconfig->m_type = tkn_elem->value;
1790 break;
1791
1792 case SKL_TKN_U8_DEV_TYPE:
1793 mconfig->dev_type = tkn_elem->value;
1794 break;
1795
1796 case SKL_TKN_U8_HW_CONN_TYPE:
1797 mconfig->hw_conn_type = tkn_elem->value;
1798 break;
1799
1800 case SKL_TKN_U16_MOD_INST_ID:
1801 mconfig->id.instance_id =
1802 tkn_elem->value;
1803 break;
1804
1805 case SKL_TKN_U32_MEM_PAGES:
1806 mconfig->mem_pages = tkn_elem->value;
1807 break;
1808
1809 case SKL_TKN_U32_MAX_MCPS:
1810 mconfig->mcps = tkn_elem->value;
1811 break;
1812
1813 case SKL_TKN_U32_OBS:
1814 mconfig->obs = tkn_elem->value;
1815 break;
1816
1817 case SKL_TKN_U32_IBS:
1818 mconfig->ibs = tkn_elem->value;
1819 break;
1820
1821 case SKL_TKN_U32_VBUS_ID:
1822 mconfig->vbus_id = tkn_elem->value;
1823 break;
1824
1825 case SKL_TKN_U32_PARAMS_FIXUP:
1826 mconfig->params_fixup = tkn_elem->value;
1827 break;
1828
1829 case SKL_TKN_U32_CONVERTER:
1830 mconfig->converter = tkn_elem->value;
1831 break;
1832
6bd9dcf3
VK
1833 case SKL_TKL_U32_D0I3_CAPS:
1834 mconfig->d0i3_caps = tkn_elem->value;
1835 break;
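/*
 * Editor's note: this is the token added by the "specify D0i3
 * configuration" change this listing belongs to; topology supplies the
 * module's D0i3 capability and it is stored in mconfig->d0i3_caps.
 */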
1836
6277e832
SN
1837 case SKL_TKN_U32_PIPE_ID:
1838 ret = skl_tplg_add_pipe(dev,
1839 mconfig, skl, tkn_elem);
1840
1841 if (ret < 0)
1842 return is_pipe_exists;
1843
1844 if (ret == EEXIST)
1845 is_pipe_exists = 1;
1846
1847 break;
1848
1849 case SKL_TKN_U32_PIPE_CONN_TYPE:
1850 case SKL_TKN_U32_PIPE_PRIORITY:
1851 case SKL_TKN_U32_PIPE_MEM_PGS:
8a0cb236 1852 case SKL_TKN_U32_PMODE:
6277e832
SN
1853 if (is_pipe_exists) {
1854 ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
1855 tkn_elem->token, tkn_elem->value);
1856 if (ret < 0)
1857 return ret;
1858 }
1859
1860 break;
1861
1862 /*
1863 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
1864 * direction and the pin count. The first four bits represent
1865 * direction and next four the pin count.
1866 */
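/*
 * Editor's illustration: a token value of 0x21 decodes to direction 1
 * (0x21 & SKL_IN_DIR_BIT_MASK) and pin index 2
 * ((0x21 & SKL_PIN_COUNT_MASK) >> 4).
 */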
1867 case SKL_TKN_U32_DIR_PIN_COUNT:
1868 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
1869 pin_index = (tkn_elem->value &
1870 SKL_PIN_COUNT_MASK) >> 4;
1871
1872 break;
1873
1874 case SKL_TKN_U32_FMT_CH:
1875 case SKL_TKN_U32_FMT_FREQ:
1876 case SKL_TKN_U32_FMT_BIT_DEPTH:
1877 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1878 case SKL_TKN_U32_FMT_CH_CONFIG:
1879 case SKL_TKN_U32_FMT_INTERLEAVE:
1880 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1881 case SKL_TKN_U32_FMT_CH_MAP:
1882 ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token,
1883 tkn_elem->value, dir, pin_index);
1884
1885 if (ret < 0)
1886 return ret;
1887
1888 break;
1889
1890 case SKL_TKN_U32_PIN_MOD_ID:
1891 case SKL_TKN_U32_PIN_INST_ID:
1892 ret = skl_tplg_fill_pins_info(dev,
1893 mconfig, tkn_elem, dir,
1894 pin_index);
1895 if (ret < 0)
1896 return ret;
1897
1898 break;
1899
1900 case SKL_TKN_U32_CAPS_SIZE:
1901 mconfig->formats_config.caps_size =
1902 tkn_elem->value;
1903
1904 break;
1905
1906 case SKL_TKN_U32_PROC_DOMAIN:
1907 mconfig->domain =
1908 tkn_elem->value;
1909
1910 break;
1911
1912 case SKL_TKN_U8_IN_PIN_TYPE:
1913 case SKL_TKN_U8_OUT_PIN_TYPE:
1914 case SKL_TKN_U8_CONN_TYPE:
1915 break;
1916
1917 default:
1918 dev_err(dev, "Token %d not handled\n",
1919 tkn_elem->token);
1920 return -EINVAL;
4cd9899f 1921 }
6277e832
SN
1922
1923 tkn_count++;
1924
1925 return tkn_count;
1926}
1927
1928/*
1929 * Parse the vendor array for specific tokens to construct
1930 * module private data
1931 */
1932static int skl_tplg_get_tokens(struct device *dev,
1933 char *pvt_data, struct skl *skl,
1934 struct skl_module_cfg *mconfig, int block_size)
1935{
1936 struct snd_soc_tplg_vendor_array *array;
1937 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
1938 int tkn_count = 0, ret;
1939 int off = 0, tuple_size = 0;
1940
1941 if (block_size <= 0)
1942 return -EINVAL;
1943
1944 while (tuple_size < block_size) {
1945 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
1946
1947 off += array->size;
1948
1949 switch (array->type) {
1950 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
ecd286a9 1951 dev_warn(dev, "no string tokens expected for skl tplg\n");
6277e832
SN
1952 continue;
1953
1954 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
1955 ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
1956 if (ret < 0)
1957 return ret;
1958
1959 tuple_size += sizeof(*array->uuid);
1960
1961 continue;
1962
1963 default:
1964 tkn_elem = array->value;
1965 tkn_count = 0;
1966 break;
1967 }
1968
1969 while (tkn_count <= (array->num_elems - 1)) {
1970 ret = skl_tplg_get_token(dev, tkn_elem,
1971 skl, mconfig);
1972
1973 if (ret < 0)
1974 return ret;
1975
1976 tkn_count = tkn_count + ret;
1977 tkn_elem++;
1978 }
1979
1980 tuple_size += tkn_count * sizeof(*tkn_elem);
1981 }
1982
1983 return 0;
1984}
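/*
 * Illustrative layout of one tuple block walked above (editor's sketch):
 *
 *   struct snd_soc_tplg_vendor_array            <- size, type, num_elems
 *   struct snd_soc_tplg_vendor_value_elem[0]    <- { token, value }
 *   struct snd_soc_tplg_vendor_value_elem[1]
 *   ...
 *
 * 'off' jumps by array->size to the next array header while 'tuple_size'
 * accumulates the bytes consumed, so the loop stops once block_size bytes
 * of the block have been parsed.
 */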
1985
1986/*
1987 * Every data block is preceded by a descriptor giving the number
1988 * of data blocks, the type of the block and its size
1989 */
1990static int skl_tplg_get_desc_blocks(struct device *dev,
1991 struct snd_soc_tplg_vendor_array *array)
1992{
1993 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
1994
1995 tkn_elem = array->value;
1996
1997 switch (tkn_elem->token) {
1998 case SKL_TKN_U8_NUM_BLOCKS:
1999 case SKL_TKN_U8_BLOCK_TYPE:
2000 case SKL_TKN_U16_BLOCK_SIZE:
2001 return tkn_elem->value;
2002
2003 default:
ecd286a9 2004 dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
6277e832
SN
2005 break;
2006 }
2007
2008 return -EINVAL;
2009}
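/*
 * Illustrative call sequence (editor's example): the parsers below call
 * skl_tplg_get_desc_blocks() on three consecutive single-element arrays:
 *
 *   num_blocks = skl_tplg_get_desc_blocks(dev, array);  SKL_TKN_U8_NUM_BLOCKS
 *   block_type = skl_tplg_get_desc_blocks(dev, array);  SKL_TKN_U8_BLOCK_TYPE
 *   block_size = skl_tplg_get_desc_blocks(dev, array);  SKL_TKN_U16_BLOCK_SIZE
 *
 * The same helper serves all three descriptors because each is a single
 * token/value pair and only the value is needed.
 */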
2010
2011/*
2012 * Parse the private data for the token and corresponding value.
2013 * The private data can have multiple data blocks. The data is preceded
2014 * by a descriptor for the number of blocks, and each block is preceded
2015 * by a descriptor for the type and size of the succeeding data block.
2016 */
2017static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
2018 struct skl *skl, struct device *dev,
2019 struct skl_module_cfg *mconfig)
2020{
2021 struct snd_soc_tplg_vendor_array *array;
2022 int num_blocks, block_size = 0, block_type, off = 0;
2023 char *data;
2024 int ret;
2025
2026 /* Read the NUM_DATA_BLOCKS descriptor */
2027 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
2028 ret = skl_tplg_get_desc_blocks(dev, array);
2029 if (ret < 0)
2030 return ret;
2031 num_blocks = ret;
2032
2033 off += array->size;
2034 array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off);
2035
2036 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2037 while (num_blocks > 0) {
2038 ret = skl_tplg_get_desc_blocks(dev, array);
2039
2040 if (ret < 0)
2041 return ret;
2042 block_type = ret;
2043 off += array->size;
2044
2045 array = (struct snd_soc_tplg_vendor_array *)
2046 (tplg_w->priv.data + off);
2047
2048 ret = skl_tplg_get_desc_blocks(dev, array);
2049
2050 if (ret < 0)
2051 return ret;
2052 block_size = ret;
2053 off += array->size;
2054
2055 array = (struct snd_soc_tplg_vendor_array *)
2056 (tplg_w->priv.data + off);
2057
2058 data = (tplg_w->priv.data + off);
2059
2060 if (block_type == SKL_TYPE_TUPLE) {
2061 ret = skl_tplg_get_tokens(dev, data,
2062 skl, mconfig, block_size);
2063
2064 if (ret < 0)
2065 return ret;
2066
2067 --num_blocks;
2068 } else {
2069 if (mconfig->formats_config.caps_size > 0)
2070 memcpy(mconfig->formats_config.caps, data,
2071 mconfig->formats_config.caps_size);
2072 --num_blocks;
2073 }
2074 }
2075
2076 return 0;
4cd9899f
HS
2077}
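/*
 * Illustrative widget private data layout parsed above (editor's sketch,
 * assuming one tuple block followed by one binary caps block):
 *
 *   [NUM_BLOCKS descriptor -> 2]
 *   [BLOCK_TYPE descriptor -> SKL_TYPE_TUPLE][BLOCK_SIZE descriptor]
 *   [tuple data: module, pipe, pin and format tokens]
 *   [BLOCK_TYPE descriptor -> binary][BLOCK_SIZE descriptor]
 *   [binary data copied into formats_config.caps]
 */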
2078
fe3f4442
D
2079static void skl_clear_pin_config(struct snd_soc_platform *platform,
2080 struct snd_soc_dapm_widget *w)
2081{
2082 int i;
2083 struct skl_module_cfg *mconfig;
2084 struct skl_pipe *pipe;
2085
2086 if (!strncmp(w->dapm->component->name, platform->component.name,
2087 strlen(platform->component.name))) {
2088 mconfig = w->priv;
2089 pipe = mconfig->pipe;
2090 for (i = 0; i < mconfig->max_in_queue; i++) {
2091 mconfig->m_in_pin[i].in_use = false;
2092 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2093 }
2094 for (i = 0; i < mconfig->max_out_queue; i++) {
2095 mconfig->m_out_pin[i].in_use = false;
2096 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2097 }
2098 pipe->state = SKL_PIPE_INVALID;
2099 mconfig->m_state = SKL_MODULE_UNINIT;
2100 }
2101}
2102
2103void skl_cleanup_resources(struct skl *skl)
2104{
2105 struct skl_sst *ctx = skl->skl_sst;
2106 struct snd_soc_platform *soc_platform = skl->platform;
2107 struct snd_soc_dapm_widget *w;
2108 struct snd_soc_card *card;
2109
2110 if (soc_platform == NULL)
2111 return;
2112
2113 card = soc_platform->component.card;
2114 if (!card || !card->instantiated)
2115 return;
2116
2117 skl->resource.mem = 0;
2118 skl->resource.mcps = 0;
2119
2120 list_for_each_entry(w, &card->widgets, list) {
2121 if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
2122 skl_clear_pin_config(soc_platform, w);
2123 }
2124
2125 skl_clear_module_cnt(ctx->dsp);
2126}
2127
3af36706
VK
2128/*
2129 * Topology core widget load callback
2130 *
2131 * This is used to save the private data for each widget, which gives the
2132 * driver information about the module and pipeline parameters that the DSP
2133 * FW expects, such as ids, resource values and formats.
2134 */
2135static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
b663a8c5
JK
2136 struct snd_soc_dapm_widget *w,
2137 struct snd_soc_tplg_dapm_widget *tplg_w)
3af36706
VK
2138{
2139 int ret;
2140 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2141 struct skl *skl = ebus_to_skl(ebus);
2142 struct hdac_bus *bus = ebus_to_hbus(ebus);
2143 struct skl_module_cfg *mconfig;
3af36706
VK
2144
2145 if (!tplg_w->priv.size)
2146 goto bind_event;
2147
2148 mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
2149
2150 if (!mconfig)
2151 return -ENOMEM;
2152
2153 w->priv = mconfig;
09305da9 2154
b7c50555
VK
2155 /*
2156 * The module binary can be loaded later, so set the id to be queried
2157 * when the module is loaded for a use case
2158 */
2159 mconfig->id.module_id = -1;
3af36706 2160
6277e832
SN
2161 /* Parse private data for tuples */
2162 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
2163 if (ret < 0)
2164 return ret;
3af36706
VK
2165bind_event:
2166 if (tplg_w->event_type == 0) {
3373f716 2167 dev_dbg(bus->dev, "ASoC: No event handler required\n");
3af36706
VK
2168 return 0;
2169 }
2170
2171 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
b663a8c5
JK
2172 ARRAY_SIZE(skl_tplg_widget_ops),
2173 tplg_w->event_type);
3af36706
VK
2174
2175 if (ret) {
2176 dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
2177 __func__, tplg_w->event_type);
2178 return -EINVAL;
2179 }
2180
2181 return 0;
2182}
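/*
 * Editor's note (illustrative): for a typical copier widget the private
 * data parsed above supplies the pipe id, resource values, pin counts and
 * audio formats, while the module id stays -1 until the module is loaded
 * for a use case and the id is queried then.
 */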
2183
140adfba
JK
2184static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
2185 struct snd_soc_tplg_bytes_control *bc)
2186{
2187 struct skl_algo_data *ac;
2188 struct skl_dfw_algo_data *dfw_ac =
2189 (struct skl_dfw_algo_data *)bc->priv.data;
2190
2191 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
2192 if (!ac)
2193 return -ENOMEM;
2194
2195 /* Fill private data */
2196 ac->max = dfw_ac->max;
2197 ac->param_id = dfw_ac->param_id;
2198 ac->set_params = dfw_ac->set_params;
0d682104 2199 ac->size = dfw_ac->max;
140adfba
JK
2200
2201 if (ac->max) {
2202 ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
2203 if (!ac->params)
2204 return -ENOMEM;
2205
edd7ea2d 2206 memcpy(ac->params, dfw_ac->params, ac->max);
140adfba
JK
2207 }
2208
2209 be->dobj.private = ac;
2210 return 0;
2211}
2212
2213static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
2214 struct snd_kcontrol_new *kctl,
2215 struct snd_soc_tplg_ctl_hdr *hdr)
2216{
2217 struct soc_bytes_ext *sb;
2218 struct snd_soc_tplg_bytes_control *tplg_bc;
2219 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2220 struct hdac_bus *bus = ebus_to_hbus(ebus);
2221
2222 switch (hdr->ops.info) {
2223 case SND_SOC_TPLG_CTL_BYTES:
2224 tplg_bc = container_of(hdr,
2225 struct snd_soc_tplg_bytes_control, hdr);
2226 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
2227 sb = (struct soc_bytes_ext *)kctl->private_value;
2228 if (tplg_bc->priv.size)
2229 return skl_init_algo_data(
2230 bus->dev, sb, tplg_bc);
2231 }
2232 break;
2233
2234 default:
2235 dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
2236 hdr->ops.get, hdr->ops.put, hdr->ops.info);
2237 break;
2238 }
2239
2240 return 0;
2241}
2242
541070ce
SN
2243static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
2244 struct snd_soc_tplg_vendor_string_elem *str_elem,
2245 struct skl_dfw_manifest *minfo)
2246{
2247 int tkn_count = 0;
2248 static int ref_count;
2249
2250 switch (str_elem->token) {
2251 case SKL_TKN_STR_LIB_NAME:
2252 if (ref_count > minfo->lib_count - 1) {
2253 ref_count = 0;
2254 return -EINVAL;
2255 }
2256
2257 strncpy(minfo->lib[ref_count].name, str_elem->string,
2258 ARRAY_SIZE(minfo->lib[ref_count].name));
2259 ref_count++;
2260 tkn_count++;
2261 break;
2262
2263 default:
ecd286a9 2264 dev_err(dev, "Not a string token %d\n", str_elem->token);
541070ce
SN
2265 break;
2266 }
2267
2268 return tkn_count;
2269}
2270
2271static int skl_tplg_get_str_tkn(struct device *dev,
2272 struct snd_soc_tplg_vendor_array *array,
2273 struct skl_dfw_manifest *minfo)
2274{
2275 int tkn_count = 0, ret;
2276 struct snd_soc_tplg_vendor_string_elem *str_elem;
2277
2278 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
2279 while (tkn_count < array->num_elems) {
2280 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, minfo);
2281 str_elem++;
2282
2283 if (ret < 0)
2284 return ret;
2285
2286 tkn_count = tkn_count + ret;
2287 }
2288
2289 return tkn_count;
2290}
2291
2292static int skl_tplg_get_int_tkn(struct device *dev,
2293 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2294 struct skl_dfw_manifest *minfo)
2295{
2296 int tkn_count = 0;
2297
2298 switch (tkn_elem->token) {
2299 case SKL_TKN_U32_LIB_COUNT:
2300 minfo->lib_count = tkn_elem->value;
2301 tkn_count++;
2302 break;
2303
2304 default:
ecd286a9 2305 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
541070ce
SN
2306 return -EINVAL;
2307 }
2308
2309 return tkn_count;
2310}
2311
2312/*
2313 * Fill the manifest structure by parsing the tokens based on the
2314 * type.
2315 */
2316static int skl_tplg_get_manifest_tkn(struct device *dev,
2317 char *pvt_data, struct skl_dfw_manifest *minfo,
2318 int block_size)
2319{
2320 int tkn_count = 0, ret;
2321 int off = 0, tuple_size = 0;
2322 struct snd_soc_tplg_vendor_array *array;
2323 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2324
2325 if (block_size <= 0)
2326 return -EINVAL;
2327
2328 while (tuple_size < block_size) {
2329 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2330 off += array->size;
2331 switch (array->type) {
2332 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
2333 ret = skl_tplg_get_str_tkn(dev, array, minfo);
2334
2335 if (ret < 0)
2336 return ret;
2337 tkn_count += ret;
2338
2339 tuple_size += tkn_count *
2340 sizeof(struct snd_soc_tplg_vendor_string_elem);
2341 continue;
2342
2343 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
 ecd286a9 2344 dev_warn(dev, "no uuid tokens for skl tplg manifest\n");
541070ce
SN
2345 continue;
2346
2347 default:
2348 tkn_elem = array->value;
2349 tkn_count = 0;
2350 break;
2351 }
2352
2353 while (tkn_count <= array->num_elems - 1) {
2354 ret = skl_tplg_get_int_tkn(dev,
2355 tkn_elem, minfo);
2356 if (ret < 0)
2357 return ret;
2358
2359 tkn_count = tkn_count + ret;
2360 tkn_elem++;
2361 tuple_size += tkn_count *
2362 sizeof(struct snd_soc_tplg_vendor_value_elem);
2363 break;
2364 }
2365 tkn_count = 0;
2366 }
2367
2368 return 0;
2369}
2370
2371/*
2372 * Parse manifest private data for tokens. The private data block is
 2373 * preceded by descriptors for the type and size of the data block.
2374 */
2375static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
2376 struct device *dev, struct skl_dfw_manifest *minfo)
2377{
2378 struct snd_soc_tplg_vendor_array *array;
2379 int num_blocks, block_size = 0, block_type, off = 0;
2380 char *data;
2381 int ret;
2382
2383 /* Read the NUM_DATA_BLOCKS descriptor */
2384 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
2385 ret = skl_tplg_get_desc_blocks(dev, array);
2386 if (ret < 0)
2387 return ret;
2388 num_blocks = ret;
2389
2390 off += array->size;
2391 array = (struct snd_soc_tplg_vendor_array *)
2392 (manifest->priv.data + off);
2393
2394 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2395 while (num_blocks > 0) {
2396 ret = skl_tplg_get_desc_blocks(dev, array);
2397
2398 if (ret < 0)
2399 return ret;
2400 block_type = ret;
2401 off += array->size;
2402
2403 array = (struct snd_soc_tplg_vendor_array *)
2404 (manifest->priv.data + off);
2405
2406 ret = skl_tplg_get_desc_blocks(dev, array);
2407
2408 if (ret < 0)
2409 return ret;
2410 block_size = ret;
2411 off += array->size;
2412
2413 array = (struct snd_soc_tplg_vendor_array *)
2414 (manifest->priv.data + off);
2415
2416 data = (manifest->priv.data + off);
2417
2418 if (block_type == SKL_TYPE_TUPLE) {
2419 ret = skl_tplg_get_manifest_tkn(dev, data, minfo,
2420 block_size);
2421
2422 if (ret < 0)
2423 return ret;
2424
2425 --num_blocks;
2426 } else {
2427 return -EINVAL;
2428 }
2429 }
2430
2431 return 0;
2432}
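/*
 * Illustrative manifest private data (editor's example, library names are
 * hypothetical): a value tuple carrying SKL_TKN_U32_LIB_COUNT = 2 followed
 * by a string tuple with two SKL_TKN_STR_LIB_NAME entries, e.g.
 * "dsp_lib_a.bin" and "dsp_lib_b.bin", which land in minfo->lib_count and
 * minfo->lib[0..1].name respectively.
 */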
2433
15ecaba9
K
2434static int skl_manifest_load(struct snd_soc_component *cmpnt,
2435 struct snd_soc_tplg_manifest *manifest)
2436{
2437 struct skl_dfw_manifest *minfo;
2438 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2439 struct hdac_bus *bus = ebus_to_hbus(ebus);
2440 struct skl *skl = ebus_to_skl(ebus);
2441 int ret = 0;
2442
c15ad605
VK
2443 /* proceed only if we have private data defined */
2444 if (manifest->priv.size == 0)
2445 return 0;
2446
15ecaba9 2447 minfo = &skl->skl_sst->manifest;
541070ce
SN
2448
2449 skl_tplg_get_manifest_data(manifest, bus->dev, minfo);
15ecaba9
K
2450
2451 if (minfo->lib_count > HDA_MAX_LIB) {
2452 dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
2453 minfo->lib_count);
2454 ret = -EINVAL;
2455 }
2456
2457 return ret;
2458}
2459
3af36706
VK
2460static struct snd_soc_tplg_ops skl_tplg_ops = {
2461 .widget_load = skl_tplg_widget_load,
140adfba
JK
2462 .control_load = skl_tplg_control_load,
2463 .bytes_ext_ops = skl_tlv_ops,
2464 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
15ecaba9 2465 .manifest = skl_manifest_load,
3af36706
VK
2466};
2467
287af4f9
JK
2468/*
 2469 * A pipe can have multiple modules, each of which is also a DAPM widget.
 2470 * While managing a pipeline we need the list of all the widgets in that
 2471 * pipeline, so this helper - skl_tplg_create_pipe_widget_list() -
 2472 * collects the SKL type widgets in it.
2473 */
2474static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
2475{
2476 struct snd_soc_dapm_widget *w;
2477 struct skl_module_cfg *mcfg = NULL;
2478 struct skl_pipe_module *p_module = NULL;
2479 struct skl_pipe *pipe;
2480
2481 list_for_each_entry(w, &platform->component.card->widgets, list) {
2482 if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
2483 mcfg = w->priv;
2484 pipe = mcfg->pipe;
2485
2486 p_module = devm_kzalloc(platform->dev,
2487 sizeof(*p_module), GFP_KERNEL);
2488 if (!p_module)
2489 return -ENOMEM;
2490
2491 p_module->w = w;
2492 list_add_tail(&p_module->node, &pipe->w_list);
2493 }
2494 }
2495
2496 return 0;
2497}
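/*
 * Editor's note: the per-pipe w_list built here is what the pipeline event
 * handlers later walk to init, bind and start the modules of that pipe.
 */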
2498
f0aa94fa
JK
2499static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
2500{
2501 struct skl_pipe_module *w_module;
2502 struct snd_soc_dapm_widget *w;
2503 struct skl_module_cfg *mconfig;
2504 bool host_found = false, link_found = false;
2505
2506 list_for_each_entry(w_module, &pipe->w_list, node) {
2507 w = w_module->w;
2508 mconfig = w->priv;
2509
2510 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
2511 host_found = true;
2512 else if (mconfig->dev_type != SKL_DEVICE_NONE)
2513 link_found = true;
2514 }
2515
2516 if (host_found && link_found)
2517 pipe->passthru = true;
2518 else
2519 pipe->passthru = false;
2520}
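/*
 * Editor's example: a pipe holding an HDA host copier (SKL_DEVICE_HDAHOST)
 * together with a link-side copier (for example an I2S or HDA link module)
 * is marked passthru; a pipe with only host-side or only link-side modules
 * is not.
 */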
2521
3af36706
VK
2522/* This will be read from topology manifest, currently defined here */
2523#define SKL_MAX_MCPS 30000000
2524#define SKL_FW_MAX_MEM 1000000
2525
2526/*
2527 * SKL topology init routine
2528 */
2529int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
2530{
2531 int ret;
2532 const struct firmware *fw;
2533 struct hdac_bus *bus = ebus_to_hbus(ebus);
2534 struct skl *skl = ebus_to_skl(ebus);
f0aa94fa 2535 struct skl_pipeline *ppl;
3af36706 2536
4b235c43 2537 ret = request_firmware(&fw, skl->tplg_name, bus->dev);
3af36706 2538 if (ret < 0) {
b663a8c5 2539 dev_err(bus->dev, "tplg fw %s load failed with %d\n",
4b235c43
VK
2540 skl->tplg_name, ret);
2541 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
2542 if (ret < 0) {
2543 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
2544 "dfw_sst.bin", ret);
2545 return ret;
2546 }
3af36706
VK
2547 }
2548
2549 /*
2550 * The complete tplg for SKL is loaded as index 0, we don't use
2551 * any other index
2552 */
b663a8c5
JK
2553 ret = snd_soc_tplg_component_load(&platform->component,
2554 &skl_tplg_ops, fw, 0);
3af36706
VK
2555 if (ret < 0) {
 2556 dev_err(bus->dev, "tplg component load failed %d\n", ret);
c14a82c7 2557 release_firmware(fw);
3af36706
VK
2558 return -EINVAL;
2559 }
2560
2561 skl->resource.max_mcps = SKL_MAX_MCPS;
2562 skl->resource.max_mem = SKL_FW_MAX_MEM;
2563
d8018361 2564 skl->tplg = fw;
287af4f9
JK
2565 ret = skl_tplg_create_pipe_widget_list(platform);
2566 if (ret < 0)
2567 return ret;
d8018361 2568
f0aa94fa
JK
2569 list_for_each_entry(ppl, &skl->ppl_list, node)
2570 skl_tplg_set_pipe_type(skl, ppl->pipe);
d8018361 2571
3af36706
VK
2572 return 0;
2573}