ASoC: Intel: Skylake: use a helper macro for the rounding-up calculation
[linux-2.6-block.git] / sound / soc / intel / skylake / skl-topology.c
1/*
2 * skl-topology.c - Implements Platform component ALSA controls/widget
3 * handlers.
4 *
5 * Copyright (C) 2014-2015 Intel Corp
6 * Author: Jeeja KP <jeeja.kp@intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19#include <linux/slab.h>
20#include <linux/types.h>
21#include <linux/firmware.h>
22#include <sound/soc.h>
23#include <sound/soc-topology.h>
 24#include <uapi/sound/snd_sst_tokens.h>
25#include "skl-sst-dsp.h"
26#include "skl-sst-ipc.h"
27#include "skl-topology.h"
28#include "skl.h"
29#include "skl-tplg-interface.h"
30#include "../common/sst-dsp.h"
31#include "../common/sst-dsp-priv.h"
 32
33#define SKL_CH_FIXUP_MASK (1 << 0)
34#define SKL_RATE_FIXUP_MASK (1 << 1)
35#define SKL_FMT_FIXUP_MASK (1 << 2)
36#define SKL_IN_DIR_BIT_MASK BIT(0)
37#define SKL_PIN_COUNT_MASK GENMASK(7, 4)
 38
39void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
40{
41 struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
42
43 switch (caps) {
44 case SKL_D0I3_NONE:
45 d0i3->non_d0i3++;
46 break;
47
48 case SKL_D0I3_STREAMING:
49 d0i3->streaming++;
50 break;
51
52 case SKL_D0I3_NON_STREAMING:
53 d0i3->non_streaming++;
54 break;
55 }
56}
57
58void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
59{
60 struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
61
62 switch (caps) {
63 case SKL_D0I3_NONE:
64 d0i3->non_d0i3--;
65 break;
66
67 case SKL_D0I3_STREAMING:
68 d0i3->streaming--;
69 break;
70
71 case SKL_D0I3_NON_STREAMING:
72 d0i3->non_streaming--;
73 break;
74 }
75}
76
77/*
 78 * SKL DSP driver modelling uses only a few DAPM widgets, so the rest are
 79 * ignored. This helper checks if the SKL driver handles this widget type
80 */
81static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
82{
83 switch (w->id) {
84 case snd_soc_dapm_dai_link:
85 case snd_soc_dapm_dai_in:
86 case snd_soc_dapm_aif_in:
87 case snd_soc_dapm_aif_out:
88 case snd_soc_dapm_dai_out:
89 case snd_soc_dapm_switch:
90 return false;
91 default:
92 return true;
93 }
94}
95
96/*
 97 * Each pipeline needs memory to be allocated. Check if we have free memory
 98 * from the available pool.
 99 */
 100static bool skl_is_pipe_mem_avail(struct skl *skl,
101 struct skl_module_cfg *mconfig)
102{
103 struct skl_sst *ctx = skl->skl_sst;
104
105 if (skl->resource.mem + mconfig->pipe->memory_pages >
106 skl->resource.max_mem) {
107 dev_err(ctx->dev,
108 "%s: module_id %d instance %d\n", __func__,
109 mconfig->id.module_id,
110 mconfig->id.instance_id);
111 dev_err(ctx->dev,
112 "exceeds ppl memory available %d mem %d\n",
113 skl->resource.max_mem, skl->resource.mem);
114 return false;
115 } else {
116 return true;
 117 }
 118}
 119
120/*
 121 * Add the mem to the mem pool. This is freed when the pipe is deleted.
 122 * Note: the DSP does the actual memory management; we only keep track of the
 123 * complete pool
124 */
125static void skl_tplg_alloc_pipe_mem(struct skl *skl,
126 struct skl_module_cfg *mconfig)
127{
 128 skl->resource.mem += mconfig->pipe->memory_pages;
129}
130
131/*
 132 * A pipeline needs DSP CPU resources for computation, this is
 133 * quantified in MCPS (Million Clocks Per Second) required for the module/pipe
 134 *
 135 * Each pipeline needs mcps to be allocated. Check if we have mcps for this
 136 * pipe.
 137 */
138
139static bool skl_is_pipe_mcps_avail(struct skl *skl,
140 struct skl_module_cfg *mconfig)
141{
142 struct skl_sst *ctx = skl->skl_sst;
143
144 if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
145 dev_err(ctx->dev,
146 "%s: module_id %d instance %d\n", __func__,
147 mconfig->id.module_id, mconfig->id.instance_id);
148 dev_err(ctx->dev,
 149 "exceeds ppl mcps available %d > mem %d\n",
150 skl->resource.max_mcps, skl->resource.mcps);
151 return false;
152 } else {
153 return true;
 154 }
 155}
 156
157static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
158 struct skl_module_cfg *mconfig)
159{
 160 skl->resource.mcps += mconfig->mcps;
161}
162
163/*
164 * Free the mcps when tearing down
165 */
166static void
167skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
168{
169 skl->resource.mcps -= mconfig->mcps;
170}
171
172/*
173 * Free the memory when tearing down
174 */
175static void
176skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
177{
178 skl->resource.mem -= mconfig->pipe->memory_pages;
179}
180
181
182static void skl_dump_mconfig(struct skl_sst *ctx,
183 struct skl_module_cfg *mcfg)
184{
185 dev_dbg(ctx->dev, "Dumping config\n");
186 dev_dbg(ctx->dev, "Input Format:\n");
187 dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
188 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
189 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
190 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->in_fmt[0].valid_bit_depth);
 191 dev_dbg(ctx->dev, "Output Format:\n");
192 dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
193 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
194 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->out_fmt[0].valid_bit_depth);
195 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
196}
197
198static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
199{
200 int slot_map = 0xFFFFFFFF;
201 int start_slot = 0;
202 int i;
203
204 for (i = 0; i < chs; i++) {
205 /*
206 * For 2 channels with starting slot as 0, slot map will
207 * look like 0xFFFFFF10.
208 */
209 slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
210 start_slot++;
211 }
212 fmt->ch_map = slot_map;
213}
214
215static void skl_tplg_update_params(struct skl_module_fmt *fmt,
216 struct skl_pipe_params *params, int fixup)
217{
218 if (fixup & SKL_RATE_FIXUP_MASK)
219 fmt->s_freq = params->s_freq;
 220 if (fixup & SKL_CH_FIXUP_MASK) {
 221 fmt->channels = params->ch;
222 skl_tplg_update_chmap(fmt, fmt->channels);
223 }
224 if (fixup & SKL_FMT_FIXUP_MASK) {
225 fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
226
227 /*
228 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
229 * container so update bit depth accordingly
230 */
231 switch (fmt->valid_bit_depth) {
232 case SKL_DEPTH_16BIT:
233 fmt->bit_depth = fmt->valid_bit_depth;
234 break;
235
236 default:
237 fmt->bit_depth = SKL_DEPTH_32BIT;
238 break;
239 }
240 }
241
242}
243
244/*
245 * A pipeline may have modules which impact the pcm parameters, like SRC,
246 * channel converter, format converter.
 247 * We need to calculate the output params by applying the 'fixup'.
 248 * Topology tells the driver which type of fixup is to be applied by
 249 * supplying the fixup mask, so based on that we calculate the output.
 250 *
 251 * For an FE the pcm hw_params is the source/target format. The same is
 252 * applicable for a BE with its hw_params invoked.
 253 * Here, based on the FE/BE pipeline and direction, we calculate the input
 254 * and output fixup and then apply that to a module
255 */
256static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
257 struct skl_pipe_params *params, bool is_fe)
258{
259 int in_fixup, out_fixup;
260 struct skl_module_fmt *in_fmt, *out_fmt;
261
262 /* Fixups will be applied to pin 0 only */
263 in_fmt = &m_cfg->in_fmt[0];
264 out_fmt = &m_cfg->out_fmt[0];
265
266 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
267 if (is_fe) {
268 in_fixup = m_cfg->params_fixup;
269 out_fixup = (~m_cfg->converter) &
270 m_cfg->params_fixup;
271 } else {
272 out_fixup = m_cfg->params_fixup;
273 in_fixup = (~m_cfg->converter) &
274 m_cfg->params_fixup;
275 }
276 } else {
277 if (is_fe) {
278 out_fixup = m_cfg->params_fixup;
279 in_fixup = (~m_cfg->converter) &
280 m_cfg->params_fixup;
281 } else {
282 in_fixup = m_cfg->params_fixup;
283 out_fixup = (~m_cfg->converter) &
284 m_cfg->params_fixup;
285 }
286 }
287
288 skl_tplg_update_params(in_fmt, params, in_fixup);
289 skl_tplg_update_params(out_fmt, params, out_fixup);
290}
291
292/*
293 * A module needs input and output buffers, which are dependent upon pcm
 294 * params, so once we have calculated params, we need buffer calculation as
295 * well.
296 */
297static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
298 struct skl_module_cfg *mcfg)
299{
300 int multiplier = 1;
 301 struct skl_module_fmt *in_fmt, *out_fmt;
302
 303 /* Since the fixup is applied to pin 0 only, ibs and obs need to
 304 * change for pin 0 only
305 */
306 in_fmt = &mcfg->in_fmt[0];
307 out_fmt = &mcfg->out_fmt[0];
308
309 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
310 multiplier = 5;
 311
312 mcfg->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
313 (mcfg->in_fmt->channels) *
314 (mcfg->in_fmt->bit_depth >> 3) *
315 multiplier;
316
317 mcfg->obs = DIV_ROUND_UP(mcfg->out_fmt->s_freq, 1000) *
318 (mcfg->out_fmt->channels) *
319 (mcfg->out_fmt->bit_depth >> 3) *
320 multiplier;
321}
322
323static u8 skl_tplg_be_dev_type(int dev_type)
324{
325 int ret;
326
327 switch (dev_type) {
328 case SKL_DEVICE_BT:
329 ret = NHLT_DEVICE_BT;
330 break;
331
332 case SKL_DEVICE_DMIC:
333 ret = NHLT_DEVICE_DMIC;
334 break;
335
336 case SKL_DEVICE_I2S:
337 ret = NHLT_DEVICE_I2S;
338 break;
339
340 default:
341 ret = NHLT_DEVICE_INVALID;
342 break;
343 }
344
345 return ret;
346}
347
348static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
349 struct skl_sst *ctx)
350{
351 struct skl_module_cfg *m_cfg = w->priv;
352 int link_type, dir;
353 u32 ch, s_freq, s_fmt;
354 struct nhlt_specific_cfg *cfg;
355 struct skl *skl = get_skl_ctx(ctx->dev);
 356 u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
357
358 /* check if we already have blob */
359 if (m_cfg->formats_config.caps_size > 0)
360 return 0;
361
 362 dev_dbg(ctx->dev, "Applying default cfg blob\n");
363 switch (m_cfg->dev_type) {
364 case SKL_DEVICE_DMIC:
365 link_type = NHLT_LINK_DMIC;
 366 dir = SNDRV_PCM_STREAM_CAPTURE;
367 s_freq = m_cfg->in_fmt[0].s_freq;
368 s_fmt = m_cfg->in_fmt[0].bit_depth;
369 ch = m_cfg->in_fmt[0].channels;
370 break;
371
372 case SKL_DEVICE_I2S:
373 link_type = NHLT_LINK_SSP;
374 if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
 375 dir = SNDRV_PCM_STREAM_PLAYBACK;
376 s_freq = m_cfg->out_fmt[0].s_freq;
377 s_fmt = m_cfg->out_fmt[0].bit_depth;
378 ch = m_cfg->out_fmt[0].channels;
379 } else {
380 dir = SNDRV_PCM_STREAM_CAPTURE;
381 s_freq = m_cfg->in_fmt[0].s_freq;
382 s_fmt = m_cfg->in_fmt[0].bit_depth;
383 ch = m_cfg->in_fmt[0].channels;
384 }
385 break;
386
387 default:
388 return -EINVAL;
389 }
390
391 /* update the blob based on virtual bus_id and default params */
392 cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
 393 s_fmt, ch, s_freq, dir, dev_type);
394 if (cfg) {
395 m_cfg->formats_config.caps_size = cfg->size;
396 m_cfg->formats_config.caps = (u32 *) &cfg->caps;
397 } else {
398 dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
399 m_cfg->vbus_id, link_type, dir);
400 dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
401 ch, s_freq, s_fmt);
402 return -EIO;
403 }
404
405 return 0;
406}
407
408static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
409 struct skl_sst *ctx)
410{
411 struct skl_module_cfg *m_cfg = w->priv;
412 struct skl_pipe_params *params = m_cfg->pipe->p_params;
413 int p_conn_type = m_cfg->pipe->conn_type;
414 bool is_fe;
415
416 if (!m_cfg->params_fixup)
417 return;
418
419 dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
420 w->name);
421
422 skl_dump_mconfig(ctx, m_cfg);
423
424 if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
425 is_fe = true;
426 else
427 is_fe = false;
428
429 skl_tplg_update_params_fixup(m_cfg, params, is_fe);
430 skl_tplg_update_buffer_size(ctx, m_cfg);
431
432 dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
433 w->name);
434
435 skl_dump_mconfig(ctx, m_cfg);
436}
437
438/*
 439 * some modules can have multiple params set from user controls and these
 440 * need to be set after the module is initialized. If the set_param flag is
 441 * set, module params will be set after the module is initialized.
442 */
443static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
444 struct skl_sst *ctx)
445{
446 int i, ret;
447 struct skl_module_cfg *mconfig = w->priv;
448 const struct snd_kcontrol_new *k;
449 struct soc_bytes_ext *sb;
450 struct skl_algo_data *bc;
451 struct skl_specific_cfg *sp_cfg;
452
453 if (mconfig->formats_config.caps_size > 0 &&
 454 mconfig->formats_config.set_params == SKL_PARAM_SET) {
455 sp_cfg = &mconfig->formats_config;
456 ret = skl_set_module_params(ctx, sp_cfg->caps,
457 sp_cfg->caps_size,
458 sp_cfg->param_id, mconfig);
459 if (ret < 0)
460 return ret;
461 }
462
463 for (i = 0; i < w->num_kcontrols; i++) {
464 k = &w->kcontrol_news[i];
465 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
466 sb = (void *) k->private_value;
467 bc = (struct skl_algo_data *)sb->dobj.private;
468
 469 if (bc->set_params == SKL_PARAM_SET) {
 470 ret = skl_set_module_params(ctx,
 471 (u32 *)bc->params, bc->size,
472 bc->param_id, mconfig);
473 if (ret < 0)
474 return ret;
475 }
476 }
477 }
478
479 return 0;
480}
481
482/*
 483 * some module params can be set from user control and this is required
 484 * when the module is initialized. if a module param is required in init it is
 485 * identified by the set_param flag. if the set_param flag is not set, then this
 486 * parameter needs to be set as part of module init.
487 */
488static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
489{
490 const struct snd_kcontrol_new *k;
491 struct soc_bytes_ext *sb;
492 struct skl_algo_data *bc;
493 struct skl_module_cfg *mconfig = w->priv;
494 int i;
495
496 for (i = 0; i < w->num_kcontrols; i++) {
497 k = &w->kcontrol_news[i];
498 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
499 sb = (struct soc_bytes_ext *)k->private_value;
500 bc = (struct skl_algo_data *)sb->dobj.private;
501
 502 if (bc->set_params != SKL_PARAM_INIT)
503 continue;
504
505 mconfig->formats_config.caps = (u32 *)&bc->params;
 506 mconfig->formats_config.caps_size = bc->size;
507
508 break;
509 }
510 }
511
512 return 0;
513}
514
515static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
516 struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
517{
518 switch (mcfg->dev_type) {
519 case SKL_DEVICE_HDAHOST:
520 return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);
521
522 case SKL_DEVICE_HDALINK:
523 return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
524 }
525
526 return 0;
527}
528
529/*
530 * Inside a pipe instance, we can have various modules. These modules need
 531 * to be instantiated in DSP by invoking INIT_MODULE IPC, which is achieved by
532 * skl_init_module() routine, so invoke that for all modules in a pipeline
533 */
534static int
535skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
536{
537 struct skl_pipe_module *w_module;
538 struct snd_soc_dapm_widget *w;
539 struct skl_module_cfg *mconfig;
540 struct skl_sst *ctx = skl->skl_sst;
541 int ret = 0;
542
543 list_for_each_entry(w_module, &pipe->w_list, node) {
544 w = w_module->w;
545 mconfig = w->priv;
546
547 /* check if module ids are populated */
548 if (mconfig->id.module_id < 0) {
549 dev_err(skl->skl_sst->dev,
550 "module %pUL id not populated\n",
551 (uuid_le *)mconfig->guid);
552 return -EIO;
553 }
554
 555 /* check resource available */
 556 if (!skl_is_pipe_mcps_avail(skl, mconfig))
557 return -ENOMEM;
558
559 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
560 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
561 mconfig->id.module_id, mconfig->guid);
562 if (ret < 0)
563 return ret;
564
565 mconfig->m_state = SKL_MODULE_LOADED;
566 }
567
568 /* prepare the DMA if the module is gateway cpr */
569 ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig);
570 if (ret < 0)
571 return ret;
572
573 /* update blob if blob is null for be with default value */
574 skl_tplg_update_be_blob(w, ctx);
575
576 /*
577 * apply fix/conversion to module params based on
578 * FE/BE params
579 */
580 skl_tplg_update_module_params(w, ctx);
581 mconfig->id.pvt_id = skl_get_pvt_id(ctx, mconfig);
582 if (mconfig->id.pvt_id < 0)
583 return ret;
 584 skl_tplg_set_module_init_data(w);
 585 ret = skl_init_module(ctx, mconfig);
586 if (ret < 0) {
587 skl_put_pvt_id(ctx, mconfig);
 588 return ret;
 589 }
 590 skl_tplg_alloc_pipe_mcps(skl, mconfig);
 591 ret = skl_tplg_set_module_params(w, ctx);
592 if (ret < 0)
593 return ret;
594 }
595
596 return 0;
597}
 598
599static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
600 struct skl_pipe *pipe)
601{
 602 int ret;
603 struct skl_pipe_module *w_module = NULL;
604 struct skl_module_cfg *mconfig = NULL;
605
606 list_for_each_entry(w_module, &pipe->w_list, node) {
607 mconfig = w_module->w->priv;
608
 609 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
610 mconfig->m_state > SKL_MODULE_UNINIT) {
611 ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
 612 mconfig->id.module_id);
613 if (ret < 0)
614 return -EIO;
615 }
 616 skl_put_pvt_id(ctx, mconfig);
617 }
618
619 /* no modules to unload in this path, so return */
620 return 0;
621}
622
623/*
624 * Mixer module represents a pipeline. So in the Pre-PMU event of mixer we
 625 * need to create the pipeline. So we do the following:
626 * - check the resources
627 * - Create the pipeline
628 * - Initialize the modules in pipeline
629 * - finally bind all modules together
630 */
631static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
632 struct skl *skl)
633{
634 int ret;
635 struct skl_module_cfg *mconfig = w->priv;
636 struct skl_pipe_module *w_module;
637 struct skl_pipe *s_pipe = mconfig->pipe;
638 struct skl_module_cfg *src_module = NULL, *dst_module;
639 struct skl_sst *ctx = skl->skl_sst;
640
641 /* check resource available */
 642 if (!skl_is_pipe_mcps_avail(skl, mconfig))
643 return -EBUSY;
644
 645 if (!skl_is_pipe_mem_avail(skl, mconfig))
646 return -ENOMEM;
647
648 /*
649 * Create a list of modules for pipe.
650 * This list contains modules from source to sink
651 */
652 ret = skl_create_pipeline(ctx, mconfig->pipe);
653 if (ret < 0)
654 return ret;
655
656 skl_tplg_alloc_pipe_mem(skl, mconfig);
657 skl_tplg_alloc_pipe_mcps(skl, mconfig);
658
659 /* Init all pipe modules from source to sink */
660 ret = skl_tplg_init_pipe_modules(skl, s_pipe);
661 if (ret < 0)
662 return ret;
663
664 /* Bind modules from source to sink */
665 list_for_each_entry(w_module, &s_pipe->w_list, node) {
666 dst_module = w_module->w->priv;
667
668 if (src_module == NULL) {
669 src_module = dst_module;
670 continue;
671 }
672
673 ret = skl_bind_modules(ctx, src_module, dst_module);
674 if (ret < 0)
675 return ret;
676
677 src_module = dst_module;
678 }
679
680 return 0;
681}
682
683static int skl_fill_sink_instance_id(struct skl_sst *ctx,
684 struct skl_algo_data *alg_data)
685{
686 struct skl_kpb_params *params = (struct skl_kpb_params *)alg_data->params;
687 struct skl_mod_inst_map *inst;
688 int i, pvt_id;
689
690 inst = params->map;
691
692 for (i = 0; i < params->num_modules; i++) {
693 pvt_id = skl_get_pvt_instance_id_map(ctx,
694 inst->mod_id, inst->inst_id);
695 if (pvt_id < 0)
696 return -EINVAL;
697 inst->inst_id = pvt_id;
698 inst++;
699 }
700 return 0;
701}
702
703/*
704 * Some modules require params to be set after the module is bound to
 705 * all of its connected pins.
706 *
707 * The module provider initializes set_param flag for such modules and we
708 * send params after binding
709 */
710static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
711 struct skl_module_cfg *mcfg, struct skl_sst *ctx)
712{
713 int i, ret;
714 struct skl_module_cfg *mconfig = w->priv;
715 const struct snd_kcontrol_new *k;
716 struct soc_bytes_ext *sb;
717 struct skl_algo_data *bc;
718 struct skl_specific_cfg *sp_cfg;
719
720 /*
721 * check all out/in pins are in bind state.
722 * if so set the module param
723 */
724 for (i = 0; i < mcfg->max_out_queue; i++) {
725 if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
726 return 0;
727 }
728
729 for (i = 0; i < mcfg->max_in_queue; i++) {
730 if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
731 return 0;
732 }
733
734 if (mconfig->formats_config.caps_size > 0 &&
735 mconfig->formats_config.set_params == SKL_PARAM_BIND) {
736 sp_cfg = &mconfig->formats_config;
737 ret = skl_set_module_params(ctx, sp_cfg->caps,
738 sp_cfg->caps_size,
739 sp_cfg->param_id, mconfig);
740 if (ret < 0)
741 return ret;
742 }
743
744 for (i = 0; i < w->num_kcontrols; i++) {
745 k = &w->kcontrol_news[i];
746 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
747 sb = (void *) k->private_value;
748 bc = (struct skl_algo_data *)sb->dobj.private;
749
750 if (bc->set_params == SKL_PARAM_BIND) {
751 if (mconfig->m_type == SKL_MODULE_TYPE_KPB)
752 skl_fill_sink_instance_id(ctx, bc);
753 ret = skl_set_module_params(ctx,
754 (u32 *)bc->params, bc->max,
755 bc->param_id, mconfig);
756 if (ret < 0)
757 return ret;
758 }
759 }
760 }
761
762 return 0;
763}
764
765static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
766 struct skl *skl,
 767 struct snd_soc_dapm_widget *src_w,
 768 struct skl_module_cfg *src_mconfig)
769{
770 struct snd_soc_dapm_path *p;
 771 struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
 772 struct skl_module_cfg *sink_mconfig;
 773 struct skl_sst *ctx = skl->skl_sst;
 774 int ret;
 775
 776 snd_soc_dapm_widget_for_each_sink_path(w, p) {
777 if (!p->connect)
778 continue;
779
780 dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
781 dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
782
 783 next_sink = p->sink;
784
785 if (!is_skl_dsp_widget_type(p->sink))
786 return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
787
788 /*
 789 * here we will check widgets in sink pipelines; they
 790 * can be of any widget type and we are only interested if
 791 * they are ones used for SKL, so check that first
792 */
793 if ((p->sink->priv != NULL) &&
794 is_skl_dsp_widget_type(p->sink)) {
795
796 sink = p->sink;
797 sink_mconfig = sink->priv;
798
799 if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
800 sink_mconfig->m_state == SKL_MODULE_UNINIT)
801 continue;
802
803 /* Bind source to sink, mixin is always source */
804 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
805 if (ret)
806 return ret;
807
808 /* set module params after bind */
809 skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
810 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
811
812 /* Start sinks pipe first */
813 if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
814 if (sink_mconfig->pipe->conn_type !=
815 SKL_PIPE_CONN_TYPE_FE)
816 ret = skl_run_pipe(ctx,
817 sink_mconfig->pipe);
818 if (ret)
819 return ret;
820 }
821 }
822 }
823
 824 if (!sink)
 825 return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
826
827 return 0;
828}
829
830/*
831 * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA
 832 * we need to do the following:
833 * - Bind to sink pipeline
834 * Since the sink pipes can be running and we don't get mixer event on
835 * connect for already running mixer, we need to find the sink pipes
836 * here and bind to them. This way dynamic connect works.
837 * - Start sink pipeline, if not running
838 * - Then run current pipe
839 */
840static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
841 struct skl *skl)
842{
843 struct skl_module_cfg *src_mconfig;
844 struct skl_sst *ctx = skl->skl_sst;
845 int ret = 0;
846
847 src_mconfig = w->priv;
848
849 /*
850 * find which sink it is connected to, bind with the sink,
851 * if sink is not started, start sink pipe first, then start
852 * this pipe
853 */
 854 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
855 if (ret)
856 return ret;
857
 858 /* Start source pipe last after starting all sinks */
859 if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
860 return skl_run_pipe(ctx, src_mconfig->pipe);
861
862 return 0;
863}
864
865static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
866 struct snd_soc_dapm_widget *w, struct skl *skl)
867{
868 struct snd_soc_dapm_path *p;
869 struct snd_soc_dapm_widget *src_w = NULL;
870 struct skl_sst *ctx = skl->skl_sst;
871
872 snd_soc_dapm_widget_for_each_source_path(w, p) {
873 src_w = p->source;
874 if (!p->connect)
875 continue;
876
877 dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
878 dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);
879
880 /*
 881 * here we will check widgets in sink pipelines; they can
 882 * be of any widget type and we are only interested if they are
 883 * ones used for SKL, so check that first
884 */
885 if ((p->source->priv != NULL) &&
886 is_skl_dsp_widget_type(p->source)) {
887 return p->source;
888 }
889 }
890
891 if (src_w != NULL)
892 return skl_get_src_dsp_widget(src_w, skl);
893
894 return NULL;
895}
896
897/*
 898 * in the Post-PMU event of mixer we need to do the following:
899 * - Check if this pipe is running
900 * - if not, then
901 * - bind this pipeline to its source pipeline
902 * if source pipe is already running, this means it is a dynamic
903 * connection and we need to bind only to that pipe
904 * - start this pipeline
905 */
906static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
907 struct skl *skl)
908{
909 int ret = 0;
910 struct snd_soc_dapm_widget *source, *sink;
911 struct skl_module_cfg *src_mconfig, *sink_mconfig;
912 struct skl_sst *ctx = skl->skl_sst;
913 int src_pipe_started = 0;
914
915 sink = w;
916 sink_mconfig = sink->priv;
917
918 /*
919 * If source pipe is already started, that means source is driving
 920 * one more sink before this sink got connected. Since source is
921 * started, bind this sink to source and start this pipe.
922 */
923 source = skl_get_src_dsp_widget(w, skl);
924 if (source != NULL) {
925 src_mconfig = source->priv;
926 sink_mconfig = sink->priv;
927 src_pipe_started = 1;
928
929 /*
930 * check pipe state, then no need to bind or start the
931 * pipe
 932 */
933 if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
934 src_pipe_started = 0;
935 }
936
937 if (src_pipe_started) {
938 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
939 if (ret)
940 return ret;
941
942 /* set module params after bind */
943 skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
944 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
945
946 if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
947 ret = skl_run_pipe(ctx, sink_mconfig->pipe);
948 }
949
950 return ret;
951}
952
953/*
 954 * in the Pre-PMD event of mixer we need to do the following:
955 * - Stop the pipe
956 * - find the source connections and remove that from dapm_path_list
957 * - unbind with source pipelines if still connected
958 */
959static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
960 struct skl *skl)
961{
 962 struct skl_module_cfg *src_mconfig, *sink_mconfig;
 963 int ret = 0, i;
964 struct skl_sst *ctx = skl->skl_sst;
965
 966 sink_mconfig = w->priv;
967
968 /* Stop the pipe */
969 ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
970 if (ret)
971 return ret;
972
973 for (i = 0; i < sink_mconfig->max_in_queue; i++) {
974 if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
975 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
976 if (!src_mconfig)
977 continue;
978 /*
979 * If path_found == 1, that means pmd for source
980 * pipe has not occurred, source is connected to
981 * some other sink. so its responsibility of sink
982 * to unbind itself from source.
983 */
984 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
985 if (ret < 0)
986 return ret;
 987
988 ret = skl_unbind_modules(ctx,
989 src_mconfig, sink_mconfig);
 990 }
991 }
992
993 return ret;
994}
995
996/*
 997 * in the Post-PMD event of mixer we need to do the following:
998 * - Free the mcps used
999 * - Free the mem used
1000 * - Unbind the modules within the pipeline
1001 * - Delete the pipeline (modules are not required to be explicitly
 1002 * deleted, pipeline delete is enough here)
1003 */
1004static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1005 struct skl *skl)
1006{
1007 struct skl_module_cfg *mconfig = w->priv;
1008 struct skl_pipe_module *w_module;
1009 struct skl_module_cfg *src_module = NULL, *dst_module;
1010 struct skl_sst *ctx = skl->skl_sst;
1011 struct skl_pipe *s_pipe = mconfig->pipe;
 1012
1013 if (s_pipe->state == SKL_PIPE_INVALID)
1014 return -EINVAL;
1015
 1016 skl_tplg_free_pipe_mcps(skl, mconfig);
 1017 skl_tplg_free_pipe_mem(skl, mconfig);
1018
1019 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1020 dst_module = w_module->w->priv;
1021
1022 if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
1023 skl_tplg_free_pipe_mcps(skl, dst_module);
1024 if (src_module == NULL) {
1025 src_module = dst_module;
1026 continue;
1027 }
1028
 1029 skl_unbind_modules(ctx, src_module, dst_module);
1030 src_module = dst_module;
1031 }
1032
 1033 skl_delete_pipe(ctx, mconfig->pipe);
 1034
 1035 return skl_tplg_unload_pipe_modules(ctx, s_pipe);
1036}
1037
1038/*
 1039 * in the Post-PMD event of PGA we need to do the following:
 1040 * - Free the mcps used
 1041 * - Stop the pipeline
 1042 * - If the source pipe is connected, unbind with source pipelines
1043 */
1044static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1045 struct skl *skl)
1046{
 1047 struct skl_module_cfg *src_mconfig, *sink_mconfig;
 1048 int ret = 0, i;
1049 struct skl_sst *ctx = skl->skl_sst;
1050
 1051 src_mconfig = w->priv;
 1052
1053 /* Stop the pipe since this is a mixin module */
1054 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
1055 if (ret)
1056 return ret;
1057
1058 for (i = 0; i < src_mconfig->max_out_queue; i++) {
1059 if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1060 sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
1061 if (!sink_mconfig)
1062 continue;
1063 /*
1064 * This is a connecter and if path is found that means
1065 * unbind between source and sink has not happened yet
1066 */
1067 ret = skl_unbind_modules(ctx, src_mconfig,
1068 sink_mconfig);
1069 }
1070 }
1071
1072 return ret;
1073}
1074
1075/*
1076 * In modelling, we assume there will be ONLY one mixer in a pipeline. If
 1077 * mixer is not required then it is treated as a static mixer aka vmixer with
 1078 * a hard path to the source module.
 1079 * So we don't need to check if the source is started or not, as the hard
 1080 * path puts a dependency on each other
1081 */
1082static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w,
1083 struct snd_kcontrol *k, int event)
1084{
1085 struct snd_soc_dapm_context *dapm = w->dapm;
1086 struct skl *skl = get_skl_ctx(dapm->dev);
1087
1088 switch (event) {
1089 case SND_SOC_DAPM_PRE_PMU:
1090 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1091
1092 case SND_SOC_DAPM_POST_PMU:
1093 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1094
1095 case SND_SOC_DAPM_PRE_PMD:
1096 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1097
1098 case SND_SOC_DAPM_POST_PMD:
1099 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1100 }
1101
1102 return 0;
1103}
1104
1105/*
1106 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
1107 * second one is required that is created as another pipe entity.
1108 * The mixer is responsible for pipe management and represent a pipeline
1109 * instance
1110 */
1111static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1112 struct snd_kcontrol *k, int event)
1113{
1114 struct snd_soc_dapm_context *dapm = w->dapm;
1115 struct skl *skl = get_skl_ctx(dapm->dev);
1116
1117 switch (event) {
1118 case SND_SOC_DAPM_PRE_PMU:
1119 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1120
1121 case SND_SOC_DAPM_POST_PMU:
1122 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1123
1124 case SND_SOC_DAPM_PRE_PMD:
1125 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1126
1127 case SND_SOC_DAPM_POST_PMD:
1128 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1129 }
1130
1131 return 0;
1132}
1133
1134/*
 1135 * In modelling, we assumed the rest of the modules in the pipeline are PGAs. But
 1136 * we are interested in the last PGA (leaf PGA) in a pipeline to disconnect with
1137 * the sink when it is running (two FE to one BE or one FE to two BE)
1138 * scenarios
1139 */
1140static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1141 struct snd_kcontrol *k, int event)
1142
1143{
1144 struct snd_soc_dapm_context *dapm = w->dapm;
1145 struct skl *skl = get_skl_ctx(dapm->dev);
1146
1147 switch (event) {
1148 case SND_SOC_DAPM_PRE_PMU:
1149 return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1150
1151 case SND_SOC_DAPM_POST_PMD:
1152 return skl_tplg_pga_dapm_post_pmd_event(w, skl);
1153 }
1154
1155 return 0;
1156}
 1157
1158static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
1159 unsigned int __user *data, unsigned int size)
1160{
1161 struct soc_bytes_ext *sb =
1162 (struct soc_bytes_ext *)kcontrol->private_value;
1163 struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
1164 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1165 struct skl_module_cfg *mconfig = w->priv;
1166 struct skl *skl = get_skl_ctx(w->dapm->dev);
1167
1168 if (w->power)
1169 skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
 1170 bc->size, bc->param_id, mconfig);
 1171
1172 /* decrement size for TLV header */
1173 size -= 2 * sizeof(u32);
1174
1175 /* check size as we don't want to send kernel data */
1176 if (size > bc->max)
1177 size = bc->max;
1178
1179 if (bc->params) {
1180 if (copy_to_user(data, &bc->param_id, sizeof(u32)))
1181 return -EFAULT;
 1182 if (copy_to_user(data + 1, &size, sizeof(u32)))
 1183 return -EFAULT;
 1184 if (copy_to_user(data + 2, bc->params, size))
1185 return -EFAULT;
1186 }
1187
1188 return 0;
1189}
1190
1191#define SKL_PARAM_VENDOR_ID 0xff
1192
1193static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
1194 const unsigned int __user *data, unsigned int size)
1195{
1196 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1197 struct skl_module_cfg *mconfig = w->priv;
1198 struct soc_bytes_ext *sb =
1199 (struct soc_bytes_ext *)kcontrol->private_value;
1200 struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
1201 struct skl *skl = get_skl_ctx(w->dapm->dev);
1202
1203 if (ac->params) {
1204 if (size > ac->max)
1205 return -EINVAL;
1206
1207 ac->size = size;
1208 /*
 1209 * if the param_id is of type Vendor, firmware expects actual
1210 * parameter id and size from the control.
1211 */
1212 if (ac->param_id == SKL_PARAM_VENDOR_ID) {
1213 if (copy_from_user(ac->params, data, size))
1214 return -EFAULT;
1215 } else {
1216 if (copy_from_user(ac->params,
 1217 data + 2, size))
1218 return -EFAULT;
1219 }
1220
1221 if (w->power)
1222 return skl_set_module_params(skl->skl_sst,
 1223 (u32 *)ac->params, ac->size,
1224 ac->param_id, mconfig);
1225 }
1226
1227 return 0;
1228}
1229
1230/*
1231 * Fill the dma id for host and link. In case of passthrough
 1232 * pipeline, this will have both host and link in the same
 1233 * pipeline, so we need to copy the link and host based on dev_type
1234 */
1235static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1236 struct skl_pipe_params *params)
1237{
1238 struct skl_pipe *pipe = mcfg->pipe;
1239
1240 if (pipe->passthru) {
1241 switch (mcfg->dev_type) {
1242 case SKL_DEVICE_HDALINK:
1243 pipe->p_params->link_dma_id = params->link_dma_id;
 1244 pipe->p_params->link_index = params->link_index;
1245 break;
1246
1247 case SKL_DEVICE_HDAHOST:
1248 pipe->p_params->host_dma_id = params->host_dma_id;
1249 break;
1250
1251 default:
1252 break;
1253 }
1254 pipe->p_params->s_fmt = params->s_fmt;
1255 pipe->p_params->ch = params->ch;
1256 pipe->p_params->s_freq = params->s_freq;
1257 pipe->p_params->stream = params->stream;
 1258 pipe->p_params->format = params->format;
1259
1260 } else {
1261 memcpy(pipe->p_params, params, sizeof(*params));
1262 }
1263}
1264
1265/*
1266 * The FE params are passed by hw_params of the DAI.
1267 * On hw_params, the params are stored in Gateway module of the FE and we
1268 * need to calculate the format in DSP module configuration, that
1269 * conversion is done here
1270 */
1271int skl_tplg_update_pipe_params(struct device *dev,
1272 struct skl_module_cfg *mconfig,
1273 struct skl_pipe_params *params)
1274{
1275 struct skl_module_fmt *format = NULL;
1276
 1277 skl_tplg_fill_dma_id(mconfig, params);
1278
1279 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
 1280 format = &mconfig->in_fmt[0];
 1281 else
 1282 format = &mconfig->out_fmt[0];
1283
1284 /* set the hw_params */
1285 format->s_freq = params->s_freq;
1286 format->channels = params->ch;
1287 format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
1288
1289 /*
1290 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
1291 * container so update bit depth accordingly
1292 */
1293 switch (format->valid_bit_depth) {
1294 case SKL_DEPTH_16BIT:
1295 format->bit_depth = format->valid_bit_depth;
1296 break;
1297
1298 case SKL_DEPTH_24BIT:
 1299 case SKL_DEPTH_32BIT:
1300 format->bit_depth = SKL_DEPTH_32BIT;
1301 break;
1302
1303 default:
1304 dev_err(dev, "Invalid bit depth %x for pipe\n",
1305 format->valid_bit_depth);
1306 return -EINVAL;
1307 }
1308
1309 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1310 mconfig->ibs = (format->s_freq / 1000) *
1311 (format->channels) *
1312 (format->bit_depth >> 3);
1313 } else {
1314 mconfig->obs = (format->s_freq / 1000) *
1315 (format->channels) *
1316 (format->bit_depth >> 3);
1317 }
1318
1319 return 0;
1320}
1321
1322/*
1323 * Query the module config for the FE DAI
1324 * This is used to find the hw_params set for that DAI and apply to FE
1325 * pipeline
1326 */
1327struct skl_module_cfg *
1328skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
1329{
1330 struct snd_soc_dapm_widget *w;
1331 struct snd_soc_dapm_path *p = NULL;
1332
1333 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1334 w = dai->playback_widget;
 1335 snd_soc_dapm_widget_for_each_sink_path(w, p) {
 1336 if (p->connect && p->sink->power &&
 1337 !is_skl_dsp_widget_type(p->sink))
1338 continue;
1339
1340 if (p->sink->priv) {
1341 dev_dbg(dai->dev, "set params for %s\n",
1342 p->sink->name);
1343 return p->sink->priv;
1344 }
1345 }
1346 } else {
1347 w = dai->capture_widget;
 1348 snd_soc_dapm_widget_for_each_source_path(w, p) {
 1349 if (p->connect && p->source->power &&
 1350 !is_skl_dsp_widget_type(p->source))
1351 continue;
1352
1353 if (p->source->priv) {
1354 dev_dbg(dai->dev, "set params for %s\n",
1355 p->source->name);
1356 return p->source->priv;
1357 }
1358 }
1359 }
1360
1361 return NULL;
1362}
1363
1364static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
1365 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1366{
1367 struct snd_soc_dapm_path *p;
1368 struct skl_module_cfg *mconfig = NULL;
1369
1370 snd_soc_dapm_widget_for_each_source_path(w, p) {
1371 if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
1372 if (p->connect &&
1373 (p->sink->id == snd_soc_dapm_aif_out) &&
1374 p->source->priv) {
1375 mconfig = p->source->priv;
1376 return mconfig;
1377 }
1378 mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
1379 if (mconfig)
1380 return mconfig;
1381 }
1382 }
1383 return mconfig;
1384}
1385
1386static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
1387 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1388{
1389 struct snd_soc_dapm_path *p;
1390 struct skl_module_cfg *mconfig = NULL;
1391
1392 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1393 if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
1394 if (p->connect &&
1395 (p->source->id == snd_soc_dapm_aif_in) &&
1396 p->sink->priv) {
1397 mconfig = p->sink->priv;
1398 return mconfig;
1399 }
1400 mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
1401 if (mconfig)
1402 return mconfig;
1403 }
1404 }
1405 return mconfig;
1406}
1407
1408struct skl_module_cfg *
1409skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
1410{
1411 struct snd_soc_dapm_widget *w;
1412 struct skl_module_cfg *mconfig;
1413
1414 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1415 w = dai->playback_widget;
1416 mconfig = skl_get_mconfig_pb_cpr(dai, w);
1417 } else {
1418 w = dai->capture_widget;
1419 mconfig = skl_get_mconfig_cap_cpr(dai, w);
1420 }
1421 return mconfig;
1422}
1423
1424static u8 skl_tplg_be_link_type(int dev_type)
1425{
1426 int ret;
1427
1428 switch (dev_type) {
1429 case SKL_DEVICE_BT:
1430 ret = NHLT_LINK_SSP;
1431 break;
1432
1433 case SKL_DEVICE_DMIC:
1434 ret = NHLT_LINK_DMIC;
1435 break;
1436
1437 case SKL_DEVICE_I2S:
1438 ret = NHLT_LINK_SSP;
1439 break;
1440
1441 case SKL_DEVICE_HDALINK:
1442 ret = NHLT_LINK_HDA;
1443 break;
1444
1445 default:
1446 ret = NHLT_LINK_INVALID;
1447 break;
1448 }
1449
1450 return ret;
1451}
1452
1453/*
1454 * Fill the BE gateway parameters
1455 * The BE gateway expects a blob of parameters which are kept in the ACPI
1456 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
1457 * The port can have multiple settings so pick based on the PCM
1458 * parameters
1459 */
1460static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
1461 struct skl_module_cfg *mconfig,
1462 struct skl_pipe_params *params)
1463{
1464 struct nhlt_specific_cfg *cfg;
1465 struct skl *skl = get_skl_ctx(dai->dev);
1466 int link_type = skl_tplg_be_link_type(mconfig->dev_type);
 1467 u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
 1468
 1469 skl_tplg_fill_dma_id(mconfig, params);
 1470
1471 if (link_type == NHLT_LINK_HDA)
1472 return 0;
1473
cfb0a873
VK
1474 /* update the blob based on virtual bus_id*/
1475 cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
1476 params->s_fmt, params->ch,
1477 params->s_freq, params->stream,
1478 dev_type);
1479 if (cfg) {
1480 mconfig->formats_config.caps_size = cfg->size;
 1481 mconfig->formats_config.caps = (u32 *) &cfg->caps;
1482 } else {
1483 dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
1484 mconfig->vbus_id, link_type,
1485 params->stream);
1486 dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
1487 params->ch, params->s_freq, params->s_fmt);
1488 return -EINVAL;
1489 }
1490
1491 return 0;
1492}
1493
1494static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
1495 struct snd_soc_dapm_widget *w,
1496 struct skl_pipe_params *params)
1497{
1498 struct snd_soc_dapm_path *p;
 1499 int ret = -EIO;
 1500
 1501 snd_soc_dapm_widget_for_each_source_path(w, p) {
1502 if (p->connect && is_skl_dsp_widget_type(p->source) &&
1503 p->source->priv) {
1504
1505 ret = skl_tplg_be_fill_pipe_params(dai,
1506 p->source->priv, params);
1507 if (ret < 0)
1508 return ret;
 1509 } else {
1510 ret = skl_tplg_be_set_src_pipe_params(dai,
1511 p->source, params);
1512 if (ret < 0)
1513 return ret;
1514 }
1515 }
1516
 1517 return ret;
1518}
1519
1520static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
1521 struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
1522{
1523 struct snd_soc_dapm_path *p = NULL;
 1524 int ret = -EIO;
 1525
 1526 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1527 if (p->connect && is_skl_dsp_widget_type(p->sink) &&
1528 p->sink->priv) {
1529
1530 ret = skl_tplg_be_fill_pipe_params(dai,
1531 p->sink->priv, params);
1532 if (ret < 0)
1533 return ret;
 1534 } else {
 1535 ret = skl_tplg_be_set_sink_pipe_params(
 1536 dai, p->sink, params);
1537 if (ret < 0)
1538 return ret;
1539 }
1540 }
1541
 1542 return ret;
1543}
1544
1545/*
1546 * BE hw_params can be a source parameters (capture) or sink parameters
1547 * (playback). Based on sink and source we need to either find the source
1548 * list or the sink list and set the pipeline parameters
1549 */
1550int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1551 struct skl_pipe_params *params)
1552{
1553 struct snd_soc_dapm_widget *w;
1554
1555 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1556 w = dai->playback_widget;
1557
1558 return skl_tplg_be_set_src_pipe_params(dai, w, params);
1559
1560 } else {
1561 w = dai->capture_widget;
1562
1563 return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1564 }
1565
1566 return 0;
1567}
1568
1569static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1570 {SKL_MIXER_EVENT, skl_tplg_mixer_event},
1571 {SKL_VMIXER_EVENT, skl_tplg_vmixer_event},
1572 {SKL_PGA_EVENT, skl_tplg_pga_event},
1573};
1574
1575static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
1576 {SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
1577 skl_tplg_tlv_control_set},
1578};
1579
1580static int skl_tplg_fill_pipe_tkn(struct device *dev,
1581 struct skl_pipe *pipe, u32 tkn,
1582 u32 tkn_val)
 1583{
 1584
1585 switch (tkn) {
1586 case SKL_TKN_U32_PIPE_CONN_TYPE:
1587 pipe->conn_type = tkn_val;
1588 break;
1589
1590 case SKL_TKN_U32_PIPE_PRIORITY:
1591 pipe->pipe_priority = tkn_val;
1592 break;
1593
1594 case SKL_TKN_U32_PIPE_MEM_PGS:
1595 pipe->memory_pages = tkn_val;
1596 break;
1597
1598 case SKL_TKN_U32_PMODE:
1599 pipe->lp_mode = tkn_val;
1600 break;
1601
1602 default:
1603 dev_err(dev, "Token not handled %d\n", tkn);
1604 return -EINVAL;
 1605 }
1606
1607 return 0;
1608}
1609
1610/*
1611 * Add pipeline by parsing the relevant tokens
1612 * Return an existing pipe if the pipe already exists.
 1613 */
1614static int skl_tplg_add_pipe(struct device *dev,
1615 struct skl_module_cfg *mconfig, struct skl *skl,
1616 struct snd_soc_tplg_vendor_value_elem *tkn_elem)
1617{
1618 struct skl_pipeline *ppl;
1619 struct skl_pipe *pipe;
1620 struct skl_pipe_params *params;
1621
1622 list_for_each_entry(ppl, &skl->ppl_list, node) {
1623 if (ppl->pipe->ppl_id == tkn_elem->value) {
1624 mconfig->pipe = ppl->pipe;
1625 return EEXIST;
1626 }
1627 }
1628
1629 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
1630 if (!ppl)
 1631 return -ENOMEM;
1632
1633 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
1634 if (!pipe)
 1635 return -ENOMEM;
1636
1637 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
1638 if (!params)
 1639 return -ENOMEM;
 1640
 1641 pipe->p_params = params;
 1642 pipe->ppl_id = tkn_elem->value;
1643 INIT_LIST_HEAD(&pipe->w_list);
1644
1645 ppl->pipe = pipe;
1646 list_add(&ppl->node, &skl->ppl_list);
1647
1648 mconfig->pipe = pipe;
1649 mconfig->pipe->state = SKL_PIPE_INVALID;
1650
1651 return 0;
1652}
1653
1654static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
1655 struct skl_module_pin *m_pin,
1656 int pin_index, u32 value)
1657{
1658 switch (tkn) {
1659 case SKL_TKN_U32_PIN_MOD_ID:
1660 m_pin[pin_index].id.module_id = value;
1661 break;
1662
1663 case SKL_TKN_U32_PIN_INST_ID:
1664 m_pin[pin_index].id.instance_id = value;
1665 break;
1666
1667 default:
1668 dev_err(dev, "%d Not a pin token\n", value);
1669 return -EINVAL;
1670 }
1671
1672 return 0;
1673}
1674
1675/*
1676 * Parse for pin config specific tokens to fill up the
1677 * module private data
1678 */
1679static int skl_tplg_fill_pins_info(struct device *dev,
1680 struct skl_module_cfg *mconfig,
1681 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1682 int dir, int pin_count)
1683{
1684 int ret;
1685 struct skl_module_pin *m_pin;
1686
1687 switch (dir) {
1688 case SKL_DIR_IN:
1689 m_pin = mconfig->m_in_pin;
1690 break;
1691
1692 case SKL_DIR_OUT:
1693 m_pin = mconfig->m_out_pin;
1694 break;
1695
1696 default:
 1697 dev_err(dev, "Invalid direction value\n");
1698 return -EINVAL;
1699 }
1700
1701 ret = skl_tplg_fill_pin(dev, tkn_elem->token,
1702 m_pin, pin_count, tkn_elem->value);
1703
1704 if (ret < 0)
1705 return ret;
1706
1707 m_pin[pin_count].in_use = false;
1708 m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
1709
1710 return 0;
1711}
1712
1713/*
1714 * Fill up input/output module config format based
1715 * on the direction
1716 */
1717static int skl_tplg_fill_fmt(struct device *dev,
1718 struct skl_module_cfg *mconfig, u32 tkn,
1719 u32 value, u32 dir, u32 pin_count)
1720{
1721 struct skl_module_fmt *dst_fmt;
1722
1723 switch (dir) {
1724 case SKL_DIR_IN:
1725 dst_fmt = mconfig->in_fmt;
1726 dst_fmt += pin_count;
1727 break;
1728
1729 case SKL_DIR_OUT:
1730 dst_fmt = mconfig->out_fmt;
1731 dst_fmt += pin_count;
1732 break;
1733
1734 default:
 1735 dev_err(dev, "Invalid direction value\n");
1736 return -EINVAL;
1737 }
1738
1739 switch (tkn) {
1740 case SKL_TKN_U32_FMT_CH:
1741 dst_fmt->channels = value;
1742 break;
1743
1744 case SKL_TKN_U32_FMT_FREQ:
1745 dst_fmt->s_freq = value;
1746 break;
1747
1748 case SKL_TKN_U32_FMT_BIT_DEPTH:
1749 dst_fmt->bit_depth = value;
1750 break;
1751
1752 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1753 dst_fmt->valid_bit_depth = value;
1754 break;
1755
1756 case SKL_TKN_U32_FMT_CH_CONFIG:
1757 dst_fmt->ch_cfg = value;
1758 break;
1759
1760 case SKL_TKN_U32_FMT_INTERLEAVE:
1761 dst_fmt->interleaving_style = value;
1762 break;
1763
1764 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1765 dst_fmt->sample_type = value;
1766 break;
1767
1768 case SKL_TKN_U32_FMT_CH_MAP:
1769 dst_fmt->ch_map = value;
1770 break;
1771
1772 default:
 1773 dev_err(dev, "Invalid token %d\n", tkn);
1774 return -EINVAL;
1775 }
1776
1777 return 0;
1778}
1779
1780static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
1781 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
1782{
1783 if (uuid_tkn->token == SKL_TKN_UUID)
1784 memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
1785 else {
 1786 dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token);
1787 return -EINVAL;
1788 }
1789
1790 return 0;
1791}
1792
1793static void skl_tplg_fill_pin_dynamic_val(
1794 struct skl_module_pin *mpin, u32 pin_count, u32 value)
1795{
1796 int i;
1797
1798 for (i = 0; i < pin_count; i++)
1799 mpin[i].is_dynamic = value;
1800}
1801
1802/*
1803 * Parse tokens to fill up the module private data
1804 */
1805static int skl_tplg_get_token(struct device *dev,
1806 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1807 struct skl *skl, struct skl_module_cfg *mconfig)
1808{
1809 int tkn_count = 0;
1810 int ret;
1811 static int is_pipe_exists;
1812 static int pin_index, dir;
1813
1814 if (tkn_elem->token > SKL_TKN_MAX)
1815 return -EINVAL;
1816
1817 switch (tkn_elem->token) {
1818 case SKL_TKN_U8_IN_QUEUE_COUNT:
1819 mconfig->max_in_queue = tkn_elem->value;
1820 mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
1821 sizeof(*mconfig->m_in_pin),
1822 GFP_KERNEL);
1823 if (!mconfig->m_in_pin)
1824 return -ENOMEM;
1825
1826 break;
1827
1828 case SKL_TKN_U8_OUT_QUEUE_COUNT:
1829 mconfig->max_out_queue = tkn_elem->value;
1830 mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
1831 sizeof(*mconfig->m_out_pin),
1832 GFP_KERNEL);
1833
1834 if (!mconfig->m_out_pin)
1835 return -ENOMEM;
1836
1837 break;
1838
1839 case SKL_TKN_U8_DYN_IN_PIN:
1840 if (!mconfig->m_in_pin)
1841 return -ENOMEM;
1842
1843 skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
1844 mconfig->max_in_queue, tkn_elem->value);
1845
1846 break;
1847
1848 case SKL_TKN_U8_DYN_OUT_PIN:
1849 if (!mconfig->m_out_pin)
1850 return -ENOMEM;
1851
1852 skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
1853 mconfig->max_out_queue, tkn_elem->value);
1854
1855 break;
1856
1857 case SKL_TKN_U8_TIME_SLOT:
1858 mconfig->time_slot = tkn_elem->value;
1859 break;
1860
1861 case SKL_TKN_U8_CORE_ID:
1862 mconfig->core_id = tkn_elem->value;
1863
1864 case SKL_TKN_U8_MOD_TYPE:
1865 mconfig->m_type = tkn_elem->value;
1866 break;
1867
1868 case SKL_TKN_U8_DEV_TYPE:
1869 mconfig->dev_type = tkn_elem->value;
1870 break;
1871
1872 case SKL_TKN_U8_HW_CONN_TYPE:
1873 mconfig->hw_conn_type = tkn_elem->value;
1874 break;
1875
1876 case SKL_TKN_U16_MOD_INST_ID:
1877 mconfig->id.instance_id =
1878 tkn_elem->value;
1879 break;
1880
1881 case SKL_TKN_U32_MEM_PAGES:
1882 mconfig->mem_pages = tkn_elem->value;
1883 break;
1884
1885 case SKL_TKN_U32_MAX_MCPS:
1886 mconfig->mcps = tkn_elem->value;
1887 break;
1888
1889 case SKL_TKN_U32_OBS:
1890 mconfig->obs = tkn_elem->value;
1891 break;
1892
1893 case SKL_TKN_U32_IBS:
1894 mconfig->ibs = tkn_elem->value;
1895 break;
1896
1897 case SKL_TKN_U32_VBUS_ID:
1898 mconfig->vbus_id = tkn_elem->value;
1899 break;
1900
1901 case SKL_TKN_U32_PARAMS_FIXUP:
1902 mconfig->params_fixup = tkn_elem->value;
1903 break;
1904
1905 case SKL_TKN_U32_CONVERTER:
1906 mconfig->converter = tkn_elem->value;
1907 break;
1908
1909 case SKL_TKL_U32_D0I3_CAPS:
1910 mconfig->d0i3_caps = tkn_elem->value;
1911 break;
1912
1913 case SKL_TKN_U32_PIPE_ID:
1914 ret = skl_tplg_add_pipe(dev,
1915 mconfig, skl, tkn_elem);
1916
1917 if (ret < 0)
1918 return is_pipe_exists;
1919
1920 if (ret == EEXIST)
1921 is_pipe_exists = 1;
1922
1923 break;
1924
1925 case SKL_TKN_U32_PIPE_CONN_TYPE:
1926 case SKL_TKN_U32_PIPE_PRIORITY:
1927 case SKL_TKN_U32_PIPE_MEM_PGS:
 1928 case SKL_TKN_U32_PMODE:
1929 if (is_pipe_exists) {
1930 ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
1931 tkn_elem->token, tkn_elem->value);
1932 if (ret < 0)
1933 return ret;
1934 }
1935
1936 break;
1937
1938 /*
1939 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
1940 * direction and the pin count. The first four bits represent
1941 * direction and next four the pin count.
1942 */
1943 case SKL_TKN_U32_DIR_PIN_COUNT:
1944 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
1945 pin_index = (tkn_elem->value &
1946 SKL_PIN_COUNT_MASK) >> 4;
1947
1948 break;
1949
1950 case SKL_TKN_U32_FMT_CH:
1951 case SKL_TKN_U32_FMT_FREQ:
1952 case SKL_TKN_U32_FMT_BIT_DEPTH:
1953 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1954 case SKL_TKN_U32_FMT_CH_CONFIG:
1955 case SKL_TKN_U32_FMT_INTERLEAVE:
1956 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1957 case SKL_TKN_U32_FMT_CH_MAP:
1958 ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token,
1959 tkn_elem->value, dir, pin_index);
1960
1961 if (ret < 0)
1962 return ret;
1963
1964 break;
1965
1966 case SKL_TKN_U32_PIN_MOD_ID:
1967 case SKL_TKN_U32_PIN_INST_ID:
1968 ret = skl_tplg_fill_pins_info(dev,
1969 mconfig, tkn_elem, dir,
1970 pin_index);
1971 if (ret < 0)
1972 return ret;
1973
1974 break;
1975
1976 case SKL_TKN_U32_CAPS_SIZE:
1977 mconfig->formats_config.caps_size =
1978 tkn_elem->value;
1979
1980 break;
1981
1982 case SKL_TKN_U32_PROC_DOMAIN:
1983 mconfig->domain =
1984 tkn_elem->value;
1985
1986 break;
1987
1988 case SKL_TKN_U8_IN_PIN_TYPE:
1989 case SKL_TKN_U8_OUT_PIN_TYPE:
1990 case SKL_TKN_U8_CONN_TYPE:
1991 break;
1992
1993 default:
1994 dev_err(dev, "Token %d not handled\n",
1995 tkn_elem->token);
1996 return -EINVAL;
1997 }
1998
1999 tkn_count++;
2000
2001 return tkn_count;
2002}
2003
2004/*
2005 * Parse the vendor array for specific tokens to construct
2006 * module private data
2007 */
2008static int skl_tplg_get_tokens(struct device *dev,
2009 char *pvt_data, struct skl *skl,
2010 struct skl_module_cfg *mconfig, int block_size)
2011{
2012 struct snd_soc_tplg_vendor_array *array;
2013 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2014 int tkn_count = 0, ret;
2015 int off = 0, tuple_size = 0;
2016
2017 if (block_size <= 0)
2018 return -EINVAL;
2019
2020 while (tuple_size < block_size) {
2021 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2022
2023 off += array->size;
2024
2025 switch (array->type) {
2026 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
2027 dev_warn(dev, "no string tokens expected for skl tplg\n");
2028 continue;
2029
2030 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
2031 ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
2032 if (ret < 0)
2033 return ret;
2034
2035 tuple_size += sizeof(*array->uuid);
2036
2037 continue;
2038
2039 default:
2040 tkn_elem = array->value;
2041 tkn_count = 0;
2042 break;
2043 }
2044
2045 while (tkn_count <= (array->num_elems - 1)) {
2046 ret = skl_tplg_get_token(dev, tkn_elem,
2047 skl, mconfig);
2048
2049 if (ret < 0)
2050 return ret;
2051
2052 tkn_count = tkn_count + ret;
2053 tkn_elem++;
2054 }
2055
2056 tuple_size += tkn_count * sizeof(*tkn_elem);
2057 }
2058
2059 return 0;
2060}
2061
2062/*
2063 * Every data block is preceded by a descriptor to read the number
2064 * of data blocks, the type of the block and its size
2065 */
2066static int skl_tplg_get_desc_blocks(struct device *dev,
2067 struct snd_soc_tplg_vendor_array *array)
2068{
2069 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2070
2071 tkn_elem = array->value;
2072
2073 switch (tkn_elem->token) {
2074 case SKL_TKN_U8_NUM_BLOCKS:
2075 case SKL_TKN_U8_BLOCK_TYPE:
2076 case SKL_TKN_U16_BLOCK_SIZE:
2077 return tkn_elem->value;
2078
2079 default:
2080 dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
2081 break;
2082 }
2083
2084 return -EINVAL;
2085}
2086
2087/*
2088 * Parse the private data for the token and corresponding value.
2089 * The private data can have multiple data blocks. So, a data block
2090 * is preceded by a descriptor for number of blocks and a descriptor
2091 * for the type and size of the succeeding data block.
2092 */
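/*
 * Illustrative layout of the widget private data (block count and sizes
 * are example values only):
 *
 *   NUM_DATA_BLOCKS descriptor      -> num_blocks = 2
 *   BLOCK_TYPE descriptor           -> SKL_TYPE_TUPLE
 *   BLOCK_SIZE descriptor           -> N bytes
 *   N bytes of vendor tuple arrays  (parsed by skl_tplg_get_tokens())
 *   BLOCK_TYPE descriptor           -> binary data block
 *   BLOCK_SIZE descriptor           -> M bytes
 *   M bytes of module caps          (copied to formats_config.caps)
 */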
2093static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
2094 struct skl *skl, struct device *dev,
2095 struct skl_module_cfg *mconfig)
2096{
2097 struct snd_soc_tplg_vendor_array *array;
2098 int num_blocks, block_size = 0, block_type, off = 0;
2099 char *data;
2100 int ret;
2101
2102 /* Read the NUM_DATA_BLOCKS descriptor */
2103 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
2104 ret = skl_tplg_get_desc_blocks(dev, array);
2105 if (ret < 0)
2106 return ret;
2107 num_blocks = ret;
2108
2109 off += array->size;
2110 array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off);
2111
2112 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2113 while (num_blocks > 0) {
2114 ret = skl_tplg_get_desc_blocks(dev, array);
2115
2116 if (ret < 0)
2117 return ret;
2118 block_type = ret;
2119 off += array->size;
2120
2121 array = (struct snd_soc_tplg_vendor_array *)
2122 (tplg_w->priv.data + off);
2123
2124 ret = skl_tplg_get_desc_blocks(dev, array);
2125
2126 if (ret < 0)
2127 return ret;
2128 block_size = ret;
2129 off += array->size;
2130
2131 array = (struct snd_soc_tplg_vendor_array *)
2132 (tplg_w->priv.data + off);
2133
2134 data = (tplg_w->priv.data + off);
2135
2136 if (block_type == SKL_TYPE_TUPLE) {
2137 ret = skl_tplg_get_tokens(dev, data,
2138 skl, mconfig, block_size);
2139
2140 if (ret < 0)
2141 return ret;
2142
2143 --num_blocks;
2144 } else {
2145 if (mconfig->formats_config.caps_size > 0)
2146 memcpy(mconfig->formats_config.caps, data,
2147 mconfig->formats_config.caps_size);
2148 --num_blocks;
2149 }
2150 }
2151
2152 return 0;
2153}
2154
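/*
 * Reset the pin usage, pipe state and module state of a widget that
 * belongs to this platform, so its resources can be set up again on the
 * next use case.
 */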
2155static void skl_clear_pin_config(struct snd_soc_platform *platform,
2156 struct snd_soc_dapm_widget *w)
2157{
2158 int i;
2159 struct skl_module_cfg *mconfig;
2160 struct skl_pipe *pipe;
2161
2162 if (!strncmp(w->dapm->component->name, platform->component.name,
2163 strlen(platform->component.name))) {
2164 mconfig = w->priv;
2165 pipe = mconfig->pipe;
2166 for (i = 0; i < mconfig->max_in_queue; i++) {
2167 mconfig->m_in_pin[i].in_use = false;
2168 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2169 }
2170 for (i = 0; i < mconfig->max_out_queue; i++) {
2171 mconfig->m_out_pin[i].in_use = false;
2172 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2173 }
2174 pipe->state = SKL_PIPE_INVALID;
2175 mconfig->m_state = SKL_MODULE_UNINIT;
2176 }
2177}
2178
2179void skl_cleanup_resources(struct skl *skl)
2180{
2181 struct skl_sst *ctx = skl->skl_sst;
2182 struct snd_soc_platform *soc_platform = skl->platform;
2183 struct snd_soc_dapm_widget *w;
2184 struct snd_soc_card *card;
2185
2186 if (soc_platform == NULL)
2187 return;
2188
2189 card = soc_platform->component.card;
2190 if (!card || !card->instantiated)
2191 return;
2192
2193 skl->resource.mem = 0;
2194 skl->resource.mcps = 0;
2195
2196 list_for_each_entry(w, &card->widgets, list) {
2197 if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
2198 skl_clear_pin_config(soc_platform, w);
2199 }
2200
2201 skl_clear_module_cnt(ctx->dsp);
2202}
2203
2204/*
2205 * Topology core widget load callback
2206 *
2207 * This is used to save the private data for each widget, which gives the
2208 * driver information about the module and pipeline parameters that the DSP
2209 * FW expects, like ids, resource values, formats etc.
2210 */
2211static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
2212 struct snd_soc_dapm_widget *w,
2213 struct snd_soc_tplg_dapm_widget *tplg_w)
2214{
2215 int ret;
2216 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2217 struct skl *skl = ebus_to_skl(ebus);
2218 struct hdac_bus *bus = ebus_to_hbus(ebus);
2219 struct skl_module_cfg *mconfig;
2220
2221 if (!tplg_w->priv.size)
2222 goto bind_event;
2223
2224 mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
2225
2226 if (!mconfig)
2227 return -ENOMEM;
2228
2229 w->priv = mconfig;
2230
2231 /*
2232 * module binary can be loaded later, so set the module id to be
2233 * queried when the module is loaded for a use case
2234 */
2235 mconfig->id.module_id = -1;
2236
2237 /* Parse private data for tuples */
2238 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
2239 if (ret < 0)
2240 return ret;
2241bind_event:
2242 if (tplg_w->event_type == 0) {
2243 dev_dbg(bus->dev, "ASoC: No event handler required\n");
2244 return 0;
2245 }
2246
2247 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
2248 ARRAY_SIZE(skl_tplg_widget_ops),
2249 tplg_w->event_type);
2250
2251 if (ret) {
2252 dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
2253 __func__, tplg_w->event_type);
2254 return -EINVAL;
2255 }
2256
2257 return 0;
2258}
2259
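/*
 * Copy the bytes-control private data from the topology binary
 * (skl_dfw_algo_data) into a runtime skl_algo_data block that the
 * TLV byte control handlers use later.
 */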
2260static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
2261 struct snd_soc_tplg_bytes_control *bc)
2262{
2263 struct skl_algo_data *ac;
2264 struct skl_dfw_algo_data *dfw_ac =
2265 (struct skl_dfw_algo_data *)bc->priv.data;
2266
2267 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
2268 if (!ac)
2269 return -ENOMEM;
2270
2271 /* Fill private data */
2272 ac->max = dfw_ac->max;
2273 ac->param_id = dfw_ac->param_id;
2274 ac->set_params = dfw_ac->set_params;
2275 ac->size = dfw_ac->max;
2276
2277 if (ac->max) {
2278 ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
2279 if (!ac->params)
2280 return -ENOMEM;
2281
2282 memcpy(ac->params, dfw_ac->params, ac->max);
2283 }
2284
2285 be->dobj.private = ac;
2286 return 0;
2287}
2288
2289static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
2290 struct snd_kcontrol_new *kctl,
2291 struct snd_soc_tplg_ctl_hdr *hdr)
2292{
2293 struct soc_bytes_ext *sb;
2294 struct snd_soc_tplg_bytes_control *tplg_bc;
2295 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2296 struct hdac_bus *bus = ebus_to_hbus(ebus);
2297
2298 switch (hdr->ops.info) {
2299 case SND_SOC_TPLG_CTL_BYTES:
2300 tplg_bc = container_of(hdr,
2301 struct snd_soc_tplg_bytes_control, hdr);
2302 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
2303 sb = (struct soc_bytes_ext *)kctl->private_value;
2304 if (tplg_bc->priv.size)
2305 return skl_init_algo_data(
2306 bus->dev, sb, tplg_bc);
2307 }
2308 break;
2309
2310 default:
2311 dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
2312 hdr->ops.get, hdr->ops.put, hdr->ops.info);
2313 break;
2314 }
2315
2316 return 0;
2317}
2318
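/*
 * Parse a manifest string token: SKL_TKN_STR_LIB_NAME fills the next
 * entry of the firmware library name table (lib_info), up to lib_count
 * entries.
 */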
2319static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
2320 struct snd_soc_tplg_vendor_string_elem *str_elem,
2321 struct skl *skl)
2322{
2323 int tkn_count = 0;
2324 static int ref_count;
2325
2326 switch (str_elem->token) {
2327 case SKL_TKN_STR_LIB_NAME:
2328 if (ref_count > skl->skl_sst->lib_count - 1) {
2329 ref_count = 0;
2330 return -EINVAL;
2331 }
2332
2333 strncpy(skl->skl_sst->lib_info[ref_count].name,
2334 str_elem->string,
2335 ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name));
2336 ref_count++;
2337 tkn_count++;
2338 break;
2339
2340 default:
2341 dev_err(dev, "Not a string token %d\n", str_elem->token);
2342 break;
2343 }
2344
2345 return tkn_count;
2346}
2347
2348static int skl_tplg_get_str_tkn(struct device *dev,
2349 struct snd_soc_tplg_vendor_array *array,
2350 struct skl *skl)
2351{
2352 int tkn_count = 0, ret;
2353 struct snd_soc_tplg_vendor_string_elem *str_elem;
2354
2355 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
2356 while (tkn_count < array->num_elems) {
2357 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
2358 str_elem++;
2359
2360 if (ret < 0)
2361 return ret;
2362
2363 tkn_count = tkn_count + ret;
2364 }
2365
2366 return tkn_count;
2367}
2368
2369static int skl_tplg_get_int_tkn(struct device *dev,
2370 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2371 struct skl *skl)
2372{
2373 int tkn_count = 0;
2374
2375 switch (tkn_elem->token) {
2376 case SKL_TKN_U32_LIB_COUNT:
2377 skl->skl_sst->lib_count = tkn_elem->value;
2378 tkn_count++;
2379 break;
2380
2381 default:
2382 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
2383 return -EINVAL;
2384 }
2385
2386 return tkn_count;
2387}
2388
2389/*
2390 * Fill the manifest structure by parsing the tokens based on the
2391 * type.
2392 */
2393static int skl_tplg_get_manifest_tkn(struct device *dev,
2394 char *pvt_data, struct skl *skl,
2395 int block_size)
2396{
2397 int tkn_count = 0, ret;
2398 int off = 0, tuple_size = 0;
2399 struct snd_soc_tplg_vendor_array *array;
2400 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2401
2402 if (block_size <= 0)
2403 return -EINVAL;
2404
2405 while (tuple_size < block_size) {
2406 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2407 off += array->size;
2408 switch (array->type) {
2409 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
2410 ret = skl_tplg_get_str_tkn(dev, array, skl);
2411
2412 if (ret < 0)
2413 return ret;
2414 tkn_count += ret;
2415
2416 tuple_size += tkn_count *
2417 sizeof(struct snd_soc_tplg_vendor_string_elem);
2418 continue;
2419
2420 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
2421 dev_warn(dev, "no uuid tokens for skl tplg manifest\n");
2422 continue;
2423
2424 default:
2425 tkn_elem = array->value;
2426 tkn_count = 0;
2427 break;
2428 }
2429
2430 while (tkn_count <= array->num_elems - 1) {
2431 ret = skl_tplg_get_int_tkn(dev,
2432 tkn_elem, skl);
2433 if (ret < 0)
2434 return ret;
2435
2436 tkn_count = tkn_count + ret;
2437 tkn_elem++;
2438 tuple_size += tkn_count *
2439 sizeof(struct snd_soc_tplg_vendor_value_elem);
2440 break;
2441 }
2442 tkn_count = 0;
2443 }
2444
2445 return 0;
2446}
2447
2448/*
2449 * Parse manifest private data for tokens. The private data block is
2450 * preceded by descriptors for type and size of data block.
2451 */
2452static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
2453 struct device *dev, struct skl *skl)
2454{
2455 struct snd_soc_tplg_vendor_array *array;
2456 int num_blocks, block_size = 0, block_type, off = 0;
2457 char *data;
2458 int ret;
2459
2460 /* Read the NUM_DATA_BLOCKS descriptor */
2461 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
2462 ret = skl_tplg_get_desc_blocks(dev, array);
2463 if (ret < 0)
2464 return ret;
2465 num_blocks = ret;
2466
2467 off += array->size;
2468 array = (struct snd_soc_tplg_vendor_array *)
2469 (manifest->priv.data + off);
2470
2471 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2472 while (num_blocks > 0) {
2473 ret = skl_tplg_get_desc_blocks(dev, array);
2474
2475 if (ret < 0)
2476 return ret;
2477 block_type = ret;
2478 off += array->size;
2479
2480 array = (struct snd_soc_tplg_vendor_array *)
2481 (manifest->priv.data + off);
2482
2483 ret = skl_tplg_get_desc_blocks(dev, array);
2484
2485 if (ret < 0)
2486 return ret;
2487 block_size = ret;
2488 off += array->size;
2489
2490 array = (struct snd_soc_tplg_vendor_array *)
2491 (manifest->priv.data + off);
2492
2493 data = (manifest->priv.data + off);
2494
2495 if (block_type == SKL_TYPE_TUPLE) {
2496 ret = skl_tplg_get_manifest_tkn(dev, data, skl,
2497 block_size);
2498
2499 if (ret < 0)
2500 return ret;
2501
2502 --num_blocks;
2503 } else {
2504 return -EINVAL;
2505 }
2506 }
2507
2508 return 0;
2509}
2510
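/*
 * Topology manifest load callback: parse the library tokens from the
 * manifest private data and validate the resulting library count.
 */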
2511static int skl_manifest_load(struct snd_soc_component *cmpnt,
2512 struct snd_soc_tplg_manifest *manifest)
2513{
2514 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2515 struct hdac_bus *bus = ebus_to_hbus(ebus);
2516 struct skl *skl = ebus_to_skl(ebus);
2517
2518 /* proceed only if we have private data defined */
2519 if (manifest->priv.size == 0)
2520 return 0;
2521
2522 skl_tplg_get_manifest_data(manifest, bus->dev, skl);
2523
2524 if (skl->skl_sst->lib_count > SKL_MAX_LIB) {
2525 dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
2526 skl->skl_sst->lib_count);
2527 return -EINVAL;
2528 }
2529
2530 return 0;
2531}
2532
2533static struct snd_soc_tplg_ops skl_tplg_ops = {
2534 .widget_load = skl_tplg_widget_load,
2535 .control_load = skl_tplg_control_load,
2536 .bytes_ext_ops = skl_tlv_ops,
2537 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
15ecaba9 2538 .manifest = skl_manifest_load,
2539};
2540
2541/*
2542 * A pipe can have multiple modules, each of which will be a DAPM widget as
2543 * well. While managing a pipeline we need the list of all the widgets in
2544 * that pipeline, so this helper - skl_tplg_create_pipe_widget_list() -
2545 * collects the SKL type widgets in it
2546 */
2547static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
2548{
2549 struct snd_soc_dapm_widget *w;
2550 struct skl_module_cfg *mcfg = NULL;
2551 struct skl_pipe_module *p_module = NULL;
2552 struct skl_pipe *pipe;
2553
2554 list_for_each_entry(w, &platform->component.card->widgets, list) {
2555 if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
2556 mcfg = w->priv;
2557 pipe = mcfg->pipe;
2558
2559 p_module = devm_kzalloc(platform->dev,
2560 sizeof(*p_module), GFP_KERNEL);
2561 if (!p_module)
2562 return -ENOMEM;
2563
2564 p_module->w = w;
2565 list_add_tail(&p_module->node, &pipe->w_list);
2566 }
2567 }
2568
2569 return 0;
2570}
2571
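/*
 * A pipe is marked as passthru when it contains both a host DMA copier
 * (SKL_DEVICE_HDAHOST) and a link side module, i.e. host and link are
 * handled within the same pipeline.
 */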
2572static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
2573{
2574 struct skl_pipe_module *w_module;
2575 struct snd_soc_dapm_widget *w;
2576 struct skl_module_cfg *mconfig;
2577 bool host_found = false, link_found = false;
2578
2579 list_for_each_entry(w_module, &pipe->w_list, node) {
2580 w = w_module->w;
2581 mconfig = w->priv;
2582
2583 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
2584 host_found = true;
2585 else if (mconfig->dev_type != SKL_DEVICE_NONE)
2586 link_found = true;
2587 }
2588
2589 if (host_found && link_found)
2590 pipe->passthru = true;
2591 else
2592 pipe->passthru = false;
2593}
2594
2595/* This will be read from topology manifest, currently defined here */
2596#define SKL_MAX_MCPS 30000000
2597#define SKL_FW_MAX_MEM 1000000
2598
2599/*
2600 * SKL topology init routine
2601 */
2602int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
2603{
2604 int ret;
2605 const struct firmware *fw;
2606 struct hdac_bus *bus = ebus_to_hbus(ebus);
2607 struct skl *skl = ebus_to_skl(ebus);
2608 struct skl_pipeline *ppl;
2609
2610 ret = request_firmware(&fw, skl->tplg_name, bus->dev);
2611 if (ret < 0) {
2612 dev_err(bus->dev, "tplg fw %s load failed with %d\n",
2613 skl->tplg_name, ret);
2614 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
2615 if (ret < 0) {
2616 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
2617 "dfw_sst.bin", ret);
2618 return ret;
2619 }
2620 }
2621
2622 /*
2623 * The complete tplg for SKL is loaded as index 0, we don't use
2624 * any other index
2625 */
2626 ret = snd_soc_tplg_component_load(&platform->component,
2627 &skl_tplg_ops, fw, 0);
2628 if (ret < 0) {
2629 dev_err(bus->dev, "tplg component load failed%d\n", ret);
2630 release_firmware(fw);
2631 return -EINVAL;
2632 }
2633
2634 skl->resource.max_mcps = SKL_MAX_MCPS;
2635 skl->resource.max_mem = SKL_FW_MAX_MEM;
2636
2637 skl->tplg = fw;
2638 ret = skl_tplg_create_pipe_widget_list(platform);
2639 if (ret < 0)
2640 return ret;
2641
2642 list_for_each_entry(ppl, &skl->ppl_list, node)
2643 skl_tplg_set_pipe_type(skl, ppl->pipe);
2644
2645 return 0;
2646}