/*
 * soundwire: amd: AMD SoundWire manager driver
 * drivers/soundwire/amd_manager.c
 */
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * SoundWire AMD Manager driver
4 *
5 * Copyright 2023 Advanced Micro Devices, Inc.
6 */
7
8#include <linux/completion.h>
9#include <linux/device.h>
10#include <linux/io.h>
11#include <linux/jiffies.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/soundwire/sdw.h>
16#include <linux/soundwire/sdw_registers.h>
17#include <linux/wait.h>
18#include <sound/pcm_params.h>
19#include <sound/soc.h>
20#include "bus.h"
21#include "amd_manager.h"
22
23#define DRV_NAME "amd_sdw_manager"
24
25#define to_amd_sdw(b) container_of(b, struct amd_sdw_manager, bus)
26
/*
 * Enable the SoundWire pad keeper and program the pad pulldown control
 * for this manager instance.  Both registers live in the shared ACP MMIO
 * space, so the ACP-wide lock is held across both read-modify-write
 * sequences.
 */
static void amd_enable_sdw_pads(struct amd_sdw_manager *amd_manager)
{
	u32 sw_pad_pulldown_val;
	u32 val;

	mutex_lock(amd_manager->acp_sdw_lock);
	val = readl(amd_manager->acp_mmio + ACP_SW_PAD_KEEPER_EN);
	val |= amd_manager->reg_mask->sw_pad_enable_mask;
	writel(val, amd_manager->acp_mmio + ACP_SW_PAD_KEEPER_EN);
	/* let the pad keeper enable settle before touching the pulldowns */
	usleep_range(1000, 1500);

	sw_pad_pulldown_val = readl(amd_manager->acp_mmio + ACP_PAD_PULLDOWN_CTRL);
	sw_pad_pulldown_val &= amd_manager->reg_mask->sw_pad_pulldown_mask;
	writel(sw_pad_pulldown_val, amd_manager->acp_mmio + ACP_PAD_PULLDOWN_CTRL);
	mutex_unlock(amd_manager->acp_sdw_lock);
}
43
/*
 * Bring the manager into a known-clean state: enable it, run the bus
 * reset request/done/clear handshake, then disable it again.  Returns 0
 * on success or the readl_poll_timeout() error code.
 */
static int amd_init_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	u32 val;
	int ret;

	writel(AMD_SDW_ENABLE, amd_manager->mmio + ACP_SW_EN);
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, val, ACP_DELAY_US,
				 AMD_SDW_TIMEOUT);
	if (ret)
		return ret;

	/* SoundWire manager bus reset */
	writel(AMD_SDW_BUS_RESET_REQ, amd_manager->mmio + ACP_SW_BUS_RESET_CTRL);
	/* wait until the controller acknowledges the reset request */
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_BUS_RESET_CTRL, val,
				 (val & AMD_SDW_BUS_RESET_DONE), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret)
		return ret;

	/* clear the reset request and wait for the register to read back zero */
	writel(AMD_SDW_BUS_RESET_CLEAR_REQ, amd_manager->mmio + ACP_SW_BUS_RESET_CTRL);
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_BUS_RESET_CTRL, val, !val,
				 ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "Failed to reset SoundWire manager instance%d\n",
			amd_manager->instance);
		return ret;
	}

	/* leave the manager disabled; it is re-enabled later when needed */
	writel(AMD_SDW_DISABLE, amd_manager->mmio + ACP_SW_EN);
	return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, !val, ACP_DELAY_US,
				  AMD_SDW_TIMEOUT);
}
75
/* Enable the manager and wait for the enable-status register to assert. */
static int amd_enable_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	u32 val;

	writel(AMD_SDW_ENABLE, amd_manager->mmio + ACP_SW_EN);
	return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, val, ACP_DELAY_US,
				  AMD_SDW_TIMEOUT);
}
84
/*
 * Disable the manager.  If a clock-stop sequence has already executed
 * (ACP_SW_CLK_RESUME_CTRL reads non-zero), the enable-status register
 * must not be polled; otherwise wait for the status to deassert.
 */
static int amd_disable_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	u32 val;

	writel(AMD_SDW_DISABLE, amd_manager->mmio + ACP_SW_EN);
	/*
	 * After invoking manager disable sequence, check whether
	 * manager has executed clock stop sequence. In this case,
	 * manager should ignore checking enable status register.
	 */
	val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
	if (val)
		return 0;
	return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, !val, ACP_DELAY_US,
				  AMD_SDW_TIMEOUT);
}
101
/*
 * Unmask SoundWire interrupts: set this instance's bits in the shared
 * ACP external interrupt control register (under the ACP-wide lock),
 * then open the manager-local state-change and error interrupt masks.
 */
static void amd_enable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
{
	struct sdw_manager_reg_mask *reg_mask = amd_manager->reg_mask;
	u32 val;

	mutex_lock(amd_manager->acp_sdw_lock);
	val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
	val |= reg_mask->acp_sdw_intr_mask;
	writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
	mutex_unlock(amd_manager->acp_sdw_lock);

	writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
	       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
	writel(AMD_SDW_IRQ_MASK_8TO11, amd_manager->mmio +
	       ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
	writel(AMD_SDW_IRQ_ERROR_MASK, amd_manager->mmio + ACP_SW_ERROR_INTR_MASK);
}
119
/*
 * Mask SoundWire interrupts: clear this instance's bits in the shared
 * ACP external interrupt control register (under the ACP-wide lock),
 * then zero the manager-local state-change and error interrupt masks.
 */
static void amd_disable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
{
	struct sdw_manager_reg_mask *reg_mask = amd_manager->reg_mask;
	u32 val;

	mutex_lock(amd_manager->acp_sdw_lock);
	val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
	val &= ~reg_mask->acp_sdw_intr_mask;
	writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
	mutex_unlock(amd_manager->acp_sdw_lock);

	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
	writel(0x00, amd_manager->mmio + ACP_SW_ERROR_INTR_MASK);
}
135
136static void amd_sdw_set_frameshape(struct amd_sdw_manager *amd_manager)
137{
138 u32 frame_size;
139
140 frame_size = (amd_manager->rows_index << 3) | amd_manager->cols_index;
141 writel(frame_size, amd_manager->mmio + ACP_SW_FRAMESIZE);
142}
143
144static void amd_sdw_ctl_word_prep(u32 *lower_word, u32 *upper_word, struct sdw_msg *msg,
145 int cmd_offset)
146{
147 u32 upper_data;
148 u32 lower_data = 0;
149 u16 addr;
150 u8 upper_addr, lower_addr;
151 u8 data = 0;
152
153 addr = msg->addr + cmd_offset;
154 upper_addr = (addr & 0xFF00) >> 8;
155 lower_addr = addr & 0xFF;
156
157 if (msg->flags == SDW_MSG_FLAG_WRITE)
158 data = msg->buf[cmd_offset];
159
160 upper_data = FIELD_PREP(AMD_SDW_MCP_CMD_DEV_ADDR, msg->dev_num);
161 upper_data |= FIELD_PREP(AMD_SDW_MCP_CMD_COMMAND, msg->flags + 2);
162 upper_data |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_ADDR_HIGH, upper_addr);
163 lower_data |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_ADDR_LOW, lower_addr);
164 lower_data |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_DATA, data);
165
166 *upper_word = upper_data;
167 *lower_word = lower_data;
168}
169
/*
 * Issue one immediate command (upper/lower command words) and return the
 * 64-bit response: upper response word in bits 63:32, lower word in
 * bits 31:0.
 *
 * NOTE(review): on poll timeout this returns the negative errno widened
 * to u64; callers detect it by comparing the result against -ETIMEDOUT.
 */
static u64 amd_sdw_send_cmd_get_resp(struct amd_sdw_manager *amd_manager, u32 lower_data,
				     u32 upper_data)
{
	u64 resp;
	u32 lower_resp, upper_resp;
	u32 sts;
	int ret;

	/* wait for any previous immediate command to finish */
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
				 !(sts & AMD_SDW_IMM_CMD_BUSY), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x previous cmd status clear failed\n",
			amd_manager->instance);
		return ret;
	}

	/* a stale response-valid flag indicates a bad state; clear it */
	if (sts & AMD_SDW_IMM_RES_VALID) {
		dev_err(amd_manager->dev, "SDW%x manager is in bad state\n", amd_manager->instance);
		writel(0x00, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
	}
	writel(upper_data, amd_manager->mmio + ACP_SW_IMM_CMD_UPPER_WORD);
	writel(lower_data, amd_manager->mmio + ACP_SW_IMM_CMD_LOWER_QWORD);

	/* wait for the response to become valid */
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
				 (sts & AMD_SDW_IMM_RES_VALID), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x cmd response timeout occurred\n",
			amd_manager->instance);
		return ret;
	}
	upper_resp = readl(amd_manager->mmio + ACP_SW_IMM_RESP_UPPER_WORD);
	lower_resp = readl(amd_manager->mmio + ACP_SW_IMM_RESP_LOWER_QWORD);

	/* acknowledge the response and wait for the valid flag to clear */
	writel(AMD_SDW_IMM_RES_VALID, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
				 !(sts & AMD_SDW_IMM_RES_VALID), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x cmd status retry failed\n",
			amd_manager->instance);
		return ret;
	}
	resp = upper_resp;
	resp = (resp << 32) | lower_resp;
	return resp;
}
215
/*
 * Program SCP_AddrPage1/2 on the peripheral so subsequent commands
 * address the paged register space.  Both page registers are written
 * first, then both responses are checked; the first timeout / NACK /
 * ignore found determines the returned command status.
 */
static enum sdw_command_response
amd_program_scp_addr(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg)
{
	struct sdw_msg scp_msg = {0};
	u64 response_buf[2] = {0};
	u32 upper_data = 0, lower_data = 0;
	int index;

	scp_msg.dev_num = msg->dev_num;
	scp_msg.addr = SDW_SCP_ADDRPAGE1;
	scp_msg.buf = &msg->addr_page1;
	scp_msg.flags = SDW_MSG_FLAG_WRITE;
	amd_sdw_ctl_word_prep(&lower_data, &upper_data, &scp_msg, 0);
	response_buf[0] = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);
	/* reuse the same message for the second address page */
	scp_msg.addr = SDW_SCP_ADDRPAGE2;
	scp_msg.buf = &msg->addr_page2;
	amd_sdw_ctl_word_prep(&lower_data, &upper_data, &scp_msg, 0);
	response_buf[1] = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);

	for (index = 0; index < 2; index++) {
		if (response_buf[index] == -ETIMEDOUT) {
			dev_err_ratelimited(amd_manager->dev,
					    "SCP_addrpage command timeout for Slave %d\n",
					    msg->dev_num);
			return SDW_CMD_TIMEOUT;
		} else if (!(response_buf[index] & AMD_SDW_MCP_RESP_ACK)) {
			if (response_buf[index] & AMD_SDW_MCP_RESP_NACK) {
				dev_err_ratelimited(amd_manager->dev,
						    "SCP_addrpage NACKed for Slave %d\n",
						    msg->dev_num);
				return SDW_CMD_FAIL;
			}
			dev_dbg_ratelimited(amd_manager->dev, "SCP_addrpage ignored for Slave %d\n",
					    msg->dev_num);
			return SDW_CMD_IGNORED;
		}
	}
	return SDW_CMD_OK;
}
255
256static int amd_prep_msg(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg)
257{
258 int ret;
259
260 if (msg->page) {
261 ret = amd_program_scp_addr(amd_manager, msg);
262 if (ret) {
263 msg->len = 0;
264 return ret;
265 }
266 }
267 switch (msg->flags) {
268 case SDW_MSG_FLAG_READ:
269 case SDW_MSG_FLAG_WRITE:
270 break;
271 default:
272 dev_err(amd_manager->dev, "Invalid msg cmd: %d\n", msg->flags);
273 return -EINVAL;
274 }
275 return 0;
276}
277
278static enum sdw_command_response amd_sdw_fill_msg_resp(struct amd_sdw_manager *amd_manager,
279 struct sdw_msg *msg, u64 response,
280 int offset)
281{
282 if (response & AMD_SDW_MCP_RESP_ACK) {
283 if (msg->flags == SDW_MSG_FLAG_READ)
284 msg->buf[offset] = FIELD_GET(AMD_SDW_MCP_RESP_RDATA, response);
285 } else {
286 if (response == -ETIMEDOUT) {
287 dev_err_ratelimited(amd_manager->dev, "command timeout for Slave %d\n",
288 msg->dev_num);
289 return SDW_CMD_TIMEOUT;
290 } else if (response & AMD_SDW_MCP_RESP_NACK) {
291 dev_err_ratelimited(amd_manager->dev,
292 "command response NACK received for Slave %d\n",
293 msg->dev_num);
294 return SDW_CMD_FAIL;
295 }
296 dev_err_ratelimited(amd_manager->dev, "command is ignored for Slave %d\n",
297 msg->dev_num);
298 return SDW_CMD_IGNORED;
299 }
300 return SDW_CMD_OK;
301}
302
303static unsigned int _amd_sdw_xfer_msg(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg,
304 int cmd_offset)
305{
306 u64 response;
307 u32 upper_data = 0, lower_data = 0;
308
309 amd_sdw_ctl_word_prep(&lower_data, &upper_data, msg, cmd_offset);
310 response = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);
311 return amd_sdw_fill_msg_resp(amd_manager, msg, response, cmd_offset);
312}
313
314static enum sdw_command_response amd_sdw_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
315{
316 struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
317 int ret, i;
318
319 ret = amd_prep_msg(amd_manager, msg);
320 if (ret)
321 return SDW_CMD_FAIL_OTHER;
322 for (i = 0; i < msg->len; i++) {
323 ret = _amd_sdw_xfer_msg(amd_manager, msg, i);
324 if (ret)
325 return ret;
326 }
327 return SDW_CMD_OK;
328}
329
/*
 * Send an all-zero (ping) command and pack the peripheral status fields
 * from the ping response into a single bitmap: devices 0-3 from the
 * low field, devices 4-11 shifted in above them.
 */
static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	u64 response;
	u32 slave_stat;

	response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
	/* slave status from ping response */
	slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
	dev_dbg(amd_manager->dev, "slave_stat:0x%x\n", slave_stat);
	return slave_stat;
}
343
/*
 * Compute transport and port parameters for every master runtime on the
 * bus.  Each port gets a fixed block offset derived from its port number
 * (port_bo = num * 64 + 1) and a horizontal window spanning the frame
 * (columns 1 .. col-1); the results are then propagated to the
 * peripheral ports via sdw_compute_slave_ports().
 */
static int amd_sdw_compute_params(struct sdw_bus *bus)
{
	struct sdw_transport_data t_data = {0};
	struct sdw_master_runtime *m_rt;
	struct sdw_port_runtime *p_rt;
	struct sdw_bus_params *b_params = &bus->params;
	int port_bo, hstart, hstop, sample_int;
	unsigned int rate, bps;

	port_bo = 0;
	hstart = 1;
	hstop = bus->params.col - 1;
	t_data.hstop = hstop;
	t_data.hstart = hstart;

	list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
		rate = m_rt->stream->params.rate;
		bps = m_rt->stream->params.bps;
		/* sample interval in bus clocks for this stream rate */
		sample_int = (bus->params.curr_dr_freq / rate);
		list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
			/* fixed per-port block offset */
			port_bo = (p_rt->num * 64) + 1;
			dev_dbg(bus->dev, "p_rt->num=%d hstart=%d hstop=%d port_bo=%d\n",
				p_rt->num, hstart, hstop, port_bo);
			sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
					      false, SDW_BLK_GRP_CNT_1, sample_int,
					      port_bo, port_bo >> 8, hstart, hstop,
					      SDW_BLK_PKG_PER_PORT, 0x0);

			sdw_fill_port_params(&p_rt->port_params,
					     p_rt->num, bps,
					     SDW_PORT_FLOW_MODE_ISOCH,
					     b_params->m_data_mode);
			t_data.hstart = hstart;
			t_data.hstop = hstop;
			t_data.block_offset = port_bo;
			t_data.sub_block_offset = 0;
		}
		sdw_compute_slave_ports(m_rt, &t_data);
	}
	return 0;
}
385
/*
 * Program the per-port frame format (flow mode, data mode, word length).
 * The register offset depends on which manager instance owns the port.
 */
static int amd_sdw_port_params(struct sdw_bus *bus, struct sdw_port_params *p_params,
			       unsigned int bank)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	u32 frame_fmt_reg, dpn_frame_fmt;

	dev_dbg(amd_manager->dev, "p_params->num:0x%x\n", p_params->num);
	switch (amd_manager->instance) {
	case ACP_SDW0:
		frame_fmt_reg = sdw0_manager_dp_reg[p_params->num].frame_fmt_reg;
		break;
	case ACP_SDW1:
		frame_fmt_reg = sdw1_manager_dp_reg[p_params->num].frame_fmt_reg;
		break;
	default:
		return -EINVAL;
	}

	dpn_frame_fmt = readl(amd_manager->mmio + frame_fmt_reg);
	u32p_replace_bits(&dpn_frame_fmt, p_params->flow_mode, AMD_DPN_FRAME_FMT_PFM);
	u32p_replace_bits(&dpn_frame_fmt, p_params->data_mode, AMD_DPN_FRAME_FMT_PDM);
	/* hardware encodes word length as bits-per-sample minus one */
	u32p_replace_bits(&dpn_frame_fmt, p_params->bps - 1, AMD_DPN_FRAME_FMT_WORD_LEN);
	writel(dpn_frame_fmt, amd_manager->mmio + frame_fmt_reg);
	return 0;
}
411
/*
 * Program per-port transport parameters: block packing/grouping, sample
 * interval, hstart/hstop window, block offsets and lane control.
 * Register offsets are selected by manager instance and port number.
 */
static int amd_sdw_transport_params(struct sdw_bus *bus,
				    struct sdw_transport_params *params,
				    enum sdw_reg_bank bank)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	u32 dpn_frame_fmt;
	u32 dpn_sampleinterval;
	u32 dpn_hctrl;
	u32 dpn_offsetctrl;
	u32 dpn_lanectrl;
	u32 frame_fmt_reg, sample_int_reg, hctrl_dp0_reg;
	u32 offset_reg, lane_ctrl_ch_en_reg;

	switch (amd_manager->instance) {
	case ACP_SDW0:
		frame_fmt_reg = sdw0_manager_dp_reg[params->port_num].frame_fmt_reg;
		sample_int_reg = sdw0_manager_dp_reg[params->port_num].sample_int_reg;
		hctrl_dp0_reg = sdw0_manager_dp_reg[params->port_num].hctrl_dp0_reg;
		offset_reg = sdw0_manager_dp_reg[params->port_num].offset_reg;
		lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg;
		break;
	case ACP_SDW1:
		frame_fmt_reg = sdw1_manager_dp_reg[params->port_num].frame_fmt_reg;
		sample_int_reg = sdw1_manager_dp_reg[params->port_num].sample_int_reg;
		hctrl_dp0_reg = sdw1_manager_dp_reg[params->port_num].hctrl_dp0_reg;
		offset_reg = sdw1_manager_dp_reg[params->port_num].offset_reg;
		lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg;
		break;
	default:
		return -EINVAL;
	}
	writel(AMD_SDW_SSP_COUNTER_VAL, amd_manager->mmio + ACP_SW_SSP_COUNTER);

	dpn_frame_fmt = readl(amd_manager->mmio + frame_fmt_reg);
	u32p_replace_bits(&dpn_frame_fmt, params->blk_pkg_mode, AMD_DPN_FRAME_FMT_BLK_PKG_MODE);
	u32p_replace_bits(&dpn_frame_fmt, params->blk_grp_ctrl, AMD_DPN_FRAME_FMT_BLK_GRP_CTRL);
	u32p_replace_bits(&dpn_frame_fmt, SDW_STREAM_PCM, AMD_DPN_FRAME_FMT_PCM_OR_PDM);
	writel(dpn_frame_fmt, amd_manager->mmio + frame_fmt_reg);

	/* hardware encodes the sample interval as interval minus one */
	dpn_sampleinterval = params->sample_interval - 1;
	writel(dpn_sampleinterval, amd_manager->mmio + sample_int_reg);

	dpn_hctrl = FIELD_PREP(AMD_DPN_HCTRL_HSTOP, params->hstop);
	dpn_hctrl |= FIELD_PREP(AMD_DPN_HCTRL_HSTART, params->hstart);
	writel(dpn_hctrl, amd_manager->mmio + hctrl_dp0_reg);

	dpn_offsetctrl = FIELD_PREP(AMD_DPN_OFFSET_CTRL_1, params->offset1);
	dpn_offsetctrl |= FIELD_PREP(AMD_DPN_OFFSET_CTRL_2, params->offset2);
	writel(dpn_offsetctrl, amd_manager->mmio + offset_reg);

	/*
	 * lane_ctrl_ch_en_reg will be used to program lane_ctrl and ch_mask
	 * parameters.
	 */
	dpn_lanectrl = readl(amd_manager->mmio + lane_ctrl_ch_en_reg);
	u32p_replace_bits(&dpn_lanectrl, params->lane_ctrl, AMD_DPN_CH_EN_LCTRL);
	writel(dpn_lanectrl, amd_manager->mmio + lane_ctrl_ch_en_reg);
	return 0;
}
471
472static int amd_sdw_port_enable(struct sdw_bus *bus,
473 struct sdw_enable_ch *enable_ch,
474 unsigned int bank)
475{
476 struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
477 u32 dpn_ch_enable;
478 u32 lane_ctrl_ch_en_reg;
479
480 switch (amd_manager->instance) {
481 case ACP_SDW0:
482 lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg;
483 break;
484 case ACP_SDW1:
485 lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg;
486 break;
487 default:
488 return -EINVAL;
489 }
490
491 /*
492 * lane_ctrl_ch_en_reg will be used to program lane_ctrl and ch_mask
493 * parameters.
494 */
495 dpn_ch_enable = readl(amd_manager->mmio + lane_ctrl_ch_en_reg);
496 u32p_replace_bits(&dpn_ch_enable, enable_ch->ch_mask, AMD_DPN_CH_EN_CHMASK);
497 if (enable_ch->enable)
498 writel(dpn_ch_enable, amd_manager->mmio + lane_ctrl_ch_en_reg);
499 else
500 writel(0, amd_manager->mmio + lane_ctrl_ch_en_reg);
501 return 0;
502}
503
504static int sdw_master_read_amd_prop(struct sdw_bus *bus)
505{
506 struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
507 struct fwnode_handle *link;
508 struct sdw_master_prop *prop;
509 u32 quirk_mask = 0;
510 u32 wake_en_mask = 0;
511 u32 power_mode_mask = 0;
512 char name[32];
513
514 prop = &bus->prop;
515 /* Find manager handle */
516 snprintf(name, sizeof(name), "mipi-sdw-link-%d-subproperties", bus->link_id);
517 link = device_get_named_child_node(bus->dev, name);
518 if (!link) {
519 dev_err(bus->dev, "Manager node %s not found\n", name);
520 return -EIO;
521 }
522 fwnode_property_read_u32(link, "amd-sdw-enable", &quirk_mask);
523 if (!(quirk_mask & AMD_SDW_QUIRK_MASK_BUS_ENABLE))
524 prop->hw_disabled = true;
525 prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH |
526 SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;
527
528 fwnode_property_read_u32(link, "amd-sdw-wakeup-enable", &wake_en_mask);
529 amd_manager->wake_en_mask = wake_en_mask;
530 fwnode_property_read_u32(link, "amd-sdw-power-mode", &power_mode_mask);
531 amd_manager->power_mode_mask = power_mode_mask;
532 return 0;
533}
534
/*
 * Bus read_prop callback: read the generic MIPI DisCo properties, then
 * the AMD-specific ones.  The original code discarded the return value
 * of sdw_master_read_amd_prop(), silently ignoring a missing firmware
 * link node (-EIO); propagate it so the failure is visible to the core.
 */
static int amd_prop_read(struct sdw_bus *bus)
{
	sdw_master_read_prop(bus);
	return sdw_master_read_amd_prop(bus);
}
541
/* Port callbacks invoked by the SoundWire core during stream setup. */
static const struct sdw_master_port_ops amd_sdw_port_ops = {
	.dpn_set_port_params = amd_sdw_port_params,
	.dpn_set_port_transport_params = amd_sdw_transport_params,
	.dpn_port_enable_ch = amd_sdw_port_enable,
};
547
/* Bus-level callbacks: property read, message transfer and ping status. */
static const struct sdw_master_ops amd_sdw_ops = {
	.read_prop = amd_prop_read,
	.xfer_msg = amd_sdw_xfer_msg,
	.read_ping_status = amd_sdw_read_ping_status,
};
553
2b13596f
VM
554static int amd_sdw_hw_params(struct snd_pcm_substream *substream,
555 struct snd_pcm_hw_params *params,
556 struct snd_soc_dai *dai)
557{
558 struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
559 struct sdw_amd_dai_runtime *dai_runtime;
560 struct sdw_stream_config sconfig;
561 struct sdw_port_config *pconfig;
562 int ch, dir;
563 int ret;
564
565 dai_runtime = amd_manager->dai_runtime_array[dai->id];
566 if (!dai_runtime)
567 return -EIO;
568
569 ch = params_channels(params);
570 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
571 dir = SDW_DATA_DIR_RX;
572 else
573 dir = SDW_DATA_DIR_TX;
574 dev_dbg(amd_manager->dev, "dir:%d dai->id:0x%x\n", dir, dai->id);
575
576 sconfig.direction = dir;
577 sconfig.ch_count = ch;
578 sconfig.frame_rate = params_rate(params);
579 sconfig.type = dai_runtime->stream_type;
580
581 sconfig.bps = snd_pcm_format_width(params_format(params));
582
583 /* Port configuration */
584 pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
585 if (!pconfig) {
586 ret = -ENOMEM;
587 goto error;
588 }
589
590 pconfig->num = dai->id;
591 pconfig->ch_mask = (1 << ch) - 1;
592 ret = sdw_stream_add_master(&amd_manager->bus, &sconfig,
593 pconfig, 1, dai_runtime->stream);
594 if (ret)
595 dev_err(amd_manager->dev, "add manager to stream failed:%d\n", ret);
596
597 kfree(pconfig);
598error:
599 return ret;
600}
601
602static int amd_sdw_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
603{
604 struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
605 struct sdw_amd_dai_runtime *dai_runtime;
606 int ret;
607
608 dai_runtime = amd_manager->dai_runtime_array[dai->id];
609 if (!dai_runtime)
610 return -EIO;
611
612 ret = sdw_stream_remove_master(&amd_manager->bus, dai_runtime->stream);
613 if (ret < 0)
614 dev_err(dai->dev, "remove manager from stream %s failed: %d\n",
615 dai_runtime->stream->name, ret);
616 return ret;
617}
618
619static int amd_set_sdw_stream(struct snd_soc_dai *dai, void *stream, int direction)
620{
621 struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
622 struct sdw_amd_dai_runtime *dai_runtime;
623
624 dai_runtime = amd_manager->dai_runtime_array[dai->id];
625 if (stream) {
626 /* first paranoia check */
627 if (dai_runtime) {
628 dev_err(dai->dev, "dai_runtime already allocated for dai %s\n", dai->name);
629 return -EINVAL;
630 }
631
632 /* allocate and set dai_runtime info */
633 dai_runtime = kzalloc(sizeof(*dai_runtime), GFP_KERNEL);
634 if (!dai_runtime)
635 return -ENOMEM;
636
637 dai_runtime->stream_type = SDW_STREAM_PCM;
638 dai_runtime->bus = &amd_manager->bus;
639 dai_runtime->stream = stream;
640 amd_manager->dai_runtime_array[dai->id] = dai_runtime;
641 } else {
642 /* second paranoia check */
643 if (!dai_runtime) {
644 dev_err(dai->dev, "dai_runtime not allocated for dai %s\n", dai->name);
645 return -EINVAL;
646 }
647
648 /* for NULL stream we release allocated dai_runtime */
649 kfree(dai_runtime);
650 amd_manager->dai_runtime_array[dai->id] = NULL;
651 }
652 return 0;
653}
654
/* PCM-flavoured set_stream wrapper; delegates to amd_set_sdw_stream(). */
static int amd_pcm_set_sdw_stream(struct snd_soc_dai *dai, void *stream, int direction)
{
	return amd_set_sdw_stream(dai, stream, direction);
}
659
660static void *amd_get_sdw_stream(struct snd_soc_dai *dai, int direction)
661{
662 struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
663 struct sdw_amd_dai_runtime *dai_runtime;
664
665 dai_runtime = amd_manager->dai_runtime_array[dai->id];
666 if (!dai_runtime)
667 return ERR_PTR(-EINVAL);
668
669 return dai_runtime->stream;
670}
671
/* DAI operations shared by every SDW pin registered by this driver. */
static const struct snd_soc_dai_ops amd_sdw_dai_ops = {
	.hw_params = amd_sdw_hw_params,
	.hw_free = amd_sdw_hw_free,
	.set_stream = amd_pcm_set_sdw_stream,
	.get_stream = amd_get_sdw_stream,
};
678
/* Minimal ASoC component; only supplies a name for the registered DAIs. */
static const struct snd_soc_component_driver amd_sdw_dai_component = {
	.name = "soundwire",
};
682
683static int amd_sdw_register_dais(struct amd_sdw_manager *amd_manager)
684{
685 struct sdw_amd_dai_runtime **dai_runtime_array;
686 struct snd_soc_dai_driver *dais;
687 struct snd_soc_pcm_stream *stream;
688 struct device *dev;
689 int i, num_dais;
690
691 dev = amd_manager->dev;
692 num_dais = amd_manager->num_dout_ports + amd_manager->num_din_ports;
693 dais = devm_kcalloc(dev, num_dais, sizeof(*dais), GFP_KERNEL);
694 if (!dais)
695 return -ENOMEM;
696
697 dai_runtime_array = devm_kcalloc(dev, num_dais,
698 sizeof(struct sdw_amd_dai_runtime *),
699 GFP_KERNEL);
700 if (!dai_runtime_array)
701 return -ENOMEM;
702 amd_manager->dai_runtime_array = dai_runtime_array;
703 for (i = 0; i < num_dais; i++) {
704 dais[i].name = devm_kasprintf(dev, GFP_KERNEL, "SDW%d Pin%d", amd_manager->instance,
705 i);
706 if (!dais[i].name)
707 return -ENOMEM;
708 if (i < amd_manager->num_dout_ports)
709 stream = &dais[i].playback;
710 else
711 stream = &dais[i].capture;
712
713 stream->channels_min = 2;
714 stream->channels_max = 2;
715 stream->rates = SNDRV_PCM_RATE_48000;
716 stream->formats = SNDRV_PCM_FMTBIT_S16_LE;
717
718 dais[i].ops = &amd_sdw_dai_ops;
719 dais[i].id = i;
720 }
721
722 return devm_snd_soc_register_component(dev, &amd_sdw_dai_component,
723 dais, num_dais);
724}
725
d8f48fbd
VM
/*
 * Deferred probe work: runs the lengthy hardware init sequence (pad
 * enable, manager reset/init, interrupt unmask, enable, frame shape)
 * outside of probe context.  Skipped entirely when the link is marked
 * hw_disabled; intermediate failures abort the sequence without
 * logging (the bus was already registered in probe).
 */
static void amd_sdw_probe_work(struct work_struct *work)
{
	struct amd_sdw_manager *amd_manager = container_of(work, struct amd_sdw_manager,
							   probe_work);
	struct sdw_master_prop *prop;
	int ret;

	prop = &amd_manager->bus.prop;
	if (!prop->hw_disabled) {
		amd_enable_sdw_pads(amd_manager);
		ret = amd_init_sdw_manager(amd_manager);
		if (ret)
			return;
		amd_enable_sdw_interrupts(amd_manager);
		ret = amd_enable_sdw_manager(amd_manager);
		if (ret)
			return;
		amd_sdw_set_frameshape(amd_manager);
	}
}
746
747static int amd_sdw_manager_probe(struct platform_device *pdev)
748{
749 const struct acp_sdw_pdata *pdata = pdev->dev.platform_data;
750 struct resource *res;
751 struct device *dev = &pdev->dev;
752 struct sdw_master_prop *prop;
753 struct sdw_bus_params *params;
754 struct amd_sdw_manager *amd_manager;
755 int ret;
756
757 amd_manager = devm_kzalloc(dev, sizeof(struct amd_sdw_manager), GFP_KERNEL);
758 if (!amd_manager)
759 return -ENOMEM;
760
761 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
762 if (!res)
763 return -ENOMEM;
764
765 amd_manager->acp_mmio = devm_ioremap(dev, res->start, resource_size(res));
766 if (IS_ERR(amd_manager->mmio)) {
767 dev_err(dev, "mmio not found\n");
768 return PTR_ERR(amd_manager->mmio);
769 }
770 amd_manager->instance = pdata->instance;
771 amd_manager->mmio = amd_manager->acp_mmio +
772 (amd_manager->instance * SDW_MANAGER_REG_OFFSET);
773 amd_manager->acp_sdw_lock = pdata->acp_sdw_lock;
774 amd_manager->cols_index = sdw_find_col_index(AMD_SDW_DEFAULT_COLUMNS);
775 amd_manager->rows_index = sdw_find_row_index(AMD_SDW_DEFAULT_ROWS);
776 amd_manager->dev = dev;
777 amd_manager->bus.ops = &amd_sdw_ops;
778 amd_manager->bus.port_ops = &amd_sdw_port_ops;
779 amd_manager->bus.compute_params = &amd_sdw_compute_params;
780 amd_manager->bus.clk_stop_timeout = 200;
781 amd_manager->bus.link_id = amd_manager->instance;
782
783 switch (amd_manager->instance) {
784 case ACP_SDW0:
785 amd_manager->num_dout_ports = AMD_SDW0_MAX_TX_PORTS;
786 amd_manager->num_din_ports = AMD_SDW0_MAX_RX_PORTS;
787 break;
788 case ACP_SDW1:
789 amd_manager->num_dout_ports = AMD_SDW1_MAX_TX_PORTS;
790 amd_manager->num_din_ports = AMD_SDW1_MAX_RX_PORTS;
791 break;
792 default:
793 return -EINVAL;
794 }
795
796 amd_manager->reg_mask = &sdw_manager_reg_mask_array[amd_manager->instance];
797 params = &amd_manager->bus.params;
798 params->max_dr_freq = AMD_SDW_DEFAULT_CLK_FREQ * 2;
799 params->curr_dr_freq = AMD_SDW_DEFAULT_CLK_FREQ * 2;
800 params->col = AMD_SDW_DEFAULT_COLUMNS;
801 params->row = AMD_SDW_DEFAULT_ROWS;
802 prop = &amd_manager->bus.prop;
803 prop->clk_freq = &amd_sdw_freq_tbl[0];
804 prop->mclk_freq = AMD_SDW_BUS_BASE_FREQ;
805
806 ret = sdw_bus_master_add(&amd_manager->bus, dev, dev->fwnode);
807 if (ret) {
808 dev_err(dev, "Failed to register SoundWire manager(%d)\n", ret);
809 return ret;
810 }
2b13596f
VM
811 ret = amd_sdw_register_dais(amd_manager);
812 if (ret) {
813 dev_err(dev, "CPU DAI registration failed\n");
814 sdw_bus_master_delete(&amd_manager->bus);
815 return ret;
816 }
d8f48fbd
VM
817 dev_set_drvdata(dev, amd_manager);
818 INIT_WORK(&amd_manager->probe_work, amd_sdw_probe_work);
819 /*
820 * Instead of having lengthy probe sequence, use deferred probe.
821 */
822 schedule_work(&amd_manager->probe_work);
823 return 0;
824}
825
/*
 * Platform remove: stop any pending deferred probe work, mask all
 * interrupts, unregister the bus, then disable the manager hardware.
 */
static int amd_sdw_manager_remove(struct platform_device *pdev)
{
	struct amd_sdw_manager *amd_manager = dev_get_drvdata(&pdev->dev);

	cancel_work_sync(&amd_manager->probe_work);
	amd_disable_sdw_interrupts(amd_manager);
	sdw_bus_master_delete(&amd_manager->bus);
	return amd_disable_sdw_manager(amd_manager);
}
835
/* Platform driver glue; matched by name against the ACP parent's child device. */
static struct platform_driver amd_sdw_driver = {
	.probe = &amd_sdw_manager_probe,
	.remove = &amd_sdw_manager_remove,
	.driver = {
		.name = "amd_sdw_manager",
	}
};
module_platform_driver(amd_sdw_driver);

MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
MODULE_DESCRIPTION("AMD SoundWire driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);