Merge tag 'mm-stable-2023-05-03-16-22' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-block.git] / drivers / soundwire / amd_manager.c
CommitLineData
d8f48fbd
VM
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * SoundWire AMD Manager driver
4 *
5 * Copyright 2023 Advanced Micro Devices, Inc.
6 */
7
8#include <linux/completion.h>
9#include <linux/device.h>
10#include <linux/io.h>
11#include <linux/jiffies.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/soundwire/sdw.h>
16#include <linux/soundwire/sdw_registers.h>
81ff58ff 17#include <linux/pm_runtime.h>
d8f48fbd
VM
18#include <linux/wait.h>
19#include <sound/pcm_params.h>
20#include <sound/soc.h>
21#include "bus.h"
22#include "amd_manager.h"
23
24#define DRV_NAME "amd_sdw_manager"
25
26#define to_amd_sdw(b) container_of(b, struct amd_sdw_manager, bus)
27
/*
 * Enable the SoundWire pad keeper and update the pad pulldown control for
 * this manager instance. Both registers live in the ACP-common MMIO space
 * shared by the manager instances, hence the acp_sdw_lock.
 */
static void amd_enable_sdw_pads(struct amd_sdw_manager *amd_manager)
{
	u32 sw_pad_pulldown_val;
	u32 val;

	mutex_lock(amd_manager->acp_sdw_lock);
	/* Read-modify-write: set this instance's pad-keeper enable bits */
	val = readl(amd_manager->acp_mmio + ACP_SW_PAD_KEEPER_EN);
	val |= amd_manager->reg_mask->sw_pad_enable_mask;
	writel(val, amd_manager->acp_mmio + ACP_SW_PAD_KEEPER_EN);
	/* Settling delay after enabling the pads -- presumably a hardware
	 * requirement; value carried over from vendor code.
	 */
	usleep_range(1000, 1500);

	/* AND with the instance's pulldown mask; which pulldown bits survive
	 * is defined by sw_pad_pulldown_mask in amd_manager.h.
	 */
	sw_pad_pulldown_val = readl(amd_manager->acp_mmio + ACP_PAD_PULLDOWN_CTRL);
	sw_pad_pulldown_val &= amd_manager->reg_mask->sw_pad_pulldown_mask;
	writel(sw_pad_pulldown_val, amd_manager->acp_mmio + ACP_PAD_PULLDOWN_CTRL);
	mutex_unlock(amd_manager->acp_sdw_lock);
}
44
/*
 * Initialize the manager IP: enable it, run a full bus reset handshake
 * (request -> done -> clear request), then disable it again; the caller
 * re-enables the manager via amd_enable_sdw_manager() once interrupts are
 * set up. Returns 0 on success or the (negative) readl_poll_timeout()
 * error, typically -ETIMEDOUT.
 */
static int amd_init_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	u32 val;
	int ret;

	/* Enable the manager and wait for the status register to confirm */
	writel(AMD_SDW_ENABLE, amd_manager->mmio + ACP_SW_EN);
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, val, ACP_DELAY_US,
				 AMD_SDW_TIMEOUT);
	if (ret)
		return ret;

	/* SoundWire manager bus reset */
	writel(AMD_SDW_BUS_RESET_REQ, amd_manager->mmio + ACP_SW_BUS_RESET_CTRL);
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_BUS_RESET_CTRL, val,
				 (val & AMD_SDW_BUS_RESET_DONE), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret)
		return ret;

	/* Clear the reset request and wait for the register to go to zero */
	writel(AMD_SDW_BUS_RESET_CLEAR_REQ, amd_manager->mmio + ACP_SW_BUS_RESET_CTRL);
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_BUS_RESET_CTRL, val, !val,
				 ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "Failed to reset SoundWire manager instance%d\n",
			amd_manager->instance);
		return ret;
	}

	/* Leave the manager disabled; it is enabled later in the sequence */
	writel(AMD_SDW_DISABLE, amd_manager->mmio + ACP_SW_EN);
	return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, !val, ACP_DELAY_US,
				  AMD_SDW_TIMEOUT);
}
76
/*
 * Enable the manager IP and poll until the enable-status register
 * reflects it. Returns 0 or a negative readl_poll_timeout() error.
 */
static int amd_enable_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	u32 val;

	writel(AMD_SDW_ENABLE, amd_manager->mmio + ACP_SW_EN);
	return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, val, ACP_DELAY_US,
				  AMD_SDW_TIMEOUT);
}
85
/*
 * Disable the manager IP. Returns 0 on success (including the clock-stop
 * case below) or a negative readl_poll_timeout() error.
 */
static int amd_disable_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	u32 val;

	writel(AMD_SDW_DISABLE, amd_manager->mmio + ACP_SW_EN);
	/*
	 * After invoking manager disable sequence, check whether
	 * manager has executed clock stop sequence. In this case,
	 * manager should ignore checking enable status register.
	 */
	val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
	if (val)
		return 0;
	return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, !val, ACP_DELAY_US,
				  AMD_SDW_TIMEOUT);
}
102
/*
 * Unmask SoundWire interrupts: first the per-instance bits in the shared
 * ACP external interrupt control register (protected by acp_sdw_lock),
 * then the manager-local state-change and error interrupt masks.
 */
static void amd_enable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
{
	struct sdw_manager_reg_mask *reg_mask = amd_manager->reg_mask;
	u32 val;

	mutex_lock(amd_manager->acp_sdw_lock);
	val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
	val |= reg_mask->acp_sdw_intr_mask;
	writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
	mutex_unlock(amd_manager->acp_sdw_lock);

	/* Peripheral state-change interrupts for device indices 0-7 and 8-11 */
	writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
	       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
	writel(AMD_SDW_IRQ_MASK_8TO11, amd_manager->mmio +
	       ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
	writel(AMD_SDW_IRQ_ERROR_MASK, amd_manager->mmio + ACP_SW_ERROR_INTR_MASK);
}
120
/*
 * Mask all SoundWire interrupts: clear the per-instance bits in the
 * shared ACP external interrupt control register, then zero the
 * manager-local state-change and error masks. Mirror image of
 * amd_enable_sdw_interrupts().
 */
static void amd_disable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
{
	struct sdw_manager_reg_mask *reg_mask = amd_manager->reg_mask;
	u32 val;

	mutex_lock(amd_manager->acp_sdw_lock);
	val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
	val &= ~reg_mask->acp_sdw_intr_mask;
	writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
	mutex_unlock(amd_manager->acp_sdw_lock);

	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
	writel(0x00, amd_manager->mmio + ACP_SW_ERROR_INTR_MASK);
}
136
81ff58ff
VM
/* Quiesce the manager: mask all interrupts, then disable the IP. */
static int amd_deinit_sdw_manager(struct amd_sdw_manager *amd_manager)
{
	amd_disable_sdw_interrupts(amd_manager);
	return amd_disable_sdw_manager(amd_manager);
}
142
d8f48fbd
VM
/*
 * Program the bus frame shape: the register takes the row index shifted
 * left by 3, OR'd with the column index (indices as returned by
 * sdw_find_row_index()/sdw_find_col_index()).
 */
static void amd_sdw_set_frameshape(struct amd_sdw_manager *amd_manager)
{
	u32 frame_size;

	frame_size = (amd_manager->rows_index << 3) | amd_manager->cols_index;
	writel(frame_size, amd_manager->mmio + ACP_SW_FRAMESIZE);
}
150
151static void amd_sdw_ctl_word_prep(u32 *lower_word, u32 *upper_word, struct sdw_msg *msg,
152 int cmd_offset)
153{
154 u32 upper_data;
155 u32 lower_data = 0;
156 u16 addr;
157 u8 upper_addr, lower_addr;
158 u8 data = 0;
159
160 addr = msg->addr + cmd_offset;
161 upper_addr = (addr & 0xFF00) >> 8;
162 lower_addr = addr & 0xFF;
163
164 if (msg->flags == SDW_MSG_FLAG_WRITE)
165 data = msg->buf[cmd_offset];
166
167 upper_data = FIELD_PREP(AMD_SDW_MCP_CMD_DEV_ADDR, msg->dev_num);
168 upper_data |= FIELD_PREP(AMD_SDW_MCP_CMD_COMMAND, msg->flags + 2);
169 upper_data |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_ADDR_HIGH, upper_addr);
170 lower_data |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_ADDR_LOW, lower_addr);
171 lower_data |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_DATA, data);
172
173 *upper_word = upper_data;
174 *lower_word = lower_data;
175}
176
/*
 * Issue one immediate command and collect its 64-bit response
 * (upper word in bits 63:32, lower word in bits 31:0).
 *
 * On any polling timeout a negative errno from readl_poll_timeout()
 * (typically -ETIMEDOUT) is returned through the u64 return type;
 * callers detect this by comparing the result against -ETIMEDOUT.
 */
static u64 amd_sdw_send_cmd_get_resp(struct amd_sdw_manager *amd_manager, u32 lower_data,
				     u32 upper_data)
{
	u64 resp;
	u32 lower_resp, upper_resp;
	u32 sts;
	int ret;

	/* Wait for any previous immediate command to finish */
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
				 !(sts & AMD_SDW_IMM_CMD_BUSY), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x previous cmd status clear failed\n",
			amd_manager->instance);
		return ret;
	}

	/* A stale valid-response flag indicates the previous response was
	 * never consumed; clear it before issuing a new command.
	 */
	if (sts & AMD_SDW_IMM_RES_VALID) {
		dev_err(amd_manager->dev, "SDW%x manager is in bad state\n", amd_manager->instance);
		writel(0x00, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
	}
	/* Writing the command words kicks off the transfer */
	writel(upper_data, amd_manager->mmio + ACP_SW_IMM_CMD_UPPER_WORD);
	writel(lower_data, amd_manager->mmio + ACP_SW_IMM_CMD_LOWER_QWORD);

	/* Wait for the response to become valid */
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
				 (sts & AMD_SDW_IMM_RES_VALID), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x cmd response timeout occurred\n",
			amd_manager->instance);
		return ret;
	}
	upper_resp = readl(amd_manager->mmio + ACP_SW_IMM_RESP_UPPER_WORD);
	lower_resp = readl(amd_manager->mmio + ACP_SW_IMM_RESP_LOWER_QWORD);

	/* Acknowledge the response and wait for the flag to clear */
	writel(AMD_SDW_IMM_RES_VALID, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
				 !(sts & AMD_SDW_IMM_RES_VALID), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x cmd status retry failed\n",
			amd_manager->instance);
		return ret;
	}
	resp = upper_resp;
	resp = (resp << 32) | lower_resp;
	return resp;
}
222
/*
 * Program the SCP address-page registers (ADDRPAGE1/ADDRPAGE2) on the
 * peripheral addressed by @msg, as required before paged register access.
 * Both page writes are issued first; their responses are then checked
 * together. Returns SDW_CMD_OK, or the first TIMEOUT/FAIL/IGNORED result.
 */
static enum sdw_command_response
amd_program_scp_addr(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg)
{
	struct sdw_msg scp_msg = {0};
	u64 response_buf[2] = {0};
	u32 upper_data = 0, lower_data = 0;
	int index;

	scp_msg.dev_num = msg->dev_num;
	scp_msg.addr = SDW_SCP_ADDRPAGE1;
	scp_msg.buf = &msg->addr_page1;
	scp_msg.flags = SDW_MSG_FLAG_WRITE;
	amd_sdw_ctl_word_prep(&lower_data, &upper_data, &scp_msg, 0);
	response_buf[0] = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);
	scp_msg.addr = SDW_SCP_ADDRPAGE2;
	scp_msg.buf = &msg->addr_page2;
	amd_sdw_ctl_word_prep(&lower_data, &upper_data, &scp_msg, 0);
	response_buf[1] = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);

	/* Decode both responses: -ETIMEDOUT sentinel, then ACK/NACK bits */
	for (index = 0; index < 2; index++) {
		if (response_buf[index] == -ETIMEDOUT) {
			dev_err_ratelimited(amd_manager->dev,
					    "SCP_addrpage command timeout for Slave %d\n",
					    msg->dev_num);
			return SDW_CMD_TIMEOUT;
		} else if (!(response_buf[index] & AMD_SDW_MCP_RESP_ACK)) {
			if (response_buf[index] & AMD_SDW_MCP_RESP_NACK) {
				dev_err_ratelimited(amd_manager->dev,
						    "SCP_addrpage NACKed for Slave %d\n",
						    msg->dev_num);
				return SDW_CMD_FAIL;
			}
			/* No ACK, no NACK: command was ignored by the device */
			dev_dbg_ratelimited(amd_manager->dev, "SCP_addrpage ignored for Slave %d\n",
					    msg->dev_num);
			return SDW_CMD_IGNORED;
		}
	}
	return SDW_CMD_OK;
}
262
263static int amd_prep_msg(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg)
264{
265 int ret;
266
267 if (msg->page) {
268 ret = amd_program_scp_addr(amd_manager, msg);
269 if (ret) {
270 msg->len = 0;
271 return ret;
272 }
273 }
274 switch (msg->flags) {
275 case SDW_MSG_FLAG_READ:
276 case SDW_MSG_FLAG_WRITE:
277 break;
278 default:
279 dev_err(amd_manager->dev, "Invalid msg cmd: %d\n", msg->flags);
280 return -EINVAL;
281 }
282 return 0;
283}
284
285static enum sdw_command_response amd_sdw_fill_msg_resp(struct amd_sdw_manager *amd_manager,
286 struct sdw_msg *msg, u64 response,
287 int offset)
288{
289 if (response & AMD_SDW_MCP_RESP_ACK) {
290 if (msg->flags == SDW_MSG_FLAG_READ)
291 msg->buf[offset] = FIELD_GET(AMD_SDW_MCP_RESP_RDATA, response);
292 } else {
293 if (response == -ETIMEDOUT) {
294 dev_err_ratelimited(amd_manager->dev, "command timeout for Slave %d\n",
295 msg->dev_num);
296 return SDW_CMD_TIMEOUT;
297 } else if (response & AMD_SDW_MCP_RESP_NACK) {
298 dev_err_ratelimited(amd_manager->dev,
299 "command response NACK received for Slave %d\n",
300 msg->dev_num);
301 return SDW_CMD_FAIL;
302 }
303 dev_err_ratelimited(amd_manager->dev, "command is ignored for Slave %d\n",
304 msg->dev_num);
305 return SDW_CMD_IGNORED;
306 }
307 return SDW_CMD_OK;
308}
309
/*
 * Transfer a single byte of @msg at @cmd_offset: build the command words,
 * send them, and decode the response into the message / a status code.
 */
static unsigned int _amd_sdw_xfer_msg(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg,
				      int cmd_offset)
{
	u64 response;
	u32 upper_data = 0, lower_data = 0;

	amd_sdw_ctl_word_prep(&lower_data, &upper_data, msg, cmd_offset);
	response = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);
	return amd_sdw_fill_msg_resp(amd_manager, msg, response, cmd_offset);
}
320
321static enum sdw_command_response amd_sdw_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
322{
323 struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
324 int ret, i;
325
326 ret = amd_prep_msg(amd_manager, msg);
327 if (ret)
328 return SDW_CMD_FAIL_OTHER;
329 for (i = 0; i < msg->len; i++) {
330 ret = _amd_sdw_xfer_msg(amd_manager, msg, i);
331 if (ret)
332 return ret;
333 }
334 return SDW_CMD_OK;
335}
336
65f93e40
VM
337static void amd_sdw_fill_slave_status(struct amd_sdw_manager *amd_manager, u16 index, u32 status)
338{
339 switch (status) {
340 case SDW_SLAVE_ATTACHED:
341 case SDW_SLAVE_UNATTACHED:
342 case SDW_SLAVE_ALERT:
343 amd_manager->status[index] = status;
344 break;
345 default:
346 amd_manager->status[index] = SDW_SLAVE_RESERVED;
347 break;
348 }
349}
350
/*
 * Unpack the per-device status fields from a ping-command response and
 * record them in amd_manager->status[]. The response carries 2-bit status
 * values: devices 0-3 in one field and 4-11 in another, re-assembled here
 * into one contiguous bitmap before extraction.
 */
static void amd_sdw_process_ping_status(u64 response, struct amd_sdw_manager *amd_manager)
{
	u64 slave_stat;
	u32 val;
	u16 dev_index;

	/* slave status response */
	slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
	dev_dbg(amd_manager->dev, "slave_stat:0x%llx\n", slave_stat);
	for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
		/* 2 bits of status per device index */
		val = (slave_stat >> (dev_index * 2)) & AMD_SDW_MCP_SLAVE_STATUS_MASK;
		dev_dbg(amd_manager->dev, "val:0x%x\n", val);
		amd_sdw_fill_slave_status(amd_manager, dev_index, val);
	}
}
367
/*
 * Issue a ping (all-zero command words) under the bus message lock and
 * fold the reported peripheral states into amd_manager->status[].
 */
static void amd_sdw_read_and_process_ping_status(struct amd_sdw_manager *amd_manager)
{
	u64 response;

	/* msg_lock serializes against regular bus transfers using the same
	 * immediate-command registers.
	 */
	mutex_lock(&amd_manager->bus.msg_lock);
	response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
	mutex_unlock(&amd_manager->bus.msg_lock);
	amd_sdw_process_ping_status(response, amd_manager);
}
377
d8f48fbd
VM
/*
 * Bus read_ping_status callback: issue a ping and return the raw
 * combined per-device status bitmap (2 bits per device, devices 0-3 in
 * the low byte, 4-11 above). Locking is the caller's (sdw core's)
 * responsibility here, unlike amd_sdw_read_and_process_ping_status().
 */
static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	u64 response;
	u32 slave_stat;

	response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
	/* slave status from ping response */
	slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
	dev_dbg(amd_manager->dev, "slave_stat:0x%x\n", slave_stat);
	return slave_stat;
}
391
/*
 * Bus compute_params callback: derive transport and port parameters for
 * every port of every master runtime on this bus.
 *
 * Uses a fixed layout: horizontal span from column 1 to the last column,
 * a 64-slot block offset region per port number, isochronous flow and
 * one block per port. The per-port data is also accumulated into t_data
 * so the sdw core can program the matching peripheral ports.
 */
static int amd_sdw_compute_params(struct sdw_bus *bus)
{
	struct sdw_transport_data t_data = {0};
	struct sdw_master_runtime *m_rt;
	struct sdw_port_runtime *p_rt;
	struct sdw_bus_params *b_params = &bus->params;
	int port_bo, hstart, hstop, sample_int;
	unsigned int rate, bps;

	port_bo = 0;
	hstart = 1;
	hstop = bus->params.col - 1;
	t_data.hstop = hstop;
	t_data.hstart = hstart;

	list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
		rate = m_rt->stream->params.rate;
		bps = m_rt->stream->params.bps;
		/* Sample interval in frames for this stream's rate */
		sample_int = (bus->params.curr_dr_freq / rate);
		list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
			/* Fixed 64-slot block-offset window per port number */
			port_bo = (p_rt->num * 64) + 1;
			dev_dbg(bus->dev, "p_rt->num=%d hstart=%d hstop=%d port_bo=%d\n",
				p_rt->num, hstart, hstop, port_bo);
			sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
					      false, SDW_BLK_GRP_CNT_1, sample_int,
					      port_bo, port_bo >> 8, hstart, hstop,
					      SDW_BLK_PKG_PER_PORT, 0x0);

			sdw_fill_port_params(&p_rt->port_params,
					     p_rt->num, bps,
					     SDW_PORT_FLOW_MODE_ISOCH,
					     b_params->m_data_mode);
			/* t_data reflects the last port visited; consumed below */
			t_data.hstart = hstart;
			t_data.hstop = hstop;
			t_data.block_offset = port_bo;
			t_data.sub_block_offset = 0;
		}
		sdw_compute_slave_ports(m_rt, &t_data);
	}
	return 0;
}
433
/*
 * Port-ops dpn_set_port_params callback: program flow mode, data mode and
 * word length into the port's frame-format register. The register table
 * is selected by manager instance; @bank is unused since the hardware
 * register set here is not banked per SSP bank in this driver.
 */
static int amd_sdw_port_params(struct sdw_bus *bus, struct sdw_port_params *p_params,
			       unsigned int bank)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	u32 frame_fmt_reg, dpn_frame_fmt;

	dev_dbg(amd_manager->dev, "p_params->num:0x%x\n", p_params->num);
	switch (amd_manager->instance) {
	case ACP_SDW0:
		frame_fmt_reg = sdw0_manager_dp_reg[p_params->num].frame_fmt_reg;
		break;
	case ACP_SDW1:
		frame_fmt_reg = sdw1_manager_dp_reg[p_params->num].frame_fmt_reg;
		break;
	default:
		return -EINVAL;
	}

	/* Read-modify-write only the fields owned by this callback */
	dpn_frame_fmt = readl(amd_manager->mmio + frame_fmt_reg);
	u32p_replace_bits(&dpn_frame_fmt, p_params->flow_mode, AMD_DPN_FRAME_FMT_PFM);
	u32p_replace_bits(&dpn_frame_fmt, p_params->data_mode, AMD_DPN_FRAME_FMT_PDM);
	/* Hardware encodes word length as bits-per-sample minus one */
	u32p_replace_bits(&dpn_frame_fmt, p_params->bps - 1, AMD_DPN_FRAME_FMT_WORD_LEN);
	writel(dpn_frame_fmt, amd_manager->mmio + frame_fmt_reg);
	return 0;
}
459
/*
 * Port-ops dpn_set_port_transport_params callback: program block packing
 * and grouping, sample interval, horizontal start/stop, offsets and lane
 * control for one data port. Register addresses come from the per-instance
 * DP register tables; @bank is unused by this hardware programming model.
 */
static int amd_sdw_transport_params(struct sdw_bus *bus,
				    struct sdw_transport_params *params,
				    enum sdw_reg_bank bank)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	u32 dpn_frame_fmt;
	u32 dpn_sampleinterval;
	u32 dpn_hctrl;
	u32 dpn_offsetctrl;
	u32 dpn_lanectrl;
	u32 frame_fmt_reg, sample_int_reg, hctrl_dp0_reg;
	u32 offset_reg, lane_ctrl_ch_en_reg;

	/* Pick this instance's register set for the given port number */
	switch (amd_manager->instance) {
	case ACP_SDW0:
		frame_fmt_reg = sdw0_manager_dp_reg[params->port_num].frame_fmt_reg;
		sample_int_reg = sdw0_manager_dp_reg[params->port_num].sample_int_reg;
		hctrl_dp0_reg = sdw0_manager_dp_reg[params->port_num].hctrl_dp0_reg;
		offset_reg = sdw0_manager_dp_reg[params->port_num].offset_reg;
		lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg;
		break;
	case ACP_SDW1:
		frame_fmt_reg = sdw1_manager_dp_reg[params->port_num].frame_fmt_reg;
		sample_int_reg = sdw1_manager_dp_reg[params->port_num].sample_int_reg;
		hctrl_dp0_reg = sdw1_manager_dp_reg[params->port_num].hctrl_dp0_reg;
		offset_reg = sdw1_manager_dp_reg[params->port_num].offset_reg;
		lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg;
		break;
	default:
		return -EINVAL;
	}
	writel(AMD_SDW_SSP_COUNTER_VAL, amd_manager->mmio + ACP_SW_SSP_COUNTER);

	dpn_frame_fmt = readl(amd_manager->mmio + frame_fmt_reg);
	u32p_replace_bits(&dpn_frame_fmt, params->blk_pkg_mode, AMD_DPN_FRAME_FMT_BLK_PKG_MODE);
	u32p_replace_bits(&dpn_frame_fmt, params->blk_grp_ctrl, AMD_DPN_FRAME_FMT_BLK_GRP_CTRL);
	u32p_replace_bits(&dpn_frame_fmt, SDW_STREAM_PCM, AMD_DPN_FRAME_FMT_PCM_OR_PDM);
	writel(dpn_frame_fmt, amd_manager->mmio + frame_fmt_reg);

	/* Hardware takes the sample interval minus one */
	dpn_sampleinterval = params->sample_interval - 1;
	writel(dpn_sampleinterval, amd_manager->mmio + sample_int_reg);

	dpn_hctrl = FIELD_PREP(AMD_DPN_HCTRL_HSTOP, params->hstop);
	dpn_hctrl |= FIELD_PREP(AMD_DPN_HCTRL_HSTART, params->hstart);
	writel(dpn_hctrl, amd_manager->mmio + hctrl_dp0_reg);

	dpn_offsetctrl = FIELD_PREP(AMD_DPN_OFFSET_CTRL_1, params->offset1);
	dpn_offsetctrl |= FIELD_PREP(AMD_DPN_OFFSET_CTRL_2, params->offset2);
	writel(dpn_offsetctrl, amd_manager->mmio + offset_reg);

	/*
	 * lane_ctrl_ch_en_reg will be used to program lane_ctrl and ch_mask
	 * parameters.
	 */
	dpn_lanectrl = readl(amd_manager->mmio + lane_ctrl_ch_en_reg);
	u32p_replace_bits(&dpn_lanectrl, params->lane_ctrl, AMD_DPN_CH_EN_LCTRL);
	writel(dpn_lanectrl, amd_manager->mmio + lane_ctrl_ch_en_reg);
	return 0;
}
519
/*
 * Port-ops dpn_port_enable_ch callback: on enable, write the channel mask
 * (preserving the lane-control field already in the register); on disable,
 * clear the whole register, which zeroes both channel mask and lane
 * control.
 */
static int amd_sdw_port_enable(struct sdw_bus *bus,
			       struct sdw_enable_ch *enable_ch,
			       unsigned int bank)
{
	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
	u32 dpn_ch_enable;
	u32 lane_ctrl_ch_en_reg;

	switch (amd_manager->instance) {
	case ACP_SDW0:
		lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg;
		break;
	case ACP_SDW1:
		lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * lane_ctrl_ch_en_reg will be used to program lane_ctrl and ch_mask
	 * parameters.
	 */
	dpn_ch_enable = readl(amd_manager->mmio + lane_ctrl_ch_en_reg);
	u32p_replace_bits(&dpn_ch_enable, enable_ch->ch_mask, AMD_DPN_CH_EN_CHMASK);
	if (enable_ch->enable)
		writel(dpn_ch_enable, amd_manager->mmio + lane_ctrl_ch_en_reg);
	else
		writel(0, amd_manager->mmio + lane_ctrl_ch_en_reg);
	return 0;
}
551
552static int sdw_master_read_amd_prop(struct sdw_bus *bus)
553{
554 struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
555 struct fwnode_handle *link;
556 struct sdw_master_prop *prop;
557 u32 quirk_mask = 0;
558 u32 wake_en_mask = 0;
559 u32 power_mode_mask = 0;
560 char name[32];
561
562 prop = &bus->prop;
563 /* Find manager handle */
564 snprintf(name, sizeof(name), "mipi-sdw-link-%d-subproperties", bus->link_id);
565 link = device_get_named_child_node(bus->dev, name);
566 if (!link) {
567 dev_err(bus->dev, "Manager node %s not found\n", name);
568 return -EIO;
569 }
570 fwnode_property_read_u32(link, "amd-sdw-enable", &quirk_mask);
571 if (!(quirk_mask & AMD_SDW_QUIRK_MASK_BUS_ENABLE))
572 prop->hw_disabled = true;
573 prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH |
574 SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;
575
576 fwnode_property_read_u32(link, "amd-sdw-wakeup-enable", &wake_en_mask);
577 amd_manager->wake_en_mask = wake_en_mask;
578 fwnode_property_read_u32(link, "amd-sdw-power-mode", &power_mode_mask);
579 amd_manager->power_mode_mask = power_mode_mask;
580 return 0;
581}
582
/*
 * Bus read_prop callback: read the standard MIPI DisCo master properties,
 * then the AMD-specific ones.
 * NOTE(review): the return value of sdw_master_read_amd_prop() is
 * discarded, so a missing manager fwnode is tolerated silently here --
 * confirm this is intentional.
 */
static int amd_prop_read(struct sdw_bus *bus)
{
	sdw_master_read_prop(bus);
	sdw_master_read_amd_prop(bus);
	return 0;
}
589
/* Data-port programming callbacks invoked by the SoundWire core. */
static const struct sdw_master_port_ops amd_sdw_port_ops = {
	.dpn_set_port_params = amd_sdw_port_params,
	.dpn_set_port_transport_params = amd_sdw_transport_params,
	.dpn_port_enable_ch = amd_sdw_port_enable,
};
595
/* Bus-level callbacks (property read, message transfer, ping status). */
static const struct sdw_master_ops amd_sdw_ops = {
	.read_prop = amd_prop_read,
	.xfer_msg = amd_sdw_xfer_msg,
	.read_ping_status = amd_sdw_read_ping_status,
};
601
2b13596f
VM
/*
 * DAI hw_params callback: build stream and port configurations from the
 * ALSA hw_params and attach this manager to the stream previously bound
 * via set_stream(). The DAI id doubles as the port number. Returns 0 or
 * a negative errno (-EIO if no runtime was bound, -ENOMEM, or the
 * sdw_stream_add_master() error).
 */
static int amd_sdw_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params,
			     struct snd_soc_dai *dai)
{
	struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
	struct sdw_amd_dai_runtime *dai_runtime;
	struct sdw_stream_config sconfig;
	struct sdw_port_config *pconfig;
	int ch, dir;
	int ret;

	dai_runtime = amd_manager->dai_runtime_array[dai->id];
	if (!dai_runtime)
		return -EIO;

	ch = params_channels(params);
	/* Capture means data flows from peripheral to manager (RX) */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		dir = SDW_DATA_DIR_RX;
	else
		dir = SDW_DATA_DIR_TX;
	dev_dbg(amd_manager->dev, "dir:%d dai->id:0x%x\n", dir, dai->id);

	sconfig.direction = dir;
	sconfig.ch_count = ch;
	sconfig.frame_rate = params_rate(params);
	sconfig.type = dai_runtime->stream_type;

	sconfig.bps = snd_pcm_format_width(params_format(params));

	/* Port configuration */
	pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
	if (!pconfig) {
		ret = -ENOMEM;
		goto error;
	}

	pconfig->num = dai->id;
	/* Contiguous channel mask: ch channels -> low ch bits set */
	pconfig->ch_mask = (1 << ch) - 1;
	ret = sdw_stream_add_master(&amd_manager->bus, &sconfig,
				    pconfig, 1, dai_runtime->stream);
	if (ret)
		dev_err(amd_manager->dev, "add manager to stream failed:%d\n", ret);

	/* pconfig is copied by the core; safe to free in all cases */
	kfree(pconfig);
error:
	return ret;
}
649
/*
 * DAI hw_free callback: detach this manager from the stream bound to the
 * DAI. Returns 0, -EIO if no runtime was bound, or the (logged)
 * sdw_stream_remove_master() error.
 */
static int amd_sdw_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
	struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
	struct sdw_amd_dai_runtime *dai_runtime;
	int ret;

	dai_runtime = amd_manager->dai_runtime_array[dai->id];
	if (!dai_runtime)
		return -EIO;

	ret = sdw_stream_remove_master(&amd_manager->bus, dai_runtime->stream);
	if (ret < 0)
		dev_err(dai->dev, "remove manager from stream %s failed: %d\n",
			dai_runtime->stream->name, ret);
	return ret;
}
666
667static int amd_set_sdw_stream(struct snd_soc_dai *dai, void *stream, int direction)
668{
669 struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
670 struct sdw_amd_dai_runtime *dai_runtime;
671
672 dai_runtime = amd_manager->dai_runtime_array[dai->id];
673 if (stream) {
674 /* first paranoia check */
675 if (dai_runtime) {
676 dev_err(dai->dev, "dai_runtime already allocated for dai %s\n", dai->name);
677 return -EINVAL;
678 }
679
680 /* allocate and set dai_runtime info */
681 dai_runtime = kzalloc(sizeof(*dai_runtime), GFP_KERNEL);
682 if (!dai_runtime)
683 return -ENOMEM;
684
685 dai_runtime->stream_type = SDW_STREAM_PCM;
686 dai_runtime->bus = &amd_manager->bus;
687 dai_runtime->stream = stream;
688 amd_manager->dai_runtime_array[dai->id] = dai_runtime;
689 } else {
690 /* second paranoia check */
691 if (!dai_runtime) {
692 dev_err(dai->dev, "dai_runtime not allocated for dai %s\n", dai->name);
693 return -EINVAL;
694 }
695
696 /* for NULL stream we release allocated dai_runtime */
697 kfree(dai_runtime);
698 amd_manager->dai_runtime_array[dai->id] = NULL;
699 }
700 return 0;
701}
702
/* DAI set_stream callback for PCM streams; defers to amd_set_sdw_stream(). */
static int amd_pcm_set_sdw_stream(struct snd_soc_dai *dai, void *stream, int direction)
{
	return amd_set_sdw_stream(dai, stream, direction);
}
707
708static void *amd_get_sdw_stream(struct snd_soc_dai *dai, int direction)
709{
710 struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
711 struct sdw_amd_dai_runtime *dai_runtime;
712
713 dai_runtime = amd_manager->dai_runtime_array[dai->id];
714 if (!dai_runtime)
715 return ERR_PTR(-EINVAL);
716
717 return dai_runtime->stream;
718}
719
/* DAI callbacks shared by all SDW pins registered by this manager. */
static const struct snd_soc_dai_ops amd_sdw_dai_ops = {
	.hw_params = amd_sdw_hw_params,
	.hw_free = amd_sdw_hw_free,
	.set_stream = amd_pcm_set_sdw_stream,
	.get_stream = amd_get_sdw_stream,
};
726
/* Minimal ASoC component; only provides a name for the registered DAIs. */
static const struct snd_soc_component_driver amd_sdw_dai_component = {
	.name = "soundwire",
};
730
/*
 * Allocate the dai_runtime array and register one DAI per data port:
 * the first num_dout_ports DAIs are playback, the rest capture. All DAIs
 * are currently fixed at 2 channels, 48 kHz, S16_LE. DAI id == array
 * index == port number. Returns 0 or -ENOMEM / registration error.
 */
static int amd_sdw_register_dais(struct amd_sdw_manager *amd_manager)
{
	struct sdw_amd_dai_runtime **dai_runtime_array;
	struct snd_soc_dai_driver *dais;
	struct snd_soc_pcm_stream *stream;
	struct device *dev;
	int i, num_dais;

	dev = amd_manager->dev;
	num_dais = amd_manager->num_dout_ports + amd_manager->num_din_ports;
	dais = devm_kcalloc(dev, num_dais, sizeof(*dais), GFP_KERNEL);
	if (!dais)
		return -ENOMEM;

	dai_runtime_array = devm_kcalloc(dev, num_dais,
					 sizeof(struct sdw_amd_dai_runtime *),
					 GFP_KERNEL);
	if (!dai_runtime_array)
		return -ENOMEM;
	amd_manager->dai_runtime_array = dai_runtime_array;
	for (i = 0; i < num_dais; i++) {
		dais[i].name = devm_kasprintf(dev, GFP_KERNEL, "SDW%d Pin%d", amd_manager->instance,
					      i);
		if (!dais[i].name)
			return -ENOMEM;
		/* Output ports first, input ports after */
		if (i < amd_manager->num_dout_ports)
			stream = &dais[i].playback;
		else
			stream = &dais[i].capture;

		stream->channels_min = 2;
		stream->channels_max = 2;
		stream->rates = SNDRV_PCM_RATE_48000;
		stream->formats = SNDRV_PCM_FMTBIT_S16_LE;

		dais[i].ops = &amd_sdw_dai_ops;
		dais[i].id = i;
	}

	return devm_snd_soc_register_component(dev, &amd_sdw_dai_component,
					       dais, num_dais);
}
773
65f93e40
VM
/*
 * Deferred-work handler that hands the collected peripheral status array
 * to the sdw core. While a new device (Device0 attached) is being
 * enumerated, state-change interrupts are masked and status is polled via
 * ping commands instead; the retry loop is bounded by SDW_MAX_DEVICES
 * iterations so a misbehaving device cannot stall the workqueue forever.
 */
static void amd_sdw_update_slave_status_work(struct work_struct *work)
{
	struct amd_sdw_manager *amd_manager =
		container_of(work, struct amd_sdw_manager, amd_sdw_work);
	int retry_count = 0;

	/* Mask state-change interrupts while Device0 enumeration runs */
	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
		writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
		writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
	}

update_status:
	sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);
	/*
	 * During the peripheral enumeration sequence, the SoundWire manager interrupts
	 * are masked. Once the device number programming is done for all peripherals,
	 * interrupts will be unmasked. Read the peripheral device status from ping command
	 * and process the response. This sequence will ensure all peripheral devices enumerated
	 * and initialized properly.
	 */
	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
		if (retry_count++ < SDW_MAX_DEVICES) {
			writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
			       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
			writel(AMD_SDW_IRQ_MASK_8TO11, amd_manager->mmio +
			       ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
			amd_sdw_read_and_process_ping_status(amd_manager);
			goto update_status;
		} else {
			dev_err_ratelimited(amd_manager->dev,
					    "Device0 detected after %d iterations\n",
					    retry_count);
		}
	}
}
809
/*
 * Fold the latched state-change interrupt status (two registers covering
 * device indices 0-7 and 8-11) into amd_manager->status[]. A bare
 * Device0-attached event resets the whole array first, since that marks
 * the start of a fresh enumeration.
 */
static void amd_sdw_update_slave_status(u32 status_change_0to7, u32 status_change_8to11,
					struct amd_sdw_manager *amd_manager)
{
	u64 slave_stat;
	u32 val;
	int dev_index;

	if (status_change_0to7 == AMD_SDW_SLAVE_0_ATTACHED)
		memset(amd_manager->status, 0, sizeof(amd_manager->status));
	/* Combine both registers into one 64-bit per-device bitmap */
	slave_stat = status_change_0to7;
	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STATUS_8TO_11, status_change_8to11) << 32;
	dev_dbg(amd_manager->dev, "status_change_0to7:0x%x status_change_8to11:0x%x\n",
		status_change_0to7, status_change_8to11);
	if (slave_stat) {
		for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
			/* Only devices flagged valid carry a new status */
			if (slave_stat & AMD_SDW_MCP_SLAVE_STATUS_VALID_MASK(dev_index)) {
				val = (slave_stat >> AMD_SDW_MCP_SLAVE_STAT_SHIFT_MASK(dev_index)) &
				      AMD_SDW_MCP_SLAVE_STATUS_MASK;
				amd_sdw_fill_slave_status(amd_manager, dev_index, val);
			}
		}
	}
}
833
66c87883
VM
/*
 * Handle a peripheral wake event: request an asynchronous runtime resume
 * of the manager device, then clear the wake enable bit and the latched
 * 8TO11 status register that carried the wake indication.
 */
static void amd_sdw_process_wake_event(struct amd_sdw_manager *amd_manager)
{
	pm_request_resume(amd_manager->dev);
	writel(0x00, amd_manager->acp_mmio + ACP_SW_WAKE_EN(amd_manager->instance));
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
}
840
65f93e40
VM
/*
 * Threaded interrupt work: read both latched state-change status
 * registers and dispatch. Wake events are handled (and the function
 * returns) before anything else; PREQ events trigger a ping-status
 * refresh; otherwise the latched bits are decoded as device status
 * changes. Any event schedules amd_sdw_work to notify the sdw core, and
 * the status registers are cleared on exit.
 */
static void amd_sdw_irq_thread(struct work_struct *work)
{
	struct amd_sdw_manager *amd_manager =
		container_of(work, struct amd_sdw_manager, amd_sdw_irq_thread);
	u32 status_change_8to11;
	u32 status_change_0to7;

	status_change_8to11 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
	status_change_0to7 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
	dev_dbg(amd_manager->dev, "[SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
		amd_manager->instance, status_change_0to7, status_change_8to11);
	/* Wake handling clears its own status bits; skip the rest */
	if (status_change_8to11 & AMD_SDW_WAKE_STAT_MASK)
		return amd_sdw_process_wake_event(amd_manager);

	if (status_change_8to11 & AMD_SDW_PREQ_INTR_STAT) {
		amd_sdw_read_and_process_ping_status(amd_manager);
	} else {
		/* Check for the updated status on peripheral device */
		amd_sdw_update_slave_status(status_change_0to7, status_change_8to11, amd_manager);
	}
	if (status_change_8to11 || status_change_0to7)
		schedule_work(&amd_manager->amd_sdw_work);
	/* Acknowledge the latched status so new events can be reported */
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
}
866
d8f48fbd
VM
/*
 * Deferred probe work: bring up the hardware (pads, manager init,
 * interrupts, enable, frame shape) unless the link is hw_disabled, then
 * enable runtime PM with autosuspend. Hardware init errors silently abort
 * the bring-up and leave runtime PM unconfigured.
 */
static void amd_sdw_probe_work(struct work_struct *work)
{
	struct amd_sdw_manager *amd_manager = container_of(work, struct amd_sdw_manager,
							   probe_work);
	struct sdw_master_prop *prop;
	int ret;

	prop = &amd_manager->bus.prop;
	if (!prop->hw_disabled) {
		amd_enable_sdw_pads(amd_manager);
		ret = amd_init_sdw_manager(amd_manager);
		if (ret)
			return;
		amd_enable_sdw_interrupts(amd_manager);
		ret = amd_enable_sdw_manager(amd_manager);
		if (ret)
			return;
		amd_sdw_set_frameshape(amd_manager);
	}
	/* Enable runtime PM */
	pm_runtime_set_autosuspend_delay(amd_manager->dev, AMD_SDW_MASTER_SUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(amd_manager->dev);
	pm_runtime_mark_last_busy(amd_manager->dev);
	pm_runtime_set_active(amd_manager->dev);
	pm_runtime_enable(amd_manager->dev);
}
893
894static int amd_sdw_manager_probe(struct platform_device *pdev)
895{
896 const struct acp_sdw_pdata *pdata = pdev->dev.platform_data;
897 struct resource *res;
898 struct device *dev = &pdev->dev;
899 struct sdw_master_prop *prop;
900 struct sdw_bus_params *params;
901 struct amd_sdw_manager *amd_manager;
902 int ret;
903
904 amd_manager = devm_kzalloc(dev, sizeof(struct amd_sdw_manager), GFP_KERNEL);
905 if (!amd_manager)
906 return -ENOMEM;
907
908 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
909 if (!res)
910 return -ENOMEM;
911
912 amd_manager->acp_mmio = devm_ioremap(dev, res->start, resource_size(res));
913 if (IS_ERR(amd_manager->mmio)) {
914 dev_err(dev, "mmio not found\n");
915 return PTR_ERR(amd_manager->mmio);
916 }
917 amd_manager->instance = pdata->instance;
918 amd_manager->mmio = amd_manager->acp_mmio +
919 (amd_manager->instance * SDW_MANAGER_REG_OFFSET);
920 amd_manager->acp_sdw_lock = pdata->acp_sdw_lock;
921 amd_manager->cols_index = sdw_find_col_index(AMD_SDW_DEFAULT_COLUMNS);
922 amd_manager->rows_index = sdw_find_row_index(AMD_SDW_DEFAULT_ROWS);
923 amd_manager->dev = dev;
924 amd_manager->bus.ops = &amd_sdw_ops;
925 amd_manager->bus.port_ops = &amd_sdw_port_ops;
926 amd_manager->bus.compute_params = &amd_sdw_compute_params;
927 amd_manager->bus.clk_stop_timeout = 200;
928 amd_manager->bus.link_id = amd_manager->instance;
929
930 switch (amd_manager->instance) {
931 case ACP_SDW0:
932 amd_manager->num_dout_ports = AMD_SDW0_MAX_TX_PORTS;
933 amd_manager->num_din_ports = AMD_SDW0_MAX_RX_PORTS;
934 break;
935 case ACP_SDW1:
936 amd_manager->num_dout_ports = AMD_SDW1_MAX_TX_PORTS;
937 amd_manager->num_din_ports = AMD_SDW1_MAX_RX_PORTS;
938 break;
939 default:
940 return -EINVAL;
941 }
942
943 amd_manager->reg_mask = &sdw_manager_reg_mask_array[amd_manager->instance];
944 params = &amd_manager->bus.params;
945 params->max_dr_freq = AMD_SDW_DEFAULT_CLK_FREQ * 2;
946 params->curr_dr_freq = AMD_SDW_DEFAULT_CLK_FREQ * 2;
947 params->col = AMD_SDW_DEFAULT_COLUMNS;
948 params->row = AMD_SDW_DEFAULT_ROWS;
949 prop = &amd_manager->bus.prop;
950 prop->clk_freq = &amd_sdw_freq_tbl[0];
951 prop->mclk_freq = AMD_SDW_BUS_BASE_FREQ;
952
953 ret = sdw_bus_master_add(&amd_manager->bus, dev, dev->fwnode);
954 if (ret) {
955 dev_err(dev, "Failed to register SoundWire manager(%d)\n", ret);
956 return ret;
957 }
2b13596f
VM
958 ret = amd_sdw_register_dais(amd_manager);
959 if (ret) {
960 dev_err(dev, "CPU DAI registration failed\n");
961 sdw_bus_master_delete(&amd_manager->bus);
962 return ret;
963 }
d8f48fbd 964 dev_set_drvdata(dev, amd_manager);
65f93e40
VM
965 INIT_WORK(&amd_manager->amd_sdw_irq_thread, amd_sdw_irq_thread);
966 INIT_WORK(&amd_manager->amd_sdw_work, amd_sdw_update_slave_status_work);
d8f48fbd
VM
967 INIT_WORK(&amd_manager->probe_work, amd_sdw_probe_work);
968 /*
969 * Instead of having lengthy probe sequence, use deferred probe.
970 */
971 schedule_work(&amd_manager->probe_work);
972 return 0;
973}
974
975static int amd_sdw_manager_remove(struct platform_device *pdev)
976{
977 struct amd_sdw_manager *amd_manager = dev_get_drvdata(&pdev->dev);
978
81ff58ff 979 pm_runtime_disable(&pdev->dev);
d8f48fbd
VM
980 cancel_work_sync(&amd_manager->probe_work);
981 amd_disable_sdw_interrupts(amd_manager);
982 sdw_bus_master_delete(&amd_manager->bus);
983 return amd_disable_sdw_manager(amd_manager);
984}
985
81ff58ff
VM
/*
 * Put the link into clock-stop mode: prepare the peripherals, issue the bus
 * clock stop, then poll the manager until it reports the stop completed.
 * Always returns 0 so suspend proceeds even on failure (errors are logged).
 */
static int amd_sdw_clock_stop(struct amd_sdw_manager *amd_manager)
{
	u32 val;
	int ret;

	ret = sdw_bus_prep_clk_stop(&amd_manager->bus);
	if (ret < 0 && ret != -ENODATA) {	/* -ENODATA is tolerated — presumably nothing to prepare; verify in bus.c */
		dev_err(amd_manager->dev, "prepare clock stop failed %d", ret);
		return 0;
	}
	ret = sdw_bus_clk_stop(&amd_manager->bus);
	if (ret < 0 && ret != -ENODATA) {
		dev_err(amd_manager->dev, "bus clock stop failed %d", ret);
		return 0;
	}

	/* Wait for the hardware to acknowledge the clock stop */
	ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL, val,
				 (val & AMD_SDW_CLK_STOP_DONE), ACP_DELAY_US, AMD_SDW_TIMEOUT);
	if (ret) {
		dev_err(amd_manager->dev, "SDW%x clock stop failed\n", amd_manager->instance);
		return 0;
	}

	amd_manager->clk_stopped = true;
	/* Arm the wake source only if a peripheral requested wake capability */
	if (amd_manager->wake_en_mask)
		writel(0x01, amd_manager->acp_mmio + ACP_SW_WAKE_EN(amd_manager->instance));

	dev_dbg(amd_manager->dev, "SDW%x clock stop successful\n", amd_manager->instance);
	return 0;
}
1016
/*
 * Exit clock-stop mode: request a clock resume from the manager, wait for
 * the hardware acknowledgment, then let the bus layer de-prepare the
 * peripherals. Always returns 0; a failed exit is only logged.
 */
static int amd_sdw_clock_stop_exit(struct amd_sdw_manager *amd_manager)
{
	int ret;
	u32 val;

	if (amd_manager->clk_stopped) {
		val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
		val |= AMD_SDW_CLK_RESUME_REQ;
		writel(val, amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
		/*
		 * NOTE(review): the poll's return in 'ret' is discarded; success
		 * is decided by re-testing AMD_SDW_CLK_RESUME_DONE in 'val'.
		 */
		ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL, val,
					 (val & AMD_SDW_CLK_RESUME_DONE), ACP_DELAY_US,
					 AMD_SDW_TIMEOUT);
		if (val & AMD_SDW_CLK_RESUME_DONE) {
			/* Clear the resume request before touching the bus layer */
			writel(0, amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
			ret = sdw_bus_exit_clk_stop(&amd_manager->bus);
			if (ret < 0)
				dev_err(amd_manager->dev, "bus failed to exit clock stop %d\n",
					ret);
			amd_manager->clk_stopped = false;
		}
	}
	/* Still marked stopped => the hardware never reported RESUME_DONE */
	if (amd_manager->clk_stopped) {
		dev_err(amd_manager->dev, "SDW%x clock stop exit failed\n", amd_manager->instance);
		return 0;
	}
	dev_dbg(amd_manager->dev, "SDW%x clock stop exit successful\n", amd_manager->instance);
	return 0;
}
1045
9cf1efc5
VM
1046static int amd_resume_child_device(struct device *dev, void *data)
1047{
1048 struct sdw_slave *slave = dev_to_sdw_dev(dev);
1049 int ret;
1050
1051 if (!slave->probed) {
1052 dev_dbg(dev, "skipping device, no probed driver\n");
1053 return 0;
1054 }
1055 if (!slave->dev_num_sticky) {
1056 dev_dbg(dev, "skipping device, never detected on bus\n");
1057 return 0;
1058 }
1059 ret = pm_request_resume(dev);
1060 if (ret < 0) {
1061 dev_err(dev, "pm_request_resume failed: %d\n", ret);
1062 return ret;
1063 }
1064 return 0;
1065}
1066
/*
 * System-suspend .prepare callback: runtime-resume the manager (in clock-stop
 * mode0) and all peripheral devices so they can be suspended properly at
 * system level. Always returns 0 — suspend must not be vetoed from here.
 */
static int __maybe_unused amd_pm_prepare(struct device *dev)
{
	struct amd_sdw_manager *amd_manager = dev_get_drvdata(dev);
	struct sdw_bus *bus = &amd_manager->bus;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_dbg(bus->dev, "SoundWire manager %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}
	/*
	 * When multiple peripheral devices connected over the same link, if SoundWire manager
	 * device is not in runtime suspend state, observed that device alerts are missing
	 * without pm_prepare on AMD platforms in clockstop mode0.
	 */
	if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
		ret = pm_request_resume(dev);
		if (ret < 0) {
			/* Log only; returning 0 keeps the suspend sequence going */
			dev_err(bus->dev, "pm_request_resume failed: %d\n", ret);
			return 0;
		}
	}
	/* To force peripheral devices to system level suspend state, resume the devices
	 * from runtime suspend state first. Without that unable to dispatch the alert
	 * status to peripheral driver during system level resume as they are in runtime
	 * suspend state.
	 */
	ret = device_for_each_child(bus->dev, NULL, amd_resume_child_device);
	if (ret < 0)
		dev_err(dev, "amd_resume_child_device failed: %d\n", ret);
	return 0;
}
1100
1101static int __maybe_unused amd_suspend(struct device *dev)
1102{
1103 struct amd_sdw_manager *amd_manager = dev_get_drvdata(dev);
1104 struct sdw_bus *bus = &amd_manager->bus;
1105 int ret;
1106
1107 if (bus->prop.hw_disabled) {
1108 dev_dbg(bus->dev, "SoundWire manager %d is disabled, ignoring\n",
1109 bus->link_id);
1110 return 0;
1111 }
1112
1113 if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
1114 return amd_sdw_clock_stop(amd_manager);
1115 } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
1116 /*
1117 * As per hardware programming sequence on AMD platforms,
1118 * clock stop should be invoked first before powering-off
1119 */
1120 ret = amd_sdw_clock_stop(amd_manager);
1121 if (ret)
1122 return ret;
1123 return amd_deinit_sdw_manager(amd_manager);
1124 }
1125 return 0;
1126}
1127
81ff58ff
VM
1128static int __maybe_unused amd_suspend_runtime(struct device *dev)
1129{
1130 struct amd_sdw_manager *amd_manager = dev_get_drvdata(dev);
1131 struct sdw_bus *bus = &amd_manager->bus;
1132 int ret;
1133
1134 if (bus->prop.hw_disabled) {
1135 dev_dbg(bus->dev, "SoundWire manager %d is disabled,\n",
1136 bus->link_id);
1137 return 0;
1138 }
1139 if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
1140 return amd_sdw_clock_stop(amd_manager);
1141 } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
1142 ret = amd_sdw_clock_stop(amd_manager);
1143 if (ret)
1144 return ret;
1145 return amd_deinit_sdw_manager(amd_manager);
1146 }
1147 return 0;
1148}
1149
/*
 * Runtime resume callback (also used for system resume via amd_pm below).
 * In clock-stop mode, just exit clock stop. In power-off mode, complete any
 * pending clock resume handshake, reset the peripherals' recorded status,
 * and fully re-initialize and re-enable the manager.
 */
static int __maybe_unused amd_resume_runtime(struct device *dev)
{
	struct amd_sdw_manager *amd_manager = dev_get_drvdata(dev);
	struct sdw_bus *bus = &amd_manager->bus;
	int ret;
	u32 val;

	if (bus->prop.hw_disabled) {
		dev_dbg(bus->dev, "SoundWire manager %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}

	if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
		return amd_sdw_clock_stop_exit(amd_manager);
	} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
		/* Non-zero RESUME_CTRL means a clock-stop handshake is pending */
		val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
		if (val) {
			val |= AMD_SDW_CLK_RESUME_REQ;
			writel(val, amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
			/*
			 * NOTE(review): poll result in 'ret' is ignored; success is
			 * decided by re-testing AMD_SDW_CLK_RESUME_DONE in 'val'.
			 */
			ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL, val,
						 (val & AMD_SDW_CLK_RESUME_DONE), ACP_DELAY_US,
						 AMD_SDW_TIMEOUT);
			if (val & AMD_SDW_CLK_RESUME_DONE) {
				writel(0, amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
				amd_manager->clk_stopped = false;
			}
		}
		/* Manager was powered off: force re-enumeration of all peripherals */
		sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
		amd_init_sdw_manager(amd_manager);
		amd_enable_sdw_interrupts(amd_manager);
		ret = amd_enable_sdw_manager(amd_manager);
		if (ret)
			return ret;
		amd_sdw_set_frameshape(amd_manager);
	}
	return 0;
}
1188
/*
 * PM callbacks. Note that system-level resume deliberately reuses
 * amd_resume_runtime(): the hardware restore sequence is identical for
 * both runtime and system resume.
 */
static const struct dev_pm_ops amd_pm = {
	.prepare = amd_pm_prepare,
	SET_SYSTEM_SLEEP_PM_OPS(amd_suspend, amd_resume_runtime)
	SET_RUNTIME_PM_OPS(amd_suspend_runtime, amd_resume_runtime, NULL)
};
1194
d8f48fbd
VM
1195static struct platform_driver amd_sdw_driver = {
1196 .probe = &amd_sdw_manager_probe,
1197 .remove = &amd_sdw_manager_remove,
1198 .driver = {
1199 .name = "amd_sdw_manager",
81ff58ff 1200 .pm = &amd_pm,
d8f48fbd
VM
1201 }
1202};
1203module_platform_driver(amd_sdw_driver);
1204
1205MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
1206MODULE_DESCRIPTION("AMD SoundWire driver");
1207MODULE_LICENSE("GPL");
1208MODULE_ALIAS("platform:" DRV_NAME);