soundwire: amd: add SoundWire manager interrupt handling
drivers/soundwire/amd_manager.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * SoundWire AMD Manager driver
 *
 * Copyright 2023 Advanced Micro Devices, Inc.
 */

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/wait.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "bus.h"
#include "amd_manager.h"

#define DRV_NAME "amd_sdw_manager"

#define to_amd_sdw(b)   container_of(b, struct amd_sdw_manager, bus)

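/*
 * Enable the SoundWire pad keeper for this manager instance and program the
 * pad pulldown control, under the ACP/SoundWire lock shared with the other
 * manager instance.
 */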
static void amd_enable_sdw_pads(struct amd_sdw_manager *amd_manager)
{
        u32 sw_pad_pulldown_val;
        u32 val;

        mutex_lock(amd_manager->acp_sdw_lock);
        val = readl(amd_manager->acp_mmio + ACP_SW_PAD_KEEPER_EN);
        val |= amd_manager->reg_mask->sw_pad_enable_mask;
        writel(val, amd_manager->acp_mmio + ACP_SW_PAD_KEEPER_EN);
        usleep_range(1000, 1500);

        sw_pad_pulldown_val = readl(amd_manager->acp_mmio + ACP_PAD_PULLDOWN_CTRL);
        sw_pad_pulldown_val &= amd_manager->reg_mask->sw_pad_pulldown_mask;
        writel(sw_pad_pulldown_val, amd_manager->acp_mmio + ACP_PAD_PULLDOWN_CTRL);
        mutex_unlock(amd_manager->acp_sdw_lock);
}

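/*
 * Bring the manager to a known state: enable it, request and complete a
 * SoundWire bus reset, clear the reset request and disable the manager again
 * before interrupts are enabled.
 */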
static int amd_init_sdw_manager(struct amd_sdw_manager *amd_manager)
{
        u32 val;
        int ret;

        writel(AMD_SDW_ENABLE, amd_manager->mmio + ACP_SW_EN);
        ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, val, ACP_DELAY_US,
                                 AMD_SDW_TIMEOUT);
        if (ret)
                return ret;

        /* SoundWire manager bus reset */
        writel(AMD_SDW_BUS_RESET_REQ, amd_manager->mmio + ACP_SW_BUS_RESET_CTRL);
        ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_BUS_RESET_CTRL, val,
                                 (val & AMD_SDW_BUS_RESET_DONE), ACP_DELAY_US, AMD_SDW_TIMEOUT);
        if (ret)
                return ret;

        writel(AMD_SDW_BUS_RESET_CLEAR_REQ, amd_manager->mmio + ACP_SW_BUS_RESET_CTRL);
        ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_BUS_RESET_CTRL, val, !val,
                                 ACP_DELAY_US, AMD_SDW_TIMEOUT);
        if (ret) {
                dev_err(amd_manager->dev, "Failed to reset SoundWire manager instance%d\n",
                        amd_manager->instance);
                return ret;
        }

        writel(AMD_SDW_DISABLE, amd_manager->mmio + ACP_SW_EN);
        return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, !val, ACP_DELAY_US,
                                  AMD_SDW_TIMEOUT);
}

static int amd_enable_sdw_manager(struct amd_sdw_manager *amd_manager)
{
        u32 val;

        writel(AMD_SDW_ENABLE, amd_manager->mmio + ACP_SW_EN);
        return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, val, ACP_DELAY_US,
                                  AMD_SDW_TIMEOUT);
}

static int amd_disable_sdw_manager(struct amd_sdw_manager *amd_manager)
{
        u32 val;

        writel(AMD_SDW_DISABLE, amd_manager->mmio + ACP_SW_EN);
        /*
         * After invoking the manager disable sequence, check whether the
         * manager has executed the clock stop sequence. In that case, skip
         * polling the manager enable status register.
         */
        val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
        if (val)
                return 0;
        return readl_poll_timeout(amd_manager->mmio + ACP_SW_EN_STATUS, val, !val, ACP_DELAY_US,
                                  AMD_SDW_TIMEOUT);
}

static void amd_enable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
{
        struct sdw_manager_reg_mask *reg_mask = amd_manager->reg_mask;
        u32 val;

        mutex_lock(amd_manager->acp_sdw_lock);
        val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
        val |= reg_mask->acp_sdw_intr_mask;
        writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
        mutex_unlock(amd_manager->acp_sdw_lock);

        writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
                       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
        writel(AMD_SDW_IRQ_MASK_8TO11, amd_manager->mmio +
                       ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
        writel(AMD_SDW_IRQ_ERROR_MASK, amd_manager->mmio + ACP_SW_ERROR_INTR_MASK);
}

static void amd_disable_sdw_interrupts(struct amd_sdw_manager *amd_manager)
{
        struct sdw_manager_reg_mask *reg_mask = amd_manager->reg_mask;
        u32 val;

        mutex_lock(amd_manager->acp_sdw_lock);
        val = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
        val &= ~reg_mask->acp_sdw_intr_mask;
        writel(val, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(amd_manager->instance));
        mutex_unlock(amd_manager->acp_sdw_lock);

        writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
        writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
        writel(0x00, amd_manager->mmio + ACP_SW_ERROR_INTR_MASK);
}

static void amd_sdw_set_frameshape(struct amd_sdw_manager *amd_manager)
{
        u32 frame_size;

        frame_size = (amd_manager->rows_index << 3) | amd_manager->cols_index;
        writel(frame_size, amd_manager->mmio + ACP_SW_FRAMESIZE);
}

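/*
 * Build the two 32-bit immediate command words: the upper word carries the
 * device number, the command type derived from msg->flags and the high byte
 * of the register address; the lower word carries the low address byte and,
 * for writes, the data byte taken from msg->buf[cmd_offset].
 */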
static void amd_sdw_ctl_word_prep(u32 *lower_word, u32 *upper_word, struct sdw_msg *msg,
                                  int cmd_offset)
{
        u32 upper_data;
        u32 lower_data = 0;
        u16 addr;
        u8 upper_addr, lower_addr;
        u8 data = 0;

        addr = msg->addr + cmd_offset;
        upper_addr = (addr & 0xFF00) >> 8;
        lower_addr = addr & 0xFF;

        if (msg->flags == SDW_MSG_FLAG_WRITE)
                data = msg->buf[cmd_offset];

        upper_data = FIELD_PREP(AMD_SDW_MCP_CMD_DEV_ADDR, msg->dev_num);
        upper_data |= FIELD_PREP(AMD_SDW_MCP_CMD_COMMAND, msg->flags + 2);
        upper_data |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_ADDR_HIGH, upper_addr);
        lower_data |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_ADDR_LOW, lower_addr);
        lower_data |= FIELD_PREP(AMD_SDW_MCP_CMD_REG_DATA, data);

        *upper_word = upper_data;
        *lower_word = lower_data;
}

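/*
 * Issue an immediate command: wait for the previous command to complete,
 * write the upper and lower command words, poll for a valid response and
 * return the 64-bit response. On a poll timeout the negative errno from
 * readl_poll_timeout() is returned (sign-extended into the u64).
 */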
static u64 amd_sdw_send_cmd_get_resp(struct amd_sdw_manager *amd_manager, u32 lower_data,
                                     u32 upper_data)
{
        u64 resp;
        u32 lower_resp, upper_resp;
        u32 sts;
        int ret;

        ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
                                 !(sts & AMD_SDW_IMM_CMD_BUSY), ACP_DELAY_US, AMD_SDW_TIMEOUT);
        if (ret) {
                dev_err(amd_manager->dev, "SDW%x previous cmd status clear failed\n",
                        amd_manager->instance);
                return ret;
        }

        if (sts & AMD_SDW_IMM_RES_VALID) {
                dev_err(amd_manager->dev, "SDW%x manager is in bad state\n", amd_manager->instance);
                writel(0x00, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
        }
        writel(upper_data, amd_manager->mmio + ACP_SW_IMM_CMD_UPPER_WORD);
        writel(lower_data, amd_manager->mmio + ACP_SW_IMM_CMD_LOWER_QWORD);

        ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
                                 (sts & AMD_SDW_IMM_RES_VALID), ACP_DELAY_US, AMD_SDW_TIMEOUT);
        if (ret) {
                dev_err(amd_manager->dev, "SDW%x cmd response timeout occurred\n",
                        amd_manager->instance);
                return ret;
        }
        upper_resp = readl(amd_manager->mmio + ACP_SW_IMM_RESP_UPPER_WORD);
        lower_resp = readl(amd_manager->mmio + ACP_SW_IMM_RESP_LOWER_QWORD);

        writel(AMD_SDW_IMM_RES_VALID, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
        ret = readl_poll_timeout(amd_manager->mmio + ACP_SW_IMM_CMD_STS, sts,
                                 !(sts & AMD_SDW_IMM_RES_VALID), ACP_DELAY_US, AMD_SDW_TIMEOUT);
        if (ret) {
                dev_err(amd_manager->dev, "SDW%x cmd status retry failed\n",
                        amd_manager->instance);
                return ret;
        }
        resp = upper_resp;
        resp = (resp << 32) | lower_resp;
        return resp;
}

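/*
 * Program SCP_AddrPage1 and SCP_AddrPage2 on the peripheral so that a
 * subsequent paged register access reaches the intended address.
 */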
static enum sdw_command_response
amd_program_scp_addr(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg)
{
        struct sdw_msg scp_msg = {0};
        u64 response_buf[2] = {0};
        u32 upper_data = 0, lower_data = 0;
        int index;

        scp_msg.dev_num = msg->dev_num;
        scp_msg.addr = SDW_SCP_ADDRPAGE1;
        scp_msg.buf = &msg->addr_page1;
        scp_msg.flags = SDW_MSG_FLAG_WRITE;
        amd_sdw_ctl_word_prep(&lower_data, &upper_data, &scp_msg, 0);
        response_buf[0] = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);
        scp_msg.addr = SDW_SCP_ADDRPAGE2;
        scp_msg.buf = &msg->addr_page2;
        amd_sdw_ctl_word_prep(&lower_data, &upper_data, &scp_msg, 0);
        response_buf[1] = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);

        for (index = 0; index < 2; index++) {
                if (response_buf[index] == -ETIMEDOUT) {
                        dev_err_ratelimited(amd_manager->dev,
                                            "SCP_addrpage command timeout for Slave %d\n",
                                            msg->dev_num);
                        return SDW_CMD_TIMEOUT;
                } else if (!(response_buf[index] & AMD_SDW_MCP_RESP_ACK)) {
                        if (response_buf[index] & AMD_SDW_MCP_RESP_NACK) {
                                dev_err_ratelimited(amd_manager->dev,
                                                    "SCP_addrpage NACKed for Slave %d\n",
                                                    msg->dev_num);
                                return SDW_CMD_FAIL;
                        }
                        dev_dbg_ratelimited(amd_manager->dev, "SCP_addrpage ignored for Slave %d\n",
                                            msg->dev_num);
                        return SDW_CMD_IGNORED;
                }
        }
        return SDW_CMD_OK;
}

static int amd_prep_msg(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg)
{
        int ret;

        if (msg->page) {
                ret = amd_program_scp_addr(amd_manager, msg);
                if (ret) {
                        msg->len = 0;
                        return ret;
                }
        }
        switch (msg->flags) {
        case SDW_MSG_FLAG_READ:
        case SDW_MSG_FLAG_WRITE:
                break;
        default:
                dev_err(amd_manager->dev, "Invalid msg cmd: %d\n", msg->flags);
                return -EINVAL;
        }
        return 0;
}

static enum sdw_command_response amd_sdw_fill_msg_resp(struct amd_sdw_manager *amd_manager,
                                                       struct sdw_msg *msg, u64 response,
                                                       int offset)
{
        if (response & AMD_SDW_MCP_RESP_ACK) {
                if (msg->flags == SDW_MSG_FLAG_READ)
                        msg->buf[offset] = FIELD_GET(AMD_SDW_MCP_RESP_RDATA, response);
        } else {
                if (response == -ETIMEDOUT) {
                        dev_err_ratelimited(amd_manager->dev, "command timeout for Slave %d\n",
                                            msg->dev_num);
                        return SDW_CMD_TIMEOUT;
                } else if (response & AMD_SDW_MCP_RESP_NACK) {
                        dev_err_ratelimited(amd_manager->dev,
                                            "command response NACK received for Slave %d\n",
                                            msg->dev_num);
                        return SDW_CMD_FAIL;
                }
                dev_err_ratelimited(amd_manager->dev, "command is ignored for Slave %d\n",
                                    msg->dev_num);
                return SDW_CMD_IGNORED;
        }
        return SDW_CMD_OK;
}

static unsigned int _amd_sdw_xfer_msg(struct amd_sdw_manager *amd_manager, struct sdw_msg *msg,
                                      int cmd_offset)
{
        u64 response;
        u32 upper_data = 0, lower_data = 0;

        amd_sdw_ctl_word_prep(&lower_data, &upper_data, msg, cmd_offset);
        response = amd_sdw_send_cmd_get_resp(amd_manager, lower_data, upper_data);
        return amd_sdw_fill_msg_resp(amd_manager, msg, response, cmd_offset);
}

static enum sdw_command_response amd_sdw_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
{
        struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
        int ret, i;

        ret = amd_prep_msg(amd_manager, msg);
        if (ret)
                return SDW_CMD_FAIL_OTHER;
        for (i = 0; i < msg->len; i++) {
                ret = _amd_sdw_xfer_msg(amd_manager, msg, i);
                if (ret)
                        return ret;
        }
        return SDW_CMD_OK;
}

static void amd_sdw_fill_slave_status(struct amd_sdw_manager *amd_manager, u16 index, u32 status)
{
        switch (status) {
        case SDW_SLAVE_ATTACHED:
        case SDW_SLAVE_UNATTACHED:
        case SDW_SLAVE_ALERT:
                amd_manager->status[index] = status;
                break;
        default:
                amd_manager->status[index] = SDW_SLAVE_RESERVED;
                break;
        }
}

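/*
 * Decode the PING response: the per-device status fields are combined into a
 * single bitmap and each device's 2-bit status is extracted and recorded in
 * amd_manager->status[].
 */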
static void amd_sdw_process_ping_status(u64 response, struct amd_sdw_manager *amd_manager)
{
        u64 slave_stat;
        u32 val;
        u16 dev_index;

        /* slave status response */
        slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
        slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
        dev_dbg(amd_manager->dev, "slave_stat:0x%llx\n", slave_stat);
        for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
                val = (slave_stat >> (dev_index * 2)) & AMD_SDW_MCP_SLAVE_STATUS_MASK;
                dev_dbg(amd_manager->dev, "val:0x%x\n", val);
                amd_sdw_fill_slave_status(amd_manager, dev_index, val);
        }
}

static void amd_sdw_read_and_process_ping_status(struct amd_sdw_manager *amd_manager)
{
        u64 response;

        mutex_lock(&amd_manager->bus.msg_lock);
        response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
        mutex_unlock(&amd_manager->bus.msg_lock);
        amd_sdw_process_ping_status(response, amd_manager);
}

static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
{
        struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
        u64 response;
        u32 slave_stat;

        response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
        /* slave status from ping response */
        slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
        slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
        dev_dbg(amd_manager->dev, "slave_stat:0x%x\n", slave_stat);
        return slave_stat;
}

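/*
 * Compute bus transport parameters: the payload occupies the full frame after
 * the control column (hstart = 1, hstop = col - 1), the sample interval is
 * derived from the current double rate frequency and the stream rate, and
 * each port gets a fixed block offset of (port number * 64) + 1.
 */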
static int amd_sdw_compute_params(struct sdw_bus *bus)
{
        struct sdw_transport_data t_data = {0};
        struct sdw_master_runtime *m_rt;
        struct sdw_port_runtime *p_rt;
        struct sdw_bus_params *b_params = &bus->params;
        int port_bo, hstart, hstop, sample_int;
        unsigned int rate, bps;

        port_bo = 0;
        hstart = 1;
        hstop = bus->params.col - 1;
        t_data.hstop = hstop;
        t_data.hstart = hstart;

        list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
                rate = m_rt->stream->params.rate;
                bps = m_rt->stream->params.bps;
                sample_int = (bus->params.curr_dr_freq / rate);
                list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
                        port_bo = (p_rt->num * 64) + 1;
                        dev_dbg(bus->dev, "p_rt->num=%d hstart=%d hstop=%d port_bo=%d\n",
                                p_rt->num, hstart, hstop, port_bo);
                        sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
                                              false, SDW_BLK_GRP_CNT_1, sample_int,
                                              port_bo, port_bo >> 8, hstart, hstop,
                                              SDW_BLK_PKG_PER_PORT, 0x0);

                        sdw_fill_port_params(&p_rt->port_params,
                                             p_rt->num, bps,
                                             SDW_PORT_FLOW_MODE_ISOCH,
                                             b_params->m_data_mode);
                        t_data.hstart = hstart;
                        t_data.hstop = hstop;
                        t_data.block_offset = port_bo;
                        t_data.sub_block_offset = 0;
                }
                sdw_compute_slave_ports(m_rt, &t_data);
        }
        return 0;
}

static int amd_sdw_port_params(struct sdw_bus *bus, struct sdw_port_params *p_params,
                               unsigned int bank)
{
        struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
        u32 frame_fmt_reg, dpn_frame_fmt;

        dev_dbg(amd_manager->dev, "p_params->num:0x%x\n", p_params->num);
        switch (amd_manager->instance) {
        case ACP_SDW0:
                frame_fmt_reg = sdw0_manager_dp_reg[p_params->num].frame_fmt_reg;
                break;
        case ACP_SDW1:
                frame_fmt_reg = sdw1_manager_dp_reg[p_params->num].frame_fmt_reg;
                break;
        default:
                return -EINVAL;
        }

        dpn_frame_fmt = readl(amd_manager->mmio + frame_fmt_reg);
        u32p_replace_bits(&dpn_frame_fmt, p_params->flow_mode, AMD_DPN_FRAME_FMT_PFM);
        u32p_replace_bits(&dpn_frame_fmt, p_params->data_mode, AMD_DPN_FRAME_FMT_PDM);
        u32p_replace_bits(&dpn_frame_fmt, p_params->bps - 1, AMD_DPN_FRAME_FMT_WORD_LEN);
        writel(dpn_frame_fmt, amd_manager->mmio + frame_fmt_reg);
        return 0;
}

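/*
 * Program the per-port transport parameters: block packing/grouping in the
 * frame format register, sample interval, hstart/hstop, offsets and lane
 * control, using the register set of the selected manager instance.
 */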
static int amd_sdw_transport_params(struct sdw_bus *bus,
                                    struct sdw_transport_params *params,
                                    enum sdw_reg_bank bank)
{
        struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
        u32 dpn_frame_fmt;
        u32 dpn_sampleinterval;
        u32 dpn_hctrl;
        u32 dpn_offsetctrl;
        u32 dpn_lanectrl;
        u32 frame_fmt_reg, sample_int_reg, hctrl_dp0_reg;
        u32 offset_reg, lane_ctrl_ch_en_reg;

        switch (amd_manager->instance) {
        case ACP_SDW0:
                frame_fmt_reg = sdw0_manager_dp_reg[params->port_num].frame_fmt_reg;
                sample_int_reg = sdw0_manager_dp_reg[params->port_num].sample_int_reg;
                hctrl_dp0_reg = sdw0_manager_dp_reg[params->port_num].hctrl_dp0_reg;
                offset_reg = sdw0_manager_dp_reg[params->port_num].offset_reg;
                lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg;
                break;
        case ACP_SDW1:
                frame_fmt_reg = sdw1_manager_dp_reg[params->port_num].frame_fmt_reg;
                sample_int_reg = sdw1_manager_dp_reg[params->port_num].sample_int_reg;
                hctrl_dp0_reg = sdw1_manager_dp_reg[params->port_num].hctrl_dp0_reg;
                offset_reg = sdw1_manager_dp_reg[params->port_num].offset_reg;
                lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[params->port_num].lane_ctrl_ch_en_reg;
                break;
        default:
                return -EINVAL;
        }
        writel(AMD_SDW_SSP_COUNTER_VAL, amd_manager->mmio + ACP_SW_SSP_COUNTER);

        dpn_frame_fmt = readl(amd_manager->mmio + frame_fmt_reg);
        u32p_replace_bits(&dpn_frame_fmt, params->blk_pkg_mode, AMD_DPN_FRAME_FMT_BLK_PKG_MODE);
        u32p_replace_bits(&dpn_frame_fmt, params->blk_grp_ctrl, AMD_DPN_FRAME_FMT_BLK_GRP_CTRL);
        u32p_replace_bits(&dpn_frame_fmt, SDW_STREAM_PCM, AMD_DPN_FRAME_FMT_PCM_OR_PDM);
        writel(dpn_frame_fmt, amd_manager->mmio + frame_fmt_reg);

        dpn_sampleinterval = params->sample_interval - 1;
        writel(dpn_sampleinterval, amd_manager->mmio + sample_int_reg);

        dpn_hctrl = FIELD_PREP(AMD_DPN_HCTRL_HSTOP, params->hstop);
        dpn_hctrl |= FIELD_PREP(AMD_DPN_HCTRL_HSTART, params->hstart);
        writel(dpn_hctrl, amd_manager->mmio + hctrl_dp0_reg);

        dpn_offsetctrl = FIELD_PREP(AMD_DPN_OFFSET_CTRL_1, params->offset1);
        dpn_offsetctrl |= FIELD_PREP(AMD_DPN_OFFSET_CTRL_2, params->offset2);
        writel(dpn_offsetctrl, amd_manager->mmio + offset_reg);

        /*
         * lane_ctrl_ch_en_reg will be used to program lane_ctrl and ch_mask
         * parameters.
         */
        dpn_lanectrl = readl(amd_manager->mmio + lane_ctrl_ch_en_reg);
        u32p_replace_bits(&dpn_lanectrl, params->lane_ctrl, AMD_DPN_CH_EN_LCTRL);
        writel(dpn_lanectrl, amd_manager->mmio + lane_ctrl_ch_en_reg);
        return 0;
}

static int amd_sdw_port_enable(struct sdw_bus *bus,
                               struct sdw_enable_ch *enable_ch,
                               unsigned int bank)
{
        struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
        u32 dpn_ch_enable;
        u32 lane_ctrl_ch_en_reg;

        switch (amd_manager->instance) {
        case ACP_SDW0:
                lane_ctrl_ch_en_reg = sdw0_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg;
                break;
        case ACP_SDW1:
                lane_ctrl_ch_en_reg = sdw1_manager_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg;
                break;
        default:
                return -EINVAL;
        }

        /*
         * lane_ctrl_ch_en_reg will be used to program lane_ctrl and ch_mask
         * parameters.
         */
        dpn_ch_enable = readl(amd_manager->mmio + lane_ctrl_ch_en_reg);
        u32p_replace_bits(&dpn_ch_enable, enable_ch->ch_mask, AMD_DPN_CH_EN_CHMASK);
        if (enable_ch->enable)
                writel(dpn_ch_enable, amd_manager->mmio + lane_ctrl_ch_en_reg);
        else
                writel(0, amd_manager->mmio + lane_ctrl_ch_en_reg);
        return 0;
}

static int sdw_master_read_amd_prop(struct sdw_bus *bus)
{
        struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
        struct fwnode_handle *link;
        struct sdw_master_prop *prop;
        u32 quirk_mask = 0;
        u32 wake_en_mask = 0;
        u32 power_mode_mask = 0;
        char name[32];

        prop = &bus->prop;
        /* Find manager handle */
        snprintf(name, sizeof(name), "mipi-sdw-link-%d-subproperties", bus->link_id);
        link = device_get_named_child_node(bus->dev, name);
        if (!link) {
                dev_err(bus->dev, "Manager node %s not found\n", name);
                return -EIO;
        }
        fwnode_property_read_u32(link, "amd-sdw-enable", &quirk_mask);
        if (!(quirk_mask & AMD_SDW_QUIRK_MASK_BUS_ENABLE))
                prop->hw_disabled = true;
        prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH |
                       SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;

        fwnode_property_read_u32(link, "amd-sdw-wakeup-enable", &wake_en_mask);
        amd_manager->wake_en_mask = wake_en_mask;
        fwnode_property_read_u32(link, "amd-sdw-power-mode", &power_mode_mask);
        amd_manager->power_mode_mask = power_mode_mask;
        return 0;
}

static int amd_prop_read(struct sdw_bus *bus)
{
        sdw_master_read_prop(bus);
        sdw_master_read_amd_prop(bus);
        return 0;
}

static const struct sdw_master_port_ops amd_sdw_port_ops = {
        .dpn_set_port_params = amd_sdw_port_params,
        .dpn_set_port_transport_params = amd_sdw_transport_params,
        .dpn_port_enable_ch = amd_sdw_port_enable,
};

static const struct sdw_master_ops amd_sdw_ops = {
        .read_prop = amd_prop_read,
        .xfer_msg = amd_sdw_xfer_msg,
        .read_ping_status = amd_sdw_read_ping_status,
};

static int amd_sdw_hw_params(struct snd_pcm_substream *substream,
                             struct snd_pcm_hw_params *params,
                             struct snd_soc_dai *dai)
{
        struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
        struct sdw_amd_dai_runtime *dai_runtime;
        struct sdw_stream_config sconfig;
        struct sdw_port_config *pconfig;
        int ch, dir;
        int ret;

        dai_runtime = amd_manager->dai_runtime_array[dai->id];
        if (!dai_runtime)
                return -EIO;

        ch = params_channels(params);
        if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
                dir = SDW_DATA_DIR_RX;
        else
                dir = SDW_DATA_DIR_TX;
        dev_dbg(amd_manager->dev, "dir:%d dai->id:0x%x\n", dir, dai->id);

        sconfig.direction = dir;
        sconfig.ch_count = ch;
        sconfig.frame_rate = params_rate(params);
        sconfig.type = dai_runtime->stream_type;

        sconfig.bps = snd_pcm_format_width(params_format(params));

        /* Port configuration */
        pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
        if (!pconfig) {
                ret = -ENOMEM;
                goto error;
        }

        pconfig->num = dai->id;
        pconfig->ch_mask = (1 << ch) - 1;
        ret = sdw_stream_add_master(&amd_manager->bus, &sconfig,
                                    pconfig, 1, dai_runtime->stream);
        if (ret)
                dev_err(amd_manager->dev, "add manager to stream failed:%d\n", ret);

        kfree(pconfig);
error:
        return ret;
}

static int amd_sdw_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
        struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
        struct sdw_amd_dai_runtime *dai_runtime;
        int ret;

        dai_runtime = amd_manager->dai_runtime_array[dai->id];
        if (!dai_runtime)
                return -EIO;

        ret = sdw_stream_remove_master(&amd_manager->bus, dai_runtime->stream);
        if (ret < 0)
                dev_err(dai->dev, "remove manager from stream %s failed: %d\n",
                        dai_runtime->stream->name, ret);
        return ret;
}

static int amd_set_sdw_stream(struct snd_soc_dai *dai, void *stream, int direction)
{
        struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
        struct sdw_amd_dai_runtime *dai_runtime;

        dai_runtime = amd_manager->dai_runtime_array[dai->id];
        if (stream) {
                /* first paranoia check */
                if (dai_runtime) {
                        dev_err(dai->dev, "dai_runtime already allocated for dai %s\n", dai->name);
                        return -EINVAL;
                }

                /* allocate and set dai_runtime info */
                dai_runtime = kzalloc(sizeof(*dai_runtime), GFP_KERNEL);
                if (!dai_runtime)
                        return -ENOMEM;

                dai_runtime->stream_type = SDW_STREAM_PCM;
                dai_runtime->bus = &amd_manager->bus;
                dai_runtime->stream = stream;
                amd_manager->dai_runtime_array[dai->id] = dai_runtime;
        } else {
                /* second paranoia check */
                if (!dai_runtime) {
                        dev_err(dai->dev, "dai_runtime not allocated for dai %s\n", dai->name);
                        return -EINVAL;
                }

                /* for NULL stream we release allocated dai_runtime */
                kfree(dai_runtime);
                amd_manager->dai_runtime_array[dai->id] = NULL;
        }
        return 0;
}

static int amd_pcm_set_sdw_stream(struct snd_soc_dai *dai, void *stream, int direction)
{
        return amd_set_sdw_stream(dai, stream, direction);
}

static void *amd_get_sdw_stream(struct snd_soc_dai *dai, int direction)
{
        struct amd_sdw_manager *amd_manager = snd_soc_dai_get_drvdata(dai);
        struct sdw_amd_dai_runtime *dai_runtime;

        dai_runtime = amd_manager->dai_runtime_array[dai->id];
        if (!dai_runtime)
                return ERR_PTR(-EINVAL);

        return dai_runtime->stream;
}

static const struct snd_soc_dai_ops amd_sdw_dai_ops = {
        .hw_params = amd_sdw_hw_params,
        .hw_free = amd_sdw_hw_free,
        .set_stream = amd_pcm_set_sdw_stream,
        .get_stream = amd_get_sdw_stream,
};

static const struct snd_soc_component_driver amd_sdw_dai_component = {
        .name = "soundwire",
};

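/*
 * Register one CPU DAI per manager port: the first num_dout_ports DAIs are
 * playback (Tx) and the remaining num_din_ports DAIs are capture (Rx).
 */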
static int amd_sdw_register_dais(struct amd_sdw_manager *amd_manager)
{
        struct sdw_amd_dai_runtime **dai_runtime_array;
        struct snd_soc_dai_driver *dais;
        struct snd_soc_pcm_stream *stream;
        struct device *dev;
        int i, num_dais;

        dev = amd_manager->dev;
        num_dais = amd_manager->num_dout_ports + amd_manager->num_din_ports;
        dais = devm_kcalloc(dev, num_dais, sizeof(*dais), GFP_KERNEL);
        if (!dais)
                return -ENOMEM;

        dai_runtime_array = devm_kcalloc(dev, num_dais,
                                         sizeof(struct sdw_amd_dai_runtime *),
                                         GFP_KERNEL);
        if (!dai_runtime_array)
                return -ENOMEM;
        amd_manager->dai_runtime_array = dai_runtime_array;
        for (i = 0; i < num_dais; i++) {
                dais[i].name = devm_kasprintf(dev, GFP_KERNEL, "SDW%d Pin%d", amd_manager->instance,
                                              i);
                if (!dais[i].name)
                        return -ENOMEM;
                if (i < amd_manager->num_dout_ports)
                        stream = &dais[i].playback;
                else
                        stream = &dais[i].capture;

                stream->channels_min = 2;
                stream->channels_max = 2;
                stream->rates = SNDRV_PCM_RATE_48000;
                stream->formats = SNDRV_PCM_FMTBIT_S16_LE;

                dais[i].ops = &amd_sdw_dai_ops;
                dais[i].id = i;
        }

        return devm_snd_soc_register_component(dev, &amd_sdw_dai_component,
                                               dais, num_dais);
}

static void amd_sdw_update_slave_status_work(struct work_struct *work)
{
        struct amd_sdw_manager *amd_manager =
                container_of(work, struct amd_sdw_manager, amd_sdw_work);
        int retry_count = 0;

        if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
                writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
                writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
        }

update_status:
        sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);
        /*
         * During the peripheral enumeration sequence, the SoundWire manager
         * interrupts are masked. Once device number programming is done for
         * all peripherals, the interrupts are unmasked again. Read the
         * peripheral device status from the ping command and process the
         * response. This sequence ensures that all peripheral devices are
         * enumerated and initialized properly.
         */
        if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
                if (retry_count++ < SDW_MAX_DEVICES) {
                        writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
                               ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
                        writel(AMD_SDW_IRQ_MASK_8TO11, amd_manager->mmio +
                               ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
                        amd_sdw_read_and_process_ping_status(amd_manager);
                        goto update_status;
                } else {
                        dev_err_ratelimited(amd_manager->dev,
                                            "Device0 detected after %d iterations\n",
                                            retry_count);
                }
        }
}

static void amd_sdw_update_slave_status(u32 status_change_0to7, u32 status_change_8to11,
                                        struct amd_sdw_manager *amd_manager)
{
        u64 slave_stat;
        u32 val;
        int dev_index;

        if (status_change_0to7 == AMD_SDW_SLAVE_0_ATTACHED)
                memset(amd_manager->status, 0, sizeof(amd_manager->status));
        slave_stat = status_change_0to7;
        slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STATUS_8TO_11, status_change_8to11) << 32;
        dev_dbg(amd_manager->dev, "status_change_0to7:0x%x status_change_8to11:0x%x\n",
                status_change_0to7, status_change_8to11);
        if (slave_stat) {
                for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
                        if (slave_stat & AMD_SDW_MCP_SLAVE_STATUS_VALID_MASK(dev_index)) {
                                val = (slave_stat >> AMD_SDW_MCP_SLAVE_STAT_SHIFT_MASK(dev_index)) &
                                      AMD_SDW_MCP_SLAVE_STATUS_MASK;
                                amd_sdw_fill_slave_status(amd_manager, dev_index, val);
                        }
                }
        }
}

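/*
 * Deferred interrupt handler: read both state-change status registers,
 * handle a peripheral ping request or an updated peripheral status, kick the
 * status update work and then clear the status registers.
 */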
static void amd_sdw_irq_thread(struct work_struct *work)
{
        struct amd_sdw_manager *amd_manager =
                        container_of(work, struct amd_sdw_manager, amd_sdw_irq_thread);
        u32 status_change_8to11;
        u32 status_change_0to7;

        status_change_8to11 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
        status_change_0to7 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
        dev_dbg(amd_manager->dev, "[SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
                amd_manager->instance, status_change_0to7, status_change_8to11);
        if (status_change_8to11 & AMD_SDW_PREQ_INTR_STAT) {
                amd_sdw_read_and_process_ping_status(amd_manager);
        } else {
                /* Check for the updated status on peripheral device */
                amd_sdw_update_slave_status(status_change_0to7, status_change_8to11, amd_manager);
        }
        if (status_change_8to11 || status_change_0to7)
                schedule_work(&amd_manager->amd_sdw_work);
        writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
        writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
}

static void amd_sdw_probe_work(struct work_struct *work)
{
        struct amd_sdw_manager *amd_manager = container_of(work, struct amd_sdw_manager,
                                                           probe_work);
        struct sdw_master_prop *prop;
        int ret;

        prop = &amd_manager->bus.prop;
        if (!prop->hw_disabled) {
                amd_enable_sdw_pads(amd_manager);
                ret = amd_init_sdw_manager(amd_manager);
                if (ret)
                        return;
                amd_enable_sdw_interrupts(amd_manager);
                ret = amd_enable_sdw_manager(amd_manager);
                if (ret)
                        return;
                amd_sdw_set_frameshape(amd_manager);
        }
}

static int amd_sdw_manager_probe(struct platform_device *pdev)
{
        const struct acp_sdw_pdata *pdata = pdev->dev.platform_data;
        struct resource *res;
        struct device *dev = &pdev->dev;
        struct sdw_master_prop *prop;
        struct sdw_bus_params *params;
        struct amd_sdw_manager *amd_manager;
        int ret;

        amd_manager = devm_kzalloc(dev, sizeof(struct amd_sdw_manager), GFP_KERNEL);
        if (!amd_manager)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENOMEM;

        amd_manager->acp_mmio = devm_ioremap(dev, res->start, resource_size(res));
        if (!amd_manager->acp_mmio) {
                dev_err(dev, "mmio not found\n");
                return -ENOMEM;
        }
        amd_manager->instance = pdata->instance;
        amd_manager->mmio = amd_manager->acp_mmio +
                            (amd_manager->instance * SDW_MANAGER_REG_OFFSET);
        amd_manager->acp_sdw_lock = pdata->acp_sdw_lock;
        amd_manager->cols_index = sdw_find_col_index(AMD_SDW_DEFAULT_COLUMNS);
        amd_manager->rows_index = sdw_find_row_index(AMD_SDW_DEFAULT_ROWS);
        amd_manager->dev = dev;
        amd_manager->bus.ops = &amd_sdw_ops;
        amd_manager->bus.port_ops = &amd_sdw_port_ops;
        amd_manager->bus.compute_params = &amd_sdw_compute_params;
        amd_manager->bus.clk_stop_timeout = 200;
        amd_manager->bus.link_id = amd_manager->instance;

        switch (amd_manager->instance) {
        case ACP_SDW0:
                amd_manager->num_dout_ports = AMD_SDW0_MAX_TX_PORTS;
                amd_manager->num_din_ports = AMD_SDW0_MAX_RX_PORTS;
                break;
        case ACP_SDW1:
                amd_manager->num_dout_ports = AMD_SDW1_MAX_TX_PORTS;
                amd_manager->num_din_ports = AMD_SDW1_MAX_RX_PORTS;
                break;
        default:
                return -EINVAL;
        }

        amd_manager->reg_mask = &sdw_manager_reg_mask_array[amd_manager->instance];
        params = &amd_manager->bus.params;
        params->max_dr_freq = AMD_SDW_DEFAULT_CLK_FREQ * 2;
        params->curr_dr_freq = AMD_SDW_DEFAULT_CLK_FREQ * 2;
        params->col = AMD_SDW_DEFAULT_COLUMNS;
        params->row = AMD_SDW_DEFAULT_ROWS;
        prop = &amd_manager->bus.prop;
        prop->clk_freq = &amd_sdw_freq_tbl[0];
        prop->mclk_freq = AMD_SDW_BUS_BASE_FREQ;

        ret = sdw_bus_master_add(&amd_manager->bus, dev, dev->fwnode);
        if (ret) {
                dev_err(dev, "Failed to register SoundWire manager(%d)\n", ret);
                return ret;
        }
        ret = amd_sdw_register_dais(amd_manager);
        if (ret) {
                dev_err(dev, "CPU DAI registration failed\n");
                sdw_bus_master_delete(&amd_manager->bus);
                return ret;
        }
        dev_set_drvdata(dev, amd_manager);
        INIT_WORK(&amd_manager->amd_sdw_irq_thread, amd_sdw_irq_thread);
        INIT_WORK(&amd_manager->amd_sdw_work, amd_sdw_update_slave_status_work);
        INIT_WORK(&amd_manager->probe_work, amd_sdw_probe_work);
        /*
         * Instead of a lengthy probe sequence, defer the remaining manager
         * initialization to a workqueue.
         */
        schedule_work(&amd_manager->probe_work);
        return 0;
}

static int amd_sdw_manager_remove(struct platform_device *pdev)
{
        struct amd_sdw_manager *amd_manager = dev_get_drvdata(&pdev->dev);

        cancel_work_sync(&amd_manager->probe_work);
        amd_disable_sdw_interrupts(amd_manager);
        sdw_bus_master_delete(&amd_manager->bus);
        return amd_disable_sdw_manager(amd_manager);
}

static struct platform_driver amd_sdw_driver = {
        .probe  = &amd_sdw_manager_probe,
        .remove = &amd_sdw_manager_remove,
        .driver = {
                .name   = "amd_sdw_manager",
        }
};
module_platform_driver(amd_sdw_driver);

MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
MODULE_DESCRIPTION("AMD SoundWire driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);