1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 // Copyright(c) 2015-17 Intel Corporation.
4 #include <linux/acpi.h>
5 #include <linux/delay.h>
6 #include <linux/mod_devicetable.h>
7 #include <linux/pm_runtime.h>
8 #include <linux/soundwire/sdw_registers.h>
9 #include <linux/soundwire/sdw.h>
10 #include <linux/soundwire/sdw_type.h>
11 #include <linux/string_choices.h>
14 #include "sysfs_local.h"
16 static DEFINE_IDA(sdw_bus_ida);
18 static int sdw_get_id(struct sdw_bus *bus)
20 int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);
27 if (bus->controller_id == -1)
28 bus->controller_id = rc;
34 * sdw_bus_master_add() - add a bus Master instance
36 * @parent: parent device
37 * @fwnode: firmware node handle
39 * Initializes the bus instance, reads properties and creates child
42 int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
43 struct fwnode_handle *fwnode)
45 struct sdw_master_prop *prop = NULL;
49 pr_err("SoundWire parent device is not set\n");
53 ret = sdw_get_id(bus);
55 dev_err(parent, "Failed to get bus id\n");
59 ida_init(&bus->slave_ida);
61 ret = sdw_master_device_add(bus, parent, fwnode);
63 dev_err(parent, "Failed to add master device at link %d\n",
69 dev_err(bus->dev, "SoundWire Bus ops are not set\n");
73 if (!bus->compute_params) {
75 "Bandwidth allocation not configured, compute_params no set\n");
80 * Give each bus_lock and msg_lock a unique key so that lockdep won't
81 * trigger a deadlock warning when the locks of several buses are
82 * grabbed during configuration of a multi-bus stream.
84 lockdep_register_key(&bus->msg_lock_key);
85 __mutex_init(&bus->msg_lock, "msg_lock", &bus->msg_lock_key);
87 lockdep_register_key(&bus->bus_lock_key);
88 __mutex_init(&bus->bus_lock, "bus_lock", &bus->bus_lock_key);
90 INIT_LIST_HEAD(&bus->slaves);
91 INIT_LIST_HEAD(&bus->m_rt_list);
94 * Initialize multi_link flag
96 bus->multi_link = false;
97 if (bus->ops->read_prop) {
98 ret = bus->ops->read_prop(bus);
101 "Bus read properties failed:%d\n", ret);
106 sdw_bus_debugfs_init(bus);
109 * Device numbers in SoundWire are 0 through 15. Enumeration device
110 * number (0), Broadcast device number (15), Group numbers (12 and
111 * 13) and Master device number (14) are not used for assignment so
112 * mask these and other higher bits.
115 /* Set higher order bits */
116 *bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);
118 /* Set enumeration device number and broadcast device number */
119 set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
120 set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);
122 /* Set group device numbers and master device number */
123 set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
124 set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
125 set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
127 ret = sdw_irq_create(bus, fwnode);
132 * SDW is an enumerable bus, but devices can be powered off. So,
133 * they won't be able to report as present.
135 * Create Slave devices based on Slaves described in
136 * the respective firmware (ACPI/DT)
138 if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
139 ret = sdw_acpi_find_slaves(bus);
140 else if (IS_ENABLED(CONFIG_OF) && bus->dev->of_node)
141 ret = sdw_of_find_slaves(bus);
143 ret = -ENOTSUPP; /* No ACPI/DT so error out */
146 dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
152 * Initialize clock values based on Master properties. The max
153 * frequency is read from max_clk_freq property. Current assumption
154 * is that the bus will start at highest clock frequency when
157 * Default active bank will be 0 as out of reset the Slaves have
158 * to start with bank 0 (Table 40 of Spec)
161 bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
162 bus->params.curr_dr_freq = bus->params.max_dr_freq;
163 bus->params.curr_bank = SDW_BANK0;
164 bus->params.next_bank = SDW_BANK1;
168 EXPORT_SYMBOL(sdw_bus_master_add);
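/*
 * Worked example (illustrative, assumed values): with a Master property
 * max_clk_freq of 12288000 Hz and SDW_DOUBLE_RATE_FACTOR (2), the
 * initialization above yields max_dr_freq = curr_dr_freq = 24576000 Hz,
 * with bank 0 active and bank 1 selected as the next bank out of reset.
 */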
170 static int sdw_delete_slave(struct device *dev, void *data)
172 struct sdw_slave *slave = dev_to_sdw_dev(dev);
173 struct sdw_bus *bus = slave->bus;
175 pm_runtime_disable(dev);
177 sdw_slave_debugfs_exit(slave);
179 mutex_lock(&bus->bus_lock);
181 if (slave->dev_num) { /* clear dev_num if assigned */
182 clear_bit(slave->dev_num, bus->assigned);
183 if (bus->ops && bus->ops->put_device_num)
184 bus->ops->put_device_num(bus, slave);
186 list_del_init(&slave->node);
187 mutex_unlock(&bus->bus_lock);
189 device_unregister(dev);
194 * sdw_bus_master_delete() - delete the bus master instance
195 * @bus: bus to be deleted
197 * Remove the instance, delete the child devices.
199 void sdw_bus_master_delete(struct sdw_bus *bus)
201 device_for_each_child(bus->dev, NULL, sdw_delete_slave);
205 sdw_master_device_del(bus);
207 sdw_bus_debugfs_exit(bus);
208 lockdep_unregister_key(&bus->bus_lock_key);
209 lockdep_unregister_key(&bus->msg_lock_key);
210 ida_free(&sdw_bus_ida, bus->id);
212 EXPORT_SYMBOL(sdw_bus_master_delete);
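/*
 * Illustrative sketch (not part of the driver, compiled out): how a
 * hypothetical controller driver might pair sdw_bus_master_add() and
 * sdw_bus_master_delete(). The example_master_* names and the empty ops
 * table are made up for the example.
 */
#if 0
static const struct sdw_master_ops example_master_ops;	/* hypothetical ops table */

static struct sdw_bus *example_master_add(struct device *parent,
					  struct fwnode_handle *fwnode)
{
	struct sdw_bus *bus;
	int ret;

	bus = devm_kzalloc(parent, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	bus->ops = &example_master_ops;	/* the core rejects a NULL ops */
	bus->controller_id = -1;	/* let sdw_get_id() allocate one */

	ret = sdw_bus_master_add(bus, parent, fwnode);
	if (ret)
		return ERR_PTR(ret);

	return bus;
}

static void example_master_remove(struct sdw_bus *bus)
{
	/* unregisters all child Slave devices and frees the bus id */
	sdw_bus_master_delete(bus);
}
#endif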
218 static inline int find_response_code(enum sdw_command_response resp)
224 case SDW_CMD_IGNORED:
227 case SDW_CMD_TIMEOUT:
235 static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
237 int retry = bus->prop.err_threshold;
238 enum sdw_command_response resp;
241 for (i = 0; i <= retry; i++) {
242 resp = bus->ops->xfer_msg(bus, msg);
243 ret = find_response_code(resp);
245 /* if cmd is ok or ignored return */
246 if (ret == 0 || ret == -ENODATA)
253 static inline int do_transfer_defer(struct sdw_bus *bus,
256 struct sdw_defer *defer = &bus->defer_msg;
257 int retry = bus->prop.err_threshold;
258 enum sdw_command_response resp;
262 defer->length = msg->len;
263 init_completion(&defer->complete);
265 for (i = 0; i <= retry; i++) {
266 resp = bus->ops->xfer_msg_defer(bus);
267 ret = find_response_code(resp);
268 /* if cmd is ok or ignored return */
269 if (ret == 0 || ret == -ENODATA)
276 static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
280 ret = do_transfer(bus, msg);
281 if (ret != 0 && ret != -ENODATA)
282 dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
284 str_write_read(msg->flags & SDW_MSG_FLAG_WRITE),
285 msg->addr, msg->len);
291 * sdw_transfer() - Synchronous transfer of a message to a SDW Slave device
293 * @msg: SDW message to be xfered
295 int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
299 mutex_lock(&bus->msg_lock);
301 ret = sdw_transfer_unlocked(bus, msg);
303 mutex_unlock(&bus->msg_lock);
309 * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
311 * @sync_delay: Delay before reading status
313 void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
317 if (!bus->ops->read_ping_status)
321 * wait for peripheral to sync if desired. 10-15ms should be more than
322 * enough in most cases.
325 usleep_range(10000, 15000);
327 mutex_lock(&bus->msg_lock);
329 status = bus->ops->read_ping_status(bus);
331 mutex_unlock(&bus->msg_lock);
334 dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
336 dev_dbg(bus->dev, "PING status: %#x\n", status);
338 EXPORT_SYMBOL(sdw_show_ping_status);
341 * sdw_transfer_defer() - Asynchronous transfer of a message to a SDW Slave device
343 * @msg: SDW message to be xfered
345 * Caller needs to hold the msg_lock lock while calling this
347 int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg)
351 if (!bus->ops->xfer_msg_defer)
354 ret = do_transfer_defer(bus, msg);
355 if (ret != 0 && ret != -ENODATA)
356 dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
362 int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
363 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
365 memset(msg, 0, sizeof(*msg));
366 msg->addr = addr; /* addr is 16 bit and truncated here */
368 msg->dev_num = dev_num;
372 if (addr < SDW_REG_NO_PAGE) /* no paging area */
375 if (addr >= SDW_REG_MAX) { /* illegal addr */
376 pr_err("SDW: Invalid address %x passed\n", addr);
380 if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
381 if (slave && !slave->prop.paging_support)
383 /* no need for else as that will fall-through to paging */
386 /* paging mandatory */
387 if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
388 pr_err("SDW: Invalid device for paging :%d\n", dev_num);
393 pr_err("SDW: No slave for paging addr\n");
397 if (!slave->prop.paging_support) {
399 "address %x needs paging but no support\n", addr);
403 msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr);
404 msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr);
405 msg->addr |= BIT(15);
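/*
 * Illustrative sketch (not part of the driver, compiled out): a minimal
 * single-byte read built with sdw_fill_msg() and sent with sdw_transfer(),
 * mirroring sdw_bread_no_pm() below; example_read_register() and its
 * calling convention are made up for the example.
 */
#if 0
static int example_read_register(struct sdw_bus *bus, u16 dev_num, u32 reg)
{
	struct sdw_msg msg;
	u8 buf;
	int ret;

	/* build a one-byte read addressed to @dev_num */
	ret = sdw_fill_msg(&msg, NULL, reg, 1, dev_num, SDW_MSG_FLAG_READ, &buf);
	if (ret < 0)
		return ret;

	/* sdw_transfer() takes and releases the bus msg_lock internally */
	ret = sdw_transfer(bus, &msg);
	if (ret < 0)
		return ret;

	return buf;
}
#endif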
412 * Read/Write IO functions.
415 static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags,
416 size_t count, u8 *val)
423 /* Only handle bytes up to next page boundary */
424 size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR));
426 ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val);
430 ret = sdw_transfer(slave->bus, &msg);
431 if (ret < 0 && !slave->is_mockup_device)
443 * sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM
445 * @addr: Register address
447 * @val: Buffer for values to be read
449 * Note that if the message crosses a page boundary each page will be
450 * transferred under a separate invocation of the msg_lock.
452 int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
454 return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val);
456 EXPORT_SYMBOL(sdw_nread_no_pm);
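/*
 * Worked example of the page split above (illustrative, assumed values):
 * with SDW_REGADDR covering the low 15 address bits, a 4-byte read that
 * starts 2 bytes before a 32 KiB page boundary is sent as two messages of
 * 2 bytes each, each under its own invocation of the msg_lock.
 *
 * Illustrative sketch (not part of the driver, compiled out): a caller
 * that already holds a runtime-PM reference, e.g. a driver resume handler,
 * restoring a made-up block of vendor registers at 0x2000.
 */
#if 0
static int example_restore_settings(struct sdw_slave *slave, u8 *cache, size_t len)
{
	/* no PM handling here: the _no_pm helpers never resume the device */
	return sdw_nread_no_pm(slave, 0x2000, len, cache);
}
#endif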
459 * sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM
461 * @addr: Register address
463 * @val: Buffer for values to be written
465 * Note that if the message crosses a page boundary each page will be
466 * transferred under a separate invocation of the msg_lock.
468 int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
470 return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val);
472 EXPORT_SYMBOL(sdw_nwrite_no_pm);
475 * sdw_write_no_pm() - Write a SDW Slave register with no PM
477 * @addr: Register address
478 * @value: Register value
480 int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
482 return sdw_nwrite_no_pm(slave, addr, 1, &value);
484 EXPORT_SYMBOL(sdw_write_no_pm);
487 sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
493 ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
494 SDW_MSG_FLAG_READ, &buf);
498 ret = sdw_transfer(bus, &msg);
506 sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
511 ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
512 SDW_MSG_FLAG_WRITE, &value);
516 return sdw_transfer(bus, &msg);
519 int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
525 ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
526 SDW_MSG_FLAG_READ, &buf);
530 ret = sdw_transfer_unlocked(bus, &msg);
536 EXPORT_SYMBOL(sdw_bread_no_pm_unlocked);
538 int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
543 ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
544 SDW_MSG_FLAG_WRITE, &value);
548 return sdw_transfer_unlocked(bus, &msg);
550 EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);
553 * sdw_read_no_pm() - Read a SDW Slave register with no PM
555 * @addr: Register address
557 int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
562 ret = sdw_nread_no_pm(slave, addr, 1, &buf);
568 EXPORT_SYMBOL(sdw_read_no_pm);
570 int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
574 tmp = sdw_read_no_pm(slave, addr);
578 tmp = (tmp & ~mask) | val;
579 return sdw_write_no_pm(slave, addr, tmp);
581 EXPORT_SYMBOL(sdw_update_no_pm);
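/*
 * Illustrative sketch (not part of the driver, compiled out): read-modify-
 * write of a single bit in a made-up vendor register (0x3001) with
 * sdw_update_no_pm(); the caller is assumed to hold a runtime-PM reference.
 */
#if 0
static int example_enable_feature(struct sdw_slave *slave, bool enable)
{
	/* only bit 1 is touched, the other bits are preserved */
	return sdw_update_no_pm(slave, 0x3001, BIT(1), enable ? BIT(1) : 0);
}
#endif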
583 /* Read-Modify-Write Slave register */
584 int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
588 tmp = sdw_read(slave, addr);
592 tmp = (tmp & ~mask) | val;
593 return sdw_write(slave, addr, tmp);
595 EXPORT_SYMBOL(sdw_update);
598 * sdw_nread() - Read "n" contiguous SDW Slave registers
600 * @addr: Register address
602 * @val: Buffer for values to be read
604 * This version of the function will take a PM reference to the slave
606 * Note that if the message crosses a page boundary each page will be
607 * transferred under a separate invocation of the msg_lock.
609 int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
613 ret = pm_runtime_get_sync(&slave->dev);
614 if (ret < 0 && ret != -EACCES) {
615 pm_runtime_put_noidle(&slave->dev);
619 ret = sdw_nread_no_pm(slave, addr, count, val);
621 pm_runtime_mark_last_busy(&slave->dev);
622 pm_runtime_put(&slave->dev);
626 EXPORT_SYMBOL(sdw_nread);
629 * sdw_nwrite() - Write "n" contiguous SDW Slave registers
631 * @addr: Register address
633 * @val: Buffer for values to be written
635 * This version of the function will take a PM reference to the slave
637 * Note that if the message crosses a page boundary each page will be
638 * transferred under a separate invocation of the msg_lock.
640 int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
644 ret = pm_runtime_get_sync(&slave->dev);
645 if (ret < 0 && ret != -EACCES) {
646 pm_runtime_put_noidle(&slave->dev);
650 ret = sdw_nwrite_no_pm(slave, addr, count, val);
652 pm_runtime_mark_last_busy(&slave->dev);
653 pm_runtime_put(&slave->dev);
657 EXPORT_SYMBOL(sdw_nwrite);
660 * sdw_read() - Read a SDW Slave register
662 * @addr: Register address
664 * This version of the function will take a PM reference to the slave
667 int sdw_read(struct sdw_slave *slave, u32 addr)
672 ret = sdw_nread(slave, addr, 1, &buf);
678 EXPORT_SYMBOL(sdw_read);
681 * sdw_write() - Write a SDW Slave register
683 * @addr: Register address
684 * @value: Register value
686 * This version of the function will take a PM reference to the slave
689 int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
691 return sdw_nwrite(slave, addr, 1, &value);
693 EXPORT_SYMBOL(sdw_write);
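/*
 * Illustrative sketch (not part of the driver, compiled out): a peripheral
 * (codec) driver writing a made-up vendor register (0x3000) with the
 * PM-managed helper; sdw_write() resumes the device around the transfer,
 * so this can be called from e.g. an ALSA control handler.
 */
#if 0
static int example_set_gain(struct sdw_slave *slave, u8 gain)
{
	int ret;

	ret = sdw_write(slave, 0x3000, gain);
	if (ret < 0)
		dev_err(&slave->dev, "gain write failed: %d\n", ret);

	return ret;
}
#endif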
699 /* called with bus_lock held */
700 static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
702 struct sdw_slave *slave;
704 list_for_each_entry(slave, &bus->slaves, node) {
705 if (slave->dev_num == i)
712 int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
714 if (slave->id.mfg_id != id.mfg_id ||
715 slave->id.part_id != id.part_id ||
716 slave->id.class_id != id.class_id ||
717 (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
718 slave->id.unique_id != id.unique_id))
723 EXPORT_SYMBOL(sdw_compare_devid);
725 /* called with bus_lock held */
726 static int sdw_get_device_num(struct sdw_slave *slave)
728 struct sdw_bus *bus = slave->bus;
731 if (bus->ops && bus->ops->get_device_num) {
732 bit = bus->ops->get_device_num(bus, slave);
736 bit = find_first_zero_bit(bus->assigned, SDW_MAX_DEVICES);
737 if (bit == SDW_MAX_DEVICES) {
744 * Do not update dev_num in the Slave data structure here;
745 * update it only after programming the dev_num succeeds
747 set_bit(bit, bus->assigned);
753 static int sdw_assign_device_num(struct sdw_slave *slave)
755 struct sdw_bus *bus = slave->bus;
756 struct device *dev = bus->dev;
759 /* check first if device number is assigned, if so reuse that */
760 if (!slave->dev_num) {
761 if (!slave->dev_num_sticky) {
764 mutex_lock(&slave->bus->bus_lock);
765 dev_num = sdw_get_device_num(slave);
766 mutex_unlock(&slave->bus->bus_lock);
768 dev_err(dev, "Get dev_num failed: %d\n", dev_num);
772 slave->dev_num_sticky = dev_num;
774 dev_dbg(dev, "Slave already registered, reusing dev_num: %d\n",
775 slave->dev_num_sticky);
779 /* Clear the slave->dev_num to transfer message on device 0 */
782 ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, slave->dev_num_sticky);
784 dev_err(dev, "Program device_num %d failed: %d\n",
785 slave->dev_num_sticky, ret);
789 /* After xfer of msg, restore dev_num */
790 slave->dev_num = slave->dev_num_sticky;
792 if (bus->ops && bus->ops->new_peripheral_assigned)
793 bus->ops->new_peripheral_assigned(bus, slave, slave->dev_num);
798 void sdw_extract_slave_id(struct sdw_bus *bus,
799 u64 addr, struct sdw_slave_id *id)
801 dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);
803 id->sdw_version = SDW_VERSION(addr);
804 id->unique_id = SDW_UNIQUE_ID(addr);
805 id->mfg_id = SDW_MFG_ID(addr);
806 id->part_id = SDW_PART_ID(addr);
807 id->class_id = SDW_CLASS_ID(addr);
810 "SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
811 id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
813 EXPORT_SYMBOL(sdw_extract_slave_id);
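/*
 * Worked example (illustrative): assuming the standard 48-bit DevId layout
 * used by the extraction macros above, a reported value of 0x31025d071100
 * decodes to sdw_version 0x3, unique_id 0x1, mfg_id 0x025d, part_id 0x0711
 * and class_id 0x00.
 */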
815 bool is_clock_scaling_supported_by_slave(struct sdw_slave *slave)
818 * Dynamic scaling is defined by SDCA. However, some devices expose the class ID but
819 * can't support dynamic scaling. We might need a quirk to handle such devices.
821 return slave->id.class_id;
823 EXPORT_SYMBOL(is_clock_scaling_supported_by_slave);
825 static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
827 u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
828 struct sdw_slave *slave, *_s;
829 struct sdw_slave_id id;
837 /* No Slave, so use raw xfer api */
838 ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
839 SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
844 ret = sdw_transfer(bus, &msg);
845 if (ret == -ENODATA) { /* end of device id reads */
846 dev_dbg(bus->dev, "No more devices to enumerate\n");
851 dev_err(bus->dev, "DEVID read fail:%d\n", ret);
856 * Construct the 48-bit addr and extract the ID fields. Cast the
857 * bytes shifted by 24 bits or more to u64 to avoid truncation.
859 addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
860 ((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
863 sdw_extract_slave_id(bus, addr, &id);
866 /* Now compare with entries */
867 list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
868 if (sdw_compare_devid(slave, id) == 0) {
872 * To prevent skipping state-machine stages don't
873 * program a device until we've seen it UNATTACH.
874 * Must return here because no other device on #0
875 * can be detected until this one has been
876 * assigned a device ID.
878 if (slave->status != SDW_SLAVE_UNATTACHED)
882 * Assign a new dev_num to this Slave but do
883 * not mark it present. It will be marked
884 * present after it reports ATTACHED on new
887 ret = sdw_assign_device_num(slave);
890 "Assign dev_num failed:%d\n",
902 /* TODO: Park this device in Group 13 */
905 * add Slave device even if there is no platform
906 * firmware description. There will be no driver probe
907 * but the user/integration will be able to see the
908 * device, enumeration status and device number in sysfs
910 sdw_slave_add(bus, &id, NULL);
912 dev_err(bus->dev, "Slave Entry not found\n");
918 * Loop until an error occurs or the retry count is exhausted.
919 * A device can drop off and rejoin during enumeration,
920 * so allow up to twice the device-count bound.
923 } while (ret == 0 && count < (SDW_MAX_DEVICES * 2));
928 static void sdw_modify_slave_status(struct sdw_slave *slave,
929 enum sdw_slave_status status)
931 struct sdw_bus *bus = slave->bus;
933 mutex_lock(&bus->bus_lock);
936 "changing status slave %d status %d new status %d\n",
937 slave->dev_num, slave->status, status);
939 if (status == SDW_SLAVE_UNATTACHED) {
941 "initializing enumeration and init completion for Slave %d\n",
944 reinit_completion(&slave->enumeration_complete);
945 reinit_completion(&slave->initialization_complete);
947 } else if ((status == SDW_SLAVE_ATTACHED) &&
948 (slave->status == SDW_SLAVE_UNATTACHED)) {
950 "signaling enumeration completion for Slave %d\n",
953 complete_all(&slave->enumeration_complete);
955 slave->status = status;
956 mutex_unlock(&bus->bus_lock);
959 static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
960 enum sdw_clk_stop_mode mode,
961 enum sdw_clk_stop_type type)
965 mutex_lock(&slave->sdw_dev_lock);
968 struct device *dev = &slave->dev;
969 struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
971 if (drv->ops && drv->ops->clk_stop)
972 ret = drv->ops->clk_stop(slave, mode, type);
975 mutex_unlock(&slave->sdw_dev_lock);
980 static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
981 enum sdw_clk_stop_mode mode,
988 wake_en = slave->prop.wake_capable;
991 val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;
993 if (mode == SDW_CLK_STOP_MODE1)
994 val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;
997 val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
999 ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
1001 if (ret != -ENODATA)
1002 dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
1006 val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
1009 ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);
1011 if (ret < 0 && ret != -ENODATA)
1012 dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret);
1017 static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num, bool prepare)
1019 int retry = bus->clk_stop_timeout;
1023 val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
1025 if (val != -ENODATA)
1026 dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
1029 val &= SDW_SCP_STAT_CLK_STP_NF;
1031 dev_dbg(bus->dev, "clock stop %s done slave:%d\n",
1032 prepare ? "prepare" : "deprepare",
1037 usleep_range(1000, 1500);
1041 dev_dbg(bus->dev, "clock stop %s did not complete for slave:%d\n",
1042 prepare ? "prepare" : "deprepare",
1049 * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop
1051 * @bus: SDW bus instance
1053 * Query Slave for clock stop mode and prepare for that mode.
1055 int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
1057 bool simple_clk_stop = true;
1058 struct sdw_slave *slave;
1059 bool is_slave = false;
1063 * In order to save on transition time, prepare
1064 * each Slave and then wait for all Slave(s) to be
1065 * prepared for clock stop.
1066 * If one of the Slave devices has lost sync and
1067 * replies with Command Ignored/-ENODATA, we continue
1070 list_for_each_entry(slave, &bus->slaves, node) {
1071 if (!slave->dev_num)
1074 if (slave->status != SDW_SLAVE_ATTACHED &&
1075 slave->status != SDW_SLAVE_ALERT)
1078 /* Identify if Slave(s) are available on Bus */
1081 ret = sdw_slave_clk_stop_callback(slave,
1083 SDW_CLK_PRE_PREPARE);
1084 if (ret < 0 && ret != -ENODATA) {
1085 dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret);
1089 /* Only prepare a Slave device if needed */
1090 if (!slave->prop.simple_clk_stop_capable) {
1091 simple_clk_stop = false;
1093 ret = sdw_slave_clk_stop_prepare(slave,
1096 if (ret < 0 && ret != -ENODATA) {
1097 dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret);
1103 /* Skip remaining clock stop preparation if no Slave is attached */
1108 * Don't wait for all Slaves to be ready if they follow the simple
1111 if (!simple_clk_stop) {
1112 ret = sdw_bus_wait_for_clk_prep_deprep(bus,
1113 SDW_BROADCAST_DEV_NUM, true);
1115 * if there are no Slave devices present and the reply is
1116 * Command_Ignored/-ENODATA, we don't need to continue with the
1117 * flow and can just return here. The error code is not modified
1118 * and its handling is left to the caller.
1124 /* Inform slaves that prep is done */
1125 list_for_each_entry(slave, &bus->slaves, node) {
1126 if (!slave->dev_num)
1129 if (slave->status != SDW_SLAVE_ATTACHED &&
1130 slave->status != SDW_SLAVE_ALERT)
1133 ret = sdw_slave_clk_stop_callback(slave,
1135 SDW_CLK_POST_PREPARE);
1137 if (ret < 0 && ret != -ENODATA) {
1138 dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret);
1145 EXPORT_SYMBOL(sdw_bus_prep_clk_stop);
1148 * sdw_bus_clk_stop: stop bus clock
1150 * @bus: SDW bus instance
1152 * After preparing the Slaves for clock stop, stop the clock by broadcasting
1153 * a write to the SCP_CTRL register.
1155 int sdw_bus_clk_stop(struct sdw_bus *bus)
1160 * broadcast clock stop now, attached Slaves will ACK this,
1161 * unattached will ignore
1163 ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
1164 SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
1166 if (ret != -ENODATA)
1167 dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret);
1173 EXPORT_SYMBOL(sdw_bus_clk_stop);
1176 * sdw_bus_exit_clk_stop: Exit clock stop mode
1178 * @bus: SDW bus instance
1180 * This de-prepares the Slaves by exiting Clock Stop Mode 0. Slaves
1181 * exiting Clock Stop Mode 1 are de-prepared after they enumerate
1184 int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
1186 bool simple_clk_stop = true;
1187 struct sdw_slave *slave;
1188 bool is_slave = false;
1192 * In order to save on transition time, de-prepare
1193 * each Slave and then wait for all Slave(s) to be
1194 * de-prepared after clock resume.
1196 list_for_each_entry(slave, &bus->slaves, node) {
1197 if (!slave->dev_num)
1200 if (slave->status != SDW_SLAVE_ATTACHED &&
1201 slave->status != SDW_SLAVE_ALERT)
1204 /* Identify if Slave(s) are available on Bus */
1207 ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
1208 SDW_CLK_PRE_DEPREPARE);
1210 dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret);
1212 /* Only de-prepare a Slave device if needed */
1213 if (!slave->prop.simple_clk_stop_capable) {
1214 simple_clk_stop = false;
1216 ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0,
1220 dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret);
1224 /* Skip remaining clock stop de-preparation if no Slave is attached */
1229 * Don't wait for all Slaves to be ready if they follow the simple
1232 if (!simple_clk_stop) {
1233 ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM, false);
1235 dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret);
1238 list_for_each_entry(slave, &bus->slaves, node) {
1239 if (!slave->dev_num)
1242 if (slave->status != SDW_SLAVE_ATTACHED &&
1243 slave->status != SDW_SLAVE_ALERT)
1246 ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
1247 SDW_CLK_POST_DEPREPARE);
1249 dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret);
1254 EXPORT_SYMBOL(sdw_bus_exit_clk_stop);
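/*
 * Illustrative sketch (not part of the driver, compiled out): the order in
 * which a hypothetical controller driver might use the clock stop helpers
 * in its suspend/resume paths; the controller-specific clock restart is
 * only hinted at in a comment.
 */
#if 0
static int example_bus_suspend(struct sdw_bus *bus)
{
	int ret;

	/* prepare attached Slaves first, then broadcast ClockStopNow */
	ret = sdw_bus_prep_clk_stop(bus);
	if (ret < 0 && ret != -ENODATA)
		return ret;

	return sdw_bus_clk_stop(bus);
}

static int example_bus_resume(struct sdw_bus *bus)
{
	/* controller-specific code restarts the clock before this point */
	return sdw_bus_exit_clk_stop(bus);
}
#endif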
1256 int sdw_configure_dpn_intr(struct sdw_slave *slave,
1257 int port, bool enable, int mask)
1263 if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
1264 dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
1265 str_on_off(enable));
1266 mask |= SDW_DPN_INT_TEST_FAIL;
1269 addr = SDW_DPN_INTMASK(port);
1271 /* Set/Clear port ready interrupt mask */
1274 val |= SDW_DPN_INT_PORT_READY;
1277 val &= ~SDW_DPN_INT_PORT_READY;
1280 ret = sdw_update_no_pm(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
1282 dev_err(&slave->dev,
1283 "SDW_DPN_INTMASK write failed:%d\n", val);
1288 int sdw_slave_get_scale_index(struct sdw_slave *slave, u8 *base)
1290 u32 mclk_freq = slave->bus->prop.mclk_freq;
1291 u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
1296 dev_err(&slave->dev,
1297 "no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
1302 * map base frequency using Table 89 of SoundWire 1.2 spec.
1303 * The order of the tests just follows the specification; this
1304 * is not a selection between possible values or a search for
1305 * the best value but just a mapping. Only one case per platform
1307 * Some BIOS have inconsistent values for mclk_freq but a
1308 * correct root so we force the mclk_freq to avoid variations.
1310 if (!(19200000 % mclk_freq)) {
1311 mclk_freq = 19200000;
1312 *base = SDW_SCP_BASE_CLOCK_19200000_HZ;
1313 } else if (!(22579200 % mclk_freq)) {
1314 mclk_freq = 22579200;
1315 *base = SDW_SCP_BASE_CLOCK_22579200_HZ;
1316 } else if (!(24576000 % mclk_freq)) {
1317 mclk_freq = 24576000;
1318 *base = SDW_SCP_BASE_CLOCK_24576000_HZ;
1319 } else if (!(32000000 % mclk_freq)) {
1320 mclk_freq = 32000000;
1321 *base = SDW_SCP_BASE_CLOCK_32000000_HZ;
1322 } else if (!(96000000 % mclk_freq)) {
1323 mclk_freq = 24000000;
1324 *base = SDW_SCP_BASE_CLOCK_24000000_HZ;
1326 dev_err(&slave->dev,
1327 "Unsupported clock base, mclk %d\n",
1332 if (mclk_freq % curr_freq) {
1333 dev_err(&slave->dev,
1334 "mclk %d is not multiple of bus curr_freq %d\n",
1335 mclk_freq, curr_freq);
1339 scale = mclk_freq / curr_freq;
1342 * map scale to Table 90 of SoundWire 1.2 spec - and check
1343 * that the scale is a power of two and maximum 64
1345 scale_index = ilog2(scale);
1347 if (BIT(scale_index) != scale || scale_index > 6) {
1348 dev_err(&slave->dev,
1349 "No match found for scale %d, bus mclk %d curr_freq %d\n",
1350 scale, mclk_freq, curr_freq);
1355 dev_dbg(&slave->dev,
1356 "Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
1357 *base, scale_index, mclk_freq, curr_freq);
1361 EXPORT_SYMBOL(sdw_slave_get_scale_index);
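/*
 * Worked example (illustrative, assumed values): with prop.mclk_freq of
 * 9600000 Hz and curr_dr_freq of 9600000 Hz (curr_freq = 4800000 Hz), the
 * first test matches (19200000 % 9600000 == 0), so mclk_freq is forced to
 * 19200000 and *base becomes SDW_SCP_BASE_CLOCK_19200000_HZ; the scale is
 * then 19200000 / 4800000 = 4, i.e. scale_index = ilog2(4) = 2.
 */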
1363 static int sdw_slave_set_frequency(struct sdw_slave *slave)
1370 * frequency base and scale registers are required for SDCA
1371 * devices. They may also be used for 1.2+/non-SDCA devices.
1372 * Drivers can set the property directly; for now there's no
1373 * DisCo property to discover support for the scaling registers
1374 * from platform firmware.
1376 if (!slave->id.class_id && !slave->prop.clock_reg_supported)
1379 scale_index = sdw_slave_get_scale_index(slave, &base);
1380 if (scale_index < 0)
1383 ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
1385 dev_err(&slave->dev,
1386 "SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
1390 /* initialize scale for both banks */
1391 ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
1393 dev_err(&slave->dev,
1394 "SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
1397 ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
1399 dev_err(&slave->dev,
1400 "SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
1405 static int sdw_initialize_slave(struct sdw_slave *slave)
1407 struct sdw_slave_prop *prop = &slave->prop;
1412 ret = sdw_slave_set_frequency(slave);
1416 if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
1417 /* Clear bus clash interrupt before enabling interrupt mask */
1418 status = sdw_read_no_pm(slave, SDW_SCP_INT1);
1420 dev_err(&slave->dev,
1421 "SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
1424 if (status & SDW_SCP_INT1_BUS_CLASH) {
1425 dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
1426 ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
1428 dev_err(&slave->dev,
1429 "SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
1434 if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
1435 !(prop->quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
1436 /* Clear parity interrupt before enabling interrupt mask */
1437 status = sdw_read_no_pm(slave, SDW_SCP_INT1);
1439 dev_err(&slave->dev,
1440 "SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
1443 if (status & SDW_SCP_INT1_PARITY) {
1444 dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
1445 ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
1447 dev_err(&slave->dev,
1448 "SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
1455 * Set SCP_INT1_MASK register, typically bus clash and
1456 * implementation-defined interrupt mask. The Parity detection
1457 * may not always be correct on startup so its use is
1458 * device-dependent; it might e.g. only be enabled in
1459 * steady-state after a couple of frames.
1461 val = prop->scp_int1_mask;
1463 /* Enable SCP interrupts */
1464 ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
1466 dev_err(&slave->dev,
1467 "SDW_SCP_INTMASK1 write failed:%d\n", ret);
1471 /* No need to continue if DP0 is not present */
1472 if (!prop->dp0_prop)
1475 /* Enable DP0 interrupts */
1476 val = prop->dp0_prop->imp_def_interrupts;
1477 val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
1479 ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
1481 dev_err(&slave->dev,
1482 "SDW_DP0_INTMASK read failed:%d\n", ret);
1486 static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
1488 u8 clear, impl_int_mask;
1489 int status, status2, ret, count = 0;
1491 status = sdw_read_no_pm(slave, SDW_DP0_INT);
1493 dev_err(&slave->dev,
1494 "SDW_DP0_INT read failed:%d\n", status);
1499 clear = status & ~(SDW_DP0_INTERRUPTS | SDW_DP0_SDCA_CASCADE);
1501 if (status & SDW_DP0_INT_TEST_FAIL) {
1502 dev_err(&slave->dev, "Test fail for port 0\n");
1503 clear |= SDW_DP0_INT_TEST_FAIL;
1507 * Assumption: PORT_READY interrupt will be received only for
1508 * ports implementing Channel Prepare state machine (CP_SM)
1511 if (status & SDW_DP0_INT_PORT_READY) {
1512 complete(&slave->port_ready[0]);
1513 clear |= SDW_DP0_INT_PORT_READY;
1516 if (status & SDW_DP0_INT_BRA_FAILURE) {
1517 dev_err(&slave->dev, "BRA failed\n");
1518 clear |= SDW_DP0_INT_BRA_FAILURE;
1521 impl_int_mask = SDW_DP0_INT_IMPDEF1 |
1522 SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;
1524 if (status & impl_int_mask) {
1525 clear |= impl_int_mask;
1526 *slave_status = clear;
1529 /* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
1530 ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear);
1532 dev_err(&slave->dev,
1533 "SDW_DP0_INT write failed:%d\n", ret);
1537 /* Read DP0 interrupt again */
1538 status2 = sdw_read_no_pm(slave, SDW_DP0_INT);
1540 dev_err(&slave->dev,
1541 "SDW_DP0_INT read failed:%d\n", status2);
1544 /* filter to limit loop to interrupts identified in the first status read */
1549 /* we can get alerts while processing so keep retrying */
1550 } while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
1552 if (count == SDW_READ_INTR_CLEAR_RETRY)
1553 dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n");
1558 static int sdw_handle_port_interrupt(struct sdw_slave *slave,
1559 int port, u8 *slave_status)
1561 u8 clear, impl_int_mask;
1562 int status, status2, ret, count = 0;
1566 return sdw_handle_dp0_interrupt(slave, slave_status);
1568 addr = SDW_DPN_INT(port);
1569 status = sdw_read_no_pm(slave, addr);
1571 dev_err(&slave->dev,
1572 "SDW_DPN_INT read failed:%d\n", status);
1578 clear = status & ~SDW_DPN_INTERRUPTS;
1580 if (status & SDW_DPN_INT_TEST_FAIL) {
1581 dev_err(&slave->dev, "Test fail for port:%d\n", port);
1582 clear |= SDW_DPN_INT_TEST_FAIL;
1586 * Assumption: PORT_READY interrupt will be received only
1587 * for ports implementing CP_SM.
1589 if (status & SDW_DPN_INT_PORT_READY) {
1590 complete(&slave->port_ready[port]);
1591 clear |= SDW_DPN_INT_PORT_READY;
1594 impl_int_mask = SDW_DPN_INT_IMPDEF1 |
1595 SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;
1597 if (status & impl_int_mask) {
1598 clear |= impl_int_mask;
1599 *slave_status = clear;
1602 /* clear the interrupt but don't touch reserved fields */
1603 ret = sdw_write_no_pm(slave, addr, clear);
1605 dev_err(&slave->dev,
1606 "SDW_DPN_INT write failed:%d\n", ret);
1610 /* Read DPN interrupt again */
1611 status2 = sdw_read_no_pm(slave, addr);
1613 dev_err(&slave->dev,
1614 "SDW_DPN_INT read failed:%d\n", status2);
1617 /* filter to limit loop to interrupts identified in the first status read */
1622 /* we can get alerts while processing so keep retrying */
1623 } while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
1625 if (count == SDW_READ_INTR_CLEAR_RETRY)
1626 dev_warn(&slave->dev, "Reached MAX_RETRY on port read\n");
1631 static int sdw_handle_slave_alerts(struct sdw_slave *slave)
1633 struct sdw_slave_intr_status slave_intr;
1634 u8 clear = 0, bit, port_status[15] = {0};
1635 int port_num, stat, ret, count = 0;
1638 u8 sdca_cascade = 0;
1643 sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
1645 ret = pm_runtime_get_sync(&slave->dev);
1646 if (ret < 0 && ret != -EACCES) {
1647 dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
1648 pm_runtime_put_noidle(&slave->dev);
1652 /* Read Intstat 1, Intstat 2 and Intstat 3 registers */
1653 ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
1655 dev_err(&slave->dev,
1656 "SDW_SCP_INT1 read failed:%d\n", ret);
1661 ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
1663 dev_err(&slave->dev,
1664 "SDW_SCP_INT2/3 read failed:%d\n", ret);
1668 if (slave->id.class_id) {
1669 ret = sdw_read_no_pm(slave, SDW_DP0_INT);
1671 dev_err(&slave->dev,
1672 "SDW_DP0_INT read failed:%d\n", ret);
1675 sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
1679 slave_notify = false;
1682 * Check parity, bus clash and Slave (impl defined)
1685 if (buf & SDW_SCP_INT1_PARITY) {
1686 parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY;
1687 parity_quirk = !slave->first_interrupt_done &&
1688 (slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY);
1690 if (parity_check && !parity_quirk)
1691 dev_err(&slave->dev, "Parity error detected\n");
1692 clear |= SDW_SCP_INT1_PARITY;
1695 if (buf & SDW_SCP_INT1_BUS_CLASH) {
1696 if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH)
1697 dev_err(&slave->dev, "Bus clash detected\n");
1698 clear |= SDW_SCP_INT1_BUS_CLASH;
1702 * When bus clash or parity errors are detected, such errors
1703 * are unlikely to be recoverable.
1704 * TODO: In such a scenario, reset the bus. Make this configurable
1705 * via sysfs property with bus reset being the default.
1708 if (buf & SDW_SCP_INT1_IMPL_DEF) {
1709 if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) {
1710 dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
1711 slave_notify = true;
1713 clear |= SDW_SCP_INT1_IMPL_DEF;
1716 /* the SDCA interrupts are cleared in the codec driver .interrupt_callback() */
1718 slave_notify = true;
1720 /* Check port 0 - 3 interrupts */
1721 port = buf & SDW_SCP_INT1_PORT0_3;
1723 /* To get port number corresponding to bits, shift it */
1724 port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port);
1725 for_each_set_bit(bit, &port, 8) {
1726 sdw_handle_port_interrupt(slave, bit,
1730 /* Check if cascade 2 interrupt is present */
1731 if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
1732 port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
1733 for_each_set_bit(bit, &port, 8) {
1734 /* scp2 ports start from 4 */
1736 sdw_handle_port_interrupt(slave,
1738 &port_status[port_num]);
1742 /* now check last cascade */
1743 if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
1744 port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
1745 for_each_set_bit(bit, &port, 8) {
1746 /* scp3 ports start from 11 */
1747 port_num = bit + 11;
1748 sdw_handle_port_interrupt(slave,
1750 &port_status[port_num]);
1754 /* Update the Slave driver */
1756 mutex_lock(&slave->sdw_dev_lock);
1758 if (slave->probed) {
1759 struct device *dev = &slave->dev;
1760 struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
1762 if (slave->prop.use_domain_irq && slave->irq)
1763 handle_nested_irq(slave->irq);
1765 if (drv->ops && drv->ops->interrupt_callback) {
1766 slave_intr.sdca_cascade = sdca_cascade;
1767 slave_intr.control_port = clear;
1768 memcpy(slave_intr.port, &port_status,
1769 sizeof(slave_intr.port));
1771 drv->ops->interrupt_callback(slave, &slave_intr);
1775 mutex_unlock(&slave->sdw_dev_lock);
1779 ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear);
1781 dev_err(&slave->dev,
1782 "SDW_SCP_INT1 write failed:%d\n", ret);
1786 /* at this point all initial interrupt sources were handled */
1787 slave->first_interrupt_done = true;
1790 * Read status again to ensure no new interrupts arrived
1791 * while servicing interrupts.
1793 ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
1795 dev_err(&slave->dev,
1796 "SDW_SCP_INT1 recheck read failed:%d\n", ret);
1801 ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
1803 dev_err(&slave->dev,
1804 "SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
1808 if (slave->id.class_id) {
1809 ret = sdw_read_no_pm(slave, SDW_DP0_INT);
1811 dev_err(&slave->dev,
1812 "SDW_DP0_INT recheck read failed:%d\n", ret);
1815 sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
1819 * Make sure no interrupts are pending
1821 stat = buf || buf2[0] || buf2[1] || sdca_cascade;
1824 * Exit loop if Slave is continuously in ALERT state even
1825 * after servicing the interrupt multiple times.
1829 /* we can get alerts while processing so keep retrying */
1830 } while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
1832 if (count == SDW_READ_INTR_CLEAR_RETRY)
1833 dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n");
1836 pm_runtime_mark_last_busy(&slave->dev);
1837 pm_runtime_put_autosuspend(&slave->dev);
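/*
 * Illustrative sketch (not part of the driver, compiled out): the peripheral
 * driver side that consumes the sdw_slave_intr_status filled in above; the
 * function and ops names are made up, and only a subset of sdw_slave_ops is
 * shown.
 */
#if 0
static int example_interrupt_callback(struct sdw_slave *slave,
				      struct sdw_slave_intr_status *status)
{
	/* control_port mirrors the SCP_Int1 bits that were just cleared */
	if (status->control_port & SDW_SCP_INT1_IMPL_DEF)
		dev_dbg(&slave->dev, "handle implementation-defined interrupt\n");

	/* sdca_cascade is only meaningful for SDCA devices (class_id != 0) */
	if (status->sdca_cascade)
		dev_dbg(&slave->dev, "read and clear the SDCA interrupt registers\n");

	return 0;
}

static const struct sdw_slave_ops example_slave_ops = {
	.interrupt_callback = example_interrupt_callback,
};
#endif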
1842 static int sdw_update_slave_status(struct sdw_slave *slave,
1843 enum sdw_slave_status status)
1847 mutex_lock(&slave->sdw_dev_lock);
1849 if (slave->probed) {
1850 struct device *dev = &slave->dev;
1851 struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
1853 if (drv->ops && drv->ops->update_status)
1854 ret = drv->ops->update_status(slave, status);
1857 mutex_unlock(&slave->sdw_dev_lock);
1863 * sdw_handle_slave_status() - Handle Slave status
1864 * @bus: SDW bus instance
1865 * @status: Status for all Slave(s)
1867 int sdw_handle_slave_status(struct sdw_bus *bus,
1868 enum sdw_slave_status status[])
1870 enum sdw_slave_status prev_status;
1871 struct sdw_slave *slave;
1872 bool attached_initializing, id_programmed;
1875 /* first check if any Slaves fell off the bus */
1876 for (i = 1; i <= SDW_MAX_DEVICES; i++) {
1877 mutex_lock(&bus->bus_lock);
1878 if (test_bit(i, bus->assigned) == false) {
1879 mutex_unlock(&bus->bus_lock);
1882 mutex_unlock(&bus->bus_lock);
1884 slave = sdw_get_slave(bus, i);
1888 if (status[i] == SDW_SLAVE_UNATTACHED &&
1889 slave->status != SDW_SLAVE_UNATTACHED) {
1890 dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
1892 sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
1894 /* Ensure the driver knows that the peripheral is unattached */
1895 ret = sdw_update_slave_status(slave, status[i]);
1897 dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
1901 if (status[0] == SDW_SLAVE_ATTACHED) {
1902 dev_dbg(bus->dev, "Slave attached, programming device number\n");
1905 * Programming a device number will have side effects,
1906 * so we deal with other devices at a later time.
1907 * This relies on those devices reporting ATTACHED, which will
1908 * trigger another call to this function. This will only
1909 * happen if at least one device ID was programmed.
1910 * Error returns from sdw_program_device_num() are currently
1911 * ignored because there's no useful recovery that can be done.
1912 * Returning the error here could result in the current status
1913 * of other devices not being handled, because if no device IDs
1914 * were programmed there's nothing to guarantee a status change
1915 * to trigger another call to this function.
1917 sdw_program_device_num(bus, &id_programmed);
1922 /* Continue to check other slave statuses */
1923 for (i = 1; i <= SDW_MAX_DEVICES; i++) {
1924 mutex_lock(&bus->bus_lock);
1925 if (test_bit(i, bus->assigned) == false) {
1926 mutex_unlock(&bus->bus_lock);
1929 mutex_unlock(&bus->bus_lock);
1931 slave = sdw_get_slave(bus, i);
1935 attached_initializing = false;
1937 switch (status[i]) {
1938 case SDW_SLAVE_UNATTACHED:
1939 if (slave->status == SDW_SLAVE_UNATTACHED)
1942 dev_warn(&slave->dev, "Slave %d state check2: UNATTACHED, status was %d\n",
1945 sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
1948 case SDW_SLAVE_ALERT:
1949 ret = sdw_handle_slave_alerts(slave);
1951 dev_err(&slave->dev,
1952 "Slave %d alert handling failed: %d\n",
1956 case SDW_SLAVE_ATTACHED:
1957 if (slave->status == SDW_SLAVE_ATTACHED)
1960 prev_status = slave->status;
1961 sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);
1963 if (prev_status == SDW_SLAVE_ALERT)
1966 attached_initializing = true;
1968 ret = sdw_initialize_slave(slave);
1970 dev_err(&slave->dev,
1971 "Slave %d initialization failed: %d\n",
1977 dev_err(&slave->dev, "Invalid slave %d status:%d\n",
1982 ret = sdw_update_slave_status(slave, status[i]);
1984 dev_err(&slave->dev,
1985 "Update Slave status failed:%d\n", ret);
1986 if (attached_initializing) {
1987 dev_dbg(&slave->dev,
1988 "signaling initialization completion for Slave %d\n",
1991 complete_all(&slave->initialization_complete);
1994 * If the manager became pm_runtime active, the peripherals will be
1995 * restarted and attach, but their pm_runtime status may remain
1996 * suspended. If the 'update_slave_status' callback initiates
1997 * any sort of deferred processing, this processing would not be
1998 * cancelled on pm_runtime suspend.
1999 * To avoid such zombie states, we queue a request to resume.
2000 * This would be a no-op in case the peripheral was being resumed
2001 * by e.g. the ALSA/ASoC framework.
2003 pm_request_resume(&slave->dev);
2009 EXPORT_SYMBOL(sdw_handle_slave_status);
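/*
 * Illustrative sketch (not part of the driver, compiled out): a hypothetical
 * controller driver translating its own hardware state (the 2-bit-per-device
 * encoding below is made up) into the status[] array consumed by
 * sdw_handle_slave_status().
 */
#if 0
static void example_report_status(struct sdw_bus *bus, u32 hw_state)
{
	enum sdw_slave_status status[SDW_MAX_DEVICES + 1] = {};
	int i;

	for (i = 0; i <= SDW_MAX_DEVICES; i++) {
		switch ((hw_state >> (2 * i)) & 0x3) {
		case 1:
			status[i] = SDW_SLAVE_ATTACHED;
			break;
		case 2:
			status[i] = SDW_SLAVE_ALERT;
			break;
		default:
			status[i] = SDW_SLAVE_UNATTACHED;
			break;
		}
	}

	sdw_handle_slave_status(bus, status);
}
#endif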
2011 void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
2013 struct sdw_slave *slave;
2016 /* Check all non-zero devices */
2017 for (i = 1; i <= SDW_MAX_DEVICES; i++) {
2018 mutex_lock(&bus->bus_lock);
2019 if (test_bit(i, bus->assigned) == false) {
2020 mutex_unlock(&bus->bus_lock);
2023 mutex_unlock(&bus->bus_lock);
2025 slave = sdw_get_slave(bus, i);
2029 if (slave->status != SDW_SLAVE_UNATTACHED) {
2030 sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
2031 slave->first_interrupt_done = false;
2032 sdw_update_slave_status(slave, SDW_SLAVE_UNATTACHED);
2035 /* keep track of request, used in pm_runtime resume */
2036 slave->unattach_request = request;
2039 EXPORT_SYMBOL(sdw_clear_slave_status);
2041 int sdw_bpt_send_async(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
2043 if (msg->len > SDW_BPT_MSG_MAX_BYTES) {
2044 dev_err(bus->dev, "Invalid BPT message length %d\n", msg->len);
2048 /* check device is enumerated */
2049 if (slave->dev_num == SDW_ENUM_DEV_NUM ||
2050 slave->dev_num > SDW_MAX_DEVICES) {
2051 dev_err(&slave->dev, "Invalid device number %d\n", slave->dev_num);
2055 /* make sure all callbacks are defined */
2056 if (!bus->ops->bpt_send_async ||
2057 !bus->ops->bpt_wait) {
2058 dev_err(bus->dev, "BPT callbacks not defined\n");
2062 return bus->ops->bpt_send_async(bus, slave, msg);
2064 EXPORT_SYMBOL(sdw_bpt_send_async);
2066 int sdw_bpt_wait(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
2068 return bus->ops->bpt_wait(bus, slave, msg);
2070 EXPORT_SYMBOL(sdw_bpt_wait);
2072 int sdw_bpt_send_sync(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
2076 ret = sdw_bpt_send_async(bus, slave, msg);
2080 return sdw_bpt_wait(bus, slave, msg);
2082 EXPORT_SYMBOL(sdw_bpt_send_sync);