// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023, Linaro Ltd. All rights reserved.
 */
7 #include <linux/interrupt.h>
8 #include <linux/kernel.h>
9 #include <linux/mod_devicetable.h>
10 #include <linux/module.h>
11 #include <linux/platform_device.h>
12 #include <linux/regmap.h>
13 #include <linux/regulator/consumer.h>
14 #include <linux/slab.h>
15 #include <linux/usb/pd.h>
16 #include <linux/usb/tcpm.h>
17 #include "qcom_pmic_typec_pdphy.h"
/*
 * Per-IRQ cookie handed to the threaded ISR: identifies which logical
 * PD PHY interrupt fired (virq) and points back at the owning device.
 * The irq/virq fields are read in the ISR dispatch and in start/stop
 * (enable_irq()/disable_irq() on irq_data[i].irq).
 */
struct pmic_typec_pdphy_irq_data {
	int virq;	/* logical PMIC_PDPHY_*_IRQ identifier */
	int irq;	/* Linux IRQ number from platform_get_irq_byname() */
	struct pmic_typec_pdphy *pmic_typec_pdphy;
};
25 struct pmic_typec_pdphy {
27 struct tcpm_port *tcpm_port;
28 struct regmap *regmap;
32 struct pmic_typec_pdphy_irq_data *irq_data;
34 struct work_struct reset_work;
35 struct work_struct receive_work;
36 struct regulator *vdd_pdphy;
37 spinlock_t lock; /* Register atomicity */
40 static void qcom_pmic_typec_pdphy_reset_on(struct pmic_typec_pdphy *pmic_typec_pdphy)
42 struct device *dev = pmic_typec_pdphy->dev;
46 ret = regmap_write(pmic_typec_pdphy->regmap,
47 pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
51 ret = regmap_write(pmic_typec_pdphy->regmap,
52 pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG, 0);
58 dev_err(dev, "pd_reset_on error\n");
61 static void qcom_pmic_typec_pdphy_reset_off(struct pmic_typec_pdphy *pmic_typec_pdphy)
63 struct device *dev = pmic_typec_pdphy->dev;
66 ret = regmap_write(pmic_typec_pdphy->regmap,
67 pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG,
68 FRAME_FILTER_EN_SOP | FRAME_FILTER_EN_HARD_RESET);
70 dev_err(dev, "pd_reset_off error\n");
73 static void qcom_pmic_typec_pdphy_sig_reset_work(struct work_struct *work)
75 struct pmic_typec_pdphy *pmic_typec_pdphy = container_of(work, struct pmic_typec_pdphy,
79 spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
81 qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
82 qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
84 spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
86 tcpm_pd_hard_reset(pmic_typec_pdphy->tcpm_port);
90 qcom_pmic_typec_pdphy_clear_tx_control_reg(struct pmic_typec_pdphy *pmic_typec_pdphy)
92 struct device *dev = pmic_typec_pdphy->dev;
96 /* Clear TX control register */
97 ret = regmap_write(pmic_typec_pdphy->regmap,
98 pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
102 /* Perform readback to ensure sufficient delay for command to latch */
103 ret = regmap_read(pmic_typec_pdphy->regmap,
104 pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, &val);
108 dev_err(dev, "pd_clear_tx_control_reg: clear tx flag\n");
114 qcom_pmic_typec_pdphy_pd_transmit_signal(struct pmic_typec_pdphy *pmic_typec_pdphy,
115 enum tcpm_transmit_type type,
116 unsigned int negotiated_rev)
118 struct device *dev = pmic_typec_pdphy->dev;
123 spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
125 /* Clear TX control register */
126 ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
130 val = TX_CONTROL_SEND_SIGNAL;
131 if (negotiated_rev == PD_REV30)
132 val |= TX_CONTROL_RETRY_COUNT(2);
134 val |= TX_CONTROL_RETRY_COUNT(3);
136 if (type == TCPC_TX_CABLE_RESET || type == TCPC_TX_HARD_RESET)
137 val |= TX_CONTROL_FRAME_TYPE(1);
139 ret = regmap_write(pmic_typec_pdphy->regmap,
140 pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);
143 spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
145 dev_vdbg(dev, "pd_transmit_signal: type %d negotiate_rev %d send %d\n",
146 type, negotiated_rev, ret);
152 qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pdphy,
153 enum tcpm_transmit_type type,
154 const struct pd_message *msg,
155 unsigned int negotiated_rev)
157 struct device *dev = pmic_typec_pdphy->dev;
158 unsigned int val, hdr_len, txbuf_len, txsize_len;
162 spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
164 ret = regmap_read(pmic_typec_pdphy->regmap,
165 pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG,
171 dev_err(dev, "pd_transmit_payload: RX message pending\n");
176 /* Clear TX control register */
177 ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
181 hdr_len = sizeof(msg->header);
182 txbuf_len = pd_header_cnt_le(msg->header) * 4;
183 txsize_len = hdr_len + txbuf_len - 1;
185 /* Write message header sizeof(u16) to USB_PDPHY_TX_BUFFER_HDR_REG */
186 ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
187 pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_HDR_REG,
188 &msg->header, hdr_len);
192 /* Write payload to USB_PDPHY_TX_BUFFER_DATA_REG for txbuf_len */
194 ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
195 pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_DATA_REG,
196 &msg->payload, txbuf_len);
201 /* Write total length ((header + data) - 1) to USB_PDPHY_TX_SIZE_REG */
202 ret = regmap_write(pmic_typec_pdphy->regmap,
203 pmic_typec_pdphy->base + USB_PDPHY_TX_SIZE_REG,
208 /* Clear TX control register */
209 ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
213 /* Initiate transmit with retry count as indicated by PD revision */
214 val = TX_CONTROL_FRAME_TYPE(type) | TX_CONTROL_SEND_MSG;
215 if (pd_header_rev(msg->header) == PD_REV30)
216 val |= TX_CONTROL_RETRY_COUNT(2);
218 val |= TX_CONTROL_RETRY_COUNT(3);
220 ret = regmap_write(pmic_typec_pdphy->regmap,
221 pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);
224 spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
227 dev_err(dev, "pd_transmit_payload: hdr %*ph data %*ph ret %d\n",
228 hdr_len, &msg->header, txbuf_len, &msg->payload, ret);
234 int qcom_pmic_typec_pdphy_pd_transmit(struct pmic_typec_pdphy *pmic_typec_pdphy,
235 enum tcpm_transmit_type type,
236 const struct pd_message *msg,
237 unsigned int negotiated_rev)
239 struct device *dev = pmic_typec_pdphy->dev;
243 ret = qcom_pmic_typec_pdphy_pd_transmit_payload(pmic_typec_pdphy,
247 ret = qcom_pmic_typec_pdphy_pd_transmit_signal(pmic_typec_pdphy,
253 dev_dbg(dev, "pd_transmit: type %x result %d\n", type, ret);
258 static void qcom_pmic_typec_pdphy_pd_receive(struct pmic_typec_pdphy *pmic_typec_pdphy)
260 struct device *dev = pmic_typec_pdphy->dev;
261 struct pd_message msg;
262 unsigned int size, rx_status;
266 spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
268 ret = regmap_read(pmic_typec_pdphy->regmap,
269 pmic_typec_pdphy->base + USB_PDPHY_RX_SIZE_REG, &size);
273 /* Hardware requires +1 of the real read value to be passed */
274 if (size < 1 || size > sizeof(msg.payload) + 1) {
275 dev_dbg(dev, "pd_receive: invalid size %d\n", size);
280 ret = regmap_read(pmic_typec_pdphy->regmap,
281 pmic_typec_pdphy->base + USB_PDPHY_RX_STATUS_REG,
287 ret = regmap_bulk_read(pmic_typec_pdphy->regmap,
288 pmic_typec_pdphy->base + USB_PDPHY_RX_BUFFER_REG,
293 /* Return ownership of RX buffer to hardware */
294 ret = regmap_write(pmic_typec_pdphy->regmap,
295 pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, 0);
298 spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
301 dev_vdbg(dev, "pd_receive: handing %d bytes to tcpm\n", size);
302 tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg);
306 static irqreturn_t qcom_pmic_typec_pdphy_isr(int irq, void *dev_id)
308 struct pmic_typec_pdphy_irq_data *irq_data = dev_id;
309 struct pmic_typec_pdphy *pmic_typec_pdphy = irq_data->pmic_typec_pdphy;
310 struct device *dev = pmic_typec_pdphy->dev;
312 switch (irq_data->virq) {
313 case PMIC_PDPHY_SIG_TX_IRQ:
314 dev_err(dev, "isr: tx_sig\n");
316 case PMIC_PDPHY_SIG_RX_IRQ:
317 schedule_work(&pmic_typec_pdphy->reset_work);
319 case PMIC_PDPHY_MSG_TX_IRQ:
320 tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
323 case PMIC_PDPHY_MSG_RX_IRQ:
324 qcom_pmic_typec_pdphy_pd_receive(pmic_typec_pdphy);
326 case PMIC_PDPHY_MSG_TX_FAIL_IRQ:
327 tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
330 case PMIC_PDPHY_MSG_TX_DISCARD_IRQ:
331 tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
339 int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, bool on)
344 spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
346 ret = regmap_write(pmic_typec_pdphy->regmap,
347 pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, !on);
349 spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
351 dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", on ? "on" : "off");
356 int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
357 bool data_role_host, bool power_role_src)
359 struct device *dev = pmic_typec_pdphy->dev;
363 spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
365 ret = regmap_update_bits(pmic_typec_pdphy->regmap,
366 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
367 MSG_CONFIG_PORT_DATA_ROLE |
368 MSG_CONFIG_PORT_POWER_ROLE,
369 data_role_host << 3 | power_role_src << 2);
371 spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
373 dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
374 data_role_host, power_role_src);
379 static int qcom_pmic_typec_pdphy_enable(struct pmic_typec_pdphy *pmic_typec_pdphy)
381 struct device *dev = pmic_typec_pdphy->dev;
384 ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
388 /* PD 2.0, DR=TYPEC_DEVICE, PR=TYPEC_SINK */
389 ret = regmap_update_bits(pmic_typec_pdphy->regmap,
390 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
391 MSG_CONFIG_SPEC_REV_MASK, PD_REV20);
395 ret = regmap_write(pmic_typec_pdphy->regmap,
396 pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
400 ret = regmap_write(pmic_typec_pdphy->regmap,
401 pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG,
406 qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
409 regulator_disable(pmic_typec_pdphy->vdd_pdphy);
410 dev_err(dev, "pdphy_enable fail %d\n", ret);
416 static int qcom_pmic_typec_pdphy_disable(struct pmic_typec_pdphy *pmic_typec_pdphy)
420 qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
422 ret = regmap_write(pmic_typec_pdphy->regmap,
423 pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
425 regulator_disable(pmic_typec_pdphy->vdd_pdphy);
/*
 * Full PHY reset cycle: disable, wait 400-500us for the block to
 * settle, then re-enable.  Returns 0 on success or the first error.
 */
static int pmic_typec_pdphy_reset(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	int ret;

	ret = qcom_pmic_typec_pdphy_disable(pmic_typec_pdphy);
	if (ret)
		goto done;

	usleep_range(400, 500);
	ret = qcom_pmic_typec_pdphy_enable(pmic_typec_pdphy);
done:
	return ret;
}
444 int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
445 struct tcpm_port *tcpm_port)
450 pmic_typec_pdphy->tcpm_port = tcpm_port;
452 ret = pmic_typec_pdphy_reset(pmic_typec_pdphy);
456 for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
457 enable_irq(pmic_typec_pdphy->irq_data[i].irq);
462 void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
466 for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
467 disable_irq(pmic_typec_pdphy->irq_data[i].irq);
469 qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
472 struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev)
474 return devm_kzalloc(dev, sizeof(struct pmic_typec_pdphy), GFP_KERNEL);
477 int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
478 struct pmic_typec_pdphy *pmic_typec_pdphy,
479 struct pmic_typec_pdphy_resources *res,
480 struct regmap *regmap,
483 struct device *dev = &pdev->dev;
484 struct pmic_typec_pdphy_irq_data *irq_data;
487 if (!res->nr_irqs || res->nr_irqs > PMIC_PDPHY_MAX_IRQS)
490 irq_data = devm_kzalloc(dev, sizeof(*irq_data) * res->nr_irqs,
495 pmic_typec_pdphy->vdd_pdphy = devm_regulator_get(dev, "vdd-pdphy");
496 if (IS_ERR(pmic_typec_pdphy->vdd_pdphy))
497 return PTR_ERR(pmic_typec_pdphy->vdd_pdphy);
499 pmic_typec_pdphy->dev = dev;
500 pmic_typec_pdphy->base = base;
501 pmic_typec_pdphy->regmap = regmap;
502 pmic_typec_pdphy->nr_irqs = res->nr_irqs;
503 pmic_typec_pdphy->irq_data = irq_data;
504 spin_lock_init(&pmic_typec_pdphy->lock);
505 INIT_WORK(&pmic_typec_pdphy->reset_work, qcom_pmic_typec_pdphy_sig_reset_work);
507 for (i = 0; i < res->nr_irqs; i++, irq_data++) {
508 irq = platform_get_irq_byname(pdev, res->irq_params[i].irq_name);
512 irq_data->pmic_typec_pdphy = pmic_typec_pdphy;
514 irq_data->virq = res->irq_params[i].virq;
516 ret = devm_request_threaded_irq(dev, irq, NULL,
517 qcom_pmic_typec_pdphy_isr,
518 IRQF_ONESHOT | IRQF_NO_AUTOEN,
519 res->irq_params[i].irq_name,