// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, Intel Corporation.
 * Intel Visual Sensing Controller Transport Layer Linux driver
 */
#include <linux/acpi.h>
#include <linux/cleanup.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "vsc-tp.h"
#define VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS	20
#define VSC_TP_ROM_BOOTUP_DELAY_MS		10
#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US		(500 * USEC_PER_MSEC)
#define VSC_TP_ROM_XFER_POLL_DELAY_US		(20 * USEC_PER_MSEC)

#define VSC_TP_WAIT_FW_ASSERTED_TIMEOUT		(2 * HZ)
#define VSC_TP_MAX_XFER_COUNT			5

#define VSC_TP_PACKET_SYNC			0x31
#define VSC_TP_CRC_SIZE				sizeof(u32)
#define VSC_TP_MAX_MSG_SIZE			2048
/* SPI xfer timeout size */
#define VSC_TP_XFER_TIMEOUT_BYTES		700
#define VSC_TP_PACKET_PADDING_SIZE		1
#define VSC_TP_PACKET_SIZE(pkt) \
	(sizeof(struct vsc_tp_packet) + le16_to_cpu((pkt)->len) + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_PACKET_SIZE \
	(sizeof(struct vsc_tp_packet) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_XFER_SIZE \
	(VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
#define VSC_TP_NEXT_XFER_LEN(len, offset) \
	(len + sizeof(struct vsc_tp_packet) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)
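
/*
 * On-wire framing as implemented below: every packet starts with the
 * VSC_TP_PACKET_SYNC byte, followed by the header (command, payload length,
 * sequence number), the payload itself and a trailing CRC32 over header plus
 * payload. VSC_TP_XFER_TIMEOUT_BYTES extra bytes are clocked on top of each
 * packet, presumably to give the firmware time to answer within the same
 * full-duplex SPI transaction.
 */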
struct vsc_tp_packet {
	__u8 sync;
	__u8 cmd;
	__le16 len;
	__le32 seq;
	__u8 buf[] __counted_by(len);
};
struct vsc_tp {
	/* do the actual data transfer */
	struct spi_device *spi;

	/* bind with mei framework */
	struct platform_device *pdev;

	struct gpio_desc *wakeuphost;
	struct gpio_desc *resetfw;
	struct gpio_desc *wakeupfw;

	/* command sequence number */
	u32 seq;

	void *tx_buf;
	void *rx_buf;

	atomic_t assert_cnt;
	wait_queue_head_t xfer_wait;

	vsc_tp_event_cb_t event_notify;
	void *event_notify_context;

	/* used to protect command download */
	struct mutex mutex;
};
static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
static const struct acpi_gpio_params wakeupfw = { 3, 0, false };

static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
	{ "wakeuphost-gpios", &wakeuphost_gpio, 1 },
	{ "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
	{ "resetfw-gpios", &resetfw_gpio, 1 },
	{ "wakeupfw-gpios", &wakeupfw, 1 },
	{}
};
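
/*
 * Wakeup handshake used below: the driver drives the "wakeupfw" GPIO low to
 * request the firmware's attention, then waits until the firmware has
 * asserted "wakeuphost" and the falling-edge interrupt (counted in
 * assert_cnt) has fired. Releasing the request drives "wakeupfw" high again.
 */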
/* wakeup firmware and wait for response */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
	int ret;

	gpiod_set_value_cansleep(tp->wakeupfw, 0);

	ret = wait_event_timeout(tp->xfer_wait,
				 atomic_read(&tp->assert_cnt) &&
				 gpiod_get_value_cansleep(tp->wakeuphost),
				 VSC_TP_WAIT_FW_ASSERTED_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return 0;
}
static void vsc_tp_wakeup_release(struct vsc_tp *tp)
{
	atomic_dec_if_positive(&tp->assert_cnt);

	gpiod_set_value_cansleep(tp->wakeupfw, 1);
}
static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len)
{
	struct spi_message msg = { 0 };
	struct spi_transfer xfer = {
		.tx_buf = obuf,
		.rx_buf = ibuf,
		.len = len,
	};

	spi_message_init_with_transfers(&msg, &xfer, 1);

	return spi_sync_locked(tp->spi, &msg);
}
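
/*
 * Note: spi_sync_locked() is the variant of spi_sync() intended for callers
 * that hold the SPI bus lock (spi_bus_lock()); whether and where a higher
 * layer takes that lock around these transfers is not visible in this file.
 */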
static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
			      void *ibuf, u16 ilen)
{
	int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet);
	int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
	u8 *src, *crc_src, *rx_buf = tp->rx_buf;
	int count_down = VSC_TP_MAX_XFER_COUNT;
	u32 recv_crc = 0, crc = ~0;
	struct vsc_tp_packet ack;
	u8 *dst = (u8 *)&ack;
	bool synced = false;

	do {
		ret = vsc_tp_dev_xfer(tp, pkt, rx_buf, next_xfer_len);
		if (ret)
			return ret;
		memset(pkt, 0, VSC_TP_MAX_XFER_SIZE);
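
		/*
		 * The response may start anywhere within the clocked bytes
		 * and may span several transfers: locate the sync byte
		 * first, then accumulate the header, payload and trailing
		 * CRC, re-transferring with VSC_TP_NEXT_XFER_LEN() until
		 * the whole packet has been received.
		 */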
		if (synced) {
			src = rx_buf;
			src_len = next_xfer_len;
		} else {
			src = memchr(rx_buf, VSC_TP_PACKET_SYNC, next_xfer_len);
			if (!src)
				continue;
			synced = true;
			src_len = next_xfer_len - (src - rx_buf);
		}

		/* traverse received data */
		while (src_len > 0) {
			cpy_len = min(src_len, dst_len);
			memcpy(dst, src, cpy_len);
			crc_src = src;
			src += cpy_len;
			src_len -= cpy_len;
			dst += cpy_len;
			dst_len -= cpy_len;

			if (offset < sizeof(ack)) {
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (offset < sizeof(ack))
					continue;

				if (le16_to_cpu(ack.len)) {
					dst = ibuf;
					dst_len = min(ilen, le16_to_cpu(ack.len));
				} else {
					dst = (u8 *)&recv_crc;
					dst_len = sizeof(recv_crc);
				}
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len)) {
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (src_len) {
					int remain = sizeof(ack) + le16_to_cpu(ack.len) - offset;

					cpy_len = min(src_len, remain);
					offset += cpy_len;
					crc = crc32(crc, src, cpy_len);
					src += cpy_len;
					src_len -= cpy_len;
				}

				if (offset >= sizeof(ack) + le16_to_cpu(ack.len)) {
					dst = (u8 *)&recv_crc;
					dst_len = sizeof(recv_crc);
				}

				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len) + VSC_TP_CRC_SIZE) {
				offset += cpy_len;

				if (!dst_len) {
					/* terminate the traverse */
					next_xfer_len = 0;
					break;
				}

				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			}
		}
	} while (next_xfer_len > 0 && --count_down);
	if (next_xfer_len > 0)
		return -EAGAIN;

	if (~recv_crc != crc || le32_to_cpu(ack.seq) != tp->seq) {
		dev_err(&tp->spi->dev, "recv crc or seq error\n");
		return -EINVAL;
	}

	if (ack.cmd == VSC_TP_CMD_ACK || ack.cmd == VSC_TP_CMD_NACK ||
	    ack.cmd == VSC_TP_CMD_BUSY) {
		dev_err(&tp->spi->dev, "recv cmd ack error\n");
		return -EAGAIN;
	}

	return min(le16_to_cpu(ack.len), ilen);
}
/**
 * vsc_tp_xfer - transfer data to firmware
 * @tp: vsc_tp device handle
 * @cmd: the command to be sent to the device
 * @obuf: the tx buffer to be sent to the device
 * @olen: the length of tx buffer
 * @ibuf: the rx buffer to receive from the device
 * @ilen: the length of rx buffer
 *
 * Return: the length of received data in case of success,
 *	otherwise negative value
 */
int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
		void *ibuf, size_t ilen)
{
	struct vsc_tp_packet *pkt = tp->tx_buf;
	u32 crc;
	int ret;

	if (!obuf || !ibuf || olen > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	pkt->sync = VSC_TP_PACKET_SYNC;
	pkt->cmd = cmd;
	pkt->len = cpu_to_le16(olen);
	pkt->seq = cpu_to_le32(++tp->seq);
	memcpy(pkt->buf, obuf, olen);

	crc = ~crc32(~0, (u8 *)pkt, sizeof(struct vsc_tp_packet) + olen);
	memcpy(pkt->buf + olen, &crc, sizeof(crc));

	ret = vsc_tp_wakeup_request(tp);
	if (unlikely(ret))
		dev_err(&tp->spi->dev, "wakeup firmware failed ret: %d\n", ret);
	else
		ret = vsc_tp_xfer_helper(tp, pkt, ibuf, ilen);

	vsc_tp_wakeup_release(tp);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_xfer, VSC_TP);
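
/*
 * Illustrative use (not taken from this file): a client such as the mei-vsc
 * glue would issue a command/response exchange roughly like the following,
 * where VSC_CMD_FOO, struct foo_req and struct foo_res are hypothetical
 * placeholders for a real command and its wire structures:
 *
 *	struct foo_req req = { ... };
 *	struct foo_res res;
 *	int ret;
 *
 *	ret = vsc_tp_xfer(tp, VSC_CMD_FOO, &req, sizeof(req),
 *			  &res, sizeof(res));
 *	if (ret < 0)
 *		return ret;
 *	if (ret != sizeof(res))
 *		return -EIO;
 */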
/**
 * vsc_tp_rom_xfer - transfer data to rom code
 * @tp: vsc_tp device handle
 * @obuf: the data buffer to be sent to the device
 * @ibuf: the buffer to receive data from the device
 * @len: the length of tx buffer and rx buffer
 *
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
{
	size_t words = len / sizeof(__be32);
	int ret;

	if (len % sizeof(__be32) || len > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	/* rom xfer is big endian */
	cpu_to_be32_array(tp->tx_buf, obuf, words);

	ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
				!ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
				VSC_TP_ROM_XFER_POLL_TIMEOUT_US, false,
				tp->wakeuphost);
	if (ret) {
		dev_err(&tp->spi->dev, "wait rom failed ret: %d\n", ret);
		return ret;
	}

	ret = vsc_tp_dev_xfer(tp, tp->tx_buf, tp->rx_buf, len);
	if (ret)
		return ret;

	if (ibuf)
		cpu_to_be32_array(ibuf, tp->rx_buf, words);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_rom_xfer, VSC_TP);
/**
 * vsc_tp_reset - reset vsc transport layer
 * @tp: vsc_tp device handle
 */
void vsc_tp_reset(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);

	/* toggle reset pin */
	gpiod_set_value_cansleep(tp->resetfw, 0);
	msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS);
	gpiod_set_value_cansleep(tp->resetfw, 1);

	msleep(VSC_TP_ROM_BOOTUP_DELAY_MS);

	/*
	 * Set default host wakeup pin to non-active
	 * to avoid unexpected host irq interrupt.
	 */
	gpiod_set_value_cansleep(tp->wakeupfw, 1);

	atomic_set(&tp->assert_cnt, 0);

	enable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_reset, VSC_TP);
/**
 * vsc_tp_need_read - check if device has data to send
 * @tp: vsc_tp device handle
 *
 * Return: true if device has data to send, otherwise false
 */
bool vsc_tp_need_read(struct vsc_tp *tp)
{
	if (!atomic_read(&tp->assert_cnt))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeuphost))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeupfw))
		return false;

	return true;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, VSC_TP);
/**
 * vsc_tp_register_event_cb - register a callback function to receive event
 * @tp: vsc_tp device handle
 * @event_cb: callback function
 * @context: execution context of event callback
 *
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
			     void *context)
{
	tp->event_notify = event_cb;
	tp->event_notify_context = context;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
/**
 * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_synchronize(struct vsc_tp *tp)
{
	synchronize_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_synchronize, VSC_TP);

/**
 * vsc_tp_intr_enable - enable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_enable(struct vsc_tp *tp)
{
	enable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_enable, VSC_TP);

/**
 * vsc_tp_intr_disable - disable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_disable(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);
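
/*
 * Interrupt handling is split in the usual threaded-IRQ fashion: the hard
 * handler only counts the firmware's wakeup assertion and wakes any waiter
 * blocked in vsc_tp_wakeup_request(), while the threaded handler forwards
 * the event to the callback registered via vsc_tp_register_event_cb().
 */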
static irqreturn_t vsc_tp_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	atomic_inc(&tp->assert_cnt);

	wake_up(&tp->xfer_wait);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	if (tp->event_notify)
		tp->event_notify(tp->event_notify_context);

	return IRQ_HANDLED;
}
static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
	struct acpi_device **__adev = data;

	*__adev = adev;

	return 1;
}
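
/*
 * Probe wires the transport to the rest of the stack: it claims the ACPI
 * GPIOs, installs the wakeup interrupt handlers and then registers an
 * "intel_vsc" platform device for the single child ACPI node, passing a
 * pointer to this vsc_tp instance as platform data for the mei glue
 * driver to pick up.
 */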
static int vsc_tp_probe(struct spi_device *spi)
{
	struct platform_device_info pinfo = { 0 };
	struct device *dev = &spi->dev;
	struct platform_device *pdev;
	struct acpi_device *adev;
	struct vsc_tp *tp;
	int ret;

	tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return -ENOMEM;
	tp->tx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
	if (!tp->tx_buf)
		return -ENOMEM;
	tp->rx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
	if (!tp->rx_buf)
		return -ENOMEM;

	ret = devm_acpi_dev_add_driver_gpios(dev, vsc_tp_acpi_gpios);
	if (ret)
		return ret;

	tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN);
	if (IS_ERR(tp->wakeuphost))
		return PTR_ERR(tp->wakeuphost);
	tp->resetfw = devm_gpiod_get(dev, "resetfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->resetfw))
		return PTR_ERR(tp->resetfw);
	tp->wakeupfw = devm_gpiod_get(dev, "wakeupfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->wakeupfw))
		return PTR_ERR(tp->wakeupfw);

	atomic_set(&tp->assert_cnt, 0);
	init_waitqueue_head(&tp->xfer_wait);
	tp->spi = spi;

	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
	ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
					vsc_tp_thread_isr,
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					dev_name(dev), tp);
	if (ret)
		return ret;

	mutex_init(&tp->mutex);

	/* only one child acpi device */
	ret = acpi_dev_for_each_child(ACPI_COMPANION(dev),
				      vsc_tp_match_any, &adev);
	if (!ret) {
		ret = -ENODEV;
		goto err_destroy_lock;
	}

	pinfo.fwnode = acpi_fwnode_handle(adev);
	pinfo.name = "intel_vsc";
	pinfo.data = &tp;
	pinfo.size_data = sizeof(tp);
	pinfo.id = PLATFORM_DEVID_NONE;

	pdev = platform_device_register_full(&pinfo);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		goto err_destroy_lock;
	}

	tp->pdev = pdev;
	spi_set_drvdata(spi, tp);

	return 0;

err_destroy_lock:
	mutex_destroy(&tp->mutex);

	return ret;
}
static void vsc_tp_remove(struct spi_device *spi)
{
	struct vsc_tp *tp = spi_get_drvdata(spi);

	platform_device_unregister(tp->pdev);

	mutex_destroy(&tp->mutex);
}
static const struct acpi_device_id vsc_tp_acpi_ids[] = {
	{ "INTC1009" }, /* Raptor Lake */
	{ "INTC1058" }, /* Tiger Lake */
	{ "INTC1094" }, /* Alder Lake */
	{ "INTC10D0" }, /* Meteor Lake */
	{}
};
MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);
static struct spi_driver vsc_tp_driver = {
	.probe = vsc_tp_probe,
	.remove = vsc_tp_remove,
	.driver = {
		.name = "vsc-tp",
		.acpi_match_table = vsc_tp_acpi_ids,
	},
};
module_spi_driver(vsc_tp_driver);
MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_DESCRIPTION("Intel Visual Sensing Controller Transport Layer");
MODULE_LICENSE("GPL");