staging: typec: tcpm: Prevent TCPM from looping in SRC_TRYWAIT
drivers/staging/typec/tcpm.c
1/*
2 * Copyright 2015-2017 Google, Inc
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * USB Power Delivery protocol stack.
15 */
16
17#include <linux/completion.h>
18#include <linux/debugfs.h>
19#include <linux/device.h>
 20#include <linux/jiffies.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/mutex.h>
24#include <linux/proc_fs.h>
25#include <linux/sched/clock.h>
26#include <linux/seq_file.h>
27#include <linux/slab.h>
28#include <linux/spinlock.h>
29#include <linux/usb/typec.h>
30#include <linux/workqueue.h>
31
32#include "pd.h"
33#include "pd_vdo.h"
34#include "pd_bdo.h"
35#include "tcpm.h"
36
37#define FOREACH_STATE(S) \
38 S(INVALID_STATE), \
39 S(DRP_TOGGLING), \
40 S(SRC_UNATTACHED), \
41 S(SRC_ATTACH_WAIT), \
42 S(SRC_ATTACHED), \
43 S(SRC_STARTUP), \
44 S(SRC_SEND_CAPABILITIES), \
45 S(SRC_NEGOTIATE_CAPABILITIES), \
46 S(SRC_TRANSITION_SUPPLY), \
47 S(SRC_READY), \
48 S(SRC_WAIT_NEW_CAPABILITIES), \
49 \
50 S(SNK_UNATTACHED), \
51 S(SNK_ATTACH_WAIT), \
52 S(SNK_DEBOUNCED), \
53 S(SNK_ATTACHED), \
54 S(SNK_STARTUP), \
55 S(SNK_DISCOVERY), \
56 S(SNK_DISCOVERY_DEBOUNCE), \
57 S(SNK_DISCOVERY_DEBOUNCE_DONE), \
58 S(SNK_WAIT_CAPABILITIES), \
59 S(SNK_NEGOTIATE_CAPABILITIES), \
60 S(SNK_TRANSITION_SINK), \
61 S(SNK_TRANSITION_SINK_VBUS), \
62 S(SNK_READY), \
63 \
64 S(ACC_UNATTACHED), \
65 S(DEBUG_ACC_ATTACHED), \
66 S(AUDIO_ACC_ATTACHED), \
67 S(AUDIO_ACC_DEBOUNCE), \
68 \
69 S(HARD_RESET_SEND), \
70 S(HARD_RESET_START), \
71 S(SRC_HARD_RESET_VBUS_OFF), \
72 S(SRC_HARD_RESET_VBUS_ON), \
73 S(SNK_HARD_RESET_SINK_OFF), \
74 S(SNK_HARD_RESET_WAIT_VBUS), \
75 S(SNK_HARD_RESET_SINK_ON), \
76 \
77 S(SOFT_RESET), \
78 S(SOFT_RESET_SEND), \
79 \
80 S(DR_SWAP_ACCEPT), \
81 S(DR_SWAP_SEND), \
82 S(DR_SWAP_SEND_TIMEOUT), \
83 S(DR_SWAP_CANCEL), \
84 S(DR_SWAP_CHANGE_DR), \
85 \
86 S(PR_SWAP_ACCEPT), \
87 S(PR_SWAP_SEND), \
88 S(PR_SWAP_SEND_TIMEOUT), \
89 S(PR_SWAP_CANCEL), \
90 S(PR_SWAP_START), \
91 S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \
92 S(PR_SWAP_SRC_SNK_SOURCE_OFF), \
93 S(PR_SWAP_SRC_SNK_SINK_ON), \
94 S(PR_SWAP_SNK_SRC_SINK_OFF), \
95 S(PR_SWAP_SNK_SRC_SOURCE_ON), \
96 \
97 S(VCONN_SWAP_ACCEPT), \
98 S(VCONN_SWAP_SEND), \
99 S(VCONN_SWAP_SEND_TIMEOUT), \
100 S(VCONN_SWAP_CANCEL), \
101 S(VCONN_SWAP_START), \
102 S(VCONN_SWAP_WAIT_FOR_VCONN), \
103 S(VCONN_SWAP_TURN_ON_VCONN), \
104 S(VCONN_SWAP_TURN_OFF_VCONN), \
105 \
106 S(SNK_TRY), \
107 S(SNK_TRY_WAIT), \
108 S(SRC_TRYWAIT), \
 109 S(SRC_TRYWAIT_DEBOUNCE), \
110 S(SRC_TRYWAIT_UNATTACHED), \
111 \
112 S(SRC_TRY), \
113 S(SRC_TRY_DEBOUNCE), \
114 S(SNK_TRYWAIT), \
115 S(SNK_TRYWAIT_DEBOUNCE), \
116 S(SNK_TRYWAIT_VBUS), \
117 S(BIST_RX), \
118 \
119 S(ERROR_RECOVERY), \
120 S(PORT_RESET), \
121 S(PORT_RESET_WAIT_OFF)
122
123#define GENERATE_ENUM(e) e
124#define GENERATE_STRING(s) #s
125
126enum tcpm_state {
127 FOREACH_STATE(GENERATE_ENUM)
128};
129
130static const char * const tcpm_states[] = {
131 FOREACH_STATE(GENERATE_STRING)
132};
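/*
 * Editor's note (illustration, not part of the driver): FOREACH_STATE() is an
 * X-macro. The same state list is expanded twice, once through GENERATE_ENUM
 * to build enum tcpm_state and once through GENERATE_STRING to build the
 * matching name table, so tcpm_states[SRC_ATTACHED] is always the string
 * "SRC_ATTACHED". A minimal sketch of the same technique, with made-up names:
 *
 *	#define FOREACH_COLOR(C)	C(RED), C(GREEN), C(BLUE)
 *	#define AS_ENUM(x)		x
 *	#define AS_STRING(x)		#x
 *	enum color { FOREACH_COLOR(AS_ENUM) };
 *	static const char * const color_names[] = { FOREACH_COLOR(AS_STRING) };
 */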
133
134enum vdm_states {
135 VDM_STATE_ERR_BUSY = -3,
136 VDM_STATE_ERR_SEND = -2,
137 VDM_STATE_ERR_TMOUT = -1,
138 VDM_STATE_DONE = 0,
139 /* Anything >0 represents an active state */
140 VDM_STATE_READY = 1,
141 VDM_STATE_BUSY = 2,
142 VDM_STATE_WAIT_RSP_BUSY = 3,
143};
144
145enum pd_msg_request {
146 PD_MSG_NONE = 0,
147 PD_MSG_CTRL_REJECT,
148 PD_MSG_CTRL_WAIT,
149 PD_MSG_DATA_SINK_CAP,
150 PD_MSG_DATA_SOURCE_CAP,
151};
152
153/* Events from low level driver */
154
155#define TCPM_CC_EVENT BIT(0)
156#define TCPM_VBUS_EVENT BIT(1)
157#define TCPM_RESET_EVENT BIT(2)
158
159#define LOG_BUFFER_ENTRIES 1024
160#define LOG_BUFFER_ENTRY_SIZE 128
161
162/* Alternate mode support */
163
164#define SVID_DISCOVERY_MAX 16
165
166struct pd_mode_data {
167 int svid_index; /* current SVID index */
168 int nsvids;
169 u16 svids[SVID_DISCOVERY_MAX];
170 int altmodes; /* number of alternate modes */
171 struct typec_altmode_desc altmode_desc[SVID_DISCOVERY_MAX];
172};
173
174struct tcpm_port {
175 struct device *dev;
176
177 struct mutex lock; /* tcpm state machine lock */
178 struct workqueue_struct *wq;
179
180 struct typec_capability typec_caps;
181 struct typec_port *typec_port;
182
183 struct tcpc_dev *tcpc;
184
185 enum typec_role vconn_role;
186 enum typec_role pwr_role;
187 enum typec_data_role data_role;
188 enum typec_pwr_opmode pwr_opmode;
189
190 struct usb_pd_identity partner_ident;
191 struct typec_partner_desc partner_desc;
192 struct typec_partner *partner;
193
194 enum typec_cc_status cc_req;
195
196 enum typec_cc_status cc1;
197 enum typec_cc_status cc2;
198 enum typec_cc_polarity polarity;
199
200 bool attached;
201 bool connected;
 202 enum typec_port_type port_type;
203 bool vbus_present;
204 bool vbus_never_low;
205 bool vbus_source;
206 bool vbus_charge;
207
208 bool send_discover;
209 bool op_vsafe5v;
210
211 int try_role;
212 int try_snk_count;
213 int try_src_count;
214
215 enum pd_msg_request queued_message;
216
217 enum tcpm_state enter_state;
218 enum tcpm_state prev_state;
219 enum tcpm_state state;
220 enum tcpm_state delayed_state;
221 unsigned long delayed_runtime;
222 unsigned long delay_ms;
223
224 spinlock_t pd_event_lock;
225 u32 pd_events;
226
227 struct work_struct event_work;
228 struct delayed_work state_machine;
229 struct delayed_work vdm_state_machine;
230 bool state_machine_running;
231
232 struct completion tx_complete;
233 enum tcpm_transmit_status tx_status;
234
235 struct mutex swap_lock; /* swap command lock */
236 bool swap_pending;
 237 bool non_pd_role_swap;
238 struct completion swap_complete;
239 int swap_status;
240
241 unsigned int message_id;
242 unsigned int caps_count;
243 unsigned int hard_reset_count;
244 bool pd_capable;
245 bool explicit_contract;
 246 unsigned int rx_msgid;
247
248 /* Partner capabilities/requests */
249 u32 sink_request;
250 u32 source_caps[PDO_MAX_OBJECTS];
251 unsigned int nr_source_caps;
252 u32 sink_caps[PDO_MAX_OBJECTS];
253 unsigned int nr_sink_caps;
254
255 /* Local capabilities */
256 u32 src_pdo[PDO_MAX_OBJECTS];
257 unsigned int nr_src_pdo;
258 u32 snk_pdo[PDO_MAX_OBJECTS];
259 unsigned int nr_snk_pdo;
260 u32 snk_vdo[VDO_MAX_OBJECTS];
261 unsigned int nr_snk_vdo;
262
263 unsigned int max_snk_mv;
264 unsigned int max_snk_ma;
265 unsigned int max_snk_mw;
266 unsigned int operating_snk_mw;
267
268 /* Requested current / voltage */
269 u32 current_limit;
270 u32 supply_voltage;
271
272 u32 bist_request;
273
274 /* PD state for Vendor Defined Messages */
275 enum vdm_states vdm_state;
276 u32 vdm_retries;
277 /* next Vendor Defined Message to send */
278 u32 vdo_data[VDO_MAX_SIZE];
279 u8 vdo_count;
280 /* VDO to retry if UFP responder replied busy */
281 u32 vdo_retry;
282
283 /* Alternate mode data */
284
285 struct pd_mode_data mode_data;
286 struct typec_altmode *partner_altmode[SVID_DISCOVERY_MAX];
287 struct typec_altmode *port_altmode[SVID_DISCOVERY_MAX];
288
289 /* Deadline in jiffies to exit src_try_wait state */
290 unsigned long max_wait;
291
292#ifdef CONFIG_DEBUG_FS
293 struct dentry *dentry;
294 struct mutex logbuffer_lock; /* log buffer access lock */
295 int logbuffer_head;
296 int logbuffer_tail;
297 u8 *logbuffer[LOG_BUFFER_ENTRIES];
298#endif
299};
300
301struct pd_rx_event {
302 struct work_struct work;
303 struct tcpm_port *port;
304 struct pd_message msg;
305};
306
307#define tcpm_cc_is_sink(cc) \
308 ((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
309 (cc) == TYPEC_CC_RP_3_0)
310
311#define tcpm_port_is_sink(port) \
312 ((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \
313 (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1)))
314
315#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
316#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
317#define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
318
319#define tcpm_port_is_source(port) \
320 ((tcpm_cc_is_source((port)->cc1) && \
321 !tcpm_cc_is_source((port)->cc2)) || \
322 (tcpm_cc_is_source((port)->cc2) && \
323 !tcpm_cc_is_source((port)->cc1)))
324
325#define tcpm_port_is_debug(port) \
326 (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
327
328#define tcpm_port_is_audio(port) \
329 (tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
330
331#define tcpm_port_is_audio_detached(port) \
332 ((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
333 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
334
335#define tcpm_try_snk(port) \
336 ((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
337 (port)->port_type == TYPEC_PORT_DRP)
338
339#define tcpm_try_src(port) \
340 ((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
341 (port)->port_type == TYPEC_PORT_DRP)
342
343static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
344{
 345 if (port->port_type == TYPEC_PORT_DRP) {
346 if (port->try_role == TYPEC_SINK)
347 return SNK_UNATTACHED;
348 else if (port->try_role == TYPEC_SOURCE)
349 return SRC_UNATTACHED;
350 else if (port->tcpc->config->default_role == TYPEC_SINK)
351 return SNK_UNATTACHED;
352 /* Fall through to return SRC_UNATTACHED */
 353 } else if (port->port_type == TYPEC_PORT_UFP) {
 354 return SNK_UNATTACHED;
 355 }
356 return SRC_UNATTACHED;
357}
358
359static inline
360struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap)
361{
362 return container_of(cap, struct tcpm_port, typec_caps);
363}
364
365static bool tcpm_port_is_disconnected(struct tcpm_port *port)
366{
367 return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
368 port->cc2 == TYPEC_CC_OPEN) ||
369 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
370 port->cc1 == TYPEC_CC_OPEN) ||
371 (port->polarity == TYPEC_POLARITY_CC2 &&
372 port->cc2 == TYPEC_CC_OPEN)));
373}
374
375/*
376 * Logging
377 */
378
379#ifdef CONFIG_DEBUG_FS
380
381static bool tcpm_log_full(struct tcpm_port *port)
382{
383 return port->logbuffer_tail ==
384 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
385}
386
 387__printf(2, 0)
388static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
389{
390 char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
391 u64 ts_nsec = local_clock();
392 unsigned long rem_nsec;
393
394 if (!port->logbuffer[port->logbuffer_head]) {
395 port->logbuffer[port->logbuffer_head] =
396 kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
397 if (!port->logbuffer[port->logbuffer_head])
398 return;
399 }
400
401 vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
402
403 mutex_lock(&port->logbuffer_lock);
404
405 if (tcpm_log_full(port)) {
406 port->logbuffer_head = max(port->logbuffer_head - 1, 0);
407 strcpy(tmpbuffer, "overflow");
408 }
409
410 if (port->logbuffer_head < 0 ||
411 port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
412 dev_warn(port->dev,
413 "Bad log buffer index %d\n", port->logbuffer_head);
414 goto abort;
415 }
416
417 if (!port->logbuffer[port->logbuffer_head]) {
418 dev_warn(port->dev,
419 "Log buffer index %d is NULL\n", port->logbuffer_head);
420 goto abort;
421 }
422
423 rem_nsec = do_div(ts_nsec, 1000000000);
424 scnprintf(port->logbuffer[port->logbuffer_head],
425 LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
426 (unsigned long)ts_nsec, rem_nsec / 1000,
427 tmpbuffer);
428 port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
429
430abort:
431 mutex_unlock(&port->logbuffer_lock);
432}
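/*
 * Editor's note (behaviour summary, assuming only the code above): the log is
 * a ring of LOG_BUFFER_ENTRIES lazily allocated strings. _tcpm_log() writes at
 * logbuffer_head and advances it modulo LOG_BUFFER_ENTRIES, while
 * tcpm_seq_show() drains from logbuffer_tail until it catches up with head.
 * The ring counts as full when advancing head would land on tail (e.g. with
 * 8 entries, head == 6 and tail == 7); in that case head is stepped back and
 * the entry is overwritten with "overflow" instead of the new message.
 */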
433
 434__printf(2, 3)
435static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
436{
437 va_list args;
438
439 /* Do not log while disconnected and unattached */
440 if (tcpm_port_is_disconnected(port) &&
441 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
442 port->state == DRP_TOGGLING))
443 return;
444
445 va_start(args, fmt);
446 _tcpm_log(port, fmt, args);
447 va_end(args);
448}
449
 450__printf(2, 3)
451static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
452{
453 va_list args;
454
455 va_start(args, fmt);
456 _tcpm_log(port, fmt, args);
457 va_end(args);
458}
459
460static void tcpm_log_source_caps(struct tcpm_port *port)
461{
462 int i;
463
464 for (i = 0; i < port->nr_source_caps; i++) {
465 u32 pdo = port->source_caps[i];
466 enum pd_pdo_type type = pdo_type(pdo);
467 char msg[64];
468
469 switch (type) {
470 case PDO_TYPE_FIXED:
471 scnprintf(msg, sizeof(msg),
472 "%u mV, %u mA [%s%s%s%s%s%s]",
473 pdo_fixed_voltage(pdo),
474 pdo_max_current(pdo),
475 (pdo & PDO_FIXED_DUAL_ROLE) ?
476 "R" : "",
477 (pdo & PDO_FIXED_SUSPEND) ?
478 "S" : "",
479 (pdo & PDO_FIXED_HIGHER_CAP) ?
480 "H" : "",
481 (pdo & PDO_FIXED_USB_COMM) ?
482 "U" : "",
483 (pdo & PDO_FIXED_DATA_SWAP) ?
484 "D" : "",
485 (pdo & PDO_FIXED_EXTPOWER) ?
486 "E" : "");
487 break;
488 case PDO_TYPE_VAR:
489 scnprintf(msg, sizeof(msg),
490 "%u-%u mV, %u mA",
491 pdo_min_voltage(pdo),
492 pdo_max_voltage(pdo),
493 pdo_max_current(pdo));
494 break;
495 case PDO_TYPE_BATT:
496 scnprintf(msg, sizeof(msg),
497 "%u-%u mV, %u mW",
498 pdo_min_voltage(pdo),
499 pdo_max_voltage(pdo),
500 pdo_max_power(pdo));
501 break;
502 default:
503 strcpy(msg, "undefined");
504 break;
505 }
506 tcpm_log(port, " PDO %d: type %d, %s",
507 i, type, msg);
508 }
509}
510
511static int tcpm_seq_show(struct seq_file *s, void *v)
512{
513 struct tcpm_port *port = (struct tcpm_port *)s->private;
514 int tail;
515
516 mutex_lock(&port->logbuffer_lock);
517 tail = port->logbuffer_tail;
518 while (tail != port->logbuffer_head) {
519 seq_printf(s, "%s\n", port->logbuffer[tail]);
520 tail = (tail + 1) % LOG_BUFFER_ENTRIES;
521 }
522 if (!seq_has_overflowed(s))
523 port->logbuffer_tail = tail;
524 mutex_unlock(&port->logbuffer_lock);
525
526 return 0;
527}
528
529static int tcpm_debug_open(struct inode *inode, struct file *file)
530{
531 return single_open(file, tcpm_seq_show, inode->i_private);
532}
533
534static const struct file_operations tcpm_debug_operations = {
535 .open = tcpm_debug_open,
536 .llseek = seq_lseek,
537 .read = seq_read,
538 .release = single_release,
539};
540
541static struct dentry *rootdir;
542
543static int tcpm_debugfs_init(struct tcpm_port *port)
544{
545 mutex_init(&port->logbuffer_lock);
546 /* /sys/kernel/debug/tcpm/usbcX */
547 if (!rootdir) {
548 rootdir = debugfs_create_dir("tcpm", NULL);
549 if (!rootdir)
550 return -ENOMEM;
551 }
552
553 port->dentry = debugfs_create_file(dev_name(port->dev),
554 S_IFREG | 0444, rootdir,
555 port, &tcpm_debug_operations);
556
557 return 0;
558}
559
560static void tcpm_debugfs_exit(struct tcpm_port *port)
561{
562 debugfs_remove(port->dentry);
563}
564
565#else
566
 567__printf(2, 3)
 568static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
 569__printf(2, 3)
570static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
571static void tcpm_log_source_caps(struct tcpm_port *port) { }
572static int tcpm_debugfs_init(const struct tcpm_port *port) { return 0; }
573static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
574
575#endif
576
577static int tcpm_pd_transmit(struct tcpm_port *port,
578 enum tcpm_transmit_type type,
579 const struct pd_message *msg)
580{
581 unsigned long timeout;
582 int ret;
583
584 if (msg)
585 tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
586 else
587 tcpm_log(port, "PD TX, type: %#x", type);
588
589 reinit_completion(&port->tx_complete);
590 ret = port->tcpc->pd_transmit(port->tcpc, type, msg);
591 if (ret < 0)
592 return ret;
593
594 mutex_unlock(&port->lock);
595 timeout = wait_for_completion_timeout(&port->tx_complete,
596 msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
597 mutex_lock(&port->lock);
598 if (!timeout)
599 return -ETIMEDOUT;
600
601 switch (port->tx_status) {
602 case TCPC_TX_SUCCESS:
603 port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
604 return 0;
605 case TCPC_TX_DISCARDED:
606 return -EAGAIN;
607 case TCPC_TX_FAILED:
608 default:
609 return -EIO;
610 }
611}
612
613void tcpm_pd_transmit_complete(struct tcpm_port *port,
614 enum tcpm_transmit_status status)
615{
616 tcpm_log(port, "PD TX complete, status: %u", status);
617 port->tx_status = status;
618 complete(&port->tx_complete);
619}
620EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
621
622static int tcpm_mux_set(struct tcpm_port *port, enum tcpc_mux_mode mode,
623 enum tcpc_usb_switch config)
624{
625 int ret = 0;
626
627 tcpm_log(port, "Requesting mux mode %d, config %d, polarity %d",
628 mode, config, port->polarity);
629
630 if (port->tcpc->mux)
631 ret = port->tcpc->mux->set(port->tcpc->mux, mode, config,
632 port->polarity);
633
634 return ret;
635}
636
637static int tcpm_set_polarity(struct tcpm_port *port,
638 enum typec_cc_polarity polarity)
639{
640 int ret;
641
642 tcpm_log(port, "polarity %d", polarity);
643
644 ret = port->tcpc->set_polarity(port->tcpc, polarity);
645 if (ret < 0)
646 return ret;
647
648 port->polarity = polarity;
649
650 return 0;
651}
652
653static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
654{
655 int ret;
656
657 tcpm_log(port, "vconn:=%d", enable);
658
659 ret = port->tcpc->set_vconn(port->tcpc, enable);
660 if (!ret) {
661 port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
662 typec_set_vconn_role(port->typec_port, port->vconn_role);
663 }
664
665 return ret;
666}
667
668static u32 tcpm_get_current_limit(struct tcpm_port *port)
669{
670 enum typec_cc_status cc;
671 u32 limit;
672
673 cc = port->polarity ? port->cc2 : port->cc1;
674 switch (cc) {
675 case TYPEC_CC_RP_1_5:
676 limit = 1500;
677 break;
678 case TYPEC_CC_RP_3_0:
679 limit = 3000;
680 break;
681 case TYPEC_CC_RP_DEF:
682 default:
683 limit = 0;
684 break;
685 }
686
687 return limit;
688}
689
690static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
691{
692 int ret = -EOPNOTSUPP;
693
694 tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
695
696 if (port->tcpc->set_current_limit)
697 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
698
699 return ret;
700}
701
702/*
703 * Determine RP value to set based on maximum current supported
704 * by a port if configured as source.
705 * Returns CC value to report to link partner.
706 */
707static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
708{
709 const u32 *src_pdo = port->src_pdo;
710 int nr_pdo = port->nr_src_pdo;
711 int i;
712
713 /*
714 * Search for first entry with matching voltage.
715 * It should report the maximum supported current.
716 */
717 for (i = 0; i < nr_pdo; i++) {
718 const u32 pdo = src_pdo[i];
719
720 if (pdo_type(pdo) == PDO_TYPE_FIXED &&
721 pdo_fixed_voltage(pdo) == 5000) {
722 unsigned int curr = pdo_max_current(pdo);
723
724 if (curr >= 3000)
725 return TYPEC_CC_RP_3_0;
726 else if (curr >= 1500)
727 return TYPEC_CC_RP_1_5;
728 return TYPEC_CC_RP_DEF;
729 }
730 }
731
732 return TYPEC_CC_RP_DEF;
733}
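/*
 * Editor's note, a worked example with hypothetical capabilities: given
 * src_pdo[] = { PDO_FIXED(5000, 1500, 0), PDO_FIXED(9000, 3000, 0) },
 * tcpm_rp_cc() stops at the first fixed 5V entry and maps its 1500 mA limit
 * to TYPEC_CC_RP_1_5; the 9V entry plays no role in Rp selection.
 */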
734
735static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
736{
737 return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
738 port->data_role);
739}
740
741static int tcpm_set_roles(struct tcpm_port *port, bool attached,
742 enum typec_role role, enum typec_data_role data)
743{
744 int ret;
745
746 if (data == TYPEC_HOST)
747 ret = tcpm_mux_set(port, TYPEC_MUX_USB,
748 TCPC_USB_SWITCH_CONNECT);
749 else
750 ret = tcpm_mux_set(port, TYPEC_MUX_NONE,
751 TCPC_USB_SWITCH_DISCONNECT);
752 if (ret < 0)
753 return ret;
754
755 ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
756 if (ret < 0)
757 return ret;
758
759 port->pwr_role = role;
760 port->data_role = data;
761 typec_set_data_role(port->typec_port, data);
762 typec_set_pwr_role(port->typec_port, role);
763
764 return 0;
765}
766
767static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
768{
769 int ret;
770
771 ret = port->tcpc->set_roles(port->tcpc, true, role,
772 port->data_role);
773 if (ret < 0)
774 return ret;
775
776 port->pwr_role = role;
777 typec_set_pwr_role(port->typec_port, role);
778
779 return 0;
780}
781
782static int tcpm_pd_send_source_caps(struct tcpm_port *port)
783{
784 struct pd_message msg;
785 int i;
786
787 memset(&msg, 0, sizeof(msg));
788 if (!port->nr_src_pdo) {
789 /* No source capabilities defined, sink only */
790 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
791 port->pwr_role,
792 port->data_role,
793 port->message_id, 0);
794 } else {
795 msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
796 port->pwr_role,
797 port->data_role,
798 port->message_id,
799 port->nr_src_pdo);
800 }
801 for (i = 0; i < port->nr_src_pdo; i++)
802 msg.payload[i] = cpu_to_le32(port->src_pdo[i]);
803
804 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
805}
806
807static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
808{
809 struct pd_message msg;
810 int i;
811
812 memset(&msg, 0, sizeof(msg));
813 if (!port->nr_snk_pdo) {
814 /* No sink capabilities defined, source only */
815 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
816 port->pwr_role,
817 port->data_role,
818 port->message_id, 0);
819 } else {
820 msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
821 port->pwr_role,
822 port->data_role,
823 port->message_id,
824 port->nr_snk_pdo);
825 }
826 for (i = 0; i < port->nr_snk_pdo; i++)
827 msg.payload[i] = cpu_to_le32(port->snk_pdo[i]);
828
829 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
830}
831
832static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
833 unsigned int delay_ms)
834{
835 if (delay_ms) {
836 tcpm_log(port, "pending state change %s -> %s @ %u ms",
837 tcpm_states[port->state], tcpm_states[state],
838 delay_ms);
839 port->delayed_state = state;
840 mod_delayed_work(port->wq, &port->state_machine,
841 msecs_to_jiffies(delay_ms));
842 port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms);
843 port->delay_ms = delay_ms;
844 } else {
845 tcpm_log(port, "state change %s -> %s",
846 tcpm_states[port->state], tcpm_states[state]);
847 port->delayed_state = INVALID_STATE;
848 port->prev_state = port->state;
849 port->state = state;
850 /*
851 * Don't re-queue the state machine work item if we're currently
852 * in the state machine and we're immediately changing states.
853 * tcpm_state_machine_work() will continue running the state
854 * machine.
855 */
856 if (!port->state_machine_running)
857 mod_delayed_work(port->wq, &port->state_machine, 0);
858 }
859}
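/*
 * Editor's note (usage illustration): callers either switch immediately,
 * e.g. tcpm_set_state(port, SNK_READY, 0), or schedule a delayed transition
 * such as tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED, PD_T_DRP_TRYWAIT).
 * In the delayed case only delayed_state/delayed_runtime are recorded and the
 * state machine work item fires after the timeout, unless another state
 * change supersedes it first.
 */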
860
861static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
862 unsigned int delay_ms)
863{
864 if (port->enter_state == port->state)
865 tcpm_set_state(port, state, delay_ms);
866 else
867 tcpm_log(port,
868 "skipped %sstate change %s -> %s [%u ms], context state %s",
869 delay_ms ? "delayed " : "",
870 tcpm_states[port->state], tcpm_states[state],
871 delay_ms, tcpm_states[port->enter_state]);
872}
873
874static void tcpm_queue_message(struct tcpm_port *port,
875 enum pd_msg_request message)
876{
877 port->queued_message = message;
878 mod_delayed_work(port->wq, &port->state_machine, 0);
879}
880
881/*
882 * VDM/VDO handling functions
883 */
884static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
885 const u32 *data, int cnt)
886{
887 port->vdo_count = cnt + 1;
888 port->vdo_data[0] = header;
889 memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
890 /* Set ready, vdm state machine will actually send */
891 port->vdm_retries = 0;
892 port->vdm_state = VDM_STATE_READY;
893}
894
895static void svdm_consume_identity(struct tcpm_port *port, const __le32 *payload,
896 int cnt)
897{
898 u32 vdo = le32_to_cpu(payload[VDO_INDEX_IDH]);
899 u32 product = le32_to_cpu(payload[VDO_INDEX_PRODUCT]);
900
901 memset(&port->mode_data, 0, sizeof(port->mode_data));
902
903#if 0 /* Not really a match */
904 switch (PD_IDH_PTYPE(vdo)) {
905 case IDH_PTYPE_UNDEF:
906 port->partner.type = TYPEC_PARTNER_NONE; /* no longer exists */
907 break;
908 case IDH_PTYPE_HUB:
909 break;
910 case IDH_PTYPE_PERIPH:
911 break;
912 case IDH_PTYPE_PCABLE:
913 break;
914 case IDH_PTYPE_ACABLE:
915 break;
916 case IDH_PTYPE_AMA:
917 port->partner.type = TYPEC_PARTNER_ALTMODE;
918 break;
919 default:
920 break;
921 }
922#endif
923
924 port->partner_ident.id_header = vdo;
925 port->partner_ident.cert_stat = le32_to_cpu(payload[VDO_INDEX_CSTAT]);
926 port->partner_ident.product = product;
927
928 typec_partner_set_identity(port->partner);
929
930 tcpm_log(port, "Identity: %04x:%04x.%04x",
931 PD_IDH_VID(vdo),
932 PD_PRODUCT_PID(product), product & 0xffff);
933}
934
935static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
936 int cnt)
937{
938 struct pd_mode_data *pmdata = &port->mode_data;
939 int i;
940
941 for (i = 1; i < cnt; i++) {
942 u32 p = le32_to_cpu(payload[i]);
943 u16 svid;
944
945 svid = (p >> 16) & 0xffff;
946 if (!svid)
947 return false;
948
949 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
950 goto abort;
951
952 pmdata->svids[pmdata->nsvids++] = svid;
953 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
954
955 svid = p & 0xffff;
956 if (!svid)
957 return false;
958
959 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
960 goto abort;
961
962 pmdata->svids[pmdata->nsvids++] = svid;
963 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
964 }
965 return true;
966abort:
967 tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
968 return false;
969}
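/*
 * Editor's note (illustration, example values made up): each Discover SVIDs
 * response VDO packs two 16-bit SVIDs, upper half first, so a payload word of
 * 0xff018087 yields SVID 0xff01 followed by SVID 0x8087. A zero SVID ends the
 * list, and anything beyond SVID_DISCOVERY_MAX entries is dropped with a log
 * message.
 */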
970
971static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
972 int cnt)
973{
974 struct pd_mode_data *pmdata = &port->mode_data;
975 struct typec_altmode_desc *paltmode;
976 struct typec_mode_desc *pmode;
977 int i;
978
979 if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
980 /* Already logged in svdm_consume_svids() */
981 return;
982 }
983
984 paltmode = &pmdata->altmode_desc[pmdata->altmodes];
985 memset(paltmode, 0, sizeof(*paltmode));
986
987 paltmode->svid = pmdata->svids[pmdata->svid_index];
988
989 tcpm_log(port, " Alternate mode %d: SVID 0x%04x",
990 pmdata->altmodes, paltmode->svid);
991
992 for (i = 1; i < cnt && paltmode->n_modes < ALTMODE_MAX_MODES; i++) {
993 pmode = &paltmode->modes[paltmode->n_modes];
994 memset(pmode, 0, sizeof(*pmode));
995 pmode->vdo = le32_to_cpu(payload[i]);
996 pmode->index = i - 1;
997 paltmode->n_modes++;
998 tcpm_log(port, " VDO %d: 0x%08x",
999 pmode->index, pmode->vdo);
1000 }
1001 port->partner_altmode[pmdata->altmodes] =
1002 typec_partner_register_altmode(port->partner, paltmode);
1003 if (port->partner_altmode[pmdata->altmodes] == NULL) {
1004 tcpm_log(port,
1005 "Failed to register alternate modes for SVID 0x%04x",
1006 paltmode->svid);
1007 return;
1008 }
1009 pmdata->altmodes++;
1010}
1011
1012#define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
1013
1014static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
1015 u32 *response)
1016{
1017 u32 p0 = le32_to_cpu(payload[0]);
1018 int cmd_type = PD_VDO_CMDT(p0);
1019 int cmd = PD_VDO_CMD(p0);
1020 struct pd_mode_data *modep;
1021 int rlen = 0;
1022 u16 svid;
 1023 int i;
1024
1025 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1026 p0, cmd_type, cmd, cnt);
1027
1028 modep = &port->mode_data;
1029
1030 switch (cmd_type) {
1031 case CMDT_INIT:
1032 switch (cmd) {
1033 case CMD_DISCOVER_IDENT:
1034 /* 6.4.4.3.1: Only respond as UFP (device) */
1035 if (port->data_role == TYPEC_DEVICE &&
1036 port->nr_snk_vdo) {
1037 for (i = 0; i < port->nr_snk_vdo; i++)
 1038 response[i + 1] = port->snk_vdo[i];
1039 rlen = port->nr_snk_vdo + 1;
1040 }
1041 break;
1042 case CMD_DISCOVER_SVID:
1043 break;
1044 case CMD_DISCOVER_MODES:
1045 break;
1046 case CMD_ENTER_MODE:
1047 break;
1048 case CMD_EXIT_MODE:
1049 break;
1050 case CMD_ATTENTION:
1051 break;
1052 default:
1053 break;
1054 }
1055 if (rlen >= 1) {
1056 response[0] = p0 | VDO_CMDT(CMDT_RSP_ACK);
1057 } else if (rlen == 0) {
1058 response[0] = p0 | VDO_CMDT(CMDT_RSP_NAK);
1059 rlen = 1;
1060 } else {
1061 response[0] = p0 | VDO_CMDT(CMDT_RSP_BUSY);
1062 rlen = 1;
1063 }
1064 break;
1065 case CMDT_RSP_ACK:
1066 /* silently drop message if we are not connected */
1067 if (!port->partner)
1068 break;
1069
1070 switch (cmd) {
1071 case CMD_DISCOVER_IDENT:
1072 /* 6.4.4.3.1 */
1073 svdm_consume_identity(port, payload, cnt);
1074 response[0] = VDO(USB_SID_PD, 1, CMD_DISCOVER_SVID);
1075 rlen = 1;
1076 break;
1077 case CMD_DISCOVER_SVID:
1078 /* 6.4.4.3.2 */
1079 if (svdm_consume_svids(port, payload, cnt)) {
1080 response[0] = VDO(USB_SID_PD, 1,
1081 CMD_DISCOVER_SVID);
1082 rlen = 1;
1083 } else if (modep->nsvids && supports_modal(port)) {
1084 response[0] = VDO(modep->svids[0], 1,
1085 CMD_DISCOVER_MODES);
1086 rlen = 1;
1087 }
1088 break;
1089 case CMD_DISCOVER_MODES:
1090 /* 6.4.4.3.3 */
1091 svdm_consume_modes(port, payload, cnt);
1092 modep->svid_index++;
1093 if (modep->svid_index < modep->nsvids) {
1094 svid = modep->svids[modep->svid_index];
1095 response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
1096 rlen = 1;
1097 } else {
1098#if 0
1099 response[0] = pd_dfp_enter_mode(port, 0, 0);
1100 if (response[0])
1101 rlen = 1;
1102#endif
1103 }
1104 break;
1105 case CMD_ENTER_MODE:
1106 break;
1107 default:
1108 break;
1109 }
1110 break;
1111 default:
1112 break;
1113 }
1114
1115 return rlen;
1116}
1117
1118static void tcpm_handle_vdm_request(struct tcpm_port *port,
1119 const __le32 *payload, int cnt)
1120{
1121 int rlen = 0;
1122 u32 response[8] = { };
1123 u32 p0 = le32_to_cpu(payload[0]);
1124
1125 if (port->vdm_state == VDM_STATE_BUSY) {
1126 /* If UFP responded busy retry after timeout */
1127 if (PD_VDO_CMDT(p0) == CMDT_RSP_BUSY) {
1128 port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
1129 port->vdo_retry = (p0 & ~VDO_CMDT_MASK) |
1130 CMDT_INIT;
1131 mod_delayed_work(port->wq, &port->vdm_state_machine,
1132 msecs_to_jiffies(PD_T_VDM_BUSY));
1133 return;
1134 }
1135 port->vdm_state = VDM_STATE_DONE;
1136 }
1137
1138 if (PD_VDO_SVDM(p0))
1139 rlen = tcpm_pd_svdm(port, payload, cnt, response);
1140#if 0
1141 else
1142 rlen = tcpm_pd_custom_vdm(port, cnt, payload, response);
1143#endif
1144
1145 if (rlen > 0) {
1146 tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
1147 mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
1148 }
1149}
1150
1151static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
1152 const u32 *data, int count)
1153{
1154 u32 header;
1155
1156 if (WARN_ON(count > VDO_MAX_SIZE - 1))
1157 count = VDO_MAX_SIZE - 1;
1158
1159 /* set VDM header with VID & CMD */
1160 header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
1161 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), cmd);
1162 tcpm_queue_vdm(port, header, data, count);
1163
1164 mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
1165}
1166
1167static unsigned int vdm_ready_timeout(u32 vdm_hdr)
1168{
1169 unsigned int timeout;
1170 int cmd = PD_VDO_CMD(vdm_hdr);
1171
 1172 /* it's not a structured VDM command */
1173 if (!PD_VDO_SVDM(vdm_hdr))
1174 return PD_T_VDM_UNSTRUCTURED;
1175
1176 switch (PD_VDO_CMDT(vdm_hdr)) {
1177 case CMDT_INIT:
1178 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1179 timeout = PD_T_VDM_WAIT_MODE_E;
1180 else
1181 timeout = PD_T_VDM_SNDR_RSP;
1182 break;
1183 default:
1184 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1185 timeout = PD_T_VDM_E_MODE;
1186 else
1187 timeout = PD_T_VDM_RCVR_RSP;
1188 break;
1189 }
1190 return timeout;
1191}
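/*
 * Editor's note (illustration only): an initiated Discover Identity
 * (CMDT_INIT, not an Enter/Exit Mode command) waits PD_T_VDM_SNDR_RSP for the
 * reply, Enter/Exit Mode use the longer PD_T_VDM_WAIT_MODE_E /
 * PD_T_VDM_E_MODE budgets, and unstructured VDMs fall back to
 * PD_T_VDM_UNSTRUCTURED.
 */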
1192
1193static void vdm_run_state_machine(struct tcpm_port *port)
1194{
1195 struct pd_message msg;
1196 int i, res;
1197
1198 switch (port->vdm_state) {
1199 case VDM_STATE_READY:
1200 /* Only transmit VDM if attached */
1201 if (!port->attached) {
1202 port->vdm_state = VDM_STATE_ERR_BUSY;
1203 break;
1204 }
1205
1206 /*
 1207 * If there is traffic, or we are not in SRC_READY or SNK_READY,
 1208 * don't send a VDM.
1209 */
1210 if (port->state != SRC_READY && port->state != SNK_READY)
1211 break;
1212
1213 /* Prepare and send VDM */
1214 memset(&msg, 0, sizeof(msg));
1215 msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
1216 port->pwr_role,
1217 port->data_role,
1218 port->message_id, port->vdo_count);
1219 for (i = 0; i < port->vdo_count; i++)
1220 msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
1221 res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1222 if (res < 0) {
1223 port->vdm_state = VDM_STATE_ERR_SEND;
1224 } else {
1225 unsigned long timeout;
1226
1227 port->vdm_retries = 0;
1228 port->vdm_state = VDM_STATE_BUSY;
1229 timeout = vdm_ready_timeout(port->vdo_data[0]);
1230 mod_delayed_work(port->wq, &port->vdm_state_machine,
1231 timeout);
1232 }
1233 break;
1234 case VDM_STATE_WAIT_RSP_BUSY:
1235 port->vdo_data[0] = port->vdo_retry;
1236 port->vdo_count = 1;
1237 port->vdm_state = VDM_STATE_READY;
1238 break;
1239 case VDM_STATE_BUSY:
1240 port->vdm_state = VDM_STATE_ERR_TMOUT;
1241 break;
1242 case VDM_STATE_ERR_SEND:
1243 /*
1244 * A partner which does not support USB PD will not reply,
1245 * so this is not a fatal error. At the same time, some
1246 * devices may not return GoodCRC under some circumstances,
1247 * so we need to retry.
1248 */
1249 if (port->vdm_retries < 3) {
1250 tcpm_log(port, "VDM Tx error, retry");
1251 port->vdm_retries++;
1252 port->vdm_state = VDM_STATE_READY;
1253 }
1254 break;
1255 default:
1256 break;
1257 }
1258}
1259
1260static void vdm_state_machine_work(struct work_struct *work)
1261{
1262 struct tcpm_port *port = container_of(work, struct tcpm_port,
1263 vdm_state_machine.work);
1264 enum vdm_states prev_state;
1265
1266 mutex_lock(&port->lock);
1267
1268 /*
1269 * Continue running as long as the port is not busy and there was
1270 * a state change.
1271 */
1272 do {
1273 prev_state = port->vdm_state;
1274 vdm_run_state_machine(port);
1275 } while (port->vdm_state != prev_state &&
1276 port->vdm_state != VDM_STATE_BUSY);
1277
1278 mutex_unlock(&port->lock);
1279}
1280
1281/*
1282 * PD (data, control) command handling functions
1283 */
1284static void tcpm_pd_data_request(struct tcpm_port *port,
1285 const struct pd_message *msg)
1286{
1287 enum pd_data_msg_type type = pd_header_type_le(msg->header);
1288 unsigned int cnt = pd_header_cnt_le(msg->header);
1289 unsigned int i;
1290
1291 switch (type) {
1292 case PD_DATA_SOURCE_CAP:
1293 if (port->pwr_role != TYPEC_SINK)
1294 break;
1295
1296 for (i = 0; i < cnt; i++)
1297 port->source_caps[i] = le32_to_cpu(msg->payload[i]);
1298
1299 port->nr_source_caps = cnt;
1300
1301 tcpm_log_source_caps(port);
1302
1303 /*
1304 * This message may be received even if VBUS is not
1305 * present. This is quite unexpected; see USB PD
1306 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
1307 * However, at the same time, we must be ready to
1308 * receive this message and respond to it 15ms after
1309 * receiving PS_RDY during power swap operations, no matter
1310 * if VBUS is available or not (USB PD specification,
1311 * section 6.5.9.2).
1312 * So we need to accept the message either way,
1313 * but be prepared to keep waiting for VBUS after it was
1314 * handled.
1315 */
1316 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
1317 break;
1318 case PD_DATA_REQUEST:
1319 if (port->pwr_role != TYPEC_SOURCE ||
1320 cnt != 1) {
1321 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1322 break;
1323 }
1324 port->sink_request = le32_to_cpu(msg->payload[0]);
1325 tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
1326 break;
1327 case PD_DATA_SINK_CAP:
1328 /* We don't do anything with this at the moment... */
1329 for (i = 0; i < cnt; i++)
1330 port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
1331 port->nr_sink_caps = cnt;
1332 break;
1333 case PD_DATA_VENDOR_DEF:
1334 tcpm_handle_vdm_request(port, msg->payload, cnt);
1335 break;
1336 case PD_DATA_BIST:
1337 if (port->state == SRC_READY || port->state == SNK_READY) {
1338 port->bist_request = le32_to_cpu(msg->payload[0]);
1339 tcpm_set_state(port, BIST_RX, 0);
1340 }
1341 break;
1342 default:
1343 tcpm_log(port, "Unhandled data message type %#x", type);
1344 break;
1345 }
1346}
1347
1348static void tcpm_pd_ctrl_request(struct tcpm_port *port,
1349 const struct pd_message *msg)
1350{
1351 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1352 enum tcpm_state next_state;
1353
1354 switch (type) {
1355 case PD_CTRL_GOOD_CRC:
1356 case PD_CTRL_PING:
1357 break;
1358 case PD_CTRL_GET_SOURCE_CAP:
1359 switch (port->state) {
1360 case SRC_READY:
1361 case SNK_READY:
1362 tcpm_queue_message(port, PD_MSG_DATA_SOURCE_CAP);
1363 break;
1364 default:
1365 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1366 break;
1367 }
1368 break;
1369 case PD_CTRL_GET_SINK_CAP:
1370 switch (port->state) {
1371 case SRC_READY:
1372 case SNK_READY:
1373 tcpm_queue_message(port, PD_MSG_DATA_SINK_CAP);
1374 break;
1375 default:
1376 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1377 break;
1378 }
1379 break;
1380 case PD_CTRL_GOTO_MIN:
1381 break;
1382 case PD_CTRL_PS_RDY:
1383 switch (port->state) {
1384 case SNK_TRANSITION_SINK:
1385 if (port->vbus_present) {
1386 tcpm_set_current_limit(port,
1387 port->current_limit,
1388 port->supply_voltage);
 1389 port->explicit_contract = true;
1390 tcpm_set_state(port, SNK_READY, 0);
1391 } else {
1392 /*
1393 * Seen after power swap. Keep waiting for VBUS
1394 * in a transitional state.
1395 */
1396 tcpm_set_state(port,
1397 SNK_TRANSITION_SINK_VBUS, 0);
1398 }
1399 break;
1400 case PR_SWAP_SRC_SNK_SOURCE_OFF:
1401 tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
1402 break;
1403 case PR_SWAP_SNK_SRC_SINK_OFF:
1404 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
1405 break;
1406 case VCONN_SWAP_WAIT_FOR_VCONN:
1407 tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
1408 break;
1409 default:
1410 break;
1411 }
1412 break;
1413 case PD_CTRL_REJECT:
1414 case PD_CTRL_WAIT:
1415 switch (port->state) {
1416 case SNK_NEGOTIATE_CAPABILITIES:
1417 /* USB PD specification, Figure 8-43 */
1418 if (port->explicit_contract)
1419 next_state = SNK_READY;
1420 else
1421 next_state = SNK_WAIT_CAPABILITIES;
1422 tcpm_set_state(port, next_state, 0);
1423 break;
1424 case DR_SWAP_SEND:
1425 port->swap_status = (type == PD_CTRL_WAIT ?
1426 -EAGAIN : -EOPNOTSUPP);
1427 tcpm_set_state(port, DR_SWAP_CANCEL, 0);
1428 break;
1429 case PR_SWAP_SEND:
1430 port->swap_status = (type == PD_CTRL_WAIT ?
1431 -EAGAIN : -EOPNOTSUPP);
1432 tcpm_set_state(port, PR_SWAP_CANCEL, 0);
1433 break;
1434 case VCONN_SWAP_SEND:
1435 port->swap_status = (type == PD_CTRL_WAIT ?
1436 -EAGAIN : -EOPNOTSUPP);
1437 tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
1438 break;
1439 default:
1440 break;
1441 }
1442 break;
1443 case PD_CTRL_ACCEPT:
1444 switch (port->state) {
1445 case SNK_NEGOTIATE_CAPABILITIES:
1446 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
1447 break;
1448 case SOFT_RESET_SEND:
1449 port->message_id = 0;
 1450 port->rx_msgid = -1;
1451 if (port->pwr_role == TYPEC_SOURCE)
1452 next_state = SRC_SEND_CAPABILITIES;
1453 else
1454 next_state = SNK_WAIT_CAPABILITIES;
1455 tcpm_set_state(port, next_state, 0);
1456 break;
1457 case DR_SWAP_SEND:
1458 tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
1459 break;
1460 case PR_SWAP_SEND:
1461 tcpm_set_state(port, PR_SWAP_START, 0);
1462 break;
1463 case VCONN_SWAP_SEND:
1464 tcpm_set_state(port, VCONN_SWAP_START, 0);
1465 break;
1466 default:
1467 break;
1468 }
1469 break;
1470 case PD_CTRL_SOFT_RESET:
1471 tcpm_set_state(port, SOFT_RESET, 0);
1472 break;
1473 case PD_CTRL_DR_SWAP:
 1474 if (port->port_type != TYPEC_PORT_DRP) {
1475 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1476 break;
1477 }
1478 /*
1479 * XXX
1480 * 6.3.9: If an alternate mode is active, a request to swap
1481 * alternate modes shall trigger a port reset.
1482 */
1483 switch (port->state) {
1484 case SRC_READY:
1485 case SNK_READY:
1486 tcpm_set_state(port, DR_SWAP_ACCEPT, 0);
1487 break;
1488 default:
1489 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1490 break;
1491 }
1492 break;
1493 case PD_CTRL_PR_SWAP:
 1494 if (port->port_type != TYPEC_PORT_DRP) {
1495 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1496 break;
1497 }
1498 switch (port->state) {
1499 case SRC_READY:
1500 case SNK_READY:
1501 tcpm_set_state(port, PR_SWAP_ACCEPT, 0);
1502 break;
1503 default:
1504 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1505 break;
1506 }
1507 break;
1508 case PD_CTRL_VCONN_SWAP:
1509 switch (port->state) {
1510 case SRC_READY:
1511 case SNK_READY:
1512 tcpm_set_state(port, VCONN_SWAP_ACCEPT, 0);
1513 break;
1514 default:
1515 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1516 break;
1517 }
1518 break;
1519 default:
1520 tcpm_log(port, "Unhandled ctrl message type %#x", type);
1521 break;
1522 }
1523}
1524
1525static void tcpm_pd_rx_handler(struct work_struct *work)
1526{
1527 struct pd_rx_event *event = container_of(work,
1528 struct pd_rx_event, work);
1529 const struct pd_message *msg = &event->msg;
1530 unsigned int cnt = pd_header_cnt_le(msg->header);
1531 struct tcpm_port *port = event->port;
1532
1533 mutex_lock(&port->lock);
1534
1535 tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
1536 port->attached);
1537
1538 if (port->attached) {
1539 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1540 unsigned int msgid = pd_header_msgid_le(msg->header);
1541
1542 /*
1543 * USB PD standard, 6.6.1.2:
1544 * "... if MessageID value in a received Message is the
1545 * same as the stored value, the receiver shall return a
1546 * GoodCRC Message with that MessageID value and drop
1547 * the Message (this is a retry of an already received
1548 * Message). Note: this shall not apply to the Soft_Reset
1549 * Message which always has a MessageID value of zero."
1550 */
1551 if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
1552 goto done;
1553 port->rx_msgid = msgid;
1554
1555 /*
1556 * If both ends believe to be DFP/host, we have a data role
1557 * mismatch.
1558 */
1559 if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
1560 (port->data_role == TYPEC_HOST)) {
1561 tcpm_log(port,
1562 "Data role mismatch, initiating error recovery");
1563 tcpm_set_state(port, ERROR_RECOVERY, 0);
1564 } else {
1565 if (cnt)
1566 tcpm_pd_data_request(port, msg);
1567 else
1568 tcpm_pd_ctrl_request(port, msg);
1569 }
1570 }
1571
 1572done:
1573 mutex_unlock(&port->lock);
1574 kfree(event);
1575}
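/*
 * Editor's note (illustration only): the rx_msgid test above implements the
 * retry filter from USB PD 6.6.1.2. If the partner retransmits because it
 * missed our GoodCRC, the repeated MessageID (e.g. 3, 3, 4) makes the
 * duplicate fall through to "done" without being processed again, while
 * Soft_Reset (always MessageID 0) is exempt and handled regardless.
 */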
1576
1577void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
1578{
1579 struct pd_rx_event *event;
1580
1581 event = kzalloc(sizeof(*event), GFP_ATOMIC);
1582 if (!event)
1583 return;
1584
1585 INIT_WORK(&event->work, tcpm_pd_rx_handler);
1586 event->port = port;
1587 memcpy(&event->msg, msg, sizeof(*msg));
1588 queue_work(port->wq, &event->work);
1589}
1590EXPORT_SYMBOL_GPL(tcpm_pd_receive);
1591
1592static int tcpm_pd_send_control(struct tcpm_port *port,
1593 enum pd_ctrl_msg_type type)
1594{
1595 struct pd_message msg;
1596
1597 memset(&msg, 0, sizeof(msg));
1598 msg.header = PD_HEADER_LE(type, port->pwr_role,
1599 port->data_role,
1600 port->message_id, 0);
1601
1602 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1603}
1604
1605/*
1606 * Send queued message without affecting state.
1607 * Return true if state machine should go back to sleep,
1608 * false otherwise.
1609 */
1610static bool tcpm_send_queued_message(struct tcpm_port *port)
1611{
1612 enum pd_msg_request queued_message;
1613
1614 do {
1615 queued_message = port->queued_message;
1616 port->queued_message = PD_MSG_NONE;
1617
1618 switch (queued_message) {
1619 case PD_MSG_CTRL_WAIT:
1620 tcpm_pd_send_control(port, PD_CTRL_WAIT);
1621 break;
1622 case PD_MSG_CTRL_REJECT:
1623 tcpm_pd_send_control(port, PD_CTRL_REJECT);
1624 break;
1625 case PD_MSG_DATA_SINK_CAP:
1626 tcpm_pd_send_sink_caps(port);
1627 break;
1628 case PD_MSG_DATA_SOURCE_CAP:
1629 tcpm_pd_send_source_caps(port);
1630 break;
1631 default:
1632 break;
1633 }
1634 } while (port->queued_message != PD_MSG_NONE);
1635
1636 if (port->delayed_state != INVALID_STATE) {
1637 if (time_is_after_jiffies(port->delayed_runtime)) {
1638 mod_delayed_work(port->wq, &port->state_machine,
1639 port->delayed_runtime - jiffies);
1640 return true;
1641 }
1642 port->delayed_state = INVALID_STATE;
1643 }
1644 return false;
1645}
1646
1647static int tcpm_pd_check_request(struct tcpm_port *port)
1648{
1649 u32 pdo, rdo = port->sink_request;
1650 unsigned int max, op, pdo_max, index;
1651 enum pd_pdo_type type;
1652
1653 index = rdo_index(rdo);
1654 if (!index || index > port->nr_src_pdo)
1655 return -EINVAL;
1656
1657 pdo = port->src_pdo[index - 1];
1658 type = pdo_type(pdo);
1659 switch (type) {
1660 case PDO_TYPE_FIXED:
1661 case PDO_TYPE_VAR:
1662 max = rdo_max_current(rdo);
1663 op = rdo_op_current(rdo);
1664 pdo_max = pdo_max_current(pdo);
1665
1666 if (op > pdo_max)
1667 return -EINVAL;
1668 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1669 return -EINVAL;
1670
1671 if (type == PDO_TYPE_FIXED)
1672 tcpm_log(port,
1673 "Requested %u mV, %u mA for %u / %u mA",
1674 pdo_fixed_voltage(pdo), pdo_max, op, max);
1675 else
1676 tcpm_log(port,
1677 "Requested %u -> %u mV, %u mA for %u / %u mA",
1678 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1679 pdo_max, op, max);
1680 break;
1681 case PDO_TYPE_BATT:
1682 max = rdo_max_power(rdo);
1683 op = rdo_op_power(rdo);
1684 pdo_max = pdo_max_power(pdo);
1685
1686 if (op > pdo_max)
1687 return -EINVAL;
1688 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1689 return -EINVAL;
1690 tcpm_log(port,
1691 "Requested %u -> %u mV, %u mW for %u / %u mW",
1692 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1693 pdo_max, op, max);
1694 break;
1695 default:
1696 return -EINVAL;
1697 }
1698
1699 port->op_vsafe5v = index == 1;
1700
1701 return 0;
1702}
1703
1704static int tcpm_pd_select_pdo(struct tcpm_port *port)
1705{
1706 unsigned int i, max_mw = 0, max_mv = 0;
1707 int ret = -EINVAL;
1708
1709 /*
1710 * Select the source PDO providing the most power while staying within
 1711 * the board's voltage limits. Prefer higher voltages when the power is equal.
1712 */
1713 for (i = 0; i < port->nr_source_caps; i++) {
1714 u32 pdo = port->source_caps[i];
1715 enum pd_pdo_type type = pdo_type(pdo);
1716 unsigned int mv, ma, mw;
1717
1718 if (type == PDO_TYPE_FIXED)
1719 mv = pdo_fixed_voltage(pdo);
1720 else
1721 mv = pdo_min_voltage(pdo);
1722
1723 if (type == PDO_TYPE_BATT) {
1724 mw = pdo_max_power(pdo);
1725 } else {
1726 ma = min(pdo_max_current(pdo),
1727 port->max_snk_ma);
1728 mw = ma * mv / 1000;
1729 }
1730
 1731 /* Prefer higher voltages if available */
1732 if ((mw > max_mw || (mw == max_mw && mv > max_mv)) &&
1733 mv <= port->max_snk_mv) {
1734 ret = i;
1735 max_mw = mw;
1736 max_mv = mv;
1737 }
1738 }
1739
1740 return ret;
1741}
1742
1743static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1744{
1745 unsigned int mv, ma, mw, flags;
1746 unsigned int max_ma, max_mw;
1747 enum pd_pdo_type type;
1748 int index;
1749 u32 pdo;
1750
1751 index = tcpm_pd_select_pdo(port);
1752 if (index < 0)
1753 return -EINVAL;
1754 pdo = port->source_caps[index];
1755 type = pdo_type(pdo);
1756
1757 if (type == PDO_TYPE_FIXED)
1758 mv = pdo_fixed_voltage(pdo);
1759 else
1760 mv = pdo_min_voltage(pdo);
1761
1762 /* Select maximum available current within the board's power limit */
1763 if (type == PDO_TYPE_BATT) {
1764 mw = pdo_max_power(pdo);
1765 ma = 1000 * min(mw, port->max_snk_mw) / mv;
1766 } else {
1767 ma = min(pdo_max_current(pdo),
1768 1000 * port->max_snk_mw / mv);
1769 }
1770 ma = min(ma, port->max_snk_ma);
1771
 1772 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
1773
1774 /* Set mismatch bit if offered power is less than operating power */
1775 mw = ma * mv / 1000;
1776 max_ma = ma;
1777 max_mw = mw;
1778 if (mw < port->operating_snk_mw) {
1779 flags |= RDO_CAP_MISMATCH;
1780 max_mw = port->operating_snk_mw;
1781 max_ma = max_mw * 1000 / mv;
1782 }
1783
1784 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
1785 port->cc_req, port->cc1, port->cc2, port->vbus_source,
1786 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
1787 port->polarity);
1788
1789 if (type == PDO_TYPE_BATT) {
1790 *rdo = RDO_BATT(index + 1, mw, max_mw, flags);
1791
1792 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
1793 index, mv, mw,
1794 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1795 } else {
1796 *rdo = RDO_FIXED(index + 1, ma, max_ma, flags);
1797
1798 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
1799 index, mv, ma,
1800 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1801 }
1802
1803 port->current_limit = ma;
1804 port->supply_voltage = mv;
1805
1806 return 0;
1807}
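/*
 * Editor's note, a worked example with hypothetical board limits: for a fixed
 * 9000 mV / 3000 mA source PDO with max_snk_mw = 18000 (and max_snk_ma not
 * lower), the request is capped at ma = min(3000, 1000 * 18000 / 9000) =
 * 2000 mA, i.e. 18 W. If operating_snk_mw were larger than 18000,
 * RDO_CAP_MISMATCH would additionally be set in the built RDO.
 */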
1808
1809static int tcpm_pd_send_request(struct tcpm_port *port)
1810{
1811 struct pd_message msg;
1812 int ret;
1813 u32 rdo;
1814
1815 ret = tcpm_pd_build_request(port, &rdo);
1816 if (ret < 0)
1817 return ret;
1818
1819 memset(&msg, 0, sizeof(msg));
1820 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
1821 port->pwr_role,
1822 port->data_role,
1823 port->message_id, 1);
1824 msg.payload[0] = cpu_to_le32(rdo);
1825
1826 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1827}
1828
1829static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
1830{
1831 int ret;
1832
1833 if (enable && port->vbus_charge)
1834 return -EINVAL;
1835
1836 tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
1837
1838 ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
1839 if (ret < 0)
1840 return ret;
1841
1842 port->vbus_source = enable;
1843 return 0;
1844}
1845
1846static int tcpm_set_charge(struct tcpm_port *port, bool charge)
1847{
1848 int ret;
1849
1850 if (charge && port->vbus_source)
1851 return -EINVAL;
1852
1853 if (charge != port->vbus_charge) {
1854 tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
1855 ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
1856 charge);
1857 if (ret < 0)
1858 return ret;
1859 }
1860 port->vbus_charge = charge;
1861 return 0;
1862}
1863
1864static bool tcpm_start_drp_toggling(struct tcpm_port *port)
1865{
1866 int ret;
1867
1868 if (port->tcpc->start_drp_toggling &&
 1869 port->port_type == TYPEC_PORT_DRP) {
1870 tcpm_log_force(port, "Start DRP toggling");
1871 ret = port->tcpc->start_drp_toggling(port->tcpc,
1872 tcpm_rp_cc(port));
1873 if (!ret)
1874 return true;
1875 }
1876
1877 return false;
1878}
1879
1880static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
1881{
1882 tcpm_log(port, "cc:=%d", cc);
1883 port->cc_req = cc;
1884 port->tcpc->set_cc(port->tcpc, cc);
1885}
1886
1887static int tcpm_init_vbus(struct tcpm_port *port)
1888{
1889 int ret;
1890
1891 ret = port->tcpc->set_vbus(port->tcpc, false, false);
1892 port->vbus_source = false;
1893 port->vbus_charge = false;
1894 return ret;
1895}
1896
1897static int tcpm_init_vconn(struct tcpm_port *port)
1898{
1899 int ret;
1900
1901 ret = port->tcpc->set_vconn(port->tcpc, false);
1902 port->vconn_role = TYPEC_SINK;
1903 return ret;
1904}
1905
1906static void tcpm_typec_connect(struct tcpm_port *port)
1907{
1908 if (!port->connected) {
1909 /* Make sure we don't report stale identity information */
1910 memset(&port->partner_ident, 0, sizeof(port->partner_ident));
1911 port->partner_desc.usb_pd = port->pd_capable;
1912 if (tcpm_port_is_debug(port))
1913 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
1914 else if (tcpm_port_is_audio(port))
1915 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
1916 else
1917 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
1918 port->partner = typec_register_partner(port->typec_port,
1919 &port->partner_desc);
1920 port->connected = true;
1921 }
1922}
1923
1924static int tcpm_src_attach(struct tcpm_port *port)
1925{
1926 enum typec_cc_polarity polarity =
1927 port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
1928 : TYPEC_POLARITY_CC1;
1929 int ret;
1930
1931 if (port->attached)
1932 return 0;
1933
1934 ret = tcpm_set_polarity(port, polarity);
1935 if (ret < 0)
1936 return ret;
1937
1938 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
1939 if (ret < 0)
1940 return ret;
1941
1942 ret = port->tcpc->set_pd_rx(port->tcpc, true);
1943 if (ret < 0)
1944 goto out_disable_mux;
1945
1946 /*
1947 * USB Type-C specification, version 1.2,
1948 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
1949 * Enable VCONN only if the non-RD port is set to RA.
1950 */
1951 if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
1952 (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
1953 ret = tcpm_set_vconn(port, true);
1954 if (ret < 0)
1955 goto out_disable_pd;
1956 }
1957
1958 ret = tcpm_set_vbus(port, true);
1959 if (ret < 0)
1960 goto out_disable_vconn;
1961
1962 port->pd_capable = false;
1963
1964 port->partner = NULL;
1965
1966 port->attached = true;
1967 port->send_discover = true;
1968
1969 return 0;
1970
1971out_disable_vconn:
1972 tcpm_set_vconn(port, false);
1973out_disable_pd:
1974 port->tcpc->set_pd_rx(port->tcpc, false);
1975out_disable_mux:
1976 tcpm_mux_set(port, TYPEC_MUX_NONE, TCPC_USB_SWITCH_DISCONNECT);
1977 return ret;
1978}
1979
1980static void tcpm_typec_disconnect(struct tcpm_port *port)
1981{
1982 if (port->connected) {
1983 typec_unregister_partner(port->partner);
1984 port->partner = NULL;
1985 port->connected = false;
1986 }
1987}
1988
1989static void tcpm_unregister_altmodes(struct tcpm_port *port)
1990{
1991 struct pd_mode_data *modep = &port->mode_data;
1992 int i;
1993
1994 for (i = 0; i < modep->altmodes; i++) {
1995 typec_unregister_altmode(port->partner_altmode[i]);
1996 port->partner_altmode[i] = NULL;
1997 }
1998
1999 memset(modep, 0, sizeof(*modep));
2000}
2001
2002static void tcpm_reset_port(struct tcpm_port *port)
2003{
2004 tcpm_unregister_altmodes(port);
2005 tcpm_typec_disconnect(port);
2006 port->attached = false;
2007 port->pd_capable = false;
2008
2009 /*
 2010 * The first Rx MessageID should be 0; set this to a sentinel of -1 so
 2011 * that tcpm_pd_rx_handler() can tell whether it has already seen a message.
2012 */
2013 port->rx_msgid = -1;
2014
2015 port->tcpc->set_pd_rx(port->tcpc, false);
2016 tcpm_init_vbus(port); /* also disables charging */
2017 tcpm_init_vconn(port);
2018 tcpm_set_current_limit(port, 0, 0);
2019 tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
2020 tcpm_set_attached_state(port, false);
2021 port->try_src_count = 0;
2022 port->try_snk_count = 0;
2023}
2024
2025static void tcpm_detach(struct tcpm_port *port)
2026{
2027 if (!port->attached)
2028 return;
2029
2030 if (tcpm_port_is_disconnected(port))
2031 port->hard_reset_count = 0;
2032
2033 tcpm_reset_port(port);
2034}
2035
2036static void tcpm_src_detach(struct tcpm_port *port)
2037{
2038 tcpm_detach(port);
2039}
2040
2041static int tcpm_snk_attach(struct tcpm_port *port)
2042{
2043 int ret;
2044
2045 if (port->attached)
2046 return 0;
2047
2048 ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
2049 TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
2050 if (ret < 0)
2051 return ret;
2052
2053 ret = tcpm_set_roles(port, true, TYPEC_SINK, TYPEC_DEVICE);
2054 if (ret < 0)
2055 return ret;
2056
2057 port->pd_capable = false;
2058
2059 port->partner = NULL;
2060
2061 port->attached = true;
2062 port->send_discover = true;
2063
2064 return 0;
2065}
2066
2067static void tcpm_snk_detach(struct tcpm_port *port)
2068{
2069 tcpm_detach(port);
2070
2071 /* XXX: (Dis)connect SuperSpeed mux? */
2072}
2073
2074static int tcpm_acc_attach(struct tcpm_port *port)
2075{
2076 int ret;
2077
2078 if (port->attached)
2079 return 0;
2080
2081 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
2082 if (ret < 0)
2083 return ret;
2084
2085 port->partner = NULL;
2086
2087 tcpm_typec_connect(port);
2088
2089 port->attached = true;
2090
2091 return 0;
2092}
2093
2094static void tcpm_acc_detach(struct tcpm_port *port)
2095{
2096 tcpm_detach(port);
2097}
2098
2099static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
2100{
2101 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
2102 return HARD_RESET_SEND;
2103 if (port->pd_capable)
2104 return ERROR_RECOVERY;
2105 if (port->pwr_role == TYPEC_SOURCE)
2106 return SRC_UNATTACHED;
2107 if (port->state == SNK_WAIT_CAPABILITIES)
2108 return SNK_READY;
2109 return SNK_UNATTACHED;
2110}
2111
2112static inline enum tcpm_state ready_state(struct tcpm_port *port)
2113{
2114 if (port->pwr_role == TYPEC_SOURCE)
2115 return SRC_READY;
2116 else
2117 return SNK_READY;
2118}
2119
2120static inline enum tcpm_state unattached_state(struct tcpm_port *port)
2121{
2122 if (port->pwr_role == TYPEC_SOURCE)
2123 return SRC_UNATTACHED;
2124 else
2125 return SNK_UNATTACHED;
2126}
2127
2128static void tcpm_check_send_discover(struct tcpm_port *port)
2129{
2130 if (port->data_role == TYPEC_HOST && port->send_discover &&
2131 port->pd_capable) {
2132 tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
2133 port->send_discover = false;
2134 }
2135}
2136
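/* Complete a pending swap request and wake up the waiting caller. */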
2137static void tcpm_swap_complete(struct tcpm_port *port, int result)
2138{
2139 if (port->swap_pending) {
2140 port->swap_status = result;
2141 port->swap_pending = false;
b17dd571 2142 port->non_pd_role_swap = false;
2143 complete(&port->swap_complete);
2144 }
2145}
2146
53b70e5c 2147static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
2148{
2149 switch (cc) {
2150 case TYPEC_CC_RP_1_5:
2151 return TYPEC_PWR_MODE_1_5A;
2152 case TYPEC_CC_RP_3_0:
2153 return TYPEC_PWR_MODE_3_0A;
2154 case TYPEC_CC_RP_DEF:
2155 default:
2156 return TYPEC_PWR_MODE_USB;
2157 }
2158}
2159
2160static void run_state_machine(struct tcpm_port *port)
2161{
2162 int ret;
fce042f0 2163 enum typec_pwr_opmode opmode;
2164
2165 port->enter_state = port->state;
2166 switch (port->state) {
2167 case DRP_TOGGLING:
2168 break;
2169 /* SRC states */
2170 case SRC_UNATTACHED:
2171 if (!port->non_pd_role_swap)
2172 tcpm_swap_complete(port, -ENOTCONN);
2173 tcpm_src_detach(port);
2174 if (tcpm_start_drp_toggling(port)) {
2175 tcpm_set_state(port, DRP_TOGGLING, 0);
2176 break;
2177 }
2178 tcpm_set_cc(port, tcpm_rp_cc(port));
9b0ae699 2179 if (port->port_type == TYPEC_PORT_DRP)
2180 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
2181 break;
2182 case SRC_ATTACH_WAIT:
2183 if (tcpm_port_is_debug(port))
2184 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
2185 PD_T_CC_DEBOUNCE);
2186 else if (tcpm_port_is_audio(port))
2187 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
2188 PD_T_CC_DEBOUNCE);
2189 else if (tcpm_port_is_source(port))
2190 tcpm_set_state(port,
2191 tcpm_try_snk(port) ? SNK_TRY
2192 : SRC_ATTACHED,
2193 PD_T_CC_DEBOUNCE);
2194 break;
2195
2196 case SNK_TRY:
2197 port->try_snk_count++;
2198 /*
2199 * Requirements:
2200 * - Do not drive vconn or vbus
2201 * - Terminate CC pins (both) to Rd
2202 * Action:
2203 * - Wait for tDRPTry (PD_T_DRP_TRY).
2204 * Until then, ignore any state changes.
2205 */
2206 tcpm_set_cc(port, TYPEC_CC_RD);
2207 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
2208 break;
2209 case SNK_TRY_WAIT:
2210 if (port->vbus_present && tcpm_port_is_sink(port)) {
2211 tcpm_set_state(port, SNK_ATTACHED, 0);
2212 break;
2213 }
2214 if (!tcpm_port_is_sink(port)) {
2215 tcpm_set_state(port, SRC_TRYWAIT,
2216 PD_T_PD_DEBOUNCE);
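/* Clear max_wait so that SRC_TRYWAIT arms a fresh PD_T_DRP_TRY deadline */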
02d5be46 2217 port->max_wait = 0;
2218 break;
2219 }
2220 /* No vbus, cc state is sink or open */
2221 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED, PD_T_DRP_TRYWAIT);
2222 break;
2223 case SRC_TRYWAIT:
2224 tcpm_set_cc(port, tcpm_rp_cc(port));
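/*
 * Bound the time spent bouncing between SRC_TRYWAIT and its debounce
 * state: arm a PD_T_DRP_TRY deadline on the first pass and, once it has
 * expired, give up and fall back to SNK_UNATTACHED instead of looping
 * here indefinitely.
 */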
2225 if (port->max_wait == 0) {
2226 port->max_wait = jiffies +
2227 msecs_to_jiffies(PD_T_DRP_TRY);
2228 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
2229 PD_T_DRP_TRY);
2230 } else {
2231 if (time_is_after_jiffies(port->max_wait))
2232 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
2233 jiffies_to_msecs(port->max_wait -
2234 jiffies));
2235 else
2236 tcpm_set_state(port, SNK_UNATTACHED, 0);
2237 }
2238 break;
2239 case SRC_TRYWAIT_DEBOUNCE:
2240 tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
2241 break;
2242 case SRC_TRYWAIT_UNATTACHED:
2243 tcpm_set_state(port, SNK_UNATTACHED, 0);
2244 break;
2245
2246 case SRC_ATTACHED:
2247 ret = tcpm_src_attach(port);
2248 tcpm_set_state(port, SRC_UNATTACHED,
2249 ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
2250 break;
2251 case SRC_STARTUP:
2252 opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
2253 typec_set_pwr_opmode(port->typec_port, opmode);
2254 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2255 port->caps_count = 0;
2256 port->message_id = 0;
5fec4b54 2257 port->rx_msgid = -1;
2258 port->explicit_contract = false;
2259 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2260 break;
2261 case SRC_SEND_CAPABILITIES:
2262 port->caps_count++;
2263 if (port->caps_count > PD_N_CAPS_COUNT) {
2264 tcpm_set_state(port, SRC_READY, 0);
2265 break;
2266 }
2267 ret = tcpm_pd_send_source_caps(port);
2268 if (ret < 0) {
2269 tcpm_set_state(port, SRC_SEND_CAPABILITIES,
2270 PD_T_SEND_SOURCE_CAP);
2271 } else {
2272 /*
2273 * Per standard, we should clear the reset counter here.
2274 * However, that can result in state machine hang-ups.
2275 * Reset it only in READY state to improve stability.
2276 */
2277 /* port->hard_reset_count = 0; */
2278 port->caps_count = 0;
2279 port->pd_capable = true;
2280 tcpm_set_state_cond(port, hard_reset_state(port),
2281 PD_T_SEND_SOURCE_CAP);
2282 }
2283 break;
2284 case SRC_NEGOTIATE_CAPABILITIES:
2285 ret = tcpm_pd_check_request(port);
2286 if (ret < 0) {
2287 tcpm_pd_send_control(port, PD_CTRL_REJECT);
2288 if (!port->explicit_contract) {
2289 tcpm_set_state(port,
2290 SRC_WAIT_NEW_CAPABILITIES, 0);
2291 } else {
2292 tcpm_set_state(port, SRC_READY, 0);
2293 }
2294 } else {
2295 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2296 tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
2297 PD_T_SRC_TRANSITION);
2298 }
2299 break;
2300 case SRC_TRANSITION_SUPPLY:
2301 /* XXX: regulator_set_voltage(vbus, ...) */
2302 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2303 port->explicit_contract = true;
2304 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
2305 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2306 tcpm_set_state_cond(port, SRC_READY, 0);
2307 break;
2308 case SRC_READY:
2309#if 1
2310 port->hard_reset_count = 0;
2311#endif
2312 port->try_src_count = 0;
2313
3113bf1a 2314 tcpm_swap_complete(port, 0);
f0690a25 2315 tcpm_typec_connect(port);
2316 tcpm_check_send_discover(port);
2317 /*
2318 * 6.3.5
2319 * Sending ping messages is not necessary if
2320 * - the source operates at vSafe5V
2321 * or
2322 * - The system is not operating in PD mode
2323 * or
2324 * - Both partners are connected using a Type-C connector
 2325 * XXX How do we know that?
2326 */
2327 if (port->pwr_opmode == TYPEC_PWR_MODE_PD &&
2328 !port->op_vsafe5v) {
2329 tcpm_pd_send_control(port, PD_CTRL_PING);
2330 tcpm_set_state_cond(port, SRC_READY,
2331 PD_T_SOURCE_ACTIVITY);
2332 }
2333 break;
2334 case SRC_WAIT_NEW_CAPABILITIES:
2335 /* Nothing to do... */
2336 break;
2337
2338 /* SNK states */
2339 case SNK_UNATTACHED:
2340 if (!port->non_pd_role_swap)
2341 tcpm_swap_complete(port, -ENOTCONN);
2342 tcpm_snk_detach(port);
2343 if (tcpm_start_drp_toggling(port)) {
2344 tcpm_set_state(port, DRP_TOGGLING, 0);
2345 break;
2346 }
2347 tcpm_set_cc(port, TYPEC_CC_RD);
9b0ae699 2348 if (port->port_type == TYPEC_PORT_DRP)
2349 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
2350 break;
2351 case SNK_ATTACH_WAIT:
2352 if ((port->cc1 == TYPEC_CC_OPEN &&
2353 port->cc2 != TYPEC_CC_OPEN) ||
2354 (port->cc1 != TYPEC_CC_OPEN &&
2355 port->cc2 == TYPEC_CC_OPEN))
2356 tcpm_set_state(port, SNK_DEBOUNCED,
2357 PD_T_CC_DEBOUNCE);
2358 else if (tcpm_port_is_disconnected(port))
2359 tcpm_set_state(port, SNK_UNATTACHED,
2360 PD_T_PD_DEBOUNCE);
2361 break;
2362 case SNK_DEBOUNCED:
2363 if (tcpm_port_is_disconnected(port))
2364 tcpm_set_state(port, SNK_UNATTACHED,
2365 PD_T_PD_DEBOUNCE);
2366 else if (port->vbus_present)
2367 tcpm_set_state(port,
2368 tcpm_try_src(port) ? SRC_TRY
2369 : SNK_ATTACHED,
2370 0);
2371 else
2372 /* Wait for VBUS, but not forever */
2373 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
2374 break;
2375
2376 case SRC_TRY:
2377 port->try_src_count++;
2378 tcpm_set_cc(port, tcpm_rp_cc(port));
2379 tcpm_set_state(port, SNK_TRYWAIT, PD_T_DRP_TRY);
2380 break;
2381 case SRC_TRY_DEBOUNCE:
2382 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
2383 break;
2384 case SNK_TRYWAIT:
2385 tcpm_set_cc(port, TYPEC_CC_RD);
2386 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, PD_T_CC_DEBOUNCE);
2387 break;
2388 case SNK_TRYWAIT_DEBOUNCE:
2389 if (port->vbus_present) {
2390 tcpm_set_state(port, SNK_ATTACHED, 0);
2391 break;
2392 }
2393 if (tcpm_port_is_disconnected(port)) {
2394 tcpm_set_state(port, SNK_UNATTACHED,
2395 PD_T_PD_DEBOUNCE);
2396 break;
2397 }
2398 if (tcpm_port_is_source(port))
2399 tcpm_set_state(port, SRC_ATTACHED, 0);
 2400 /* XXX Are we supposed to stay in this state? */
2401 break;
2402 case SNK_TRYWAIT_VBUS:
2403 tcpm_set_state(port, SNK_ATTACHED, PD_T_CC_DEBOUNCE);
2404 break;
2405
2406 case SNK_ATTACHED:
2407 ret = tcpm_snk_attach(port);
2408 if (ret < 0)
2409 tcpm_set_state(port, SNK_UNATTACHED, 0);
2410 else
2411 tcpm_set_state(port, SNK_STARTUP, 0);
2412 break;
2413 case SNK_STARTUP:
2414 /* XXX: callback into infrastructure */
2415 opmode = tcpm_get_pwr_opmode(port->polarity ?
2416 port->cc2 : port->cc1);
2417 typec_set_pwr_opmode(port->typec_port, opmode);
2418 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2419 port->message_id = 0;
5fec4b54 2420 port->rx_msgid = -1;
2421 port->explicit_contract = false;
2422 tcpm_set_state(port, SNK_DISCOVERY, 0);
2423 break;
2424 case SNK_DISCOVERY:
2425 if (port->vbus_present) {
2426 tcpm_set_current_limit(port,
2427 tcpm_get_current_limit(port),
2428 5000);
2429 tcpm_set_charge(port, true);
2430 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2431 break;
2432 }
2433 /*
2434 * For DRP, timeouts differ. Also, handling is supposed to be
2435 * different and much more complex (dead battery detection;
2436 * see USB power delivery specification, section 8.3.3.6.1.5.1).
2437 */
2438 tcpm_set_state(port, hard_reset_state(port),
9b0ae699 2439 port->port_type == TYPEC_PORT_DRP ?
2440 PD_T_DB_DETECT : PD_T_NO_RESPONSE);
2441 break;
2442 case SNK_DISCOVERY_DEBOUNCE:
2443 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
2444 PD_T_CC_DEBOUNCE);
2445 break;
2446 case SNK_DISCOVERY_DEBOUNCE_DONE:
2447 if (!tcpm_port_is_disconnected(port) &&
2448 tcpm_port_is_sink(port) &&
2449 time_is_after_jiffies(port->delayed_runtime)) {
2450 tcpm_set_state(port, SNK_DISCOVERY,
2451 port->delayed_runtime - jiffies);
2452 break;
2453 }
2454 tcpm_set_state(port, unattached_state(port), 0);
2455 break;
2456 case SNK_WAIT_CAPABILITIES:
2457 ret = port->tcpc->set_pd_rx(port->tcpc, true);
2458 if (ret < 0) {
2459 tcpm_set_state(port, SNK_READY, 0);
2460 break;
2461 }
2462 /*
2463 * If VBUS has never been low, and we time out waiting
2464 * for source cap, try a soft reset first, in case we
2465 * were already in a stable contract before this boot.
2466 * Do this only once.
2467 */
2468 if (port->vbus_never_low) {
2469 port->vbus_never_low = false;
2470 tcpm_set_state(port, SOFT_RESET_SEND,
2471 PD_T_SINK_WAIT_CAP);
2472 } else {
2473 tcpm_set_state(port, hard_reset_state(port),
2474 PD_T_SINK_WAIT_CAP);
2475 }
2476 break;
2477 case SNK_NEGOTIATE_CAPABILITIES:
2478 port->pd_capable = true;
2479 port->hard_reset_count = 0;
2480 ret = tcpm_pd_send_request(port);
2481 if (ret < 0) {
2482 /* Let the Source send capabilities again. */
2483 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2484 } else {
2485 tcpm_set_state_cond(port, hard_reset_state(port),
2486 PD_T_SENDER_RESPONSE);
2487 }
2488 break;
2489 case SNK_TRANSITION_SINK:
2490 case SNK_TRANSITION_SINK_VBUS:
2491 tcpm_set_state(port, hard_reset_state(port),
2492 PD_T_PS_TRANSITION);
2493 break;
2494 case SNK_READY:
2495 port->try_snk_count = 0;
2496 if (port->explicit_contract) {
2497 typec_set_pwr_opmode(port->typec_port,
2498 TYPEC_PWR_MODE_PD);
2499 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2500 }
f0690a25 2501
3113bf1a 2502 tcpm_swap_complete(port, 0);
f0690a25 2503 tcpm_typec_connect(port);
2504 tcpm_check_send_discover(port);
2505 break;
2506
2507 /* Accessory states */
2508 case ACC_UNATTACHED:
2509 tcpm_acc_detach(port);
2510 tcpm_set_state(port, SRC_UNATTACHED, 0);
2511 break;
2512 case DEBUG_ACC_ATTACHED:
2513 case AUDIO_ACC_ATTACHED:
2514 ret = tcpm_acc_attach(port);
2515 if (ret < 0)
2516 tcpm_set_state(port, ACC_UNATTACHED, 0);
2517 break;
2518 case AUDIO_ACC_DEBOUNCE:
2519 tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
2520 break;
2521
2522 /* Hard_Reset states */
2523 case HARD_RESET_SEND:
2524 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
2525 tcpm_set_state(port, HARD_RESET_START, 0);
2526 break;
2527 case HARD_RESET_START:
2528 port->hard_reset_count++;
2529 port->tcpc->set_pd_rx(port->tcpc, false);
2530 tcpm_unregister_altmodes(port);
2531 port->send_discover = true;
2532 if (port->pwr_role == TYPEC_SOURCE)
2533 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
2534 PD_T_PS_HARD_RESET);
2535 else
2536 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
2537 break;
2538 case SRC_HARD_RESET_VBUS_OFF:
2539 tcpm_set_vconn(port, true);
2540 tcpm_set_vbus(port, false);
2541 tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
2542 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
2543 break;
2544 case SRC_HARD_RESET_VBUS_ON:
2545 tcpm_set_vbus(port, true);
2546 port->tcpc->set_pd_rx(port->tcpc, true);
2547 tcpm_set_attached_state(port, true);
2548 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
2549 break;
2550 case SNK_HARD_RESET_SINK_OFF:
2551 tcpm_set_vconn(port, false);
2552 tcpm_set_charge(port, false);
2553 tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
2554 /*
2555 * VBUS may or may not toggle, depending on the adapter.
2556 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
2557 * directly after timeout.
2558 */
2559 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
2560 break;
2561 case SNK_HARD_RESET_WAIT_VBUS:
2562 /* Assume we're disconnected if VBUS doesn't come back. */
2563 tcpm_set_state(port, SNK_UNATTACHED,
2564 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
2565 break;
2566 case SNK_HARD_RESET_SINK_ON:
2567 /* Note: There is no guarantee that VBUS is on in this state */
2568 /*
2569 * XXX:
 2570 * The specification suggests that dual-mode ports in sink
 2571 * mode should transition to state PE_SRC_Transition_to_default.
 2572 * See USB power delivery specification chapter 8.3.3.6.1.3.
 2573 * This would mean we have to
2574 * - turn off VCONN, reset power supply
2575 * - request hardware reset
2576 * - turn on VCONN
2577 * - Transition to state PE_Src_Startup
 2578 * SNK-only ports shall transition to state Snk_Startup
 2579 * (see chapter 8.3.3.3.8).
 2580 * Similarly, dual-mode ports in source mode should transition
2581 * to PE_SNK_Transition_to_default.
2582 */
2583 tcpm_set_attached_state(port, true);
2584 tcpm_set_state(port, SNK_STARTUP, 0);
2585 break;
2586
2587 /* Soft_Reset states */
2588 case SOFT_RESET:
2589 port->message_id = 0;
5fec4b54 2590 port->rx_msgid = -1;
2591 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2592 if (port->pwr_role == TYPEC_SOURCE)
2593 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2594 else
2595 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2596 break;
2597 case SOFT_RESET_SEND:
2598 port->message_id = 0;
5fec4b54 2599 port->rx_msgid = -1;
2600 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
2601 tcpm_set_state_cond(port, hard_reset_state(port), 0);
2602 else
2603 tcpm_set_state_cond(port, hard_reset_state(port),
2604 PD_T_SENDER_RESPONSE);
2605 break;
2606
2607 /* DR_Swap states */
2608 case DR_SWAP_SEND:
2609 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
2610 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
2611 PD_T_SENDER_RESPONSE);
2612 break;
2613 case DR_SWAP_ACCEPT:
2614 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2615 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
2616 break;
2617 case DR_SWAP_SEND_TIMEOUT:
2618 tcpm_swap_complete(port, -ETIMEDOUT);
2619 tcpm_set_state(port, ready_state(port), 0);
2620 break;
2621 case DR_SWAP_CHANGE_DR:
2622 if (port->data_role == TYPEC_HOST) {
2623 tcpm_unregister_altmodes(port);
2624 tcpm_set_roles(port, true, port->pwr_role,
2625 TYPEC_DEVICE);
2626 } else {
2627 tcpm_set_roles(port, true, port->pwr_role,
2628 TYPEC_HOST);
2629 port->send_discover = true;
2630 }
2631 tcpm_set_state(port, ready_state(port), 0);
2632 break;
2633
2634 /* PR_Swap states */
2635 case PR_SWAP_ACCEPT:
2636 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2637 tcpm_set_state(port, PR_SWAP_START, 0);
2638 break;
2639 case PR_SWAP_SEND:
2640 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
2641 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
2642 PD_T_SENDER_RESPONSE);
2643 break;
2644 case PR_SWAP_SEND_TIMEOUT:
2645 tcpm_swap_complete(port, -ETIMEDOUT);
2646 tcpm_set_state(port, ready_state(port), 0);
2647 break;
2648 case PR_SWAP_START:
2649 if (port->pwr_role == TYPEC_SOURCE)
2650 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
2651 PD_T_SRC_TRANSITION);
2652 else
2653 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
2654 break;
2655 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
2656 tcpm_set_vbus(port, false);
2657 port->explicit_contract = false;
2658 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
2659 PD_T_PS_SOURCE_OFF);
2660 break;
2661 case PR_SWAP_SRC_SNK_SOURCE_OFF:
2662 tcpm_set_cc(port, TYPEC_CC_RD);
2663 /*
2664 * USB-PD standard, 6.2.1.4, Port Power Role:
2665 * "During the Power Role Swap Sequence, for the initial Source
2666 * Port, the Port Power Role field shall be set to Sink in the
2667 * PS_RDY Message indicating that the initial Source’s power
2668 * supply is turned off"
2669 */
2670 tcpm_set_pwr_role(port, TYPEC_SINK);
2671 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
2672 tcpm_set_state(port, ERROR_RECOVERY, 0);
2673 break;
2674 }
2675 tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
2676 break;
2677 case PR_SWAP_SRC_SNK_SINK_ON:
2678 tcpm_set_state(port, SNK_STARTUP, 0);
2679 break;
2680 case PR_SWAP_SNK_SRC_SINK_OFF:
2681 tcpm_set_charge(port, false);
2682 tcpm_set_state(port, hard_reset_state(port),
2683 PD_T_PS_SOURCE_OFF);
2684 break;
2685 case PR_SWAP_SNK_SRC_SOURCE_ON:
2686 tcpm_set_cc(port, tcpm_rp_cc(port));
2687 tcpm_set_vbus(port, true);
2688 /*
2689 * USB PD standard, 6.2.1.4:
2690 * "Subsequent Messages initiated by the Policy Engine,
2691 * such as the PS_RDY Message sent to indicate that Vbus
2692 * is ready, will have the Port Power Role field set to
2693 * Source."
2694 */
f0690a25 2695 tcpm_set_pwr_role(port, TYPEC_SOURCE);
050161ea 2696 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2697 tcpm_set_state(port, SRC_STARTUP, 0);
2698 break;
2699
2700 case VCONN_SWAP_ACCEPT:
2701 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2702 tcpm_set_state(port, VCONN_SWAP_START, 0);
2703 break;
2704 case VCONN_SWAP_SEND:
2705 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
2706 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
2707 PD_T_SENDER_RESPONSE);
2708 break;
2709 case VCONN_SWAP_SEND_TIMEOUT:
2710 tcpm_swap_complete(port, -ETIMEDOUT);
2711 tcpm_set_state(port, ready_state(port), 0);
2712 break;
2713 case VCONN_SWAP_START:
2714 if (port->vconn_role == TYPEC_SOURCE)
2715 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
2716 else
2717 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
2718 break;
2719 case VCONN_SWAP_WAIT_FOR_VCONN:
2720 tcpm_set_state(port, hard_reset_state(port),
2721 PD_T_VCONN_SOURCE_ON);
2722 break;
2723 case VCONN_SWAP_TURN_ON_VCONN:
2724 tcpm_set_vconn(port, true);
2725 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2726 tcpm_set_state(port, ready_state(port), 0);
2727 break;
2728 case VCONN_SWAP_TURN_OFF_VCONN:
2729 tcpm_set_vconn(port, false);
2730 tcpm_set_state(port, ready_state(port), 0);
2731 break;
2732
2733 case DR_SWAP_CANCEL:
2734 case PR_SWAP_CANCEL:
2735 case VCONN_SWAP_CANCEL:
2736 tcpm_swap_complete(port, port->swap_status);
2737 if (port->pwr_role == TYPEC_SOURCE)
2738 tcpm_set_state(port, SRC_READY, 0);
2739 else
2740 tcpm_set_state(port, SNK_READY, 0);
2741 break;
2742
2743 case BIST_RX:
2744 switch (BDO_MODE_MASK(port->bist_request)) {
2745 case BDO_MODE_CARRIER2:
2746 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
2747 break;
2748 default:
2749 break;
2750 }
2751 /* Always switch to unattached state */
2752 tcpm_set_state(port, unattached_state(port), 0);
2753 break;
2754 case ERROR_RECOVERY:
2755 tcpm_swap_complete(port, -EPROTO);
2756 tcpm_set_state(port, PORT_RESET, 0);
2757 break;
2758 case PORT_RESET:
f0690a25 2759 tcpm_reset_port(port);
f0690a25 2760 tcpm_set_cc(port, TYPEC_CC_OPEN);
b17dd571 2761 tcpm_set_state(port, PORT_RESET_WAIT_OFF,
2762 PD_T_ERROR_RECOVERY);
2763 break;
b17dd571 2764 case PORT_RESET_WAIT_OFF:
2765 tcpm_set_state(port,
2766 tcpm_default_state(port),
2767 port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
2768 break;
2769 default:
2770 WARN(1, "Unexpected port state %d\n", port->state);
2771 break;
2772 }
2773}
2774
2775static void tcpm_state_machine_work(struct work_struct *work)
2776{
2777 struct tcpm_port *port = container_of(work, struct tcpm_port,
2778 state_machine.work);
2779 enum tcpm_state prev_state;
2780
2781 mutex_lock(&port->lock);
2782 port->state_machine_running = true;
2783
2784 if (port->queued_message && tcpm_send_queued_message(port))
2785 goto done;
2786
2787 /* If we were queued due to a delayed state change, update it now */
2788 if (port->delayed_state) {
2789 tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
2790 tcpm_states[port->state],
2791 tcpm_states[port->delayed_state], port->delay_ms);
2792 port->prev_state = port->state;
2793 port->state = port->delayed_state;
2794 port->delayed_state = INVALID_STATE;
2795 }
2796
2797 /*
2798 * Continue running as long as we have (non-delayed) state changes
2799 * to make.
2800 */
2801 do {
2802 prev_state = port->state;
2803 run_state_machine(port);
2804 if (port->queued_message)
2805 tcpm_send_queued_message(port);
2806 } while (port->state != prev_state && !port->delayed_state);
2807
2808done:
2809 port->state_machine_running = false;
2810 mutex_unlock(&port->lock);
2811}
2812
2813static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
2814 enum typec_cc_status cc2)
2815{
2816 enum typec_cc_status old_cc1, old_cc2;
2817 enum tcpm_state new_state;
2818
2819 old_cc1 = port->cc1;
2820 old_cc2 = port->cc2;
2821 port->cc1 = cc1;
2822 port->cc2 = cc2;
2823
2824 tcpm_log_force(port,
2825 "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
2826 old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
2827 port->polarity,
2828 tcpm_port_is_disconnected(port) ? "disconnected"
2829 : "connected");
2830
2831 switch (port->state) {
2832 case DRP_TOGGLING:
2833 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
2834 tcpm_port_is_source(port))
2835 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2836 else if (tcpm_port_is_sink(port))
2837 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2838 break;
2839 case SRC_UNATTACHED:
2840 case ACC_UNATTACHED:
2841 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
2842 tcpm_port_is_source(port))
2843 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2844 break;
2845 case SRC_ATTACH_WAIT:
2846 if (tcpm_port_is_disconnected(port) ||
2847 tcpm_port_is_audio_detached(port))
2848 tcpm_set_state(port, SRC_UNATTACHED, 0);
2849 else if (cc1 != old_cc1 || cc2 != old_cc2)
2850 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2851 break;
2852 case SRC_ATTACHED:
2853 if (tcpm_port_is_disconnected(port))
2854 tcpm_set_state(port, SRC_UNATTACHED, 0);
2855 break;
2856
2857 case SNK_UNATTACHED:
2858 if (tcpm_port_is_sink(port))
2859 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2860 break;
2861 case SNK_ATTACH_WAIT:
2862 if ((port->cc1 == TYPEC_CC_OPEN &&
2863 port->cc2 != TYPEC_CC_OPEN) ||
2864 (port->cc1 != TYPEC_CC_OPEN &&
2865 port->cc2 == TYPEC_CC_OPEN))
2866 new_state = SNK_DEBOUNCED;
2867 else if (tcpm_port_is_disconnected(port))
2868 new_state = SNK_UNATTACHED;
2869 else
2870 break;
2871 if (new_state != port->delayed_state)
2872 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2873 break;
2874 case SNK_DEBOUNCED:
2875 if (tcpm_port_is_disconnected(port))
2876 new_state = SNK_UNATTACHED;
2877 else if (port->vbus_present)
2878 new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
2879 else
2880 new_state = SNK_UNATTACHED;
2881 if (new_state != port->delayed_state)
2882 tcpm_set_state(port, SNK_DEBOUNCED, 0);
2883 break;
2884 case SNK_READY:
2885 if (tcpm_port_is_disconnected(port))
2886 tcpm_set_state(port, unattached_state(port), 0);
2887 else if (!port->pd_capable &&
2888 (cc1 != old_cc1 || cc2 != old_cc2))
2889 tcpm_set_current_limit(port,
2890 tcpm_get_current_limit(port),
2891 5000);
2892 break;
2893
2894 case AUDIO_ACC_ATTACHED:
2895 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
2896 tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
2897 break;
2898 case AUDIO_ACC_DEBOUNCE:
2899 if (tcpm_port_is_audio(port))
2900 tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
2901 break;
2902
2903 case DEBUG_ACC_ATTACHED:
2904 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
2905 tcpm_set_state(port, ACC_UNATTACHED, 0);
2906 break;
2907
2908 case SNK_TRY:
2909 /* Do nothing, waiting for timeout */
2910 break;
2911
2912 case SNK_DISCOVERY:
2913 /* CC line is unstable, wait for debounce */
2914 if (tcpm_port_is_disconnected(port))
2915 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
2916 break;
2917 case SNK_DISCOVERY_DEBOUNCE:
2918 break;
2919
2920 case SRC_TRYWAIT:
2921 /* Hand over to state machine if needed */
2922 if (!port->vbus_present && tcpm_port_is_source(port))
2923 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
2924 break;
2925 case SRC_TRYWAIT_DEBOUNCE:
2926 if (port->vbus_present || !tcpm_port_is_source(port))
2927 tcpm_set_state(port, SRC_TRYWAIT, 0);
2928 break;
2929 case SNK_TRY_WAIT:
2930 if (port->vbus_present && tcpm_port_is_sink(port)) {
2931 tcpm_set_state(port, SNK_ATTACHED, 0);
2932 break;
2933 }
2934 if (!tcpm_port_is_sink(port))
2935 new_state = SRC_TRYWAIT;
2936 else
2937 new_state = SRC_TRYWAIT_UNATTACHED;
2938
2939 if (new_state != port->delayed_state)
2940 tcpm_set_state(port, SNK_TRY_WAIT, 0);
2941 break;
2942
2943 case SRC_TRY:
2944 if (tcpm_port_is_source(port))
2945 tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
2946 break;
2947 case SRC_TRY_DEBOUNCE:
2948 tcpm_set_state(port, SRC_TRY, 0);
2949 break;
2950 case SNK_TRYWAIT_DEBOUNCE:
2951 if (port->vbus_present) {
2952 tcpm_set_state(port, SNK_ATTACHED, 0);
2953 break;
2954 }
2955 if (tcpm_port_is_source(port)) {
2956 tcpm_set_state(port, SRC_ATTACHED, 0);
2957 break;
2958 }
2959 if (tcpm_port_is_disconnected(port) &&
2960 port->delayed_state != SNK_UNATTACHED)
2961 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
2962 break;
2963
2964 case PR_SWAP_SNK_SRC_SINK_OFF:
2965 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
2966 case PR_SWAP_SRC_SNK_SOURCE_OFF:
2967 /*
2968 * CC state change is expected here; we just turned off power.
2969 * Ignore it.
2970 */
2971 break;
2972
2973 default:
2974 if (tcpm_port_is_disconnected(port))
2975 tcpm_set_state(port, unattached_state(port), 0);
2976 break;
2977 }
2978}
2979
2980static void _tcpm_pd_vbus_on(struct tcpm_port *port)
2981{
2982 enum tcpm_state new_state;
2983
2984 tcpm_log_force(port, "VBUS on");
2985 port->vbus_present = true;
2986 switch (port->state) {
2987 case SNK_TRANSITION_SINK_VBUS:
8bf05746 2988 port->explicit_contract = true;
2989 tcpm_set_state(port, SNK_READY, 0);
2990 break;
2991 case SNK_DISCOVERY:
2992 tcpm_set_state(port, SNK_DISCOVERY, 0);
2993 break;
2994
2995 case SNK_DEBOUNCED:
2996 tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
2997 : SNK_ATTACHED,
2998 0);
2999 break;
3000 case SNK_HARD_RESET_WAIT_VBUS:
3001 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
3002 break;
3003 case SRC_ATTACHED:
3004 tcpm_set_state(port, SRC_STARTUP, 0);
3005 break;
3006 case SRC_HARD_RESET_VBUS_ON:
3007 tcpm_set_state(port, SRC_STARTUP, 0);
3008 break;
3009
3010 case SNK_TRY:
3011 /* Do nothing, waiting for timeout */
3012 break;
3013 case SRC_TRYWAIT:
 3014 /* Do nothing, waiting for Rd to be detected */
3015 break;
3016 case SRC_TRYWAIT_DEBOUNCE:
3017 tcpm_set_state(port, SRC_TRYWAIT, 0);
3018 break;
3019 case SNK_TRY_WAIT:
3020 if (tcpm_port_is_sink(port)) {
3021 tcpm_set_state(port, SNK_ATTACHED, 0);
3022 break;
3023 }
3024 if (!tcpm_port_is_sink(port))
3025 new_state = SRC_TRYWAIT;
3026 else
3027 new_state = SRC_TRYWAIT_UNATTACHED;
3028
3029 if (new_state != port->delayed_state)
3030 tcpm_set_state(port, SNK_TRY_WAIT, 0);
3031 break;
3032 case SNK_TRYWAIT:
3033 tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
3034 break;
3035
3036 default:
3037 break;
3038 }
3039}
3040
3041static void _tcpm_pd_vbus_off(struct tcpm_port *port)
3042{
3043 enum tcpm_state new_state;
3044
3045 tcpm_log_force(port, "VBUS off");
3046 port->vbus_present = false;
3047 port->vbus_never_low = false;
3048 switch (port->state) {
3049 case SNK_HARD_RESET_SINK_OFF:
3050 tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
3051 break;
3052 case SRC_HARD_RESET_VBUS_OFF:
3053 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, 0);
3054 break;
3055 case HARD_RESET_SEND:
3056 break;
3057
3058 case SNK_TRY:
3059 /* Do nothing, waiting for timeout */
3060 break;
3061 case SRC_TRYWAIT:
3062 /* Hand over to state machine if needed */
3063 if (tcpm_port_is_source(port))
02d5be46 3064 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
3065 break;
3066 case SNK_TRY_WAIT:
3067 if (!tcpm_port_is_sink(port))
3068 new_state = SRC_TRYWAIT;
3069 else
3070 new_state = SRC_TRYWAIT_UNATTACHED;
3071
3072 if (new_state != port->delayed_state)
3073 tcpm_set_state(port, SNK_TRY_WAIT, 0);
3074 break;
3075 case SNK_TRYWAIT_VBUS:
3076 tcpm_set_state(port, SNK_TRYWAIT, 0);
3077 break;
3078
3079 case SNK_ATTACH_WAIT:
3080 tcpm_set_state(port, SNK_UNATTACHED, 0);
3081 break;
3082
3083 case SNK_NEGOTIATE_CAPABILITIES:
3084 break;
3085
3086 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
3087 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
3088 break;
3089
3090 case PR_SWAP_SNK_SRC_SINK_OFF:
3091 /* Do nothing, expected */
3092 break;
3093
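/*
 * VBUS is now off, so stop waiting and move to the default state
 * without running out the remaining timeout.
 */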
b17dd571 3094 case PORT_RESET_WAIT_OFF:
c749d4d0 3095 tcpm_set_state(port, tcpm_default_state(port), 0);
3096 break;
3097
3098 default:
3099 if (port->pwr_role == TYPEC_SINK &&
3100 port->attached)
3101 tcpm_set_state(port, SNK_UNATTACHED, 0);
3102 break;
3103 }
3104}
3105
3106static void _tcpm_pd_hard_reset(struct tcpm_port *port)
3107{
3108 tcpm_log_force(port, "Received hard reset");
3109 /*
3110 * If we keep receiving hard reset requests, executing the hard reset
3111 * must have failed. Revert to error recovery if that happens.
3112 */
3113 tcpm_set_state(port,
3114 port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
3115 HARD_RESET_START : ERROR_RECOVERY,
3116 0);
3117}
3118
3119static void tcpm_pd_event_handler(struct work_struct *work)
3120{
3121 struct tcpm_port *port = container_of(work, struct tcpm_port,
3122 event_work);
3123 u32 events;
3124
3125 mutex_lock(&port->lock);
3126
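/*
 * Drain the pending event bits while holding the spinlock, then handle
 * them with only the port mutex held so that tcpm_cc_change() and
 * friends only contend for the spinlock briefly.
 */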
3127 spin_lock(&port->pd_event_lock);
3128 while (port->pd_events) {
3129 events = port->pd_events;
3130 port->pd_events = 0;
3131 spin_unlock(&port->pd_event_lock);
3132 if (events & TCPM_RESET_EVENT)
3133 _tcpm_pd_hard_reset(port);
3134 if (events & TCPM_VBUS_EVENT) {
3135 bool vbus;
3136
3137 vbus = port->tcpc->get_vbus(port->tcpc);
3138 if (vbus)
3139 _tcpm_pd_vbus_on(port);
3140 else
3141 _tcpm_pd_vbus_off(port);
3142 }
3143 if (events & TCPM_CC_EVENT) {
3144 enum typec_cc_status cc1, cc2;
3145
3146 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
3147 _tcpm_cc_change(port, cc1, cc2);
3148 }
3149 spin_lock(&port->pd_event_lock);
3150 }
3151 spin_unlock(&port->pd_event_lock);
3152 mutex_unlock(&port->lock);
3153}
3154
3155void tcpm_cc_change(struct tcpm_port *port)
3156{
3157 spin_lock(&port->pd_event_lock);
3158 port->pd_events |= TCPM_CC_EVENT;
3159 spin_unlock(&port->pd_event_lock);
3160 queue_work(port->wq, &port->event_work);
3161}
3162EXPORT_SYMBOL_GPL(tcpm_cc_change);
3163
3164void tcpm_vbus_change(struct tcpm_port *port)
3165{
3166 spin_lock(&port->pd_event_lock);
3167 port->pd_events |= TCPM_VBUS_EVENT;
3168 spin_unlock(&port->pd_event_lock);
3169 queue_work(port->wq, &port->event_work);
3170}
3171EXPORT_SYMBOL_GPL(tcpm_vbus_change);
3172
3173void tcpm_pd_hard_reset(struct tcpm_port *port)
3174{
3175 spin_lock(&port->pd_event_lock);
3176 port->pd_events = TCPM_RESET_EVENT;
3177 spin_unlock(&port->pd_event_lock);
3178 queue_work(port->wq, &port->event_work);
3179}
3180EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
3181
3182static int tcpm_dr_set(const struct typec_capability *cap,
3183 enum typec_data_role data)
3184{
3185 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3186 int ret;
3187
3188 mutex_lock(&port->swap_lock);
3189 mutex_lock(&port->lock);
3190
9b0ae699 3191 if (port->port_type != TYPEC_PORT_DRP) {
3192 ret = -EINVAL;
3193 goto port_unlock;
3194 }
3195 if (port->state != SRC_READY && port->state != SNK_READY) {
3196 ret = -EAGAIN;
3197 goto port_unlock;
3198 }
3199
3200 if (port->data_role == data) {
3201 ret = 0;
3202 goto port_unlock;
3203 }
3204
3205 /*
3206 * XXX
3207 * 6.3.9: If an alternate mode is active, a request to swap
3208 * alternate modes shall trigger a port reset.
3209 * Reject data role swap request in this case.
3210 */
3211
3212 if (!port->pd_capable) {
3213 /*
3214 * If the partner is not PD capable, reset the port to
3215 * trigger a role change. This can only work if a preferred
3216 * role is configured, and if it matches the requested role.
3217 */
3218 if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
3219 port->try_role == port->pwr_role) {
3220 ret = -EINVAL;
3221 goto port_unlock;
3222 }
3223 port->non_pd_role_swap = true;
3224 tcpm_set_state(port, PORT_RESET, 0);
3225 } else {
3226 tcpm_set_state(port, DR_SWAP_SEND, 0);
3227 }
3228
3229 port->swap_status = 0;
3230 port->swap_pending = true;
3231 reinit_completion(&port->swap_complete);
3232 mutex_unlock(&port->lock);
3233
3234 if (!wait_for_completion_timeout(&port->swap_complete,
3235 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3236 ret = -ETIMEDOUT;
3237 else
3238 ret = port->swap_status;
f0690a25 3239
b17dd571 3240 port->non_pd_role_swap = false;
3241 goto swap_unlock;
3242
3243port_unlock:
3244 mutex_unlock(&port->lock);
3245swap_unlock:
3246 mutex_unlock(&port->swap_lock);
3247 return ret;
3248}
3249
3250static int tcpm_pr_set(const struct typec_capability *cap,
3251 enum typec_role role)
3252{
3253 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3254 int ret;
3255
3256 mutex_lock(&port->swap_lock);
3257 mutex_lock(&port->lock);
3258
9b0ae699 3259 if (port->port_type != TYPEC_PORT_DRP) {
3260 ret = -EINVAL;
3261 goto port_unlock;
3262 }
3263 if (port->state != SRC_READY && port->state != SNK_READY) {
3264 ret = -EAGAIN;
3265 goto port_unlock;
3266 }
3267
3268 if (role == port->pwr_role) {
3269 ret = 0;
3270 goto port_unlock;
3271 }
3272
3273 port->swap_status = 0;
3274 port->swap_pending = true;
3275 reinit_completion(&port->swap_complete);
3276 tcpm_set_state(port, PR_SWAP_SEND, 0);
3277 mutex_unlock(&port->lock);
3278
3279 if (!wait_for_completion_timeout(&port->swap_complete,
3280 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3281 ret = -ETIMEDOUT;
3282 else
3283 ret = port->swap_status;
f0690a25 3284
3285 goto swap_unlock;
3286
3287port_unlock:
3288 mutex_unlock(&port->lock);
3289swap_unlock:
3290 mutex_unlock(&port->swap_lock);
3291 return ret;
3292}
3293
3294static int tcpm_vconn_set(const struct typec_capability *cap,
3295 enum typec_role role)
3296{
3297 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3298 int ret;
3299
3300 mutex_lock(&port->swap_lock);
3301 mutex_lock(&port->lock);
3302
3303 if (port->state != SRC_READY && port->state != SNK_READY) {
3304 ret = -EAGAIN;
3305 goto port_unlock;
3306 }
3307
3308 if (role == port->vconn_role) {
3309 ret = 0;
3310 goto port_unlock;
3311 }
3312
3313 port->swap_status = 0;
3314 port->swap_pending = true;
3315 reinit_completion(&port->swap_complete);
3316 tcpm_set_state(port, VCONN_SWAP_SEND, 0);
3317 mutex_unlock(&port->lock);
3318
3319 if (!wait_for_completion_timeout(&port->swap_complete,
3320 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3321 ret = -ETIMEDOUT;
3322 else
3323 ret = port->swap_status;
f0690a25 3324
3325 goto swap_unlock;
3326
3327port_unlock:
3328 mutex_unlock(&port->lock);
3329swap_unlock:
3330 mutex_unlock(&port->swap_lock);
3331 return ret;
3332}
3333
3334static int tcpm_try_role(const struct typec_capability *cap, int role)
3335{
3336 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3337 struct tcpc_dev *tcpc = port->tcpc;
3338 int ret = 0;
3339
3340 mutex_lock(&port->lock);
3341 if (tcpc->try_role)
3342 ret = tcpc->try_role(tcpc, role);
3343 if (!ret && !tcpc->config->try_role_hw)
3344 port->try_role = role;
3345 port->try_src_count = 0;
3346 port->try_snk_count = 0;
3347 mutex_unlock(&port->lock);
3348
3349 return ret;
3350}
3351
3352static void tcpm_init(struct tcpm_port *port)
3353{
3354 enum typec_cc_status cc1, cc2;
3355
3356 port->tcpc->init(port->tcpc);
3357
3358 tcpm_reset_port(port);
3359
3360 /*
3361 * XXX
3362 * Should possibly wait for VBUS to settle if it was enabled locally
3363 * since tcpm_reset_port() will disable VBUS.
3364 */
3365 port->vbus_present = port->tcpc->get_vbus(port->tcpc);
3366 if (port->vbus_present)
3367 port->vbus_never_low = true;
3368
3369 tcpm_set_state(port, tcpm_default_state(port), 0);
3370
3371 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
3372 _tcpm_cc_change(port, cc1, cc2);
3373
3374 /*
 3375 * Some adapters need a clean slate at startup, and won't recover
 3376 * otherwise. So don't try to be fancy: force a clean disconnect.
3377 */
b17dd571 3378 tcpm_set_state(port, PORT_RESET, 0);
3379}
3380
3381static int tcpm_port_type_set(const struct typec_capability *cap,
3382 enum typec_port_type type)
3383{
3384 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3385
3386 mutex_lock(&port->lock);
3387 if (type == port->port_type)
3388 goto port_unlock;
3389
3390 port->port_type = type;
3391
3392 if (!port->connected) {
3393 tcpm_set_state(port, PORT_RESET, 0);
3394 } else if (type == TYPEC_PORT_UFP) {
3395 if (!(port->pwr_role == TYPEC_SINK &&
3396 port->data_role == TYPEC_DEVICE))
3397 tcpm_set_state(port, PORT_RESET, 0);
3398 } else if (type == TYPEC_PORT_DFP) {
3399 if (!(port->pwr_role == TYPEC_SOURCE &&
3400 port->data_role == TYPEC_HOST))
3401 tcpm_set_state(port, PORT_RESET, 0);
3402 }
3403
3404port_unlock:
3405 mutex_unlock(&port->lock);
3406 return 0;
3407}
3408
3409void tcpm_tcpc_reset(struct tcpm_port *port)
3410{
3411 mutex_lock(&port->lock);
3412 /* XXX: Maintain PD connection if possible? */
3413 tcpm_init(port);
3414 mutex_unlock(&port->lock);
3415}
3416EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
3417
3418static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
3419 unsigned int nr_pdo)
3420{
3421 unsigned int i;
3422
3423 if (nr_pdo > PDO_MAX_OBJECTS)
3424 nr_pdo = PDO_MAX_OBJECTS;
3425
3426 for (i = 0; i < nr_pdo; i++)
3427 dest_pdo[i] = src_pdo[i];
3428
3429 return nr_pdo;
3430}
3431
3432static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
3433 unsigned int nr_vdo)
3434{
3435 unsigned int i;
3436
3437 if (nr_vdo > VDO_MAX_OBJECTS)
3438 nr_vdo = VDO_MAX_OBJECTS;
3439
3440 for (i = 0; i < nr_vdo; i++)
3441 dest_vdo[i] = src_vdo[i];
3442
3443 return nr_vdo;
3444}
3445
3446void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
3447 unsigned int nr_pdo)
3448{
3449 mutex_lock(&port->lock);
3450 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, pdo, nr_pdo);
3451 switch (port->state) {
3452 case SRC_UNATTACHED:
3453 case SRC_ATTACH_WAIT:
3454 case SRC_TRYWAIT:
3455 tcpm_set_cc(port, tcpm_rp_cc(port));
3456 break;
3457 case SRC_SEND_CAPABILITIES:
3458 case SRC_NEGOTIATE_CAPABILITIES:
3459 case SRC_READY:
3460 case SRC_WAIT_NEW_CAPABILITIES:
3461 tcpm_set_cc(port, tcpm_rp_cc(port));
3462 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
3463 break;
3464 default:
3465 break;
3466 }
3467 mutex_unlock(&port->lock);
3468}
3469EXPORT_SYMBOL_GPL(tcpm_update_source_capabilities);
3470
3471void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
3472 unsigned int nr_pdo,
3473 unsigned int max_snk_mv,
3474 unsigned int max_snk_ma,
3475 unsigned int max_snk_mw,
3476 unsigned int operating_snk_mw)
3477{
3478 mutex_lock(&port->lock);
3479 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo);
3480 port->max_snk_mv = max_snk_mv;
3481 port->max_snk_ma = max_snk_ma;
3482 port->max_snk_mw = max_snk_mw;
3483 port->operating_snk_mw = operating_snk_mw;
3484
3485 switch (port->state) {
3486 case SNK_NEGOTIATE_CAPABILITIES:
3487 case SNK_READY:
3488 case SNK_TRANSITION_SINK:
3489 case SNK_TRANSITION_SINK_VBUS:
3490 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
3491 break;
3492 default:
3493 break;
3494 }
3495 mutex_unlock(&port->lock);
3496}
3497EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
3498
3499struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
3500{
3501 struct tcpm_port *port;
3502 int i, err;
3503
3504 if (!dev || !tcpc || !tcpc->config ||
3505 !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
3506 !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
3507 !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
3508 return ERR_PTR(-EINVAL);
3509
3510 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
3511 if (!port)
3512 return ERR_PTR(-ENOMEM);
3513
3514 port->dev = dev;
3515 port->tcpc = tcpc;
3516
3517 mutex_init(&port->lock);
3518 mutex_init(&port->swap_lock);
3519
3520 port->wq = create_singlethread_workqueue(dev_name(dev));
3521 if (!port->wq)
3522 return ERR_PTR(-ENOMEM);
3523 INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work);
3524 INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work);
3525 INIT_WORK(&port->event_work, tcpm_pd_event_handler);
3526
3527 spin_lock_init(&port->pd_event_lock);
3528
3529 init_completion(&port->tx_complete);
3530 init_completion(&port->swap_complete);
3531
3532 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcpc->config->src_pdo,
3533 tcpc->config->nr_src_pdo);
3534 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
3535 tcpc->config->nr_snk_pdo);
3536 port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,
3537 tcpc->config->nr_snk_vdo);
3538
3539 port->max_snk_mv = tcpc->config->max_snk_mv;
3540 port->max_snk_ma = tcpc->config->max_snk_ma;
3541 port->max_snk_mw = tcpc->config->max_snk_mw;
3542 port->operating_snk_mw = tcpc->config->operating_snk_mw;
3543 if (!tcpc->config->try_role_hw)
3544 port->try_role = tcpc->config->default_role;
3545 else
3546 port->try_role = TYPEC_NO_PREFERRED_ROLE;
3547
3548 port->typec_caps.prefer_role = tcpc->config->default_role;
3549 port->typec_caps.type = tcpc->config->type;
3550 port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
3551 port->typec_caps.pd_revision = 0x0200; /* USB-PD spec release 2.0 */
3552 port->typec_caps.dr_set = tcpm_dr_set;
3553 port->typec_caps.pr_set = tcpm_pr_set;
3554 port->typec_caps.vconn_set = tcpm_vconn_set;
3555 port->typec_caps.try_role = tcpm_try_role;
9b0ae699 3556 port->typec_caps.port_type_set = tcpm_port_type_set;
3557
3558 port->partner_desc.identity = &port->partner_ident;
9b0ae699 3559 port->port_type = tcpc->config->type;
3560 /*
3561 * TODO:
3562 * - alt_modes, set_alt_mode
3563 * - {debug,audio}_accessory
3564 */
3565
3566 port->typec_port = typec_register_port(port->dev, &port->typec_caps);
3567 if (!port->typec_port) {
3568 err = -ENOMEM;
3569 goto out_destroy_wq;
3570 }
3571
3572 if (tcpc->config->alt_modes) {
3c41dbde 3573 const struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;
3574
3575 i = 0;
3576 while (paltmode->svid && i < ARRAY_SIZE(port->port_altmode)) {
3577 port->port_altmode[i] =
3578 typec_port_register_altmode(port->typec_port,
3579 paltmode);
3580 if (!port->port_altmode[i]) {
3581 tcpm_log(port,
3582 "%s: failed to register port alternate mode 0x%x",
3583 dev_name(dev), paltmode->svid);
3584 break;
3585 }
3586 i++;
3587 paltmode++;
3588 }
3589 }
3590
3591 tcpm_debugfs_init(port);
3592 mutex_lock(&port->lock);
3593 tcpm_init(port);
3594 mutex_unlock(&port->lock);
3595
3596 tcpm_log(port, "%s: registered", dev_name(dev));
3597 return port;
3598
3599out_destroy_wq:
3600 destroy_workqueue(port->wq);
3601 return ERR_PTR(err);
3602}
3603EXPORT_SYMBOL_GPL(tcpm_register_port);
3604
3605void tcpm_unregister_port(struct tcpm_port *port)
3606{
3607 int i;
3608
3609 for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
3610 typec_unregister_altmode(port->port_altmode[i]);
3611 typec_unregister_port(port->typec_port);
3612 tcpm_debugfs_exit(port);
3613 destroy_workqueue(port->wq);
3614}
3615EXPORT_SYMBOL_GPL(tcpm_unregister_port);
3616
3617MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
3618MODULE_DESCRIPTION("USB Type-C Port Manager");
3619MODULE_LICENSE("GPL");