staging: typec: pd: Document struct pd_message
[linux-2.6-block.git] / drivers / staging / typec / tcpm.c
CommitLineData
f0690a25
GR
1/*
2 * Copyright 2015-2017 Google, Inc
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * USB Power Delivery protocol stack.
15 */
16
17#include <linux/completion.h>
18#include <linux/debugfs.h>
19#include <linux/device.h>
02d5be46 20#include <linux/jiffies.h>
f0690a25
GR
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/mutex.h>
24#include <linux/proc_fs.h>
25#include <linux/sched/clock.h>
26#include <linux/seq_file.h>
27#include <linux/slab.h>
28#include <linux/spinlock.h>
29#include <linux/usb/typec.h>
30#include <linux/workqueue.h>
31
32#include "pd.h"
33#include "pd_vdo.h"
34#include "pd_bdo.h"
35#include "tcpm.h"
36
/*
 * X-macro listing every TCPM state machine state exactly once; expanded
 * below with GENERATE_ENUM for the enum and GENERATE_STRING for the
 * matching name table, so the two can never drift apart.
 */
#define FOREACH_STATE(S)			\
	S(INVALID_STATE),			\
	S(DRP_TOGGLING),			\
	S(SRC_UNATTACHED),			\
	S(SRC_ATTACH_WAIT),			\
	S(SRC_ATTACHED),			\
	S(SRC_STARTUP),				\
	S(SRC_SEND_CAPABILITIES),		\
	S(SRC_NEGOTIATE_CAPABILITIES),		\
	S(SRC_TRANSITION_SUPPLY),		\
	S(SRC_READY),				\
	S(SRC_WAIT_NEW_CAPABILITIES),		\
						\
	S(SNK_UNATTACHED),			\
	S(SNK_ATTACH_WAIT),			\
	S(SNK_DEBOUNCED),			\
	S(SNK_ATTACHED),			\
	S(SNK_STARTUP),				\
	S(SNK_DISCOVERY),			\
	S(SNK_DISCOVERY_DEBOUNCE),		\
	S(SNK_DISCOVERY_DEBOUNCE_DONE),		\
	S(SNK_WAIT_CAPABILITIES),		\
	S(SNK_NEGOTIATE_CAPABILITIES),		\
	S(SNK_TRANSITION_SINK),			\
	S(SNK_TRANSITION_SINK_VBUS),		\
	S(SNK_READY),				\
						\
	S(ACC_UNATTACHED),			\
	S(DEBUG_ACC_ATTACHED),			\
	S(AUDIO_ACC_ATTACHED),			\
	S(AUDIO_ACC_DEBOUNCE),			\
						\
	S(HARD_RESET_SEND),			\
	S(HARD_RESET_START),			\
	S(SRC_HARD_RESET_VBUS_OFF),		\
	S(SRC_HARD_RESET_VBUS_ON),		\
	S(SNK_HARD_RESET_SINK_OFF),		\
	S(SNK_HARD_RESET_WAIT_VBUS),		\
	S(SNK_HARD_RESET_SINK_ON),		\
						\
	S(SOFT_RESET),				\
	S(SOFT_RESET_SEND),			\
						\
	S(DR_SWAP_ACCEPT),			\
	S(DR_SWAP_SEND),			\
	S(DR_SWAP_SEND_TIMEOUT),		\
	S(DR_SWAP_CANCEL),			\
	S(DR_SWAP_CHANGE_DR),			\
						\
	S(PR_SWAP_ACCEPT),			\
	S(PR_SWAP_SEND),			\
	S(PR_SWAP_SEND_TIMEOUT),		\
	S(PR_SWAP_CANCEL),			\
	S(PR_SWAP_START),			\
	S(PR_SWAP_SRC_SNK_TRANSITION_OFF),	\
	S(PR_SWAP_SRC_SNK_SOURCE_OFF),		\
	S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
	S(PR_SWAP_SRC_SNK_SINK_ON),		\
	S(PR_SWAP_SNK_SRC_SINK_OFF),		\
	S(PR_SWAP_SNK_SRC_SOURCE_ON),		\
	S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP), \
						\
	S(VCONN_SWAP_ACCEPT),			\
	S(VCONN_SWAP_SEND),			\
	S(VCONN_SWAP_SEND_TIMEOUT),		\
	S(VCONN_SWAP_CANCEL),			\
	S(VCONN_SWAP_START),			\
	S(VCONN_SWAP_WAIT_FOR_VCONN),		\
	S(VCONN_SWAP_TURN_ON_VCONN),		\
	S(VCONN_SWAP_TURN_OFF_VCONN),		\
						\
	S(SNK_TRY),				\
	S(SNK_TRY_WAIT),			\
	S(SNK_TRY_WAIT_DEBOUNCE),		\
	S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS),	\
	S(SRC_TRYWAIT),				\
	S(SRC_TRYWAIT_DEBOUNCE),		\
	S(SRC_TRYWAIT_UNATTACHED),		\
						\
	S(SRC_TRY),				\
	S(SRC_TRY_WAIT),			\
	S(SRC_TRY_DEBOUNCE),			\
	S(SNK_TRYWAIT),				\
	S(SNK_TRYWAIT_DEBOUNCE),		\
	S(SNK_TRYWAIT_VBUS),			\
	S(BIST_RX),				\
						\
	S(ERROR_RECOVERY),			\
	S(PORT_RESET),				\
	S(PORT_RESET_WAIT_OFF)

#define GENERATE_ENUM(e)	e
#define GENERATE_STRING(s)	#s

/* All TCPM port states, generated from FOREACH_STATE above. */
enum tcpm_state {
	FOREACH_STATE(GENERATE_ENUM)
};

/* State names for logging, indexed by enum tcpm_state. */
static const char * const tcpm_states[] = {
	FOREACH_STATE(GENERATE_STRING)
};

/*
 * VDM (Vendor Defined Message) transmit state.
 * Negative values are terminal errors, 0 is idle/done, and anything
 * greater than 0 represents an active state.
 */
enum vdm_states {
	VDM_STATE_ERR_BUSY = -3,
	VDM_STATE_ERR_SEND = -2,
	VDM_STATE_ERR_TMOUT = -1,
	VDM_STATE_DONE = 0,
	/* Anything >0 represents an active state */
	VDM_STATE_READY = 1,
	VDM_STATE_BUSY = 2,
	VDM_STATE_WAIT_RSP_BUSY = 3,
};

/*
 * Simple PD messages that can be queued for the state machine to send
 * instead of (or in addition to) a state transition.
 */
enum pd_msg_request {
	PD_MSG_NONE = 0,
	PD_MSG_CTRL_REJECT,
	PD_MSG_CTRL_WAIT,
	PD_MSG_DATA_SINK_CAP,
	PD_MSG_DATA_SOURCE_CAP,
};

/* Events from low level driver */

#define TCPM_CC_EVENT		BIT(0)	/* CC line status changed */
#define TCPM_VBUS_EVENT		BIT(1)	/* VBUS presence changed */
#define TCPM_RESET_EVENT	BIT(2)	/* hard reset received */

#define LOG_BUFFER_ENTRIES	1024
#define LOG_BUFFER_ENTRY_SIZE	128

/* Alternate mode support */

#define SVID_DISCOVERY_MAX	16

/*
 * Per-port scratch data accumulated while discovering the partner's
 * SVIDs and alternate modes via Discover SVIDs / Discover Modes.
 */
struct pd_mode_data {
	int svid_index;		/* current SVID index		*/
	int nsvids;
	u16 svids[SVID_DISCOVERY_MAX];
	int altmodes;		/* number of alternate modes	*/
	struct typec_altmode_desc altmode_desc[SVID_DISCOVERY_MAX];
};
178
/*
 * Per-port state of the Type-C Port Manager: hardware handles, current
 * connection/role status, PD protocol bookkeeping, local and partner
 * capabilities, VDM state, and the optional debugfs log buffer.
 */
struct tcpm_port {
	struct device *dev;

	struct mutex lock;		/* tcpm state machine lock */
	struct workqueue_struct *wq;

	struct typec_capability typec_caps;
	struct typec_port *typec_port;

	/* Low-level Type-C port controller driver */
	struct tcpc_dev	*tcpc;

	/* Current roles as negotiated/configured */
	enum typec_role vconn_role;
	enum typec_role pwr_role;
	enum typec_data_role data_role;
	enum typec_pwr_opmode pwr_opmode;

	/* Partner identity/registration with the typec class */
	struct usb_pd_identity partner_ident;
	struct typec_partner_desc partner_desc;
	struct typec_partner *partner;

	enum typec_cc_status cc_req;	/* CC status we asked the tcpc to present */

	/* Last reported CC line states and active polarity */
	enum typec_cc_status cc1;
	enum typec_cc_status cc2;
	enum typec_cc_polarity polarity;

	bool attached;
	bool connected;
	enum typec_port_type port_type;
	bool vbus_present;
	bool vbus_never_low;
	bool vbus_source;
	bool vbus_charge;

	bool send_discover;
	bool op_vsafe5v;

	/* Try.SNK / Try.SRC role preference and attempt counters */
	int try_role;
	int try_snk_count;
	int try_src_count;

	enum pd_msg_request queued_message;

	/* State machine bookkeeping; see tcpm_set_state() */
	enum tcpm_state enter_state;
	enum tcpm_state prev_state;
	enum tcpm_state state;
	enum tcpm_state delayed_state;
	unsigned long delayed_runtime;
	unsigned long delay_ms;

	spinlock_t pd_event_lock;
	u32 pd_events;			/* TCPM_*_EVENT bits from the driver */

	struct work_struct event_work;
	struct delayed_work state_machine;
	struct delayed_work vdm_state_machine;
	bool state_machine_running;

	/* Transmit completion handshake with tcpm_pd_transmit_complete() */
	struct completion tx_complete;
	enum tcpm_transmit_status tx_status;

	struct mutex swap_lock;		/* swap command lock */
	bool swap_pending;
	bool non_pd_role_swap;
	struct completion swap_complete;
	int swap_status;

	unsigned int message_id;	/* next outgoing PD message ID */
	unsigned int caps_count;
	unsigned int hard_reset_count;
	bool pd_capable;
	bool explicit_contract;
	unsigned int rx_msgid;		/* last received PD message ID */

	/* Partner capabilities/requests */
	u32 sink_request;
	u32 source_caps[PDO_MAX_OBJECTS];
	unsigned int nr_source_caps;
	u32 sink_caps[PDO_MAX_OBJECTS];
	unsigned int nr_sink_caps;

	/* Local capabilities */
	u32 src_pdo[PDO_MAX_OBJECTS];
	unsigned int nr_src_pdo;
	u32 snk_pdo[PDO_MAX_OBJECTS];
	unsigned int nr_snk_pdo;
	u32 snk_vdo[VDO_MAX_OBJECTS];
	unsigned int nr_snk_vdo;

	unsigned int max_snk_mv;
	unsigned int max_snk_ma;
	unsigned int max_snk_mw;
	unsigned int operating_snk_mw;

	/* Requested current / voltage */
	u32 current_limit;
	u32 supply_voltage;

	u32 bist_request;

	/* PD state for Vendor Defined Messages */
	enum vdm_states vdm_state;
	u32 vdm_retries;
	/* next Vendor Defined Message to send */
	u32 vdo_data[VDO_MAX_SIZE];
	u8 vdo_count;
	/* VDO to retry if UFP responder replied busy */
	u32 vdo_retry;

	/* Alternate mode data */

	struct pd_mode_data mode_data;
	struct typec_altmode *partner_altmode[SVID_DISCOVERY_MAX];
	struct typec_altmode *port_altmode[SVID_DISCOVERY_MAX];

	/* Deadline in jiffies to exit src_try_wait state */
	unsigned long max_wait;

#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
	struct mutex logbuffer_lock;	/* log buffer access lock */
	int logbuffer_head;
	int logbuffer_tail;
	u8 *logbuffer[LOG_BUFFER_ENTRIES];
#endif
};
305
/*
 * Work item wrapping one received PD message; queued by the receive
 * path so the message is processed in process context.
 */
struct pd_rx_event {
	struct work_struct work;
	struct tcpm_port *port;
	struct pd_message msg;
};
311
/* CC presents a source's Rp pull-up, i.e. we could attach as sink. */
#define tcpm_cc_is_sink(cc) \
	((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
	 (cc) == TYPEC_CC_RP_3_0)

/* Exactly one CC line shows Rp: valid sink attach (not debug accessory). */
#define tcpm_port_is_sink(port) \
	((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \
	 (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1)))

#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
#define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)

/* Exactly one CC line shows Rd: valid source attach. */
#define tcpm_port_is_source(port) \
	((tcpm_cc_is_source((port)->cc1) && \
	 !tcpm_cc_is_source((port)->cc2)) || \
	 (tcpm_cc_is_source((port)->cc2) && \
	  !tcpm_cc_is_source((port)->cc1)))

/* Rd on both CC lines: debug accessory mode. */
#define tcpm_port_is_debug(port) \
	(tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))

/* Ra on both CC lines: audio adapter accessory mode. */
#define tcpm_port_is_audio(port) \
	(tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))

/* Audio accessory with the other CC now open: adapter being removed. */
#define tcpm_port_is_audio_detached(port) \
	((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
	 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))

/* DRP port preferring the sink role which has not yet attempted Try.SNK. */
#define tcpm_try_snk(port) \
	((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
	(port)->port_type == TYPEC_PORT_DRP)

/* DRP port preferring the source role which has not yet attempted Try.SRC. */
#define tcpm_try_src(port) \
	((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
	(port)->port_type == TYPEC_PORT_DRP)
347
348static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
349{
9b0ae699 350 if (port->port_type == TYPEC_PORT_DRP) {
b46a9c90
BJS
351 if (port->try_role == TYPEC_SINK)
352 return SNK_UNATTACHED;
353 else if (port->try_role == TYPEC_SOURCE)
354 return SRC_UNATTACHED;
355 else if (port->tcpc->config->default_role == TYPEC_SINK)
356 return SNK_UNATTACHED;
357 /* Fall through to return SRC_UNATTACHED */
9b0ae699 358 } else if (port->port_type == TYPEC_PORT_UFP) {
f0690a25 359 return SNK_UNATTACHED;
b46a9c90 360 }
f0690a25
GR
361 return SRC_UNATTACHED;
362}
363
/* Map a typec_capability embedded in a tcpm_port back to the port. */
static inline
struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap)
{
	return container_of(cap, struct tcpm_port, typec_caps);
}
369
370static bool tcpm_port_is_disconnected(struct tcpm_port *port)
371{
372 return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
373 port->cc2 == TYPEC_CC_OPEN) ||
374 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
375 port->cc1 == TYPEC_CC_OPEN) ||
376 (port->polarity == TYPEC_POLARITY_CC2 &&
377 port->cc2 == TYPEC_CC_OPEN)));
378}
379
380/*
381 * Logging
382 */
383
384#ifdef CONFIG_DEBUG_FS
385
386static bool tcpm_log_full(struct tcpm_port *port)
387{
388 return port->logbuffer_tail ==
389 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
390}
391
e79e0125 392__printf(2, 0)
f0690a25
GR
393static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
394{
395 char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
396 u64 ts_nsec = local_clock();
397 unsigned long rem_nsec;
398
399 if (!port->logbuffer[port->logbuffer_head]) {
400 port->logbuffer[port->logbuffer_head] =
401 kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
402 if (!port->logbuffer[port->logbuffer_head])
403 return;
404 }
405
406 vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
407
408 mutex_lock(&port->logbuffer_lock);
409
410 if (tcpm_log_full(port)) {
411 port->logbuffer_head = max(port->logbuffer_head - 1, 0);
412 strcpy(tmpbuffer, "overflow");
413 }
414
415 if (port->logbuffer_head < 0 ||
416 port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
417 dev_warn(port->dev,
418 "Bad log buffer index %d\n", port->logbuffer_head);
419 goto abort;
420 }
421
422 if (!port->logbuffer[port->logbuffer_head]) {
423 dev_warn(port->dev,
424 "Log buffer index %d is NULL\n", port->logbuffer_head);
425 goto abort;
426 }
427
428 rem_nsec = do_div(ts_nsec, 1000000000);
429 scnprintf(port->logbuffer[port->logbuffer_head],
430 LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
431 (unsigned long)ts_nsec, rem_nsec / 1000,
432 tmpbuffer);
433 port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
434
435abort:
436 mutex_unlock(&port->logbuffer_lock);
437}
438
e79e0125 439__printf(2, 3)
f0690a25
GR
440static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
441{
442 va_list args;
443
444 /* Do not log while disconnected and unattached */
445 if (tcpm_port_is_disconnected(port) &&
446 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
447 port->state == DRP_TOGGLING))
448 return;
449
450 va_start(args, fmt);
451 _tcpm_log(port, fmt, args);
452 va_end(args);
453}
454
e79e0125 455__printf(2, 3)
f0690a25
GR
456static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
457{
458 va_list args;
459
460 va_start(args, fmt);
461 _tcpm_log(port, fmt, args);
462 va_end(args);
463}
464
/*
 * Dump the partner's source capability PDOs to the log, one line per
 * PDO, decoded by type (fixed / variable / battery). The flag letters
 * for fixed PDOs are: R=dual role, S=suspend supported, H=higher
 * capability, U=USB comm capable, D=data swap, E=externally powered.
 */
static void tcpm_log_source_caps(struct tcpm_port *port)
{
	int i;

	for (i = 0; i < port->nr_source_caps; i++) {
		u32 pdo = port->source_caps[i];
		enum pd_pdo_type type = pdo_type(pdo);
		char msg[64];

		switch (type) {
		case PDO_TYPE_FIXED:
			scnprintf(msg, sizeof(msg),
				  "%u mV, %u mA [%s%s%s%s%s%s]",
				  pdo_fixed_voltage(pdo),
				  pdo_max_current(pdo),
				  (pdo & PDO_FIXED_DUAL_ROLE) ?
							"R" : "",
				  (pdo & PDO_FIXED_SUSPEND) ?
							"S" : "",
				  (pdo & PDO_FIXED_HIGHER_CAP) ?
							"H" : "",
				  (pdo & PDO_FIXED_USB_COMM) ?
							"U" : "",
				  (pdo & PDO_FIXED_DATA_SWAP) ?
							"D" : "",
				  (pdo & PDO_FIXED_EXTPOWER) ?
							"E" : "");
			break;
		case PDO_TYPE_VAR:
			scnprintf(msg, sizeof(msg),
				  "%u-%u mV, %u mA",
				  pdo_min_voltage(pdo),
				  pdo_max_voltage(pdo),
				  pdo_max_current(pdo));
			break;
		case PDO_TYPE_BATT:
			scnprintf(msg, sizeof(msg),
				  "%u-%u mV, %u mW",
				  pdo_min_voltage(pdo),
				  pdo_max_voltage(pdo),
				  pdo_max_power(pdo));
			break;
		default:
			strcpy(msg, "undefined");
			break;
		}
		tcpm_log(port, " PDO %d: type %d, %s",
			 i, type, msg);
	}
}
515
/*
 * seq_file show callback: drain the log ring from tail to head.
 * The tail is only advanced if seq_file did not overflow, so a
 * truncated read can be retried and no entries are lost.
 */
static int tcpm_seq_show(struct seq_file *s, void *v)
{
	struct tcpm_port *port = (struct tcpm_port *)s->private;
	int tail;

	mutex_lock(&port->logbuffer_lock);
	tail = port->logbuffer_tail;
	while (tail != port->logbuffer_head) {
		seq_printf(s, "%s\n", port->logbuffer[tail]);
		tail = (tail + 1) % LOG_BUFFER_ENTRIES;
	}
	if (!seq_has_overflowed(s))
		port->logbuffer_tail = tail;
	mutex_unlock(&port->logbuffer_lock);

	return 0;
}
533
/* debugfs open: bind the seq_file show callback to this port. */
static int tcpm_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, tcpm_seq_show, inode->i_private);
}

static const struct file_operations tcpm_debug_operations = {
	.open		= tcpm_debug_open,
	.llseek		= seq_lseek,
	.read		= seq_read,
	.release	= single_release,
};

/* Shared "tcpm" debugfs directory; created once, never removed. */
static struct dentry *rootdir;
547
/*
 * Create the per-port debugfs log file under /sys/kernel/debug/tcpm/.
 * Also initializes the log buffer lock. The shared root directory is
 * created lazily on the first port and intentionally never removed.
 */
static int tcpm_debugfs_init(struct tcpm_port *port)
{
	mutex_init(&port->logbuffer_lock);
	/* /sys/kernel/debug/tcpm/usbcX */
	if (!rootdir) {
		rootdir = debugfs_create_dir("tcpm", NULL);
		if (!rootdir)
			return -ENOMEM;
	}

	port->dentry = debugfs_create_file(dev_name(port->dev),
					   S_IFREG | 0444, rootdir,
					   port, &tcpm_debug_operations);

	return 0;
}
564
/* Remove the per-port debugfs file (the shared rootdir is kept). */
static void tcpm_debugfs_exit(struct tcpm_port *port)
{
	debugfs_remove(port->dentry);
}
569
570#else
571
e79e0125 572__printf(2, 3)
f0690a25 573static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
e79e0125 574__printf(2, 3)
f0690a25
GR
575static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
576static void tcpm_log_source_caps(struct tcpm_port *port) { }
577static int tcpm_debugfs_init(const struct tcpm_port *port) { return 0; }
578static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
579
580#endif
581
/*
 * Hand a PD message (or a bare transmit type, when msg is NULL) to the
 * low-level driver and wait for the transmit result reported via
 * tcpm_pd_transmit_complete(). On GoodCRC success the outgoing
 * message ID counter is advanced.
 *
 * Called with port->lock held; the lock is dropped while waiting so
 * the completion path (which may run the state machine) can make
 * progress, then reacquired before returning.
 *
 * Returns 0 on success, -ETIMEDOUT if the tcpc never signalled
 * completion, -EAGAIN if the message was discarded, -EIO on failure.
 */
static int tcpm_pd_transmit(struct tcpm_port *port,
			    enum tcpm_transmit_type type,
			    const struct pd_message *msg)
{
	unsigned long timeout;
	int ret;

	if (msg)
		tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
	else
		tcpm_log(port, "PD TX, type: %#x", type);

	reinit_completion(&port->tx_complete);
	ret = port->tcpc->pd_transmit(port->tcpc, type, msg);
	if (ret < 0)
		return ret;

	mutex_unlock(&port->lock);
	timeout = wait_for_completion_timeout(&port->tx_complete,
				msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
	mutex_lock(&port->lock);
	if (!timeout)
		return -ETIMEDOUT;

	switch (port->tx_status) {
	case TCPC_TX_SUCCESS:
		port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
		return 0;
	case TCPC_TX_DISCARDED:
		return -EAGAIN;
	case TCPC_TX_FAILED:
	default:
		return -EIO;
	}
}
617
/*
 * Called by the low-level driver to report the outcome of a transmit
 * started in tcpm_pd_transmit(); wakes the waiter with the status.
 */
void tcpm_pd_transmit_complete(struct tcpm_port *port,
			       enum tcpm_transmit_status status)
{
	tcpm_log(port, "PD TX complete, status: %u", status);
	port->tx_status = status;
	complete(&port->tx_complete);
}
EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
626
627static int tcpm_mux_set(struct tcpm_port *port, enum tcpc_mux_mode mode,
628 enum tcpc_usb_switch config)
629{
630 int ret = 0;
631
632 tcpm_log(port, "Requesting mux mode %d, config %d, polarity %d",
633 mode, config, port->polarity);
634
635 if (port->tcpc->mux)
636 ret = port->tcpc->mux->set(port->tcpc->mux, mode, config,
637 port->polarity);
638
639 return ret;
640}
641
642static int tcpm_set_polarity(struct tcpm_port *port,
643 enum typec_cc_polarity polarity)
644{
645 int ret;
646
647 tcpm_log(port, "polarity %d", polarity);
648
649 ret = port->tcpc->set_polarity(port->tcpc, polarity);
650 if (ret < 0)
651 return ret;
652
653 port->polarity = polarity;
654
655 return 0;
656}
657
658static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
659{
660 int ret;
661
662 tcpm_log(port, "vconn:=%d", enable);
663
664 ret = port->tcpc->set_vconn(port->tcpc, enable);
665 if (!ret) {
666 port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
667 typec_set_vconn_role(port->typec_port, port->vconn_role);
668 }
669
670 return ret;
671}
672
673static u32 tcpm_get_current_limit(struct tcpm_port *port)
674{
675 enum typec_cc_status cc;
676 u32 limit;
677
678 cc = port->polarity ? port->cc2 : port->cc1;
679 switch (cc) {
680 case TYPEC_CC_RP_1_5:
681 limit = 1500;
682 break;
683 case TYPEC_CC_RP_3_0:
684 limit = 3000;
685 break;
686 case TYPEC_CC_RP_DEF:
687 default:
ea62cfc7
HG
688 if (port->tcpc->get_current_limit)
689 limit = port->tcpc->get_current_limit(port->tcpc);
690 else
691 limit = 0;
f0690a25
GR
692 break;
693 }
694
695 return limit;
696}
697
698static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
699{
700 int ret = -EOPNOTSUPP;
701
702 tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
703
704 if (port->tcpc->set_current_limit)
705 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
706
707 return ret;
708}
709
710/*
711 * Determine RP value to set based on maximum current supported
712 * by a port if configured as source.
713 * Returns CC value to report to link partner.
714 */
715static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
716{
717 const u32 *src_pdo = port->src_pdo;
718 int nr_pdo = port->nr_src_pdo;
719 int i;
720
721 /*
722 * Search for first entry with matching voltage.
723 * It should report the maximum supported current.
724 */
725 for (i = 0; i < nr_pdo; i++) {
726 const u32 pdo = src_pdo[i];
727
728 if (pdo_type(pdo) == PDO_TYPE_FIXED &&
729 pdo_fixed_voltage(pdo) == 5000) {
730 unsigned int curr = pdo_max_current(pdo);
731
732 if (curr >= 3000)
733 return TYPEC_CC_RP_3_0;
734 else if (curr >= 1500)
735 return TYPEC_CC_RP_1_5;
736 return TYPEC_CC_RP_DEF;
737 }
738 }
739
740 return TYPEC_CC_RP_DEF;
741}
742
/* Push the current power/data roles to the tcpc with the given attach state. */
static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
{
	return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
				     port->data_role);
}
748
/*
 * Set both power and data roles. The USB data mux is switched first
 * (connected for host, disconnected otherwise), then the tcpc is
 * programmed; only once both succeed are the cached roles and the
 * typec class updated, keeping them consistent with the hardware.
 */
static int tcpm_set_roles(struct tcpm_port *port, bool attached,
			  enum typec_role role, enum typec_data_role data)
{
	int ret;

	if (data == TYPEC_HOST)
		ret = tcpm_mux_set(port, TYPEC_MUX_USB,
				   TCPC_USB_SWITCH_CONNECT);
	else
		ret = tcpm_mux_set(port, TYPEC_MUX_NONE,
				   TCPC_USB_SWITCH_DISCONNECT);
	if (ret < 0)
		return ret;

	ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
	if (ret < 0)
		return ret;

	port->pwr_role = role;
	port->data_role = data;
	typec_set_data_role(port->typec_port, data);
	typec_set_pwr_role(port->typec_port, role);

	return 0;
}
774
775static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
776{
777 int ret;
778
779 ret = port->tcpc->set_roles(port->tcpc, true, role,
780 port->data_role);
781 if (ret < 0)
782 return ret;
783
784 port->pwr_role = role;
785 typec_set_pwr_role(port->typec_port, role);
786
787 return 0;
788}
789
/*
 * Send our Source_Capabilities message. A port with no source PDOs
 * (sink-only) answers with a Reject control message instead.
 */
static int tcpm_pd_send_source_caps(struct tcpm_port *port)
{
	struct pd_message msg;
	int i;

	memset(&msg, 0, sizeof(msg));
	if (!port->nr_src_pdo) {
		/* No source capabilities defined, sink only */
		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
					  port->pwr_role,
					  port->data_role,
					  port->message_id, 0);
	} else {
		msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
					  port->pwr_role,
					  port->data_role,
					  port->message_id,
					  port->nr_src_pdo);
	}
	for (i = 0; i < port->nr_src_pdo; i++)
		msg.payload[i] = cpu_to_le32(port->src_pdo[i]);

	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
814
/*
 * Send our Sink_Capabilities message. A port with no sink PDOs
 * (source-only) answers with a Reject control message instead.
 * Mirrors tcpm_pd_send_source_caps().
 */
static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
{
	struct pd_message msg;
	int i;

	memset(&msg, 0, sizeof(msg));
	if (!port->nr_snk_pdo) {
		/* No sink capabilities defined, source only */
		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
					  port->pwr_role,
					  port->data_role,
					  port->message_id, 0);
	} else {
		msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
					  port->pwr_role,
					  port->data_role,
					  port->message_id,
					  port->nr_snk_pdo);
	}
	for (i = 0; i < port->nr_snk_pdo; i++)
		msg.payload[i] = cpu_to_le32(port->snk_pdo[i]);

	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
839
/*
 * Transition the state machine. With a non-zero delay, the new state
 * is recorded as pending and the state machine work is (re)scheduled
 * for later; otherwise the transition happens immediately and the
 * work item is kicked unless we are already running inside it.
 */
static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
			   unsigned int delay_ms)
{
	if (delay_ms) {
		tcpm_log(port, "pending state change %s -> %s @ %u ms",
			 tcpm_states[port->state], tcpm_states[state],
			 delay_ms);
		port->delayed_state = state;
		mod_delayed_work(port->wq, &port->state_machine,
				 msecs_to_jiffies(delay_ms));
		/* Remember the deadline so a rescheduled run can resume it. */
		port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms);
		port->delay_ms = delay_ms;
	} else {
		tcpm_log(port, "state change %s -> %s",
			 tcpm_states[port->state], tcpm_states[state]);
		port->delayed_state = INVALID_STATE;
		port->prev_state = port->state;
		port->state = state;
		/*
		 * Don't re-queue the state machine work item if we're currently
		 * in the state machine and we're immediately changing states.
		 * tcpm_state_machine_work() will continue running the state
		 * machine.
		 */
		if (!port->state_machine_running)
			mod_delayed_work(port->wq, &port->state_machine, 0);
	}
}
868
/*
 * Conditional transition: only take effect if the state machine is
 * still in the state it entered the current handler with (i.e. no
 * other context changed it meanwhile); otherwise just log the skip.
 */
static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
				unsigned int delay_ms)
{
	if (port->enter_state == port->state)
		tcpm_set_state(port, state, delay_ms);
	else
		tcpm_log(port,
			 "skipped %sstate change %s -> %s [%u ms], context state %s",
			 delay_ms ? "delayed " : "",
			 tcpm_states[port->state], tcpm_states[state],
			 delay_ms, tcpm_states[port->enter_state]);
}
881
/*
 * Queue a simple PD message for the state machine to send on its next
 * run. Only one message can be queued at a time; a newer request
 * overwrites an unsent one.
 */
static void tcpm_queue_message(struct tcpm_port *port,
			       enum pd_msg_request message)
{
	port->queued_message = message;
	mod_delayed_work(port->wq, &port->state_machine, 0);
}
888
889/*
890 * VDM/VDO handling functions
891 */
892static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
893 const u32 *data, int cnt)
894{
895 port->vdo_count = cnt + 1;
896 port->vdo_data[0] = header;
897 memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
898 /* Set ready, vdm state machine will actually send */
899 port->vdm_retries = 0;
900 port->vdm_state = VDM_STATE_READY;
901}
902
/*
 * Parse a Discover Identity ACK: record the partner's ID header,
 * cert stat and product VDOs, publish them via the typec class, and
 * reset any previously discovered mode data for a fresh discovery.
 */
static void svdm_consume_identity(struct tcpm_port *port, const __le32 *payload,
				  int cnt)
{
	u32 vdo = le32_to_cpu(payload[VDO_INDEX_IDH]);
	u32 product = le32_to_cpu(payload[VDO_INDEX_PRODUCT]);

	memset(&port->mode_data, 0, sizeof(port->mode_data));

	port->partner_ident.id_header = vdo;
	port->partner_ident.cert_stat = le32_to_cpu(payload[VDO_INDEX_CSTAT]);
	port->partner_ident.product = product;

	typec_partner_set_identity(port->partner);

	tcpm_log(port, "Identity: %04x:%04x.%04x",
		 PD_IDH_VID(vdo),
		 PD_PRODUCT_PID(product), product & 0xffff);
}
921
/*
 * Parse a Discover SVIDs ACK. Each response VDO packs two 16-bit
 * SVIDs (high half first); a zero SVID terminates the list.
 *
 * Returns true if a full set of VDOs was consumed without hitting the
 * zero terminator (i.e. the partner may have more SVIDs to report and
 * another Discover SVIDs should be sent); false once the list ends or
 * on local table overflow.
 */
static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
			       int cnt)
{
	struct pd_mode_data *pmdata = &port->mode_data;
	int i;

	for (i = 1; i < cnt; i++) {
		u32 p = le32_to_cpu(payload[i]);
		u16 svid;

		svid = (p >> 16) & 0xffff;
		if (!svid)
			return false;

		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
			goto abort;

		pmdata->svids[pmdata->nsvids++] = svid;
		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);

		svid = p & 0xffff;
		if (!svid)
			return false;

		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
			goto abort;

		pmdata->svids[pmdata->nsvids++] = svid;
		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
	}
	return true;
abort:
	tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
	return false;
}
957
/*
 * Parse a Discover Modes ACK for the SVID currently being queried
 * (mode_data.svid_index): collect up to ALTMODE_MAX_MODES mode VDOs
 * into an altmode descriptor and register it with the typec class as
 * a partner alternate mode. Silently stops if the altmode table is
 * already full (overflow was logged in svdm_consume_svids()).
 */
static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
			       int cnt)
{
	struct pd_mode_data *pmdata = &port->mode_data;
	struct typec_altmode_desc *paltmode;
	struct typec_mode_desc *pmode;
	int i;

	if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
		/* Already logged in svdm_consume_svids() */
		return;
	}

	paltmode = &pmdata->altmode_desc[pmdata->altmodes];
	memset(paltmode, 0, sizeof(*paltmode));

	paltmode->svid = pmdata->svids[pmdata->svid_index];

	tcpm_log(port, " Alternate mode %d: SVID 0x%04x",
		 pmdata->altmodes, paltmode->svid);

	for (i = 1; i < cnt && paltmode->n_modes < ALTMODE_MAX_MODES; i++) {
		pmode = &paltmode->modes[paltmode->n_modes];
		memset(pmode, 0, sizeof(*pmode));
		pmode->vdo = le32_to_cpu(payload[i]);
		pmode->index = i - 1;
		paltmode->n_modes++;
		tcpm_log(port, " VDO %d: 0x%08x",
			 pmode->index, pmode->vdo);
	}
	port->partner_altmode[pmdata->altmodes] =
		typec_partner_register_altmode(port->partner, paltmode);
	if (port->partner_altmode[pmdata->altmodes] == NULL) {
		tcpm_log(port,
			 "Failed to register alternate modes for SVID 0x%04x",
			 paltmode->svid);
		return;
	}
	pmdata->altmodes++;
}
998
/* Partner's ID header says it supports alternate (modal) operation. */
#define supports_modal(port)	PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)

/*
 * Handle a received structured VDM and build the response, if any.
 *
 * For incoming commands (CMDT_INIT) we only answer Discover Identity,
 * and only while acting as UFP with identity VDOs configured; anything
 * unanswered is NAKed. For ACKs to our own discovery commands we
 * consume the payload and chain the next discovery step (identity ->
 * SVIDs -> modes per SVID).
 *
 * @response is filled with the reply VDOs; the returned value is the
 * number of valid entries (0 = nothing to send).
 * NOTE(review): response[] is a u32[8] in the caller; the identity
 * reply writes nr_snk_vdo + 1 entries, which assumes nr_snk_vdo <= 7
 * (bounded by VDO_MAX_OBJECTS) — verify against pd_vdo.h.
 */
static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
			u32 *response)
{
	u32 p0 = le32_to_cpu(payload[0]);
	int cmd_type = PD_VDO_CMDT(p0);
	int cmd = PD_VDO_CMD(p0);
	struct pd_mode_data *modep;
	int rlen = 0;
	u16 svid;
	int i;

	tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
		 p0, cmd_type, cmd, cnt);

	modep = &port->mode_data;

	switch (cmd_type) {
	case CMDT_INIT:
		switch (cmd) {
		case CMD_DISCOVER_IDENT:
			/* 6.4.4.3.1: Only respond as UFP (device) */
			if (port->data_role == TYPEC_DEVICE &&
			    port->nr_snk_vdo) {
				for (i = 0; i <  port->nr_snk_vdo; i++)
					response[i + 1] = port->snk_vdo[i];
				rlen = port->nr_snk_vdo + 1;
			}
			break;
		case CMD_DISCOVER_SVID:
			break;
		case CMD_DISCOVER_MODES:
			break;
		case CMD_ENTER_MODE:
			break;
		case CMD_EXIT_MODE:
			break;
		case CMD_ATTENTION:
			break;
		default:
			break;
		}
		/* rlen >= 1: ACK with data; 0: NAK; < 0: busy. */
		if (rlen >= 1) {
			response[0] = p0 | VDO_CMDT(CMDT_RSP_ACK);
		} else if (rlen == 0) {
			response[0] = p0 | VDO_CMDT(CMDT_RSP_NAK);
			rlen = 1;
		} else {
			response[0] = p0 | VDO_CMDT(CMDT_RSP_BUSY);
			rlen = 1;
		}
		break;
	case CMDT_RSP_ACK:
		/* silently drop message if we are not connected */
		if (!port->partner)
			break;

		switch (cmd) {
		case CMD_DISCOVER_IDENT:
			/* 6.4.4.3.1 */
			svdm_consume_identity(port, payload, cnt);
			response[0] = VDO(USB_SID_PD, 1, CMD_DISCOVER_SVID);
			rlen = 1;
			break;
		case CMD_DISCOVER_SVID:
			/* 6.4.4.3.2 */
			if (svdm_consume_svids(port, payload, cnt)) {
				response[0] = VDO(USB_SID_PD, 1,
						  CMD_DISCOVER_SVID);
				rlen = 1;
			} else if (modep->nsvids && supports_modal(port)) {
				response[0] = VDO(modep->svids[0], 1,
						  CMD_DISCOVER_MODES);
				rlen = 1;
			}
			break;
		case CMD_DISCOVER_MODES:
			/* 6.4.4.3.3 */
			svdm_consume_modes(port, payload, cnt);
			modep->svid_index++;
			if (modep->svid_index < modep->nsvids) {
				svid = modep->svids[modep->svid_index];
				response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
				rlen = 1;
			} else {
				/* enter alternate mode if/when implemented */
			}
			break;
		case CMD_ENTER_MODE:
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return rlen;
}
1100
/*
 * Entry point for a received VDM. If we were waiting on our own VDM
 * and the partner replied BUSY, arrange a retry after PD_T_VDM_BUSY;
 * otherwise dispatch structured VDMs to tcpm_pd_svdm() and queue any
 * response it built. Unstructured VDMs are ignored.
 */
static void tcpm_handle_vdm_request(struct tcpm_port *port,
				    const __le32 *payload, int cnt)
{
	int rlen = 0;
	u32 response[8] = { };
	u32 p0 = le32_to_cpu(payload[0]);

	if (port->vdm_state == VDM_STATE_BUSY) {
		/* If UFP responded busy retry after timeout */
		if (PD_VDO_CMDT(p0) == CMDT_RSP_BUSY) {
			port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
			/* Re-issue the original command (as CMDT_INIT) later. */
			port->vdo_retry = (p0 & ~VDO_CMDT_MASK) |
				CMDT_INIT;
			mod_delayed_work(port->wq, &port->vdm_state_machine,
					 msecs_to_jiffies(PD_T_VDM_BUSY));
			return;
		}
		port->vdm_state = VDM_STATE_DONE;
	}

	if (PD_VDO_SVDM(p0))
		rlen = tcpm_pd_svdm(port, payload, cnt, response);

	if (rlen > 0) {
		tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
		mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
	}
}
1129
/*
 * Build a VDM header for @vid/@cmd, queue it with @count data objects
 * and kick the VDM state machine. Messages for the PD SID, or with a
 * command up to CMD_ATTENTION, are marked structured. @count is
 * clamped (with a warning) to what the VDO buffer can hold.
 */
static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
			  const u32 *data, int count)
{
	u32 header;

	if (WARN_ON(count > VDO_MAX_SIZE - 1))
		count = VDO_MAX_SIZE - 1;

	/* set VDM header with VID & CMD */
	header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
			1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), cmd);
	tcpm_queue_vdm(port, header, data, count);

	mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
}
1145
1146static unsigned int vdm_ready_timeout(u32 vdm_hdr)
1147{
1148 unsigned int timeout;
1149 int cmd = PD_VDO_CMD(vdm_hdr);
1150
1151 /* its not a structured VDM command */
1152 if (!PD_VDO_SVDM(vdm_hdr))
1153 return PD_T_VDM_UNSTRUCTURED;
1154
1155 switch (PD_VDO_CMDT(vdm_hdr)) {
1156 case CMDT_INIT:
1157 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1158 timeout = PD_T_VDM_WAIT_MODE_E;
1159 else
1160 timeout = PD_T_VDM_SNDR_RSP;
1161 break;
1162 default:
1163 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1164 timeout = PD_T_VDM_E_MODE;
1165 else
1166 timeout = PD_T_VDM_RCVR_RSP;
1167 break;
1168 }
1169 return timeout;
1170}
1171
/*
 * Run one step of the VDM transmit state machine.
 * Called with port->lock held (see vdm_state_machine_work()).
 */
static void vdm_run_state_machine(struct tcpm_port *port)
{
	struct pd_message msg;
	int i, res;

	switch (port->vdm_state) {
	case VDM_STATE_READY:
		/* Only transmit VDM if attached */
		if (!port->attached) {
			port->vdm_state = VDM_STATE_ERR_BUSY;
			break;
		}

		/*
		 * if there's traffic or we're not in PDO ready state don't send
		 * a VDM.
		 */
		if (port->state != SRC_READY && port->state != SNK_READY)
			break;

		/* Prepare and send VDM */
		memset(&msg, 0, sizeof(msg));
		msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
					  port->pwr_role,
					  port->data_role,
					  port->message_id, port->vdo_count);
		for (i = 0; i < port->vdo_count; i++)
			msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
		res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
		if (res < 0) {
			port->vdm_state = VDM_STATE_ERR_SEND;
		} else {
			unsigned long timeout;

			/* Sent; wait for the response or a timeout */
			port->vdm_retries = 0;
			port->vdm_state = VDM_STATE_BUSY;
			timeout = vdm_ready_timeout(port->vdo_data[0]);
			mod_delayed_work(port->wq, &port->vdm_state_machine,
					 timeout);
		}
		break;
	case VDM_STATE_WAIT_RSP_BUSY:
		/* Partner replied BUSY earlier; re-send the saved command */
		port->vdo_data[0] = port->vdo_retry;
		port->vdo_count = 1;
		port->vdm_state = VDM_STATE_READY;
		break;
	case VDM_STATE_BUSY:
		/* Reached on timer expiry: no response arrived in time */
		port->vdm_state = VDM_STATE_ERR_TMOUT;
		break;
	case VDM_STATE_ERR_SEND:
		/*
		 * A partner which does not support USB PD will not reply,
		 * so this is not a fatal error. At the same time, some
		 * devices may not return GoodCRC under some circumstances,
		 * so we need to retry.
		 */
		if (port->vdm_retries < 3) {
			tcpm_log(port, "VDM Tx error, retry");
			port->vdm_retries++;
			port->vdm_state = VDM_STATE_READY;
		}
		break;
	default:
		break;
	}
}
1238
1239static void vdm_state_machine_work(struct work_struct *work)
1240{
1241 struct tcpm_port *port = container_of(work, struct tcpm_port,
1242 vdm_state_machine.work);
1243 enum vdm_states prev_state;
1244
1245 mutex_lock(&port->lock);
1246
1247 /*
1248 * Continue running as long as the port is not busy and there was
1249 * a state change.
1250 */
1251 do {
1252 prev_state = port->vdm_state;
1253 vdm_run_state_machine(port);
1254 } while (port->vdm_state != prev_state &&
1255 port->vdm_state != VDM_STATE_BUSY);
1256
1257 mutex_unlock(&port->lock);
1258}
1259
1260/*
1261 * PD (data, control) command handling functions
1262 */
/*
 * Handle an incoming PD data message (one carrying data objects).
 * Called with port->lock held from tcpm_pd_rx_handler().
 */
static void tcpm_pd_data_request(struct tcpm_port *port,
				 const struct pd_message *msg)
{
	enum pd_data_msg_type type = pd_header_type_le(msg->header);
	unsigned int cnt = pd_header_cnt_le(msg->header);
	unsigned int i;

	switch (type) {
	case PD_DATA_SOURCE_CAP:
		/* Source capabilities are only meaningful to a sink */
		if (port->pwr_role != TYPEC_SINK)
			break;

		for (i = 0; i < cnt; i++)
			port->source_caps[i] = le32_to_cpu(msg->payload[i]);

		port->nr_source_caps = cnt;

		tcpm_log_source_caps(port);

		/*
		 * This message may be received even if VBUS is not
		 * present. This is quite unexpected; see USB PD
		 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
		 * However, at the same time, we must be ready to
		 * receive this message and respond to it 15ms after
		 * receiving PS_RDY during power swap operations, no matter
		 * if VBUS is available or not (USB PD specification,
		 * section 6.5.9.2).
		 * So we need to accept the message either way,
		 * but be prepared to keep waiting for VBUS after it was
		 * handled.
		 */
		tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
		break;
	case PD_DATA_REQUEST:
		/* Only a source can be asked for power; Request has one RDO */
		if (port->pwr_role != TYPEC_SOURCE ||
		    cnt != 1) {
			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
			break;
		}
		port->sink_request = le32_to_cpu(msg->payload[0]);
		tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
		break;
	case PD_DATA_SINK_CAP:
		/* We don't do anything with this at the moment... */
		for (i = 0; i < cnt; i++)
			port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
		port->nr_sink_caps = cnt;
		break;
	case PD_DATA_VENDOR_DEF:
		tcpm_handle_vdm_request(port, msg->payload, cnt);
		break;
	case PD_DATA_BIST:
		/* Only enter BIST receive mode from a ready state */
		if (port->state == SRC_READY || port->state == SNK_READY) {
			port->bist_request = le32_to_cpu(msg->payload[0]);
			tcpm_set_state(port, BIST_RX, 0);
		}
		break;
	default:
		tcpm_log(port, "Unhandled data message type %#x", type);
		break;
	}
}
1326
/*
 * Handle an incoming PD control message (no data objects).
 * Called with port->lock held from tcpm_pd_rx_handler().
 */
static void tcpm_pd_ctrl_request(struct tcpm_port *port,
				 const struct pd_message *msg)
{
	enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
	enum tcpm_state next_state;

	switch (type) {
	case PD_CTRL_GOOD_CRC:
	case PD_CTRL_PING:
		/* Nothing to do; GoodCRC is handled by the TCPC */
		break;
	case PD_CTRL_GET_SOURCE_CAP:
		/* Answer capability queries only from a ready state */
		switch (port->state) {
		case SRC_READY:
		case SNK_READY:
			tcpm_queue_message(port, PD_MSG_DATA_SOURCE_CAP);
			break;
		default:
			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
			break;
		}
		break;
	case PD_CTRL_GET_SINK_CAP:
		switch (port->state) {
		case SRC_READY:
		case SNK_READY:
			tcpm_queue_message(port, PD_MSG_DATA_SINK_CAP);
			break;
		default:
			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
			break;
		}
		break;
	case PD_CTRL_GOTO_MIN:
		break;
	case PD_CTRL_PS_RDY:
		/* Partner's power supply is ready; advance contract/swap */
		switch (port->state) {
		case SNK_TRANSITION_SINK:
			if (port->vbus_present) {
				tcpm_set_current_limit(port,
						       port->current_limit,
						       port->supply_voltage);
				port->explicit_contract = true;
				tcpm_set_state(port, SNK_READY, 0);
			} else {
				/*
				 * Seen after power swap. Keep waiting for VBUS
				 * in a transitional state.
				 */
				tcpm_set_state(port,
					       SNK_TRANSITION_SINK_VBUS, 0);
			}
			break;
		case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
			tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
			break;
		case PR_SWAP_SNK_SRC_SINK_OFF:
			tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
			break;
		case VCONN_SWAP_WAIT_FOR_VCONN:
			tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
			break;
		default:
			break;
		}
		break;
	case PD_CTRL_REJECT:
	case PD_CTRL_WAIT:
		switch (port->state) {
		case SNK_NEGOTIATE_CAPABILITIES:
			/* USB PD specification, Figure 8-43 */
			if (port->explicit_contract)
				next_state = SNK_READY;
			else
				next_state = SNK_WAIT_CAPABILITIES;
			tcpm_set_state(port, next_state, 0);
			break;
		case DR_SWAP_SEND:
			/* WAIT means "try again later"; REJECT is final */
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, DR_SWAP_CANCEL, 0);
			break;
		case PR_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, PR_SWAP_CANCEL, 0);
			break;
		case VCONN_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
			break;
		default:
			break;
		}
		break;
	case PD_CTRL_ACCEPT:
		switch (port->state) {
		case SNK_NEGOTIATE_CAPABILITIES:
			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
			break;
		case SOFT_RESET_SEND:
			/* Accepted soft reset restarts message ID tracking */
			port->message_id = 0;
			port->rx_msgid = -1;
			if (port->pwr_role == TYPEC_SOURCE)
				next_state = SRC_SEND_CAPABILITIES;
			else
				next_state = SNK_WAIT_CAPABILITIES;
			tcpm_set_state(port, next_state, 0);
			break;
		case DR_SWAP_SEND:
			tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
			break;
		case PR_SWAP_SEND:
			tcpm_set_state(port, PR_SWAP_START, 0);
			break;
		case VCONN_SWAP_SEND:
			tcpm_set_state(port, VCONN_SWAP_START, 0);
			break;
		default:
			break;
		}
		break;
	case PD_CTRL_SOFT_RESET:
		tcpm_set_state(port, SOFT_RESET, 0);
		break;
	case PD_CTRL_DR_SWAP:
		/* Data role swap requires a dual-role port */
		if (port->port_type != TYPEC_PORT_DRP) {
			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
			break;
		}
		/*
		 * XXX
		 * 6.3.9: If an alternate mode is active, a request to swap
		 * alternate modes shall trigger a port reset.
		 */
		switch (port->state) {
		case SRC_READY:
		case SNK_READY:
			tcpm_set_state(port, DR_SWAP_ACCEPT, 0);
			break;
		default:
			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
			break;
		}
		break;
	case PD_CTRL_PR_SWAP:
		/* Power role swap likewise requires a dual-role port */
		if (port->port_type != TYPEC_PORT_DRP) {
			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
			break;
		}
		switch (port->state) {
		case SRC_READY:
		case SNK_READY:
			tcpm_set_state(port, PR_SWAP_ACCEPT, 0);
			break;
		default:
			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
			break;
		}
		break;
	case PD_CTRL_VCONN_SWAP:
		switch (port->state) {
		case SRC_READY:
		case SNK_READY:
			tcpm_set_state(port, VCONN_SWAP_ACCEPT, 0);
			break;
		default:
			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
			break;
		}
		break;
	default:
		tcpm_log(port, "Unhandled ctrl message type %#x", type);
		break;
	}
}
1503
/*
 * Workqueue handler for a received PD message. Performs duplicate
 * MessageID filtering and a data-role sanity check, then dispatches to
 * the data/control handlers. Frees the event allocated by
 * tcpm_pd_receive().
 */
static void tcpm_pd_rx_handler(struct work_struct *work)
{
	struct pd_rx_event *event = container_of(work,
						 struct pd_rx_event, work);
	const struct pd_message *msg = &event->msg;
	unsigned int cnt = pd_header_cnt_le(msg->header);
	struct tcpm_port *port = event->port;

	mutex_lock(&port->lock);

	tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
		 port->attached);

	if (port->attached) {
		enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
		unsigned int msgid = pd_header_msgid_le(msg->header);

		/*
		 * USB PD standard, 6.6.1.2:
		 * "... if MessageID value in a received Message is the
		 * same as the stored value, the receiver shall return a
		 * GoodCRC Message with that MessageID value and drop
		 * the Message (this is a retry of an already received
		 * Message). Note: this shall not apply to the Soft_Reset
		 * Message which always has a MessageID value of zero."
		 */
		if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
			goto done;
		port->rx_msgid = msgid;

		/*
		 * If both ends believe to be DFP/host, we have a data role
		 * mismatch.
		 */
		if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
		    (port->data_role == TYPEC_HOST)) {
			tcpm_log(port,
				 "Data role mismatch, initiating error recovery");
			tcpm_set_state(port, ERROR_RECOVERY, 0);
		} else {
			/* A non-zero object count marks a data message */
			if (cnt)
				tcpm_pd_data_request(port, msg);
			else
				tcpm_pd_ctrl_request(port, msg);
		}
	}

done:
	mutex_unlock(&port->lock);
	kfree(event);
}
1555
1556void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
1557{
1558 struct pd_rx_event *event;
1559
1560 event = kzalloc(sizeof(*event), GFP_ATOMIC);
1561 if (!event)
1562 return;
1563
1564 INIT_WORK(&event->work, tcpm_pd_rx_handler);
1565 event->port = port;
1566 memcpy(&event->msg, msg, sizeof(*msg));
1567 queue_work(port->wq, &event->work);
1568}
1569EXPORT_SYMBOL_GPL(tcpm_pd_receive);
1570
1571static int tcpm_pd_send_control(struct tcpm_port *port,
1572 enum pd_ctrl_msg_type type)
1573{
1574 struct pd_message msg;
1575
1576 memset(&msg, 0, sizeof(msg));
1577 msg.header = PD_HEADER_LE(type, port->pwr_role,
1578 port->data_role,
1579 port->message_id, 0);
1580
1581 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1582}
1583
1584/*
1585 * Send queued message without affecting state.
1586 * Return true if state machine should go back to sleep,
1587 * false otherwise.
1588 */
1589static bool tcpm_send_queued_message(struct tcpm_port *port)
1590{
1591 enum pd_msg_request queued_message;
1592
1593 do {
1594 queued_message = port->queued_message;
1595 port->queued_message = PD_MSG_NONE;
1596
1597 switch (queued_message) {
1598 case PD_MSG_CTRL_WAIT:
1599 tcpm_pd_send_control(port, PD_CTRL_WAIT);
1600 break;
1601 case PD_MSG_CTRL_REJECT:
1602 tcpm_pd_send_control(port, PD_CTRL_REJECT);
1603 break;
1604 case PD_MSG_DATA_SINK_CAP:
1605 tcpm_pd_send_sink_caps(port);
1606 break;
1607 case PD_MSG_DATA_SOURCE_CAP:
1608 tcpm_pd_send_source_caps(port);
1609 break;
1610 default:
1611 break;
1612 }
1613 } while (port->queued_message != PD_MSG_NONE);
1614
1615 if (port->delayed_state != INVALID_STATE) {
1616 if (time_is_after_jiffies(port->delayed_runtime)) {
1617 mod_delayed_work(port->wq, &port->state_machine,
1618 port->delayed_runtime - jiffies);
1619 return true;
1620 }
1621 port->delayed_state = INVALID_STATE;
1622 }
1623 return false;
1624}
1625
1626static int tcpm_pd_check_request(struct tcpm_port *port)
1627{
1628 u32 pdo, rdo = port->sink_request;
1629 unsigned int max, op, pdo_max, index;
1630 enum pd_pdo_type type;
1631
1632 index = rdo_index(rdo);
1633 if (!index || index > port->nr_src_pdo)
1634 return -EINVAL;
1635
1636 pdo = port->src_pdo[index - 1];
1637 type = pdo_type(pdo);
1638 switch (type) {
1639 case PDO_TYPE_FIXED:
1640 case PDO_TYPE_VAR:
1641 max = rdo_max_current(rdo);
1642 op = rdo_op_current(rdo);
1643 pdo_max = pdo_max_current(pdo);
1644
1645 if (op > pdo_max)
1646 return -EINVAL;
1647 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1648 return -EINVAL;
1649
1650 if (type == PDO_TYPE_FIXED)
1651 tcpm_log(port,
1652 "Requested %u mV, %u mA for %u / %u mA",
1653 pdo_fixed_voltage(pdo), pdo_max, op, max);
1654 else
1655 tcpm_log(port,
1656 "Requested %u -> %u mV, %u mA for %u / %u mA",
1657 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1658 pdo_max, op, max);
1659 break;
1660 case PDO_TYPE_BATT:
1661 max = rdo_max_power(rdo);
1662 op = rdo_op_power(rdo);
1663 pdo_max = pdo_max_power(pdo);
1664
1665 if (op > pdo_max)
1666 return -EINVAL;
1667 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1668 return -EINVAL;
1669 tcpm_log(port,
1670 "Requested %u -> %u mV, %u mW for %u / %u mW",
1671 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1672 pdo_max, op, max);
1673 break;
1674 default:
1675 return -EINVAL;
1676 }
1677
1678 port->op_vsafe5v = index == 1;
1679
1680 return 0;
1681}
1682
1683static int tcpm_pd_select_pdo(struct tcpm_port *port)
1684{
1685 unsigned int i, max_mw = 0, max_mv = 0;
1686 int ret = -EINVAL;
1687
1688 /*
1689 * Select the source PDO providing the most power while staying within
1690 * the board's voltage limits. Prefer PDO providing exp
1691 */
1692 for (i = 0; i < port->nr_source_caps; i++) {
1693 u32 pdo = port->source_caps[i];
1694 enum pd_pdo_type type = pdo_type(pdo);
1695 unsigned int mv, ma, mw;
1696
1697 if (type == PDO_TYPE_FIXED)
1698 mv = pdo_fixed_voltage(pdo);
1699 else
1700 mv = pdo_min_voltage(pdo);
1701
1702 if (type == PDO_TYPE_BATT) {
1703 mw = pdo_max_power(pdo);
1704 } else {
1705 ma = min(pdo_max_current(pdo),
1706 port->max_snk_ma);
1707 mw = ma * mv / 1000;
1708 }
1709
1710 /* Perfer higher voltages if available */
1711 if ((mw > max_mw || (mw == max_mw && mv > max_mv)) &&
1712 mv <= port->max_snk_mv) {
1713 ret = i;
1714 max_mw = mw;
1715 max_mv = mv;
1716 }
1717 }
1718
1719 return ret;
1720}
1721
/*
 * Build a Request Data Object for the best available source PDO.
 * On success stores the RDO in *rdo and records the negotiated
 * operating point in port->current_limit / port->supply_voltage.
 * Returns 0 on success, -EINVAL if no usable PDO exists.
 */
static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
{
	unsigned int mv, ma, mw, flags;
	unsigned int max_ma, max_mw;
	enum pd_pdo_type type;
	int index;
	u32 pdo;

	index = tcpm_pd_select_pdo(port);
	if (index < 0)
		return -EINVAL;
	pdo = port->source_caps[index];
	type = pdo_type(pdo);

	if (type == PDO_TYPE_FIXED)
		mv = pdo_fixed_voltage(pdo);
	else
		mv = pdo_min_voltage(pdo);

	/* Select maximum available current within the board's power limit */
	if (type == PDO_TYPE_BATT) {
		mw = pdo_max_power(pdo);
		ma = 1000 * min(mw, port->max_snk_mw) / mv;
	} else {
		ma = min(pdo_max_current(pdo),
			 1000 * port->max_snk_mw / mv);
	}
	ma = min(ma, port->max_snk_ma);

	flags = RDO_USB_COMM | RDO_NO_SUSPEND;

	/* Set mismatch bit if offered power is less than operating power */
	mw = ma * mv / 1000;
	max_ma = ma;
	max_mw = mw;
	if (mw < port->operating_snk_mw) {
		flags |= RDO_CAP_MISMATCH;
		max_mw = port->operating_snk_mw;
		max_ma = max_mw * 1000 / mv;
	}

	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
		 port->polarity);

	/* RDO object position is 1-based */
	if (type == PDO_TYPE_BATT) {
		*rdo = RDO_BATT(index + 1, mw, max_mw, flags);

		tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
			 index, mv, mw,
			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
	} else {
		*rdo = RDO_FIXED(index + 1, ma, max_ma, flags);

		tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
			 index, mv, ma,
			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
	}

	port->current_limit = ma;
	port->supply_voltage = mv;

	return 0;
}
1787
1788static int tcpm_pd_send_request(struct tcpm_port *port)
1789{
1790 struct pd_message msg;
1791 int ret;
1792 u32 rdo;
1793
1794 ret = tcpm_pd_build_request(port, &rdo);
1795 if (ret < 0)
1796 return ret;
1797
1798 memset(&msg, 0, sizeof(msg));
1799 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
1800 port->pwr_role,
1801 port->data_role,
1802 port->message_id, 1);
1803 msg.payload[0] = cpu_to_le32(rdo);
1804
1805 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1806}
1807
1808static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
1809{
1810 int ret;
1811
1812 if (enable && port->vbus_charge)
1813 return -EINVAL;
1814
1815 tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
1816
1817 ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
1818 if (ret < 0)
1819 return ret;
1820
1821 port->vbus_source = enable;
1822 return 0;
1823}
1824
1825static int tcpm_set_charge(struct tcpm_port *port, bool charge)
1826{
1827 int ret;
1828
1829 if (charge && port->vbus_source)
1830 return -EINVAL;
1831
1832 if (charge != port->vbus_charge) {
1833 tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
1834 ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
1835 charge);
1836 if (ret < 0)
1837 return ret;
1838 }
1839 port->vbus_charge = charge;
1840 return 0;
1841}
1842
1843static bool tcpm_start_drp_toggling(struct tcpm_port *port)
1844{
1845 int ret;
1846
1847 if (port->tcpc->start_drp_toggling &&
9b0ae699 1848 port->port_type == TYPEC_PORT_DRP) {
f0690a25
GR
1849 tcpm_log_force(port, "Start DRP toggling");
1850 ret = port->tcpc->start_drp_toggling(port->tcpc,
1851 tcpm_rp_cc(port));
1852 if (!ret)
1853 return true;
1854 }
1855
1856 return false;
1857}
1858
/* Apply the CC pull resistor setting and remember what was requested */
static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
{
	tcpm_log(port, "cc:=%d", cc);
	port->cc_req = cc;
	port->tcpc->set_cc(port->tcpc, cc);
}
1865
1866static int tcpm_init_vbus(struct tcpm_port *port)
1867{
1868 int ret;
1869
1870 ret = port->tcpc->set_vbus(port->tcpc, false, false);
1871 port->vbus_source = false;
1872 port->vbus_charge = false;
1873 return ret;
1874}
1875
1876static int tcpm_init_vconn(struct tcpm_port *port)
1877{
1878 int ret;
1879
1880 ret = port->tcpc->set_vconn(port->tcpc, false);
1881 port->vconn_role = TYPEC_SINK;
1882 return ret;
1883}
1884
1885static void tcpm_typec_connect(struct tcpm_port *port)
1886{
1887 if (!port->connected) {
1888 /* Make sure we don't report stale identity information */
1889 memset(&port->partner_ident, 0, sizeof(port->partner_ident));
1890 port->partner_desc.usb_pd = port->pd_capable;
1891 if (tcpm_port_is_debug(port))
1892 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
1893 else if (tcpm_port_is_audio(port))
1894 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
1895 else
1896 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
1897 port->partner = typec_register_partner(port->typec_port,
1898 &port->partner_desc);
1899 port->connected = true;
1900 }
1901}
1902
/*
 * Bring the port up as an attached source: set polarity from the CC
 * pins, assume source/host roles, enable PD receive, VCONN (only when
 * required by the CC state) and finally VBUS. Failures unwind the
 * steps already taken via the goto chain.
 */
static int tcpm_src_attach(struct tcpm_port *port)
{
	/* Rd seen on CC2 means the cable is flipped */
	enum typec_cc_polarity polarity =
				port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
							 : TYPEC_POLARITY_CC1;
	int ret;

	if (port->attached)
		return 0;

	ret = tcpm_set_polarity(port, polarity);
	if (ret < 0)
		return ret;

	ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
	if (ret < 0)
		return ret;

	ret = port->tcpc->set_pd_rx(port->tcpc, true);
	if (ret < 0)
		goto out_disable_mux;

	/*
	 * USB Type-C specification, version 1.2,
	 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
	 * Enable VCONN only if the non-RD port is set to RA.
	 */
	if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
	    (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
		ret = tcpm_set_vconn(port, true);
		if (ret < 0)
			goto out_disable_pd;
	}

	ret = tcpm_set_vbus(port, true);
	if (ret < 0)
		goto out_disable_vconn;

	/* PD capability only becomes known once the partner talks PD */
	port->pd_capable = false;

	port->partner = NULL;

	port->attached = true;
	port->send_discover = true;

	return 0;

out_disable_vconn:
	tcpm_set_vconn(port, false);
out_disable_pd:
	port->tcpc->set_pd_rx(port->tcpc, false);
out_disable_mux:
	tcpm_mux_set(port, TYPEC_MUX_NONE, TCPC_USB_SWITCH_DISCONNECT);
	return ret;
}
1958
1959static void tcpm_typec_disconnect(struct tcpm_port *port)
1960{
1961 if (port->connected) {
1962 typec_unregister_partner(port->partner);
1963 port->partner = NULL;
1964 port->connected = false;
1965 }
1966}
1967
1968static void tcpm_unregister_altmodes(struct tcpm_port *port)
1969{
1970 struct pd_mode_data *modep = &port->mode_data;
1971 int i;
1972
1973 for (i = 0; i < modep->altmodes; i++) {
1974 typec_unregister_altmode(port->partner_altmode[i]);
1975 port->partner_altmode[i] = NULL;
1976 }
1977
1978 memset(modep, 0, sizeof(*modep));
1979}
1980
/*
 * Return the port to its unattached, powered-down baseline: drop all
 * partner/altmode state, disable PD receive, VBUS, charging and VCONN,
 * and reset polarity and the try-role counters.
 */
static void tcpm_reset_port(struct tcpm_port *port)
{
	tcpm_unregister_altmodes(port);
	tcpm_typec_disconnect(port);
	port->attached = false;
	port->pd_capable = false;

	/*
	 * First Rx ID should be 0; set this to a sentinel of -1 so that
	 * we can check tcpm_pd_rx_handler() if we had seen it before.
	 */
	port->rx_msgid = -1;

	port->tcpc->set_pd_rx(port->tcpc, false);
	tcpm_init_vbus(port);	/* also disables charging */
	tcpm_init_vconn(port);
	tcpm_set_current_limit(port, 0, 0);
	tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
	tcpm_set_attached_state(port, false);
	port->try_src_count = 0;
	port->try_snk_count = 0;
}
2003
/* Common detach handling for all roles; no-op when not attached */
static void tcpm_detach(struct tcpm_port *port)
{
	if (!port->attached)
		return;

	/* A real disconnect ends the hard-reset retry sequence */
	if (tcpm_port_is_disconnected(port))
		port->hard_reset_count = 0;

	tcpm_reset_port(port);
}
2014
/* Source-role detach; currently just the common detach path */
static void tcpm_src_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
2019
2020static int tcpm_snk_attach(struct tcpm_port *port)
2021{
2022 int ret;
2023
2024 if (port->attached)
2025 return 0;
2026
2027 ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
2028 TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
2029 if (ret < 0)
2030 return ret;
2031
2032 ret = tcpm_set_roles(port, true, TYPEC_SINK, TYPEC_DEVICE);
2033 if (ret < 0)
2034 return ret;
2035
2036 port->pd_capable = false;
2037
2038 port->partner = NULL;
2039
2040 port->attached = true;
2041 port->send_discover = true;
2042
2043 return 0;
2044}
2045
/* Sink-role detach; currently just the common detach path */
static void tcpm_snk_detach(struct tcpm_port *port)
{
	tcpm_detach(port);

	/* XXX: (Dis)connect SuperSpeed mux? */
}
2052
2053static int tcpm_acc_attach(struct tcpm_port *port)
2054{
2055 int ret;
2056
2057 if (port->attached)
2058 return 0;
2059
2060 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
2061 if (ret < 0)
2062 return ret;
2063
2064 port->partner = NULL;
2065
2066 tcpm_typec_connect(port);
2067
2068 port->attached = true;
2069
2070 return 0;
2071}
2072
/* Accessory detach; currently just the common detach path */
static void tcpm_acc_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
2077
/*
 * Next state after a failed exchange, checked in priority order:
 * retry the hard reset while attempts remain; escalate to error
 * recovery for PD-capable partners; otherwise fall back to the
 * unattached state for the current power role (or SNK_READY when the
 * sink was still waiting for capabilities).
 */
static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
{
	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
		return HARD_RESET_SEND;
	if (port->pd_capable)
		return ERROR_RECOVERY;
	if (port->pwr_role == TYPEC_SOURCE)
		return SRC_UNATTACHED;
	if (port->state == SNK_WAIT_CAPABILITIES)
		return SNK_READY;
	return SNK_UNATTACHED;
}
2090
2091static inline enum tcpm_state ready_state(struct tcpm_port *port)
2092{
2093 if (port->pwr_role == TYPEC_SOURCE)
2094 return SRC_READY;
2095 else
2096 return SNK_READY;
2097}
2098
2099static inline enum tcpm_state unattached_state(struct tcpm_port *port)
2100{
13cb492c
BJS
2101 if (port->port_type == TYPEC_PORT_DRP) {
2102 if (port->pwr_role == TYPEC_SOURCE)
2103 return SRC_UNATTACHED;
2104 else
2105 return SNK_UNATTACHED;
2106 } else if (port->port_type == TYPEC_PORT_DFP) {
f0690a25 2107 return SRC_UNATTACHED;
13cb492c
BJS
2108 }
2109
2110 return SNK_UNATTACHED;
f0690a25
GR
2111}
2112
2113static void tcpm_check_send_discover(struct tcpm_port *port)
2114{
2115 if (port->data_role == TYPEC_HOST && port->send_discover &&
2116 port->pd_capable) {
2117 tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
2118 port->send_discover = false;
2119 }
2120}
2121
2122static void tcpm_swap_complete(struct tcpm_port *port, int result)
2123{
2124 if (port->swap_pending) {
2125 port->swap_status = result;
2126 port->swap_pending = false;
b17dd571 2127 port->non_pd_role_swap = false;
f0690a25
GR
2128 complete(&port->swap_complete);
2129 }
2130}
2131
53b70e5c 2132static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
fce042f0
BJS
2133{
2134 switch (cc) {
2135 case TYPEC_CC_RP_1_5:
2136 return TYPEC_PWR_MODE_1_5A;
2137 case TYPEC_CC_RP_3_0:
2138 return TYPEC_PWR_MODE_3_0A;
2139 case TYPEC_CC_RP_DEF:
2140 default:
2141 return TYPEC_PWR_MODE_USB;
2142 }
2143}
2144
f0690a25
GR
2145static void run_state_machine(struct tcpm_port *port)
2146{
2147 int ret;
fce042f0 2148 enum typec_pwr_opmode opmode;
131c7d12 2149 unsigned int msecs;
f0690a25
GR
2150
2151 port->enter_state = port->state;
2152 switch (port->state) {
2153 case DRP_TOGGLING:
2154 break;
2155 /* SRC states */
2156 case SRC_UNATTACHED:
b17dd571
GR
2157 if (!port->non_pd_role_swap)
2158 tcpm_swap_complete(port, -ENOTCONN);
f0690a25
GR
2159 tcpm_src_detach(port);
2160 if (tcpm_start_drp_toggling(port)) {
2161 tcpm_set_state(port, DRP_TOGGLING, 0);
2162 break;
2163 }
2164 tcpm_set_cc(port, tcpm_rp_cc(port));
9b0ae699 2165 if (port->port_type == TYPEC_PORT_DRP)
f0690a25
GR
2166 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
2167 break;
2168 case SRC_ATTACH_WAIT:
2169 if (tcpm_port_is_debug(port))
2170 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
2171 PD_T_CC_DEBOUNCE);
2172 else if (tcpm_port_is_audio(port))
2173 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
2174 PD_T_CC_DEBOUNCE);
2175 else if (tcpm_port_is_source(port))
2176 tcpm_set_state(port,
2177 tcpm_try_snk(port) ? SNK_TRY
2178 : SRC_ATTACHED,
2179 PD_T_CC_DEBOUNCE);
2180 break;
2181
2182 case SNK_TRY:
2183 port->try_snk_count++;
2184 /*
2185 * Requirements:
2186 * - Do not drive vconn or vbus
2187 * - Terminate CC pins (both) to Rd
2188 * Action:
2189 * - Wait for tDRPTry (PD_T_DRP_TRY).
2190 * Until then, ignore any state changes.
2191 */
2192 tcpm_set_cc(port, TYPEC_CC_RD);
2193 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
2194 break;
2195 case SNK_TRY_WAIT:
a0a3e04e
BJS
2196 if (tcpm_port_is_sink(port)) {
2197 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
2198 } else {
2199 tcpm_set_state(port, SRC_TRYWAIT, 0);
2200 port->max_wait = 0;
2201 }
2202 break;
2203 case SNK_TRY_WAIT_DEBOUNCE:
2204 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
2205 PD_T_PD_DEBOUNCE);
2206 break;
2207 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
f0690a25
GR
2208 if (port->vbus_present && tcpm_port_is_sink(port)) {
2209 tcpm_set_state(port, SNK_ATTACHED, 0);
a0a3e04e
BJS
2210 } else {
2211 tcpm_set_state(port, SRC_TRYWAIT, 0);
02d5be46 2212 port->max_wait = 0;
f0690a25 2213 }
f0690a25
GR
2214 break;
2215 case SRC_TRYWAIT:
2216 tcpm_set_cc(port, tcpm_rp_cc(port));
02d5be46
BJS
2217 if (port->max_wait == 0) {
2218 port->max_wait = jiffies +
2219 msecs_to_jiffies(PD_T_DRP_TRY);
f0690a25
GR
2220 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
2221 PD_T_DRP_TRY);
02d5be46
BJS
2222 } else {
2223 if (time_is_after_jiffies(port->max_wait))
2224 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
2225 jiffies_to_msecs(port->max_wait -
2226 jiffies));
2227 else
2228 tcpm_set_state(port, SNK_UNATTACHED, 0);
2229 }
2230 break;
2231 case SRC_TRYWAIT_DEBOUNCE:
2232 tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
f0690a25
GR
2233 break;
2234 case SRC_TRYWAIT_UNATTACHED:
2235 tcpm_set_state(port, SNK_UNATTACHED, 0);
2236 break;
2237
2238 case SRC_ATTACHED:
2239 ret = tcpm_src_attach(port);
2240 tcpm_set_state(port, SRC_UNATTACHED,
2241 ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
2242 break;
2243 case SRC_STARTUP:
fce042f0
BJS
2244 opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
2245 typec_set_pwr_opmode(port->typec_port, opmode);
f0690a25
GR
2246 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2247 port->caps_count = 0;
2248 port->message_id = 0;
5fec4b54 2249 port->rx_msgid = -1;
f0690a25
GR
2250 port->explicit_contract = false;
2251 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2252 break;
2253 case SRC_SEND_CAPABILITIES:
2254 port->caps_count++;
2255 if (port->caps_count > PD_N_CAPS_COUNT) {
2256 tcpm_set_state(port, SRC_READY, 0);
2257 break;
2258 }
2259 ret = tcpm_pd_send_source_caps(port);
2260 if (ret < 0) {
2261 tcpm_set_state(port, SRC_SEND_CAPABILITIES,
2262 PD_T_SEND_SOURCE_CAP);
2263 } else {
2264 /*
2265 * Per standard, we should clear the reset counter here.
2266 * However, that can result in state machine hang-ups.
2267 * Reset it only in READY state to improve stability.
2268 */
2269 /* port->hard_reset_count = 0; */
2270 port->caps_count = 0;
2271 port->pd_capable = true;
2272 tcpm_set_state_cond(port, hard_reset_state(port),
2273 PD_T_SEND_SOURCE_CAP);
2274 }
2275 break;
2276 case SRC_NEGOTIATE_CAPABILITIES:
2277 ret = tcpm_pd_check_request(port);
2278 if (ret < 0) {
2279 tcpm_pd_send_control(port, PD_CTRL_REJECT);
2280 if (!port->explicit_contract) {
2281 tcpm_set_state(port,
2282 SRC_WAIT_NEW_CAPABILITIES, 0);
2283 } else {
2284 tcpm_set_state(port, SRC_READY, 0);
2285 }
2286 } else {
2287 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2288 tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
2289 PD_T_SRC_TRANSITION);
2290 }
2291 break;
2292 case SRC_TRANSITION_SUPPLY:
2293 /* XXX: regulator_set_voltage(vbus, ...) */
2294 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2295 port->explicit_contract = true;
2296 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
2297 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2298 tcpm_set_state_cond(port, SRC_READY, 0);
2299 break;
2300 case SRC_READY:
2301#if 1
2302 port->hard_reset_count = 0;
2303#endif
2304 port->try_src_count = 0;
2305
3113bf1a 2306 tcpm_swap_complete(port, 0);
f0690a25 2307 tcpm_typec_connect(port);
f0690a25
GR
2308 tcpm_check_send_discover(port);
2309 /*
2310 * 6.3.5
2311 * Sending ping messages is not necessary if
2312 * - the source operates at vSafe5V
2313 * or
2314 * - The system is not operating in PD mode
2315 * or
2316 * - Both partners are connected using a Type-C connector
f451ac9e
BJS
2317 *
2318 * There is no actual need to send PD messages since the local
2319 * port type-c and the spec does not clearly say whether PD is
2320 * possible when type-c is connected to Type-A/B
f0690a25 2321 */
f0690a25
GR
2322 break;
2323 case SRC_WAIT_NEW_CAPABILITIES:
2324 /* Nothing to do... */
2325 break;
2326
2327 /* SNK states */
2328 case SNK_UNATTACHED:
b17dd571
GR
2329 if (!port->non_pd_role_swap)
2330 tcpm_swap_complete(port, -ENOTCONN);
f0690a25
GR
2331 tcpm_snk_detach(port);
2332 if (tcpm_start_drp_toggling(port)) {
2333 tcpm_set_state(port, DRP_TOGGLING, 0);
2334 break;
2335 }
2336 tcpm_set_cc(port, TYPEC_CC_RD);
9b0ae699 2337 if (port->port_type == TYPEC_PORT_DRP)
f0690a25
GR
2338 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
2339 break;
2340 case SNK_ATTACH_WAIT:
2341 if ((port->cc1 == TYPEC_CC_OPEN &&
2342 port->cc2 != TYPEC_CC_OPEN) ||
2343 (port->cc1 != TYPEC_CC_OPEN &&
2344 port->cc2 == TYPEC_CC_OPEN))
2345 tcpm_set_state(port, SNK_DEBOUNCED,
2346 PD_T_CC_DEBOUNCE);
2347 else if (tcpm_port_is_disconnected(port))
2348 tcpm_set_state(port, SNK_UNATTACHED,
2349 PD_T_PD_DEBOUNCE);
2350 break;
2351 case SNK_DEBOUNCED:
2352 if (tcpm_port_is_disconnected(port))
2353 tcpm_set_state(port, SNK_UNATTACHED,
2354 PD_T_PD_DEBOUNCE);
2355 else if (port->vbus_present)
2356 tcpm_set_state(port,
2357 tcpm_try_src(port) ? SRC_TRY
2358 : SNK_ATTACHED,
2359 0);
2360 else
2361 /* Wait for VBUS, but not forever */
56277035 2362 tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
f0690a25
GR
2363 break;
2364
2365 case SRC_TRY:
2366 port->try_src_count++;
2367 tcpm_set_cc(port, tcpm_rp_cc(port));
131c7d12
BJS
2368 port->max_wait = 0;
2369 tcpm_set_state(port, SRC_TRY_WAIT, 0);
2370 break;
2371 case SRC_TRY_WAIT:
2372 if (port->max_wait == 0) {
2373 port->max_wait = jiffies +
2374 msecs_to_jiffies(PD_T_DRP_TRY);
2375 msecs = PD_T_DRP_TRY;
2376 } else {
2377 if (time_is_after_jiffies(port->max_wait))
2378 msecs = jiffies_to_msecs(port->max_wait -
2379 jiffies);
2380 else
2381 msecs = 0;
2382 }
2383 tcpm_set_state(port, SNK_TRYWAIT, msecs);
f0690a25
GR
2384 break;
2385 case SRC_TRY_DEBOUNCE:
2386 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
2387 break;
2388 case SNK_TRYWAIT:
2389 tcpm_set_cc(port, TYPEC_CC_RD);
af450ebb 2390 tcpm_set_state(port, SNK_TRYWAIT_VBUS, PD_T_CC_DEBOUNCE);
f0690a25 2391 break;
af450ebb
BJS
2392 case SNK_TRYWAIT_VBUS:
2393 /*
2394 * TCPM stays in this state indefinitely until VBUS
2395 * is detected as long as Rp is not detected for
2396 * more than a time period of tPDDebounce.
2397 */
2398 if (port->vbus_present && tcpm_port_is_sink(port)) {
f0690a25
GR
2399 tcpm_set_state(port, SNK_ATTACHED, 0);
2400 break;
2401 }
af450ebb
BJS
2402 if (!tcpm_port_is_sink(port))
2403 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
f0690a25 2404 break;
af450ebb
BJS
2405 case SNK_TRYWAIT_DEBOUNCE:
2406 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
f0690a25 2407 break;
f0690a25
GR
2408 case SNK_ATTACHED:
2409 ret = tcpm_snk_attach(port);
2410 if (ret < 0)
2411 tcpm_set_state(port, SNK_UNATTACHED, 0);
2412 else
2413 tcpm_set_state(port, SNK_STARTUP, 0);
2414 break;
2415 case SNK_STARTUP:
fce042f0
BJS
2416 opmode = tcpm_get_pwr_opmode(port->polarity ?
2417 port->cc2 : port->cc1);
2418 typec_set_pwr_opmode(port->typec_port, opmode);
f0690a25
GR
2419 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2420 port->message_id = 0;
5fec4b54 2421 port->rx_msgid = -1;
f0690a25
GR
2422 port->explicit_contract = false;
2423 tcpm_set_state(port, SNK_DISCOVERY, 0);
2424 break;
2425 case SNK_DISCOVERY:
2426 if (port->vbus_present) {
2427 tcpm_set_current_limit(port,
2428 tcpm_get_current_limit(port),
2429 5000);
2430 tcpm_set_charge(port, true);
2431 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2432 break;
2433 }
2434 /*
2435 * For DRP, timeouts differ. Also, handling is supposed to be
2436 * different and much more complex (dead battery detection;
2437 * see USB power delivery specification, section 8.3.3.6.1.5.1).
2438 */
2439 tcpm_set_state(port, hard_reset_state(port),
9b0ae699 2440 port->port_type == TYPEC_PORT_DRP ?
f0690a25
GR
2441 PD_T_DB_DETECT : PD_T_NO_RESPONSE);
2442 break;
2443 case SNK_DISCOVERY_DEBOUNCE:
2444 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
2445 PD_T_CC_DEBOUNCE);
2446 break;
2447 case SNK_DISCOVERY_DEBOUNCE_DONE:
2448 if (!tcpm_port_is_disconnected(port) &&
2449 tcpm_port_is_sink(port) &&
2450 time_is_after_jiffies(port->delayed_runtime)) {
2451 tcpm_set_state(port, SNK_DISCOVERY,
2452 port->delayed_runtime - jiffies);
2453 break;
2454 }
2455 tcpm_set_state(port, unattached_state(port), 0);
2456 break;
2457 case SNK_WAIT_CAPABILITIES:
2458 ret = port->tcpc->set_pd_rx(port->tcpc, true);
2459 if (ret < 0) {
2460 tcpm_set_state(port, SNK_READY, 0);
2461 break;
2462 }
2463 /*
2464 * If VBUS has never been low, and we time out waiting
2465 * for source cap, try a soft reset first, in case we
2466 * were already in a stable contract before this boot.
2467 * Do this only once.
2468 */
2469 if (port->vbus_never_low) {
2470 port->vbus_never_low = false;
2471 tcpm_set_state(port, SOFT_RESET_SEND,
2472 PD_T_SINK_WAIT_CAP);
2473 } else {
2474 tcpm_set_state(port, hard_reset_state(port),
2475 PD_T_SINK_WAIT_CAP);
2476 }
2477 break;
2478 case SNK_NEGOTIATE_CAPABILITIES:
2479 port->pd_capable = true;
2480 port->hard_reset_count = 0;
2481 ret = tcpm_pd_send_request(port);
2482 if (ret < 0) {
2483 /* Let the Source send capabilities again. */
2484 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2485 } else {
2486 tcpm_set_state_cond(port, hard_reset_state(port),
2487 PD_T_SENDER_RESPONSE);
2488 }
2489 break;
2490 case SNK_TRANSITION_SINK:
2491 case SNK_TRANSITION_SINK_VBUS:
2492 tcpm_set_state(port, hard_reset_state(port),
2493 PD_T_PS_TRANSITION);
2494 break;
2495 case SNK_READY:
2496 port->try_snk_count = 0;
8bf05746
BJS
2497 if (port->explicit_contract) {
2498 typec_set_pwr_opmode(port->typec_port,
2499 TYPEC_PWR_MODE_PD);
2500 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2501 }
f0690a25 2502
3113bf1a 2503 tcpm_swap_complete(port, 0);
f0690a25 2504 tcpm_typec_connect(port);
f0690a25
GR
2505 tcpm_check_send_discover(port);
2506 break;
2507
2508 /* Accessory states */
2509 case ACC_UNATTACHED:
2510 tcpm_acc_detach(port);
2511 tcpm_set_state(port, SRC_UNATTACHED, 0);
2512 break;
2513 case DEBUG_ACC_ATTACHED:
2514 case AUDIO_ACC_ATTACHED:
2515 ret = tcpm_acc_attach(port);
2516 if (ret < 0)
2517 tcpm_set_state(port, ACC_UNATTACHED, 0);
2518 break;
2519 case AUDIO_ACC_DEBOUNCE:
2520 tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
2521 break;
2522
2523 /* Hard_Reset states */
2524 case HARD_RESET_SEND:
2525 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
2526 tcpm_set_state(port, HARD_RESET_START, 0);
2527 break;
2528 case HARD_RESET_START:
2529 port->hard_reset_count++;
2530 port->tcpc->set_pd_rx(port->tcpc, false);
2531 tcpm_unregister_altmodes(port);
2532 port->send_discover = true;
2533 if (port->pwr_role == TYPEC_SOURCE)
2534 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
2535 PD_T_PS_HARD_RESET);
2536 else
2537 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
2538 break;
2539 case SRC_HARD_RESET_VBUS_OFF:
2540 tcpm_set_vconn(port, true);
2541 tcpm_set_vbus(port, false);
2542 tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
2543 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
2544 break;
2545 case SRC_HARD_RESET_VBUS_ON:
2546 tcpm_set_vbus(port, true);
2547 port->tcpc->set_pd_rx(port->tcpc, true);
2548 tcpm_set_attached_state(port, true);
2549 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
2550 break;
2551 case SNK_HARD_RESET_SINK_OFF:
2552 tcpm_set_vconn(port, false);
2553 tcpm_set_charge(port, false);
2554 tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
2555 /*
2556 * VBUS may or may not toggle, depending on the adapter.
2557 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
2558 * directly after timeout.
2559 */
2560 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
2561 break;
2562 case SNK_HARD_RESET_WAIT_VBUS:
2563 /* Assume we're disconnected if VBUS doesn't come back. */
2564 tcpm_set_state(port, SNK_UNATTACHED,
2565 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
2566 break;
2567 case SNK_HARD_RESET_SINK_ON:
2568 /* Note: There is no guarantee that VBUS is on in this state */
2569 /*
2570 * XXX:
2571 * The specification suggests that dual mode ports in sink
2572 * mode should transition to state PE_SRC_Transition_to_default.
2573 * See USB power delivery specification chapter 8.3.3.6.1.3.
 2574 * This would mean to
2575 * - turn off VCONN, reset power supply
2576 * - request hardware reset
2577 * - turn on VCONN
2578 * - Transition to state PE_Src_Startup
2579 * SNK only ports shall transition to state Snk_Startup
2580 * (see chapter 8.3.3.3.8).
2581 * Similar, dual-mode ports in source mode should transition
2582 * to PE_SNK_Transition_to_default.
2583 */
2584 tcpm_set_attached_state(port, true);
2585 tcpm_set_state(port, SNK_STARTUP, 0);
2586 break;
2587
2588 /* Soft_Reset states */
2589 case SOFT_RESET:
2590 port->message_id = 0;
5fec4b54 2591 port->rx_msgid = -1;
f0690a25
GR
2592 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2593 if (port->pwr_role == TYPEC_SOURCE)
2594 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2595 else
2596 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2597 break;
2598 case SOFT_RESET_SEND:
2599 port->message_id = 0;
5fec4b54 2600 port->rx_msgid = -1;
f0690a25
GR
2601 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
2602 tcpm_set_state_cond(port, hard_reset_state(port), 0);
2603 else
2604 tcpm_set_state_cond(port, hard_reset_state(port),
2605 PD_T_SENDER_RESPONSE);
2606 break;
2607
2608 /* DR_Swap states */
2609 case DR_SWAP_SEND:
2610 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
2611 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
2612 PD_T_SENDER_RESPONSE);
2613 break;
2614 case DR_SWAP_ACCEPT:
2615 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2616 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
2617 break;
2618 case DR_SWAP_SEND_TIMEOUT:
2619 tcpm_swap_complete(port, -ETIMEDOUT);
2620 tcpm_set_state(port, ready_state(port), 0);
2621 break;
2622 case DR_SWAP_CHANGE_DR:
2623 if (port->data_role == TYPEC_HOST) {
2624 tcpm_unregister_altmodes(port);
2625 tcpm_set_roles(port, true, port->pwr_role,
2626 TYPEC_DEVICE);
2627 } else {
2628 tcpm_set_roles(port, true, port->pwr_role,
2629 TYPEC_HOST);
2630 port->send_discover = true;
2631 }
f0690a25
GR
2632 tcpm_set_state(port, ready_state(port), 0);
2633 break;
2634
2635 /* PR_Swap states */
2636 case PR_SWAP_ACCEPT:
2637 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2638 tcpm_set_state(port, PR_SWAP_START, 0);
2639 break;
2640 case PR_SWAP_SEND:
2641 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
2642 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
2643 PD_T_SENDER_RESPONSE);
2644 break;
2645 case PR_SWAP_SEND_TIMEOUT:
2646 tcpm_swap_complete(port, -ETIMEDOUT);
2647 tcpm_set_state(port, ready_state(port), 0);
2648 break;
2649 case PR_SWAP_START:
2650 if (port->pwr_role == TYPEC_SOURCE)
2651 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
2652 PD_T_SRC_TRANSITION);
2653 else
2654 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
2655 break;
2656 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
2657 tcpm_set_vbus(port, false);
2658 port->explicit_contract = false;
b965b631 2659 /* allow time for Vbus discharge, must be < tSrcSwapStdby */
f0690a25 2660 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
b965b631 2661 PD_T_SRCSWAPSTDBY);
f0690a25
GR
2662 break;
2663 case PR_SWAP_SRC_SNK_SOURCE_OFF:
2664 tcpm_set_cc(port, TYPEC_CC_RD);
b965b631
BJS
2665 /* allow CC debounce */
2666 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
2667 PD_T_CC_DEBOUNCE);
2668 break;
2669 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
050161ea
GR
2670 /*
2671 * USB-PD standard, 6.2.1.4, Port Power Role:
2672 * "During the Power Role Swap Sequence, for the initial Source
2673 * Port, the Port Power Role field shall be set to Sink in the
2674 * PS_RDY Message indicating that the initial Source’s power
2675 * supply is turned off"
2676 */
2677 tcpm_set_pwr_role(port, TYPEC_SINK);
f0690a25
GR
2678 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
2679 tcpm_set_state(port, ERROR_RECOVERY, 0);
2680 break;
2681 }
2682 tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
2683 break;
2684 case PR_SWAP_SRC_SNK_SINK_ON:
f0690a25
GR
2685 tcpm_set_state(port, SNK_STARTUP, 0);
2686 break;
2687 case PR_SWAP_SNK_SRC_SINK_OFF:
2688 tcpm_set_charge(port, false);
2689 tcpm_set_state(port, hard_reset_state(port),
2690 PD_T_PS_SOURCE_OFF);
2691 break;
2692 case PR_SWAP_SNK_SRC_SOURCE_ON:
2693 tcpm_set_cc(port, tcpm_rp_cc(port));
2694 tcpm_set_vbus(port, true);
b965b631
BJS
2695 /*
2696 * allow time VBUS ramp-up, must be < tNewSrc
2697 * Also, this window overlaps with CC debounce as well.
2698 * So, Wait for the max of two which is PD_T_NEWSRC
2699 */
2700 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
2701 PD_T_NEWSRC);
2702 break;
2703 case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
050161ea
GR
2704 /*
2705 * USB PD standard, 6.2.1.4:
2706 * "Subsequent Messages initiated by the Policy Engine,
2707 * such as the PS_RDY Message sent to indicate that Vbus
2708 * is ready, will have the Port Power Role field set to
2709 * Source."
2710 */
f0690a25 2711 tcpm_set_pwr_role(port, TYPEC_SOURCE);
050161ea 2712 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
f0690a25
GR
2713 tcpm_set_state(port, SRC_STARTUP, 0);
2714 break;
2715
2716 case VCONN_SWAP_ACCEPT:
2717 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2718 tcpm_set_state(port, VCONN_SWAP_START, 0);
2719 break;
2720 case VCONN_SWAP_SEND:
2721 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
2722 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
2723 PD_T_SENDER_RESPONSE);
2724 break;
2725 case VCONN_SWAP_SEND_TIMEOUT:
2726 tcpm_swap_complete(port, -ETIMEDOUT);
2727 tcpm_set_state(port, ready_state(port), 0);
2728 break;
2729 case VCONN_SWAP_START:
2730 if (port->vconn_role == TYPEC_SOURCE)
2731 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
2732 else
2733 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
2734 break;
2735 case VCONN_SWAP_WAIT_FOR_VCONN:
2736 tcpm_set_state(port, hard_reset_state(port),
2737 PD_T_VCONN_SOURCE_ON);
2738 break;
2739 case VCONN_SWAP_TURN_ON_VCONN:
2740 tcpm_set_vconn(port, true);
2741 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
f0690a25
GR
2742 tcpm_set_state(port, ready_state(port), 0);
2743 break;
2744 case VCONN_SWAP_TURN_OFF_VCONN:
2745 tcpm_set_vconn(port, false);
f0690a25
GR
2746 tcpm_set_state(port, ready_state(port), 0);
2747 break;
2748
2749 case DR_SWAP_CANCEL:
2750 case PR_SWAP_CANCEL:
2751 case VCONN_SWAP_CANCEL:
2752 tcpm_swap_complete(port, port->swap_status);
2753 if (port->pwr_role == TYPEC_SOURCE)
2754 tcpm_set_state(port, SRC_READY, 0);
2755 else
2756 tcpm_set_state(port, SNK_READY, 0);
2757 break;
2758
2759 case BIST_RX:
2760 switch (BDO_MODE_MASK(port->bist_request)) {
2761 case BDO_MODE_CARRIER2:
2762 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
2763 break;
2764 default:
2765 break;
2766 }
2767 /* Always switch to unattached state */
2768 tcpm_set_state(port, unattached_state(port), 0);
2769 break;
2770 case ERROR_RECOVERY:
2771 tcpm_swap_complete(port, -EPROTO);
b17dd571
GR
2772 tcpm_set_state(port, PORT_RESET, 0);
2773 break;
2774 case PORT_RESET:
f0690a25 2775 tcpm_reset_port(port);
f0690a25 2776 tcpm_set_cc(port, TYPEC_CC_OPEN);
b17dd571 2777 tcpm_set_state(port, PORT_RESET_WAIT_OFF,
f0690a25
GR
2778 PD_T_ERROR_RECOVERY);
2779 break;
b17dd571 2780 case PORT_RESET_WAIT_OFF:
f0690a25
GR
2781 tcpm_set_state(port,
2782 tcpm_default_state(port),
2783 port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
2784 break;
2785 default:
2786 WARN(1, "Unexpected port state %d\n", port->state);
2787 break;
2788 }
2789}
2790
/*
 * Workqueue handler that drives the TCPM port state machine.
 *
 * Runs with port->lock held for the whole pass.  It first flushes any
 * queued PD message, then applies a pending delayed state transition,
 * and finally iterates run_state_machine() until the state stops
 * changing or the next change is a delayed one (which will re-queue
 * this work when its timer fires).
 */
static void tcpm_state_machine_work(struct work_struct *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port,
					      state_machine.work);
	enum tcpm_state prev_state;

	mutex_lock(&port->lock);
	port->state_machine_running = true;

	/*
	 * A successfully sent queued message ends this pass; the send
	 * path is responsible for any follow-up work.
	 */
	if (port->queued_message && tcpm_send_queued_message(port))
		goto done;

	/* If we were queued due to a delayed state change, update it now */
	if (port->delayed_state) {
		tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
			 tcpm_states[port->state],
			 tcpm_states[port->delayed_state], port->delay_ms);
		port->prev_state = port->state;
		port->state = port->delayed_state;
		port->delayed_state = INVALID_STATE;
	}

	/*
	 * Continue running as long as we have (non-delayed) state changes
	 * to make.
	 */
	do {
		prev_state = port->state;
		run_state_machine(port);
		if (port->queued_message)
			tcpm_send_queued_message(port);
	} while (port->state != prev_state && !port->delayed_state);

done:
	port->state_machine_running = false;
	mutex_unlock(&port->lock);
}
2828
/*
 * Handle a CC line status change.
 *
 * Records the new CC1/CC2 values on the port and, based on the current
 * state machine state, schedules the appropriate transition via
 * tcpm_set_state().  Called from tcpm_pd_event_handler() with
 * port->lock held (and from tcpm_init(); NOTE(review): callers are
 * presumed to hold port->lock - confirm before adding new call sites).
 */
static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
			    enum typec_cc_status cc2)
{
	enum typec_cc_status old_cc1, old_cc2;
	enum tcpm_state new_state;

	old_cc1 = port->cc1;
	old_cc2 = port->cc2;
	port->cc1 = cc1;
	port->cc2 = cc2;

	tcpm_log_force(port,
		       "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
		       old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
		       port->polarity,
		       tcpm_port_is_disconnected(port) ? "disconnected"
						       : "connected");

	switch (port->state) {
	case DRP_TOGGLING:
		/* Toggling resolved: attach as source (also for debug/audio
		 * accessories) or as sink, depending on what was detected. */
		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
		    tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
		else if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
		break;
	case SRC_UNATTACHED:
	case ACC_UNATTACHED:
		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
		    tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
		break;
	case SRC_ATTACH_WAIT:
		if (tcpm_port_is_disconnected(port) ||
		    tcpm_port_is_audio_detached(port))
			tcpm_set_state(port, SRC_UNATTACHED, 0);
		else if (cc1 != old_cc1 || cc2 != old_cc2)
			/* CC changed again: restart the debounce period */
			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
		break;
	case SRC_ATTACHED:
	case SRC_SEND_CAPABILITIES:
	case SRC_READY:
		if (tcpm_port_is_disconnected(port) ||
		    !tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_UNATTACHED, 0);
		break;
	case SNK_UNATTACHED:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
		break;
	case SNK_ATTACH_WAIT:
		/* Exactly one CC pin open -> partner attached on the other */
		if ((port->cc1 == TYPEC_CC_OPEN &&
		     port->cc2 != TYPEC_CC_OPEN) ||
		    (port->cc1 != TYPEC_CC_OPEN &&
		     port->cc2 == TYPEC_CC_OPEN))
			new_state = SNK_DEBOUNCED;
		else if (tcpm_port_is_disconnected(port))
			new_state = SNK_UNATTACHED;
		else
			break;
		/* Restart debounce only if the target actually changed */
		if (new_state != port->delayed_state)
			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
		break;
	case SNK_DEBOUNCED:
		if (tcpm_port_is_disconnected(port))
			new_state = SNK_UNATTACHED;
		else if (port->vbus_present)
			new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
		else
			new_state = SNK_UNATTACHED;
		if (new_state != port->delayed_state)
			tcpm_set_state(port, SNK_DEBOUNCED, 0);
		break;
	case SNK_READY:
		if (tcpm_port_is_disconnected(port))
			tcpm_set_state(port, unattached_state(port), 0);
		else if (!port->pd_capable &&
			 (cc1 != old_cc1 || cc2 != old_cc2))
			/* Non-PD partner: re-read the Rp-advertised limit */
			tcpm_set_current_limit(port,
					       tcpm_get_current_limit(port),
					       5000);
		break;

	case AUDIO_ACC_ATTACHED:
		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
			tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
		break;
	case AUDIO_ACC_DEBOUNCE:
		if (tcpm_port_is_audio(port))
			tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
		break;

	case DEBUG_ACC_ATTACHED:
		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
			tcpm_set_state(port, ACC_UNATTACHED, 0);
		break;

	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;

	case SNK_DISCOVERY:
		/* CC line is unstable, wait for debounce */
		if (tcpm_port_is_disconnected(port))
			tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
		break;
	case SNK_DISCOVERY_DEBOUNCE:
		break;

	case SRC_TRYWAIT:
		/* Hand over to state machine if needed */
		if (!port->vbus_present && tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
		break;
	case SRC_TRYWAIT_DEBOUNCE:
		if (port->vbus_present || !tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		if (!tcpm_port_is_sink(port)) {
			port->max_wait = 0;
			tcpm_set_state(port, SRC_TRYWAIT, 0);
		}
		break;
	case SRC_TRY_WAIT:
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
		break;
	case SRC_TRY_DEBOUNCE:
		tcpm_set_state(port, SRC_TRY_WAIT, 0);
		break;
	case SNK_TRYWAIT_DEBOUNCE:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
		break;
	case SNK_TRYWAIT_VBUS:
		if (!tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
		break;
	case SNK_TRYWAIT:
		/* Do nothing, waiting for tCCDebounce */
		break;
	case PR_SWAP_SNK_SRC_SINK_OFF:
	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
	case PR_SWAP_SRC_SNK_SOURCE_OFF:
	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
	case PR_SWAP_SNK_SRC_SOURCE_ON:
		/*
		 * CC state change is expected in PR_SWAP
		 * Ignore it.
		 */
		break;

	default:
		if (tcpm_port_is_disconnected(port))
			tcpm_set_state(port, unattached_state(port), 0);
		break;
	}
}
2988
/*
 * Handle a VBUS-on notification: record presence and advance the state
 * machine where the current state was waiting for VBUS.
 * Called from tcpm_pd_event_handler() with port->lock held.
 */
static void _tcpm_pd_vbus_on(struct tcpm_port *port)
{
	tcpm_log_force(port, "VBUS on");
	port->vbus_present = true;
	switch (port->state) {
	case SNK_TRANSITION_SINK_VBUS:
		port->explicit_contract = true;
		tcpm_set_state(port, SNK_READY, 0);
		break;
	case SNK_DISCOVERY:
		/* Re-enter SNK_DISCOVERY to take the vbus_present path */
		tcpm_set_state(port, SNK_DISCOVERY, 0);
		break;

	case SNK_DEBOUNCED:
		tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
							: SNK_ATTACHED,
			       0);
		break;
	case SNK_HARD_RESET_WAIT_VBUS:
		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
		break;
	case SRC_ATTACHED:
		tcpm_set_state(port, SRC_STARTUP, 0);
		break;
	case SRC_HARD_RESET_VBUS_ON:
		tcpm_set_state(port, SRC_STARTUP, 0);
		break;

	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;
	case SRC_TRYWAIT:
		/* Do nothing, waiting for Rd to be detected */
		break;
	case SRC_TRYWAIT_DEBOUNCE:
		tcpm_set_state(port, SRC_TRYWAIT, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to be done */
		break;
	case SNK_TRYWAIT:
		/* Do nothing, waiting for tCCDebounce */
		break;
	case SNK_TRYWAIT_VBUS:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACHED, 0);
		break;
	case SNK_TRYWAIT_DEBOUNCE:
		/* Do nothing, waiting for Rp */
		break;
	case SRC_TRY_WAIT:
	case SRC_TRY_DEBOUNCE:
		/* Do nothing, waiting for sink detection */
		break;
	default:
		break;
	}
}
3047
/*
 * Handle a VBUS-off notification: clear presence flags and advance the
 * state machine where the current state was waiting for VBUS removal.
 * Called from tcpm_pd_event_handler() with port->lock held.
 */
static void _tcpm_pd_vbus_off(struct tcpm_port *port)
{
	tcpm_log_force(port, "VBUS off");
	port->vbus_present = false;
	port->vbus_never_low = false;
	switch (port->state) {
	case SNK_HARD_RESET_SINK_OFF:
		tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
		break;
	case SRC_HARD_RESET_VBUS_OFF:
		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, 0);
		break;
	case HARD_RESET_SEND:
		break;

	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;
	case SRC_TRYWAIT:
		/* Hand over to state machine if needed */
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to be done */
		break;
	case SNK_TRYWAIT:
	case SNK_TRYWAIT_VBUS:
	case SNK_TRYWAIT_DEBOUNCE:
		break;

	case SNK_ATTACH_WAIT:
		tcpm_set_state(port, SNK_UNATTACHED, 0);
		break;

	case SNK_NEGOTIATE_CAPABILITIES:
		break;

	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
		break;

	case PR_SWAP_SNK_SRC_SINK_OFF:
		/* Do nothing, expected */
		break;

	case PORT_RESET_WAIT_OFF:
		tcpm_set_state(port, tcpm_default_state(port), 0);
		break;
	case SRC_TRY_WAIT:
	case SRC_TRY_DEBOUNCE:
		/* Do nothing, waiting for sink detection */
		break;
	default:
		/* An attached sink losing VBUS means the partner is gone */
		if (port->pwr_role == TYPEC_SINK &&
		    port->attached)
			tcpm_set_state(port, SNK_UNATTACHED, 0);
		break;
	}
}
3107
3108static void _tcpm_pd_hard_reset(struct tcpm_port *port)
3109{
3110 tcpm_log_force(port, "Received hard reset");
3111 /*
3112 * If we keep receiving hard reset requests, executing the hard reset
3113 * must have failed. Revert to error recovery if that happens.
3114 */
3115 tcpm_set_state(port,
3116 port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
3117 HARD_RESET_START : ERROR_RECOVERY,
3118 0);
3119}
3120
/*
 * Workqueue handler that drains queued PD events (reset, VBUS, CC).
 *
 * Events are posted from tcpm_cc_change()/tcpm_vbus_change()/
 * tcpm_pd_hard_reset() under pd_event_lock; this handler atomically
 * snapshots and clears the pending mask, then processes it with
 * port->lock held and pd_event_lock dropped (the handlers below may do
 * slow work).  The loop re-checks the mask so events posted while
 * handling are not lost.
 */
static void tcpm_pd_event_handler(struct work_struct *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port,
					      event_work);
	u32 events;

	mutex_lock(&port->lock);

	spin_lock(&port->pd_event_lock);
	while (port->pd_events) {
		/* Snapshot and clear under the spinlock */
		events = port->pd_events;
		port->pd_events = 0;
		spin_unlock(&port->pd_event_lock);
		if (events & TCPM_RESET_EVENT)
			_tcpm_pd_hard_reset(port);
		if (events & TCPM_VBUS_EVENT) {
			bool vbus;

			/* Read current VBUS level rather than trusting
			 * the (possibly stale) event */
			vbus = port->tcpc->get_vbus(port->tcpc);
			if (vbus)
				_tcpm_pd_vbus_on(port);
			else
				_tcpm_pd_vbus_off(port);
		}
		if (events & TCPM_CC_EVENT) {
			enum typec_cc_status cc1, cc2;

			if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
				_tcpm_cc_change(port, cc1, cc2);
		}
		spin_lock(&port->pd_event_lock);
	}
	spin_unlock(&port->pd_event_lock);
	mutex_unlock(&port->lock);
}
3156
/*
 * Notify TCPM that the CC pins changed.  Low-level driver API.
 * Queues a TCPM_CC_EVENT; the actual CC state is read back in
 * tcpm_pd_event_handler() in process context.
 */
void tcpm_cc_change(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_CC_EVENT;
	spin_unlock(&port->pd_event_lock);
	queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_cc_change);
3165
/*
 * Notify TCPM that VBUS changed.  Low-level driver API.
 * Queues a TCPM_VBUS_EVENT; the actual VBUS level is read back in
 * tcpm_pd_event_handler() in process context.
 */
void tcpm_vbus_change(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_VBUS_EVENT;
	spin_unlock(&port->pd_event_lock);
	queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_vbus_change);
3174
/*
 * Notify TCPM that a hard reset was received.  Low-level driver API.
 * Note the plain assignment (not |=): pending CC/VBUS events are
 * dropped -- presumably deliberate since a hard reset supersedes them;
 * confirm against tcpm_pd_event_handler() before changing.
 */
void tcpm_pd_hard_reset(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events = TCPM_RESET_EVENT;
	spin_unlock(&port->pd_event_lock);
	queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
3183
/*
 * Set the port data role (host/device), triggering a DR_Swap with a
 * PD-capable partner or a full port reset otherwise.
 *
 * Blocks until the swap completes, fails, or PD_ROLE_SWAP_TIMEOUT
 * expires.  Returns 0 on success, -EINVAL if the port is not DRP (or a
 * non-PD swap cannot work), -EAGAIN if the port is not in a ready
 * state, -ETIMEDOUT on timeout, else the swap status.
 *
 * swap_lock serializes concurrent role-swap requests; port->lock is
 * dropped before waiting so the state machine can run.
 */
static int tcpm_dr_set(const struct typec_capability *cap,
		       enum typec_data_role data)
{
	struct tcpm_port *port = typec_cap_to_tcpm(cap);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->port_type != TYPEC_PORT_DRP) {
		ret = -EINVAL;
		goto port_unlock;
	}
	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	/* Already in the requested role: nothing to do */
	if (port->data_role == data) {
		ret = 0;
		goto port_unlock;
	}

	/*
	 * XXX
	 * 6.3.9: If an alternate mode is active, a request to swap
	 * alternate modes shall trigger a port reset.
	 * Reject data role swap request in this case.
	 */

	if (!port->pd_capable) {
		/*
		 * If the partner is not PD capable, reset the port to
		 * trigger a role change. This can only work if a preferred
		 * role is configured, and if it matches the requested role.
		 */
		if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
		    port->try_role == port->pwr_role) {
			ret = -EINVAL;
			goto port_unlock;
		}
		port->non_pd_role_swap = true;
		tcpm_set_state(port, PORT_RESET, 0);
	} else {
		tcpm_set_state(port, DR_SWAP_SEND, 0);
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	/* Drop port->lock so the state machine can make progress */
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->swap_complete,
				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	port->non_pd_role_swap = false;
	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}
3251
3252static int tcpm_pr_set(const struct typec_capability *cap,
3253 enum typec_role role)
3254{
3255 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3256 int ret;
3257
3258 mutex_lock(&port->swap_lock);
3259 mutex_lock(&port->lock);
3260
9b0ae699 3261 if (port->port_type != TYPEC_PORT_DRP) {
f0690a25
GR
3262 ret = -EINVAL;
3263 goto port_unlock;
3264 }
3265 if (port->state != SRC_READY && port->state != SNK_READY) {
3266 ret = -EAGAIN;
3267 goto port_unlock;
3268 }
3269
3270 if (role == port->pwr_role) {
3271 ret = 0;
3272 goto port_unlock;
3273 }
3274
f0690a25
GR
3275 port->swap_status = 0;
3276 port->swap_pending = true;
3277 reinit_completion(&port->swap_complete);
3278 tcpm_set_state(port, PR_SWAP_SEND, 0);
3279 mutex_unlock(&port->lock);
3280
9adf9f9e
GR
3281 if (!wait_for_completion_timeout(&port->swap_complete,
3282 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3283 ret = -ETIMEDOUT;
3284 else
3285 ret = port->swap_status;
f0690a25 3286
f0690a25
GR
3287 goto swap_unlock;
3288
3289port_unlock:
3290 mutex_unlock(&port->lock);
3291swap_unlock:
3292 mutex_unlock(&port->swap_lock);
3293 return ret;
3294}
3295
3296static int tcpm_vconn_set(const struct typec_capability *cap,
3297 enum typec_role role)
3298{
3299 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3300 int ret;
3301
3302 mutex_lock(&port->swap_lock);
3303 mutex_lock(&port->lock);
3304
3305 if (port->state != SRC_READY && port->state != SNK_READY) {
3306 ret = -EAGAIN;
3307 goto port_unlock;
3308 }
3309
3310 if (role == port->vconn_role) {
3311 ret = 0;
3312 goto port_unlock;
3313 }
3314
3315 port->swap_status = 0;
3316 port->swap_pending = true;
3317 reinit_completion(&port->swap_complete);
3318 tcpm_set_state(port, VCONN_SWAP_SEND, 0);
3319 mutex_unlock(&port->lock);
3320
9adf9f9e
GR
3321 if (!wait_for_completion_timeout(&port->swap_complete,
3322 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3323 ret = -ETIMEDOUT;
3324 else
3325 ret = port->swap_status;
f0690a25 3326
f0690a25
GR
3327 goto swap_unlock;
3328
3329port_unlock:
3330 mutex_unlock(&port->lock);
3331swap_unlock:
3332 mutex_unlock(&port->swap_lock);
3333 return ret;
3334}
3335
3336static int tcpm_try_role(const struct typec_capability *cap, int role)
3337{
3338 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3339 struct tcpc_dev *tcpc = port->tcpc;
3340 int ret = 0;
3341
3342 mutex_lock(&port->lock);
3343 if (tcpc->try_role)
3344 ret = tcpc->try_role(tcpc, role);
3345 if (!ret && !tcpc->config->try_role_hw)
3346 port->try_role = role;
3347 port->try_src_count = 0;
3348 port->try_snk_count = 0;
3349 mutex_unlock(&port->lock);
3350
3351 return ret;
3352}
3353
3354static void tcpm_init(struct tcpm_port *port)
3355{
3356 enum typec_cc_status cc1, cc2;
3357
3358 port->tcpc->init(port->tcpc);
3359
3360 tcpm_reset_port(port);
3361
3362 /*
3363 * XXX
3364 * Should possibly wait for VBUS to settle if it was enabled locally
3365 * since tcpm_reset_port() will disable VBUS.
3366 */
3367 port->vbus_present = port->tcpc->get_vbus(port->tcpc);
3368 if (port->vbus_present)
3369 port->vbus_never_low = true;
3370
3371 tcpm_set_state(port, tcpm_default_state(port), 0);
3372
3373 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
3374 _tcpm_cc_change(port, cc1, cc2);
3375
3376 /*
3377 * Some adapters need a clean slate at startup, and won't recover
3378 * otherwise. So do not try to be fancy and force a clean disconnect.
3379 */
b17dd571 3380 tcpm_set_state(port, PORT_RESET, 0);
f0690a25
GR
3381}
3382
9b0ae699
BJS
3383static int tcpm_port_type_set(const struct typec_capability *cap,
3384 enum typec_port_type type)
3385{
3386 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3387
3388 mutex_lock(&port->lock);
3389 if (type == port->port_type)
3390 goto port_unlock;
3391
3392 port->port_type = type;
3393
3394 if (!port->connected) {
3395 tcpm_set_state(port, PORT_RESET, 0);
3396 } else if (type == TYPEC_PORT_UFP) {
3397 if (!(port->pwr_role == TYPEC_SINK &&
3398 port->data_role == TYPEC_DEVICE))
3399 tcpm_set_state(port, PORT_RESET, 0);
3400 } else if (type == TYPEC_PORT_DFP) {
3401 if (!(port->pwr_role == TYPEC_SOURCE &&
3402 port->data_role == TYPEC_HOST))
3403 tcpm_set_state(port, PORT_RESET, 0);
3404 }
3405
3406port_unlock:
3407 mutex_unlock(&port->lock);
3408 return 0;
3409}
3410
f0690a25
GR
3411void tcpm_tcpc_reset(struct tcpm_port *port)
3412{
3413 mutex_lock(&port->lock);
3414 /* XXX: Maintain PD connection if possible? */
3415 tcpm_init(port);
3416 mutex_unlock(&port->lock);
3417}
3418EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
3419
3420static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
3421 unsigned int nr_pdo)
3422{
3423 unsigned int i;
3424
3425 if (nr_pdo > PDO_MAX_OBJECTS)
3426 nr_pdo = PDO_MAX_OBJECTS;
3427
3428 for (i = 0; i < nr_pdo; i++)
3429 dest_pdo[i] = src_pdo[i];
3430
3431 return nr_pdo;
3432}
3433
193a6801
GR
3434static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
3435 unsigned int nr_vdo)
3436{
3437 unsigned int i;
3438
3439 if (nr_vdo > VDO_MAX_OBJECTS)
3440 nr_vdo = VDO_MAX_OBJECTS;
3441
3442 for (i = 0; i < nr_vdo; i++)
3443 dest_vdo[i] = src_vdo[i];
3444
3445 return nr_vdo;
3446}
3447
f0690a25
GR
3448void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
3449 unsigned int nr_pdo)
3450{
3451 mutex_lock(&port->lock);
3452 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, pdo, nr_pdo);
3453 switch (port->state) {
3454 case SRC_UNATTACHED:
3455 case SRC_ATTACH_WAIT:
3456 case SRC_TRYWAIT:
3457 tcpm_set_cc(port, tcpm_rp_cc(port));
3458 break;
3459 case SRC_SEND_CAPABILITIES:
3460 case SRC_NEGOTIATE_CAPABILITIES:
3461 case SRC_READY:
3462 case SRC_WAIT_NEW_CAPABILITIES:
3463 tcpm_set_cc(port, tcpm_rp_cc(port));
3464 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
3465 break;
3466 default:
3467 break;
3468 }
3469 mutex_unlock(&port->lock);
3470}
3471EXPORT_SYMBOL_GPL(tcpm_update_source_capabilities);
3472
3473void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
3474 unsigned int nr_pdo,
3475 unsigned int max_snk_mv,
3476 unsigned int max_snk_ma,
3477 unsigned int max_snk_mw,
3478 unsigned int operating_snk_mw)
3479{
3480 mutex_lock(&port->lock);
3481 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo);
3482 port->max_snk_mv = max_snk_mv;
3483 port->max_snk_ma = max_snk_ma;
3484 port->max_snk_mw = max_snk_mw;
3485 port->operating_snk_mw = operating_snk_mw;
3486
3487 switch (port->state) {
3488 case SNK_NEGOTIATE_CAPABILITIES:
3489 case SNK_READY:
3490 case SNK_TRANSITION_SINK:
3491 case SNK_TRANSITION_SINK_VBUS:
3492 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
3493 break;
3494 default:
3495 break;
3496 }
3497 mutex_unlock(&port->lock);
3498}
3499EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
3500
3501struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
3502{
3503 struct tcpm_port *port;
3504 int i, err;
3505
3506 if (!dev || !tcpc || !tcpc->config ||
3507 !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
3508 !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
3509 !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
3510 return ERR_PTR(-EINVAL);
3511
3512 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
3513 if (!port)
3514 return ERR_PTR(-ENOMEM);
3515
3516 port->dev = dev;
3517 port->tcpc = tcpc;
3518
3519 mutex_init(&port->lock);
3520 mutex_init(&port->swap_lock);
3521
3522 port->wq = create_singlethread_workqueue(dev_name(dev));
3523 if (!port->wq)
3524 return ERR_PTR(-ENOMEM);
3525 INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work);
3526 INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work);
3527 INIT_WORK(&port->event_work, tcpm_pd_event_handler);
3528
3529 spin_lock_init(&port->pd_event_lock);
3530
3531 init_completion(&port->tx_complete);
3532 init_completion(&port->swap_complete);
3533
3534 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcpc->config->src_pdo,
3535 tcpc->config->nr_src_pdo);
3536 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
3537 tcpc->config->nr_snk_pdo);
193a6801
GR
3538 port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,
3539 tcpc->config->nr_snk_vdo);
f0690a25
GR
3540
3541 port->max_snk_mv = tcpc->config->max_snk_mv;
3542 port->max_snk_ma = tcpc->config->max_snk_ma;
3543 port->max_snk_mw = tcpc->config->max_snk_mw;
3544 port->operating_snk_mw = tcpc->config->operating_snk_mw;
3545 if (!tcpc->config->try_role_hw)
3546 port->try_role = tcpc->config->default_role;
3547 else
3548 port->try_role = TYPEC_NO_PREFERRED_ROLE;
3549
3550 port->typec_caps.prefer_role = tcpc->config->default_role;
3551 port->typec_caps.type = tcpc->config->type;
3552 port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
3553 port->typec_caps.pd_revision = 0x0200; /* USB-PD spec release 2.0 */
3554 port->typec_caps.dr_set = tcpm_dr_set;
3555 port->typec_caps.pr_set = tcpm_pr_set;
3556 port->typec_caps.vconn_set = tcpm_vconn_set;
3557 port->typec_caps.try_role = tcpm_try_role;
9b0ae699 3558 port->typec_caps.port_type_set = tcpm_port_type_set;
f0690a25
GR
3559
3560 port->partner_desc.identity = &port->partner_ident;
9b0ae699 3561 port->port_type = tcpc->config->type;
f0690a25
GR
3562
3563 port->typec_port = typec_register_port(port->dev, &port->typec_caps);
3564 if (!port->typec_port) {
3565 err = -ENOMEM;
3566 goto out_destroy_wq;
3567 }
3568
3569 if (tcpc->config->alt_modes) {
3c41dbde 3570 const struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;
f0690a25
GR
3571
3572 i = 0;
3573 while (paltmode->svid && i < ARRAY_SIZE(port->port_altmode)) {
3574 port->port_altmode[i] =
3575 typec_port_register_altmode(port->typec_port,
3576 paltmode);
3577 if (!port->port_altmode[i]) {
3578 tcpm_log(port,
3579 "%s: failed to register port alternate mode 0x%x",
3580 dev_name(dev), paltmode->svid);
3581 break;
3582 }
3583 i++;
3584 paltmode++;
3585 }
3586 }
3587
3588 tcpm_debugfs_init(port);
3589 mutex_lock(&port->lock);
3590 tcpm_init(port);
3591 mutex_unlock(&port->lock);
3592
3593 tcpm_log(port, "%s: registered", dev_name(dev));
3594 return port;
3595
3596out_destroy_wq:
3597 destroy_workqueue(port->wq);
3598 return ERR_PTR(err);
3599}
3600EXPORT_SYMBOL_GPL(tcpm_register_port);
3601
3602void tcpm_unregister_port(struct tcpm_port *port)
3603{
3604 int i;
3605
3606 for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
3607 typec_unregister_altmode(port->port_altmode[i]);
3608 typec_unregister_port(port->typec_port);
3609 tcpm_debugfs_exit(port);
3610 destroy_workqueue(port->wq);
3611}
3612EXPORT_SYMBOL_GPL(tcpm_unregister_port);
3613
3614MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
3615MODULE_DESCRIPTION("USB Type-C Port Manager");
3616MODULE_LICENSE("GPL");