staging: typec: tcpm: Set correct flags in PD request messages
drivers/staging/typec/tcpm.c
1/*
2 * Copyright 2015-2017 Google, Inc
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * USB Power Delivery protocol stack.
15 */
16
17#include <linux/completion.h>
18#include <linux/debugfs.h>
19#include <linux/device.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/mutex.h>
23#include <linux/proc_fs.h>
24#include <linux/sched/clock.h>
25#include <linux/seq_file.h>
26#include <linux/slab.h>
27#include <linux/spinlock.h>
28#include <linux/usb/typec.h>
29#include <linux/workqueue.h>
30
31#include "pd.h"
32#include "pd_vdo.h"
33#include "pd_bdo.h"
34#include "tcpm.h"
35
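/*
 * X-macro listing every TCPM state once, so that the tcpm_state enum and the
 * tcpm_states[] name table below are generated from the same list and cannot
 * drift apart.
 */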
36#define FOREACH_STATE(S) \
37 S(INVALID_STATE), \
38 S(DRP_TOGGLING), \
39 S(SRC_UNATTACHED), \
40 S(SRC_ATTACH_WAIT), \
41 S(SRC_ATTACHED), \
42 S(SRC_STARTUP), \
43 S(SRC_SEND_CAPABILITIES), \
44 S(SRC_NEGOTIATE_CAPABILITIES), \
45 S(SRC_TRANSITION_SUPPLY), \
46 S(SRC_READY), \
47 S(SRC_WAIT_NEW_CAPABILITIES), \
48 \
49 S(SNK_UNATTACHED), \
50 S(SNK_ATTACH_WAIT), \
51 S(SNK_DEBOUNCED), \
52 S(SNK_ATTACHED), \
53 S(SNK_STARTUP), \
54 S(SNK_DISCOVERY), \
55 S(SNK_DISCOVERY_DEBOUNCE), \
56 S(SNK_DISCOVERY_DEBOUNCE_DONE), \
57 S(SNK_WAIT_CAPABILITIES), \
58 S(SNK_NEGOTIATE_CAPABILITIES), \
59 S(SNK_TRANSITION_SINK), \
60 S(SNK_TRANSITION_SINK_VBUS), \
61 S(SNK_READY), \
62 \
63 S(ACC_UNATTACHED), \
64 S(DEBUG_ACC_ATTACHED), \
65 S(AUDIO_ACC_ATTACHED), \
66 S(AUDIO_ACC_DEBOUNCE), \
67 \
68 S(HARD_RESET_SEND), \
69 S(HARD_RESET_START), \
70 S(SRC_HARD_RESET_VBUS_OFF), \
71 S(SRC_HARD_RESET_VBUS_ON), \
72 S(SNK_HARD_RESET_SINK_OFF), \
73 S(SNK_HARD_RESET_WAIT_VBUS), \
74 S(SNK_HARD_RESET_SINK_ON), \
75 \
76 S(SOFT_RESET), \
77 S(SOFT_RESET_SEND), \
78 \
79 S(DR_SWAP_ACCEPT), \
80 S(DR_SWAP_SEND), \
81 S(DR_SWAP_SEND_TIMEOUT), \
82 S(DR_SWAP_CANCEL), \
83 S(DR_SWAP_CHANGE_DR), \
84 \
85 S(PR_SWAP_ACCEPT), \
86 S(PR_SWAP_SEND), \
87 S(PR_SWAP_SEND_TIMEOUT), \
88 S(PR_SWAP_CANCEL), \
89 S(PR_SWAP_START), \
90 S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \
91 S(PR_SWAP_SRC_SNK_SOURCE_OFF), \
92 S(PR_SWAP_SRC_SNK_SINK_ON), \
93 S(PR_SWAP_SNK_SRC_SINK_OFF), \
94 S(PR_SWAP_SNK_SRC_SOURCE_ON), \
95 \
96 S(VCONN_SWAP_ACCEPT), \
97 S(VCONN_SWAP_SEND), \
98 S(VCONN_SWAP_SEND_TIMEOUT), \
99 S(VCONN_SWAP_CANCEL), \
100 S(VCONN_SWAP_START), \
101 S(VCONN_SWAP_WAIT_FOR_VCONN), \
102 S(VCONN_SWAP_TURN_ON_VCONN), \
103 S(VCONN_SWAP_TURN_OFF_VCONN), \
104 \
105 S(SNK_TRY), \
106 S(SNK_TRY_WAIT), \
107 S(SRC_TRYWAIT), \
108 S(SRC_TRYWAIT_UNATTACHED), \
109 \
110 S(SRC_TRY), \
111 S(SRC_TRY_DEBOUNCE), \
112 S(SNK_TRYWAIT), \
113 S(SNK_TRYWAIT_DEBOUNCE), \
114 S(SNK_TRYWAIT_VBUS), \
115 S(BIST_RX), \
116 \
117 S(ERROR_RECOVERY), \
118 S(ERROR_RECOVERY_WAIT_OFF)
119
120#define GENERATE_ENUM(e) e
121#define GENERATE_STRING(s) #s
122
123enum tcpm_state {
124 FOREACH_STATE(GENERATE_ENUM)
125};
126
127static const char * const tcpm_states[] = {
128 FOREACH_STATE(GENERATE_STRING)
129};
130
131enum vdm_states {
132 VDM_STATE_ERR_BUSY = -3,
133 VDM_STATE_ERR_SEND = -2,
134 VDM_STATE_ERR_TMOUT = -1,
135 VDM_STATE_DONE = 0,
136 /* Anything >0 represents an active state */
137 VDM_STATE_READY = 1,
138 VDM_STATE_BUSY = 2,
139 VDM_STATE_WAIT_RSP_BUSY = 3,
140};
141
142enum pd_msg_request {
143 PD_MSG_NONE = 0,
144 PD_MSG_CTRL_REJECT,
145 PD_MSG_CTRL_WAIT,
146 PD_MSG_DATA_SINK_CAP,
147 PD_MSG_DATA_SOURCE_CAP,
148};
149
150/* Events from low level driver */
151
152#define TCPM_CC_EVENT BIT(0)
153#define TCPM_VBUS_EVENT BIT(1)
154#define TCPM_RESET_EVENT BIT(2)
155
156#define LOG_BUFFER_ENTRIES 1024
157#define LOG_BUFFER_ENTRY_SIZE 128
158
159/* Alternate mode support */
160
161#define SVID_DISCOVERY_MAX 16
162
163struct pd_mode_data {
164 int svid_index; /* current SVID index */
165 int nsvids;
166 u16 svids[SVID_DISCOVERY_MAX];
167 int altmodes; /* number of alternate modes */
168 struct typec_altmode_desc altmode_desc[SVID_DISCOVERY_MAX];
169};
170
171struct tcpm_port {
172 struct device *dev;
173
174 struct mutex lock; /* tcpm state machine lock */
175 struct workqueue_struct *wq;
176
177 struct typec_capability typec_caps;
178 struct typec_port *typec_port;
179
180 struct tcpc_dev *tcpc;
181
182 enum typec_role vconn_role;
183 enum typec_role pwr_role;
184 enum typec_data_role data_role;
185 enum typec_pwr_opmode pwr_opmode;
186
187 struct usb_pd_identity partner_ident;
188 struct typec_partner_desc partner_desc;
189 struct typec_partner *partner;
190
191 enum typec_cc_status cc_req;
192
193 enum typec_cc_status cc1;
194 enum typec_cc_status cc2;
195 enum typec_cc_polarity polarity;
196
197 bool attached;
198 bool connected;
199 bool vbus_present;
200 bool vbus_never_low;
201 bool vbus_source;
202 bool vbus_charge;
203
204 bool send_discover;
205 bool op_vsafe5v;
206
207 int try_role;
208 int try_snk_count;
209 int try_src_count;
210
211 enum pd_msg_request queued_message;
212
213 enum tcpm_state enter_state;
214 enum tcpm_state prev_state;
215 enum tcpm_state state;
216 enum tcpm_state delayed_state;
217 unsigned long delayed_runtime;
218 unsigned long delay_ms;
219
220 spinlock_t pd_event_lock;
221 u32 pd_events;
222
223 struct work_struct event_work;
224 struct delayed_work state_machine;
225 struct delayed_work vdm_state_machine;
226 bool state_machine_running;
227
228 struct completion tx_complete;
229 enum tcpm_transmit_status tx_status;
230
231 struct mutex swap_lock; /* swap command lock */
232 bool swap_pending;
233 struct completion swap_complete;
234 int swap_status;
235
236 unsigned int message_id;
237 unsigned int caps_count;
238 unsigned int hard_reset_count;
239 bool pd_capable;
240 bool explicit_contract;
 241	unsigned int rx_msgid;
242
243 /* Partner capabilities/requests */
244 u32 sink_request;
245 u32 source_caps[PDO_MAX_OBJECTS];
246 unsigned int nr_source_caps;
247 u32 sink_caps[PDO_MAX_OBJECTS];
248 unsigned int nr_sink_caps;
249
250 /* Local capabilities */
251 u32 src_pdo[PDO_MAX_OBJECTS];
252 unsigned int nr_src_pdo;
253 u32 snk_pdo[PDO_MAX_OBJECTS];
254 unsigned int nr_snk_pdo;
255
256 unsigned int max_snk_mv;
257 unsigned int max_snk_ma;
258 unsigned int max_snk_mw;
259 unsigned int operating_snk_mw;
260
261 /* Requested current / voltage */
262 u32 current_limit;
263 u32 supply_voltage;
264
265 u32 bist_request;
266
267 /* PD state for Vendor Defined Messages */
268 enum vdm_states vdm_state;
269 u32 vdm_retries;
270 /* next Vendor Defined Message to send */
271 u32 vdo_data[VDO_MAX_SIZE];
272 u8 vdo_count;
273 /* VDO to retry if UFP responder replied busy */
274 u32 vdo_retry;
275
276 /* Alternate mode data */
277
278 struct pd_mode_data mode_data;
279 struct typec_altmode *partner_altmode[SVID_DISCOVERY_MAX];
280 struct typec_altmode *port_altmode[SVID_DISCOVERY_MAX];
281
282#ifdef CONFIG_DEBUG_FS
283 struct dentry *dentry;
284 struct mutex logbuffer_lock; /* log buffer access lock */
285 int logbuffer_head;
286 int logbuffer_tail;
287 u8 *logbuffer[LOG_BUFFER_ENTRIES];
288#endif
289};
290
291struct pd_rx_event {
292 struct work_struct work;
293 struct tcpm_port *port;
294 struct pd_message msg;
295};
296
297#define tcpm_cc_is_sink(cc) \
298 ((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
299 (cc) == TYPEC_CC_RP_3_0)
300
301#define tcpm_port_is_sink(port) \
302 ((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \
303 (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1)))
304
305#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
306#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
307#define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
308
309#define tcpm_port_is_source(port) \
310 ((tcpm_cc_is_source((port)->cc1) && \
311 !tcpm_cc_is_source((port)->cc2)) || \
312 (tcpm_cc_is_source((port)->cc2) && \
313 !tcpm_cc_is_source((port)->cc1)))
314
315#define tcpm_port_is_debug(port) \
316 (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
317
318#define tcpm_port_is_audio(port) \
319 (tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
320
321#define tcpm_port_is_audio_detached(port) \
322 ((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
323 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
324
325#define tcpm_try_snk(port) \
326 ((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK)
327
328#define tcpm_try_src(port) \
329 ((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE)
330
331static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
332{
333 if (port->try_role == TYPEC_SINK)
334 return SNK_UNATTACHED;
335 else if (port->try_role == TYPEC_SOURCE)
336 return SRC_UNATTACHED;
337 else if (port->tcpc->config->default_role == TYPEC_SINK)
338 return SNK_UNATTACHED;
339 return SRC_UNATTACHED;
340}
341
342static inline
343struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap)
344{
345 return container_of(cap, struct tcpm_port, typec_caps);
346}
347
348static bool tcpm_port_is_disconnected(struct tcpm_port *port)
349{
350 return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
351 port->cc2 == TYPEC_CC_OPEN) ||
352 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
353 port->cc1 == TYPEC_CC_OPEN) ||
354 (port->polarity == TYPEC_POLARITY_CC2 &&
355 port->cc2 == TYPEC_CC_OPEN)));
356}
357
358/*
359 * Logging
360 */
361
362#ifdef CONFIG_DEBUG_FS
363
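/*
 * Log messages are kept in a ring of LOG_BUFFER_ENTRIES lazily allocated
 * entries; once the ring is full, the most recently written slot is replaced
 * with "overflow" instead of advancing over the tail.
 */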
364static bool tcpm_log_full(struct tcpm_port *port)
365{
366 return port->logbuffer_tail ==
367 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
368}
369
370static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
371{
372 char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
373 u64 ts_nsec = local_clock();
374 unsigned long rem_nsec;
375
376 if (!port->logbuffer[port->logbuffer_head]) {
377 port->logbuffer[port->logbuffer_head] =
378 kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
379 if (!port->logbuffer[port->logbuffer_head])
380 return;
381 }
382
383 vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
384
385 mutex_lock(&port->logbuffer_lock);
386
387 if (tcpm_log_full(port)) {
388 port->logbuffer_head = max(port->logbuffer_head - 1, 0);
389 strcpy(tmpbuffer, "overflow");
390 }
391
392 if (port->logbuffer_head < 0 ||
393 port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
394 dev_warn(port->dev,
395 "Bad log buffer index %d\n", port->logbuffer_head);
396 goto abort;
397 }
398
399 if (!port->logbuffer[port->logbuffer_head]) {
400 dev_warn(port->dev,
401 "Log buffer index %d is NULL\n", port->logbuffer_head);
402 goto abort;
403 }
404
405 rem_nsec = do_div(ts_nsec, 1000000000);
406 scnprintf(port->logbuffer[port->logbuffer_head],
407 LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
408 (unsigned long)ts_nsec, rem_nsec / 1000,
409 tmpbuffer);
410 port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
411
412abort:
413 mutex_unlock(&port->logbuffer_lock);
414}
415
416static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
417{
418 va_list args;
419
420 /* Do not log while disconnected and unattached */
421 if (tcpm_port_is_disconnected(port) &&
422 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
423 port->state == DRP_TOGGLING))
424 return;
425
426 va_start(args, fmt);
427 _tcpm_log(port, fmt, args);
428 va_end(args);
429}
430
431static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
432{
433 va_list args;
434
435 va_start(args, fmt);
436 _tcpm_log(port, fmt, args);
437 va_end(args);
438}
439
440static void tcpm_log_source_caps(struct tcpm_port *port)
441{
442 int i;
443
444 for (i = 0; i < port->nr_source_caps; i++) {
445 u32 pdo = port->source_caps[i];
446 enum pd_pdo_type type = pdo_type(pdo);
447 char msg[64];
448
449 switch (type) {
450 case PDO_TYPE_FIXED:
451 scnprintf(msg, sizeof(msg),
452 "%u mV, %u mA [%s%s%s%s%s%s]",
453 pdo_fixed_voltage(pdo),
454 pdo_max_current(pdo),
455 (pdo & PDO_FIXED_DUAL_ROLE) ?
456 "R" : "",
457 (pdo & PDO_FIXED_SUSPEND) ?
458 "S" : "",
459 (pdo & PDO_FIXED_HIGHER_CAP) ?
460 "H" : "",
461 (pdo & PDO_FIXED_USB_COMM) ?
462 "U" : "",
463 (pdo & PDO_FIXED_DATA_SWAP) ?
464 "D" : "",
465 (pdo & PDO_FIXED_EXTPOWER) ?
466 "E" : "");
467 break;
468 case PDO_TYPE_VAR:
469 scnprintf(msg, sizeof(msg),
470 "%u-%u mV, %u mA",
471 pdo_min_voltage(pdo),
472 pdo_max_voltage(pdo),
473 pdo_max_current(pdo));
474 break;
475 case PDO_TYPE_BATT:
476 scnprintf(msg, sizeof(msg),
477 "%u-%u mV, %u mW",
478 pdo_min_voltage(pdo),
479 pdo_max_voltage(pdo),
480 pdo_max_power(pdo));
481 break;
482 default:
483 strcpy(msg, "undefined");
484 break;
485 }
486 tcpm_log(port, " PDO %d: type %d, %s",
487 i, type, msg);
488 }
489}
490
491static int tcpm_seq_show(struct seq_file *s, void *v)
492{
493 struct tcpm_port *port = (struct tcpm_port *)s->private;
494 int tail;
495
496 mutex_lock(&port->logbuffer_lock);
497 tail = port->logbuffer_tail;
498 while (tail != port->logbuffer_head) {
499 seq_printf(s, "%s\n", port->logbuffer[tail]);
500 tail = (tail + 1) % LOG_BUFFER_ENTRIES;
501 }
502 if (!seq_has_overflowed(s))
503 port->logbuffer_tail = tail;
504 mutex_unlock(&port->logbuffer_lock);
505
506 return 0;
507}
508
509static int tcpm_debug_open(struct inode *inode, struct file *file)
510{
511 return single_open(file, tcpm_seq_show, inode->i_private);
512}
513
514static const struct file_operations tcpm_debug_operations = {
515 .open = tcpm_debug_open,
516 .llseek = seq_lseek,
517 .read = seq_read,
518 .release = single_release,
519};
520
521static struct dentry *rootdir;
522
523static int tcpm_debugfs_init(struct tcpm_port *port)
524{
525 mutex_init(&port->logbuffer_lock);
526 /* /sys/kernel/debug/tcpm/usbcX */
527 if (!rootdir) {
528 rootdir = debugfs_create_dir("tcpm", NULL);
529 if (!rootdir)
530 return -ENOMEM;
531 }
532
533 port->dentry = debugfs_create_file(dev_name(port->dev),
534 S_IFREG | 0444, rootdir,
535 port, &tcpm_debug_operations);
536
537 return 0;
538}
539
540static void tcpm_debugfs_exit(struct tcpm_port *port)
541{
542 debugfs_remove(port->dentry);
543}
544
545#else
546
547static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
548static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
549static void tcpm_log_source_caps(struct tcpm_port *port) { }
550static int tcpm_debugfs_init(const struct tcpm_port *port) { return 0; }
551static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
552
553#endif
554
555static int tcpm_pd_transmit(struct tcpm_port *port,
556 enum tcpm_transmit_type type,
557 const struct pd_message *msg)
558{
559 unsigned long timeout;
560 int ret;
561
562 if (msg)
563 tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
564 else
565 tcpm_log(port, "PD TX, type: %#x", type);
566
567 reinit_completion(&port->tx_complete);
568 ret = port->tcpc->pd_transmit(port->tcpc, type, msg);
569 if (ret < 0)
570 return ret;
571
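	/*
	 * Drop the port lock while waiting for the low-level driver to signal
	 * completion via tcpm_pd_transmit_complete(), so the wait does not
	 * block other work that needs the lock.
	 */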
572 mutex_unlock(&port->lock);
573 timeout = wait_for_completion_timeout(&port->tx_complete,
574 msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
575 mutex_lock(&port->lock);
576 if (!timeout)
577 return -ETIMEDOUT;
578
579 switch (port->tx_status) {
580 case TCPC_TX_SUCCESS:
581 port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
582 return 0;
583 case TCPC_TX_DISCARDED:
584 return -EAGAIN;
585 case TCPC_TX_FAILED:
586 default:
587 return -EIO;
588 }
589}
590
591void tcpm_pd_transmit_complete(struct tcpm_port *port,
592 enum tcpm_transmit_status status)
593{
594 tcpm_log(port, "PD TX complete, status: %u", status);
595 port->tx_status = status;
596 complete(&port->tx_complete);
597}
598EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
599
600static int tcpm_mux_set(struct tcpm_port *port, enum tcpc_mux_mode mode,
601 enum tcpc_usb_switch config)
602{
603 int ret = 0;
604
605 tcpm_log(port, "Requesting mux mode %d, config %d, polarity %d",
606 mode, config, port->polarity);
607
608 if (port->tcpc->mux)
609 ret = port->tcpc->mux->set(port->tcpc->mux, mode, config,
610 port->polarity);
611
612 return ret;
613}
614
615static int tcpm_set_polarity(struct tcpm_port *port,
616 enum typec_cc_polarity polarity)
617{
618 int ret;
619
620 tcpm_log(port, "polarity %d", polarity);
621
622 ret = port->tcpc->set_polarity(port->tcpc, polarity);
623 if (ret < 0)
624 return ret;
625
626 port->polarity = polarity;
627
628 return 0;
629}
630
631static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
632{
633 int ret;
634
635 tcpm_log(port, "vconn:=%d", enable);
636
637 ret = port->tcpc->set_vconn(port->tcpc, enable);
638 if (!ret) {
639 port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
640 typec_set_vconn_role(port->typec_port, port->vconn_role);
641 }
642
643 return ret;
644}
645
646static u32 tcpm_get_current_limit(struct tcpm_port *port)
647{
648 enum typec_cc_status cc;
649 u32 limit;
650
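	/*
	 * The source advertises its current limit through the Rp value seen
	 * on the active CC line; a return value of 0 means only default Rp is
	 * advertised.
	 */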
651 cc = port->polarity ? port->cc2 : port->cc1;
652 switch (cc) {
653 case TYPEC_CC_RP_1_5:
654 limit = 1500;
655 break;
656 case TYPEC_CC_RP_3_0:
657 limit = 3000;
658 break;
659 case TYPEC_CC_RP_DEF:
660 default:
661 limit = 0;
662 break;
663 }
664
665 return limit;
666}
667
668static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
669{
670 int ret = -EOPNOTSUPP;
671
672 tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
673
674 if (port->tcpc->set_current_limit)
675 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
676
677 return ret;
678}
679
680/*
681 * Determine RP value to set based on maximum current supported
682 * by a port if configured as source.
683 * Returns CC value to report to link partner.
684 */
685static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
686{
687 const u32 *src_pdo = port->src_pdo;
688 int nr_pdo = port->nr_src_pdo;
689 int i;
690
691 /*
692 * Search for first entry with matching voltage.
693 * It should report the maximum supported current.
694 */
695 for (i = 0; i < nr_pdo; i++) {
696 const u32 pdo = src_pdo[i];
697
698 if (pdo_type(pdo) == PDO_TYPE_FIXED &&
699 pdo_fixed_voltage(pdo) == 5000) {
700 unsigned int curr = pdo_max_current(pdo);
701
702 if (curr >= 3000)
703 return TYPEC_CC_RP_3_0;
704 else if (curr >= 1500)
705 return TYPEC_CC_RP_1_5;
706 return TYPEC_CC_RP_DEF;
707 }
708 }
709
710 return TYPEC_CC_RP_DEF;
711}
712
713static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
714{
715 return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
716 port->data_role);
717}
718
719static int tcpm_set_roles(struct tcpm_port *port, bool attached,
720 enum typec_role role, enum typec_data_role data)
721{
722 int ret;
723
724 if (data == TYPEC_HOST)
725 ret = tcpm_mux_set(port, TYPEC_MUX_USB,
726 TCPC_USB_SWITCH_CONNECT);
727 else
728 ret = tcpm_mux_set(port, TYPEC_MUX_NONE,
729 TCPC_USB_SWITCH_DISCONNECT);
730 if (ret < 0)
731 return ret;
732
733 ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
734 if (ret < 0)
735 return ret;
736
737 port->pwr_role = role;
738 port->data_role = data;
739 typec_set_data_role(port->typec_port, data);
740 typec_set_pwr_role(port->typec_port, role);
741
742 return 0;
743}
744
745static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
746{
747 int ret;
748
749 ret = port->tcpc->set_roles(port->tcpc, true, role,
750 port->data_role);
751 if (ret < 0)
752 return ret;
753
754 port->pwr_role = role;
755 typec_set_pwr_role(port->typec_port, role);
756
757 return 0;
758}
759
760static int tcpm_pd_send_source_caps(struct tcpm_port *port)
761{
762 struct pd_message msg;
763 int i;
764
765 memset(&msg, 0, sizeof(msg));
766 if (!port->nr_src_pdo) {
767 /* No source capabilities defined, sink only */
768 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
769 port->pwr_role,
770 port->data_role,
771 port->message_id, 0);
772 } else {
773 msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
774 port->pwr_role,
775 port->data_role,
776 port->message_id,
777 port->nr_src_pdo);
778 }
779 for (i = 0; i < port->nr_src_pdo; i++)
780 msg.payload[i] = cpu_to_le32(port->src_pdo[i]);
781
782 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
783}
784
785static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
786{
787 struct pd_message msg;
788 int i;
789
790 memset(&msg, 0, sizeof(msg));
791 if (!port->nr_snk_pdo) {
792 /* No sink capabilities defined, source only */
793 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
794 port->pwr_role,
795 port->data_role,
796 port->message_id, 0);
797 } else {
798 msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
799 port->pwr_role,
800 port->data_role,
801 port->message_id,
802 port->nr_snk_pdo);
803 }
804 for (i = 0; i < port->nr_snk_pdo; i++)
805 msg.payload[i] = cpu_to_le32(port->snk_pdo[i]);
806
807 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
808}
809
810static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
811 unsigned int delay_ms)
812{
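	/*
	 * Either remember the target in delayed_state and arm the state
	 * machine work with the requested delay, or switch states immediately
	 * and kick the work queue unless the state machine is already running.
	 */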
813 if (delay_ms) {
814 tcpm_log(port, "pending state change %s -> %s @ %u ms",
815 tcpm_states[port->state], tcpm_states[state],
816 delay_ms);
817 port->delayed_state = state;
818 mod_delayed_work(port->wq, &port->state_machine,
819 msecs_to_jiffies(delay_ms));
820 port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms);
821 port->delay_ms = delay_ms;
822 } else {
823 tcpm_log(port, "state change %s -> %s",
824 tcpm_states[port->state], tcpm_states[state]);
825 port->delayed_state = INVALID_STATE;
826 port->prev_state = port->state;
827 port->state = state;
828 /*
829 * Don't re-queue the state machine work item if we're currently
830 * in the state machine and we're immediately changing states.
831 * tcpm_state_machine_work() will continue running the state
832 * machine.
833 */
834 if (!port->state_machine_running)
835 mod_delayed_work(port->wq, &port->state_machine, 0);
836 }
837}
838
839static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
840 unsigned int delay_ms)
841{
842 if (port->enter_state == port->state)
843 tcpm_set_state(port, state, delay_ms);
844 else
845 tcpm_log(port,
846 "skipped %sstate change %s -> %s [%u ms], context state %s",
847 delay_ms ? "delayed " : "",
848 tcpm_states[port->state], tcpm_states[state],
849 delay_ms, tcpm_states[port->enter_state]);
850}
851
852static void tcpm_queue_message(struct tcpm_port *port,
853 enum pd_msg_request message)
854{
855 port->queued_message = message;
856 mod_delayed_work(port->wq, &port->state_machine, 0);
857}
858
859/*
860 * VDM/VDO handling functions
861 */
862static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
863 const u32 *data, int cnt)
864{
865 port->vdo_count = cnt + 1;
866 port->vdo_data[0] = header;
867 memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
868 /* Set ready, vdm state machine will actually send */
869 port->vdm_retries = 0;
870 port->vdm_state = VDM_STATE_READY;
871}
872
873static void svdm_consume_identity(struct tcpm_port *port, const __le32 *payload,
874 int cnt)
875{
876 u32 vdo = le32_to_cpu(payload[VDO_INDEX_IDH]);
877 u32 product = le32_to_cpu(payload[VDO_INDEX_PRODUCT]);
878
879 memset(&port->mode_data, 0, sizeof(port->mode_data));
880
881#if 0 /* Not really a match */
882 switch (PD_IDH_PTYPE(vdo)) {
883 case IDH_PTYPE_UNDEF:
884 port->partner.type = TYPEC_PARTNER_NONE; /* no longer exists */
885 break;
886 case IDH_PTYPE_HUB:
887 break;
888 case IDH_PTYPE_PERIPH:
889 break;
890 case IDH_PTYPE_PCABLE:
891 break;
892 case IDH_PTYPE_ACABLE:
893 break;
894 case IDH_PTYPE_AMA:
895 port->partner.type = TYPEC_PARTNER_ALTMODE;
896 break;
897 default:
898 break;
899 }
900#endif
901
902 port->partner_ident.id_header = vdo;
903 port->partner_ident.cert_stat = le32_to_cpu(payload[VDO_INDEX_CSTAT]);
904 port->partner_ident.product = product;
905
906 typec_partner_set_identity(port->partner);
907
908 tcpm_log(port, "Identity: %04x:%04x.%04x",
909 PD_IDH_VID(vdo),
910 PD_PRODUCT_PID(product), product & 0xffff);
911}
912
913static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
914 int cnt)
915{
916 struct pd_mode_data *pmdata = &port->mode_data;
917 int i;
918
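	/*
	 * Each Discover SVIDs response VDO packs two 16-bit SVIDs, upper half
	 * first; a zero SVID terminates the list.
	 */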
919 for (i = 1; i < cnt; i++) {
920 u32 p = le32_to_cpu(payload[i]);
921 u16 svid;
922
923 svid = (p >> 16) & 0xffff;
924 if (!svid)
925 return false;
926
927 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
928 goto abort;
929
930 pmdata->svids[pmdata->nsvids++] = svid;
931 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
932
933 svid = p & 0xffff;
934 if (!svid)
935 return false;
936
937 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
938 goto abort;
939
940 pmdata->svids[pmdata->nsvids++] = svid;
941 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
942 }
943 return true;
944abort:
945 tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
946 return false;
947}
948
949static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
950 int cnt)
951{
952 struct pd_mode_data *pmdata = &port->mode_data;
953 struct typec_altmode_desc *paltmode;
954 struct typec_mode_desc *pmode;
955 int i;
956
957 if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
958 /* Already logged in svdm_consume_svids() */
959 return;
960 }
961
962 paltmode = &pmdata->altmode_desc[pmdata->altmodes];
963 memset(paltmode, 0, sizeof(*paltmode));
964
965 paltmode->svid = pmdata->svids[pmdata->svid_index];
966
967 tcpm_log(port, " Alternate mode %d: SVID 0x%04x",
968 pmdata->altmodes, paltmode->svid);
969
970 for (i = 1; i < cnt && paltmode->n_modes < ALTMODE_MAX_MODES; i++) {
971 pmode = &paltmode->modes[paltmode->n_modes];
972 memset(pmode, 0, sizeof(*pmode));
973 pmode->vdo = le32_to_cpu(payload[i]);
974 pmode->index = i - 1;
975 paltmode->n_modes++;
976 tcpm_log(port, " VDO %d: 0x%08x",
977 pmode->index, pmode->vdo);
978 }
979 port->partner_altmode[pmdata->altmodes] =
980 typec_partner_register_altmode(port->partner, paltmode);
981 if (port->partner_altmode[pmdata->altmodes] == NULL) {
982 tcpm_log(port,
983 "Failed to register alternate modes for SVID 0x%04x",
984 paltmode->svid);
985 return;
986 }
987 pmdata->altmodes++;
988}
989
990#define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
991
992static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
993 u32 *response)
994{
995 u32 p0 = le32_to_cpu(payload[0]);
996 int cmd_type = PD_VDO_CMDT(p0);
997 int cmd = PD_VDO_CMD(p0);
998 struct pd_mode_data *modep;
999 int rlen = 0;
1000 u16 svid;
1001
1002 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1003 p0, cmd_type, cmd, cnt);
1004
1005 modep = &port->mode_data;
1006
1007 switch (cmd_type) {
1008 case CMDT_INIT:
1009 switch (cmd) {
1010 case CMD_DISCOVER_IDENT:
1011 break;
1012 case CMD_DISCOVER_SVID:
1013 break;
1014 case CMD_DISCOVER_MODES:
1015 break;
1016 case CMD_ENTER_MODE:
1017 break;
1018 case CMD_EXIT_MODE:
1019 break;
1020 case CMD_ATTENTION:
1021 break;
1022 default:
1023 break;
1024 }
1025 if (rlen >= 1) {
1026 response[0] = p0 | VDO_CMDT(CMDT_RSP_ACK);
1027 } else if (rlen == 0) {
1028 response[0] = p0 | VDO_CMDT(CMDT_RSP_NAK);
1029 rlen = 1;
1030 } else {
1031 response[0] = p0 | VDO_CMDT(CMDT_RSP_BUSY);
1032 rlen = 1;
1033 }
1034 break;
1035 case CMDT_RSP_ACK:
1036 /* silently drop message if we are not connected */
1037 if (!port->partner)
1038 break;
1039
1040 switch (cmd) {
1041 case CMD_DISCOVER_IDENT:
1042 /* 6.4.4.3.1 */
1043 svdm_consume_identity(port, payload, cnt);
1044 response[0] = VDO(USB_SID_PD, 1, CMD_DISCOVER_SVID);
1045 rlen = 1;
1046 break;
1047 case CMD_DISCOVER_SVID:
1048 /* 6.4.4.3.2 */
1049 if (svdm_consume_svids(port, payload, cnt)) {
1050 response[0] = VDO(USB_SID_PD, 1,
1051 CMD_DISCOVER_SVID);
1052 rlen = 1;
1053 } else if (modep->nsvids && supports_modal(port)) {
1054 response[0] = VDO(modep->svids[0], 1,
1055 CMD_DISCOVER_MODES);
1056 rlen = 1;
1057 }
1058 break;
1059 case CMD_DISCOVER_MODES:
1060 /* 6.4.4.3.3 */
1061 svdm_consume_modes(port, payload, cnt);
1062 modep->svid_index++;
1063 if (modep->svid_index < modep->nsvids) {
1064 svid = modep->svids[modep->svid_index];
1065 response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
1066 rlen = 1;
1067 } else {
1068#if 0
1069 response[0] = pd_dfp_enter_mode(port, 0, 0);
1070 if (response[0])
1071 rlen = 1;
1072#endif
1073 }
1074 break;
1075 case CMD_ENTER_MODE:
1076 break;
1077 default:
1078 break;
1079 }
1080 break;
1081 default:
1082 break;
1083 }
1084
1085 return rlen;
1086}
1087
1088static void tcpm_handle_vdm_request(struct tcpm_port *port,
1089 const __le32 *payload, int cnt)
1090{
1091 int rlen = 0;
1092 u32 response[8] = { };
1093 u32 p0 = le32_to_cpu(payload[0]);
1094
1095 if (port->vdm_state == VDM_STATE_BUSY) {
 1096		/* If UFP responded busy, retry after timeout */
1097 if (PD_VDO_CMDT(p0) == CMDT_RSP_BUSY) {
1098 port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
1099 port->vdo_retry = (p0 & ~VDO_CMDT_MASK) |
1100 CMDT_INIT;
1101 mod_delayed_work(port->wq, &port->vdm_state_machine,
1102 msecs_to_jiffies(PD_T_VDM_BUSY));
1103 return;
1104 }
1105 port->vdm_state = VDM_STATE_DONE;
1106 }
1107
1108 if (PD_VDO_SVDM(p0))
1109 rlen = tcpm_pd_svdm(port, payload, cnt, response);
1110#if 0
1111 else
1112 rlen = tcpm_pd_custom_vdm(port, cnt, payload, response);
1113#endif
1114
1115 if (rlen > 0) {
1116 tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
1117 mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
1118 }
1119}
1120
1121static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
1122 const u32 *data, int count)
1123{
1124 u32 header;
1125
1126 if (WARN_ON(count > VDO_MAX_SIZE - 1))
1127 count = VDO_MAX_SIZE - 1;
1128
1129 /* set VDM header with VID & CMD */
1130 header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
1131 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), cmd);
1132 tcpm_queue_vdm(port, header, data, count);
1133
1134 mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
1135}
1136
1137static unsigned int vdm_ready_timeout(u32 vdm_hdr)
1138{
1139 unsigned int timeout;
1140 int cmd = PD_VDO_CMD(vdm_hdr);
1141
 1142	/* it's not a structured VDM command */
1143 if (!PD_VDO_SVDM(vdm_hdr))
1144 return PD_T_VDM_UNSTRUCTURED;
1145
1146 switch (PD_VDO_CMDT(vdm_hdr)) {
1147 case CMDT_INIT:
1148 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1149 timeout = PD_T_VDM_WAIT_MODE_E;
1150 else
1151 timeout = PD_T_VDM_SNDR_RSP;
1152 break;
1153 default:
1154 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1155 timeout = PD_T_VDM_E_MODE;
1156 else
1157 timeout = PD_T_VDM_RCVR_RSP;
1158 break;
1159 }
1160 return timeout;
1161}
1162
1163static void vdm_run_state_machine(struct tcpm_port *port)
1164{
1165 struct pd_message msg;
1166 int i, res;
1167
1168 switch (port->vdm_state) {
1169 case VDM_STATE_READY:
1170 /* Only transmit VDM if attached */
1171 if (!port->attached) {
1172 port->vdm_state = VDM_STATE_ERR_BUSY;
1173 break;
1174 }
1175
1176 /*
 1177		 * If there's traffic or we're not in a ready state, don't send
1178 * a VDM.
1179 */
1180 if (port->state != SRC_READY && port->state != SNK_READY)
1181 break;
1182
1183 /* Prepare and send VDM */
1184 memset(&msg, 0, sizeof(msg));
1185 msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
1186 port->pwr_role,
1187 port->data_role,
1188 port->message_id, port->vdo_count);
1189 for (i = 0; i < port->vdo_count; i++)
1190 msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
1191 res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1192 if (res < 0) {
1193 port->vdm_state = VDM_STATE_ERR_SEND;
1194 } else {
1195 unsigned long timeout;
1196
1197 port->vdm_retries = 0;
1198 port->vdm_state = VDM_STATE_BUSY;
1199 timeout = vdm_ready_timeout(port->vdo_data[0]);
1200 mod_delayed_work(port->wq, &port->vdm_state_machine,
1201 timeout);
1202 }
1203 break;
1204 case VDM_STATE_WAIT_RSP_BUSY:
1205 port->vdo_data[0] = port->vdo_retry;
1206 port->vdo_count = 1;
1207 port->vdm_state = VDM_STATE_READY;
1208 break;
1209 case VDM_STATE_BUSY:
1210 port->vdm_state = VDM_STATE_ERR_TMOUT;
1211 break;
1212 case VDM_STATE_ERR_SEND:
1213 /*
1214 * A partner which does not support USB PD will not reply,
1215 * so this is not a fatal error. At the same time, some
1216 * devices may not return GoodCRC under some circumstances,
1217 * so we need to retry.
1218 */
1219 if (port->vdm_retries < 3) {
1220 tcpm_log(port, "VDM Tx error, retry");
1221 port->vdm_retries++;
1222 port->vdm_state = VDM_STATE_READY;
1223 }
1224 break;
1225 default:
1226 break;
1227 }
1228}
1229
1230static void vdm_state_machine_work(struct work_struct *work)
1231{
1232 struct tcpm_port *port = container_of(work, struct tcpm_port,
1233 vdm_state_machine.work);
1234 enum vdm_states prev_state;
1235
1236 mutex_lock(&port->lock);
1237
1238 /*
1239 * Continue running as long as the port is not busy and there was
1240 * a state change.
1241 */
1242 do {
1243 prev_state = port->vdm_state;
1244 vdm_run_state_machine(port);
1245 } while (port->vdm_state != prev_state &&
1246 port->vdm_state != VDM_STATE_BUSY);
1247
1248 mutex_unlock(&port->lock);
1249}
1250
1251/*
1252 * PD (data, control) command handling functions
1253 */
1254static void tcpm_pd_data_request(struct tcpm_port *port,
1255 const struct pd_message *msg)
1256{
1257 enum pd_data_msg_type type = pd_header_type_le(msg->header);
1258 unsigned int cnt = pd_header_cnt_le(msg->header);
1259 unsigned int i;
1260
1261 switch (type) {
1262 case PD_DATA_SOURCE_CAP:
1263 if (port->pwr_role != TYPEC_SINK)
1264 break;
1265
1266 for (i = 0; i < cnt; i++)
1267 port->source_caps[i] = le32_to_cpu(msg->payload[i]);
1268
1269 port->nr_source_caps = cnt;
1270
1271 tcpm_log_source_caps(port);
1272
1273 /*
1274 * This message may be received even if VBUS is not
1275 * present. This is quite unexpected; see USB PD
1276 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
1277 * However, at the same time, we must be ready to
1278 * receive this message and respond to it 15ms after
1279 * receiving PS_RDY during power swap operations, no matter
1280 * if VBUS is available or not (USB PD specification,
1281 * section 6.5.9.2).
1282 * So we need to accept the message either way,
1283 * but be prepared to keep waiting for VBUS after it was
1284 * handled.
1285 */
1286 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
1287 break;
1288 case PD_DATA_REQUEST:
1289 if (port->pwr_role != TYPEC_SOURCE ||
1290 cnt != 1) {
1291 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1292 break;
1293 }
1294 port->sink_request = le32_to_cpu(msg->payload[0]);
1295 tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
1296 break;
1297 case PD_DATA_SINK_CAP:
1298 /* We don't do anything with this at the moment... */
1299 for (i = 0; i < cnt; i++)
1300 port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
1301 port->nr_sink_caps = cnt;
1302 break;
1303 case PD_DATA_VENDOR_DEF:
1304 tcpm_handle_vdm_request(port, msg->payload, cnt);
1305 break;
1306 case PD_DATA_BIST:
1307 if (port->state == SRC_READY || port->state == SNK_READY) {
1308 port->bist_request = le32_to_cpu(msg->payload[0]);
1309 tcpm_set_state(port, BIST_RX, 0);
1310 }
1311 break;
1312 default:
1313 tcpm_log(port, "Unhandled data message type %#x", type);
1314 break;
1315 }
1316}
1317
1318static void tcpm_pd_ctrl_request(struct tcpm_port *port,
1319 const struct pd_message *msg)
1320{
1321 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1322 enum tcpm_state next_state;
1323
1324 switch (type) {
1325 case PD_CTRL_GOOD_CRC:
1326 case PD_CTRL_PING:
1327 break;
1328 case PD_CTRL_GET_SOURCE_CAP:
1329 switch (port->state) {
1330 case SRC_READY:
1331 case SNK_READY:
1332 tcpm_queue_message(port, PD_MSG_DATA_SOURCE_CAP);
1333 break;
1334 default:
1335 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1336 break;
1337 }
1338 break;
1339 case PD_CTRL_GET_SINK_CAP:
1340 switch (port->state) {
1341 case SRC_READY:
1342 case SNK_READY:
1343 tcpm_queue_message(port, PD_MSG_DATA_SINK_CAP);
1344 break;
1345 default:
1346 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1347 break;
1348 }
1349 break;
1350 case PD_CTRL_GOTO_MIN:
1351 break;
1352 case PD_CTRL_PS_RDY:
1353 switch (port->state) {
1354 case SNK_TRANSITION_SINK:
1355 if (port->vbus_present) {
1356 tcpm_set_current_limit(port,
1357 port->current_limit,
1358 port->supply_voltage);
1359 tcpm_set_state(port, SNK_READY, 0);
1360 } else {
1361 /*
1362 * Seen after power swap. Keep waiting for VBUS
1363 * in a transitional state.
1364 */
1365 tcpm_set_state(port,
1366 SNK_TRANSITION_SINK_VBUS, 0);
1367 }
1368 break;
1369 case PR_SWAP_SRC_SNK_SOURCE_OFF:
1370 tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
1371 break;
1372 case PR_SWAP_SNK_SRC_SINK_OFF:
1373 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
1374 break;
1375 case VCONN_SWAP_WAIT_FOR_VCONN:
1376 tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
1377 break;
1378 default:
1379 break;
1380 }
1381 break;
1382 case PD_CTRL_REJECT:
1383 case PD_CTRL_WAIT:
1384 switch (port->state) {
1385 case SNK_NEGOTIATE_CAPABILITIES:
1386 /* USB PD specification, Figure 8-43 */
1387 if (port->explicit_contract)
1388 next_state = SNK_READY;
1389 else
1390 next_state = SNK_WAIT_CAPABILITIES;
1391 tcpm_set_state(port, next_state, 0);
1392 break;
1393 case DR_SWAP_SEND:
1394 port->swap_status = (type == PD_CTRL_WAIT ?
1395 -EAGAIN : -EOPNOTSUPP);
1396 tcpm_set_state(port, DR_SWAP_CANCEL, 0);
1397 break;
1398 case PR_SWAP_SEND:
1399 port->swap_status = (type == PD_CTRL_WAIT ?
1400 -EAGAIN : -EOPNOTSUPP);
1401 tcpm_set_state(port, PR_SWAP_CANCEL, 0);
1402 break;
1403 case VCONN_SWAP_SEND:
1404 port->swap_status = (type == PD_CTRL_WAIT ?
1405 -EAGAIN : -EOPNOTSUPP);
1406 tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
1407 break;
1408 default:
1409 break;
1410 }
1411 break;
1412 case PD_CTRL_ACCEPT:
1413 switch (port->state) {
1414 case SNK_NEGOTIATE_CAPABILITIES:
1415 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
1416 break;
1417 case SOFT_RESET_SEND:
1418 port->message_id = 0;
 1419		port->rx_msgid = -1;
1420 if (port->pwr_role == TYPEC_SOURCE)
1421 next_state = SRC_SEND_CAPABILITIES;
1422 else
1423 next_state = SNK_WAIT_CAPABILITIES;
1424 tcpm_set_state(port, next_state, 0);
1425 break;
1426 case DR_SWAP_SEND:
1427 tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
1428 break;
1429 case PR_SWAP_SEND:
1430 tcpm_set_state(port, PR_SWAP_START, 0);
1431 break;
1432 case VCONN_SWAP_SEND:
1433 tcpm_set_state(port, VCONN_SWAP_START, 0);
1434 break;
1435 default:
1436 break;
1437 }
1438 break;
1439 case PD_CTRL_SOFT_RESET:
1440 tcpm_set_state(port, SOFT_RESET, 0);
1441 break;
1442 case PD_CTRL_DR_SWAP:
1443 if (port->typec_caps.type != TYPEC_PORT_DRP) {
1444 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1445 break;
1446 }
1447 /*
1448 * XXX
1449 * 6.3.9: If an alternate mode is active, a request to swap
1450 * alternate modes shall trigger a port reset.
1451 */
1452 switch (port->state) {
1453 case SRC_READY:
1454 case SNK_READY:
1455 tcpm_set_state(port, DR_SWAP_ACCEPT, 0);
1456 break;
1457 default:
1458 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1459 break;
1460 }
1461 break;
1462 case PD_CTRL_PR_SWAP:
1463 if (port->typec_caps.type != TYPEC_PORT_DRP) {
1464 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1465 break;
1466 }
1467 switch (port->state) {
1468 case SRC_READY:
1469 case SNK_READY:
1470 tcpm_set_state(port, PR_SWAP_ACCEPT, 0);
1471 break;
1472 default:
1473 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1474 break;
1475 }
1476 break;
1477 case PD_CTRL_VCONN_SWAP:
1478 switch (port->state) {
1479 case SRC_READY:
1480 case SNK_READY:
1481 tcpm_set_state(port, VCONN_SWAP_ACCEPT, 0);
1482 break;
1483 default:
1484 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1485 break;
1486 }
1487 break;
1488 default:
1489 tcpm_log(port, "Unhandled ctrl message type %#x", type);
1490 break;
1491 }
1492}
1493
1494static void tcpm_pd_rx_handler(struct work_struct *work)
1495{
1496 struct pd_rx_event *event = container_of(work,
1497 struct pd_rx_event, work);
1498 const struct pd_message *msg = &event->msg;
1499 unsigned int cnt = pd_header_cnt_le(msg->header);
1500 struct tcpm_port *port = event->port;
1501
1502 mutex_lock(&port->lock);
1503
1504 tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
1505 port->attached);
1506
1507 if (port->attached) {
1508 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1509 unsigned int msgid = pd_header_msgid_le(msg->header);
1510
1511 /*
1512 * USB PD standard, 6.6.1.2:
1513 * "... if MessageID value in a received Message is the
1514 * same as the stored value, the receiver shall return a
1515 * GoodCRC Message with that MessageID value and drop
1516 * the Message (this is a retry of an already received
1517 * Message). Note: this shall not apply to the Soft_Reset
1518 * Message which always has a MessageID value of zero."
1519 */
1520 if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
1521 goto done;
1522 port->rx_msgid = msgid;
1523
1524 /*
 1525		 * If both ends believe they are DFP/host, we have a data role
1526 * mismatch.
1527 */
1528 if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
1529 (port->data_role == TYPEC_HOST)) {
1530 tcpm_log(port,
1531 "Data role mismatch, initiating error recovery");
1532 tcpm_set_state(port, ERROR_RECOVERY, 0);
1533 } else {
1534 if (cnt)
1535 tcpm_pd_data_request(port, msg);
1536 else
1537 tcpm_pd_ctrl_request(port, msg);
1538 }
1539 }
1540
1541done:
1542 mutex_unlock(&port->lock);
1543 kfree(event);
1544}
1545
1546void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
1547{
1548 struct pd_rx_event *event;
1549
1550 event = kzalloc(sizeof(*event), GFP_ATOMIC);
1551 if (!event)
1552 return;
1553
1554 INIT_WORK(&event->work, tcpm_pd_rx_handler);
1555 event->port = port;
1556 memcpy(&event->msg, msg, sizeof(*msg));
1557 queue_work(port->wq, &event->work);
1558}
1559EXPORT_SYMBOL_GPL(tcpm_pd_receive);
1560
1561static int tcpm_pd_send_control(struct tcpm_port *port,
1562 enum pd_ctrl_msg_type type)
1563{
1564 struct pd_message msg;
1565
1566 memset(&msg, 0, sizeof(msg));
1567 msg.header = PD_HEADER_LE(type, port->pwr_role,
1568 port->data_role,
1569 port->message_id, 0);
1570
1571 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1572}
1573
1574/*
1575 * Send queued message without affecting state.
1576 * Return true if state machine should go back to sleep,
1577 * false otherwise.
1578 */
1579static bool tcpm_send_queued_message(struct tcpm_port *port)
1580{
1581 enum pd_msg_request queued_message;
1582
1583 do {
1584 queued_message = port->queued_message;
1585 port->queued_message = PD_MSG_NONE;
1586
1587 switch (queued_message) {
1588 case PD_MSG_CTRL_WAIT:
1589 tcpm_pd_send_control(port, PD_CTRL_WAIT);
1590 break;
1591 case PD_MSG_CTRL_REJECT:
1592 tcpm_pd_send_control(port, PD_CTRL_REJECT);
1593 break;
1594 case PD_MSG_DATA_SINK_CAP:
1595 tcpm_pd_send_sink_caps(port);
1596 break;
1597 case PD_MSG_DATA_SOURCE_CAP:
1598 tcpm_pd_send_source_caps(port);
1599 break;
1600 default:
1601 break;
1602 }
1603 } while (port->queued_message != PD_MSG_NONE);
1604
1605 if (port->delayed_state != INVALID_STATE) {
1606 if (time_is_after_jiffies(port->delayed_runtime)) {
1607 mod_delayed_work(port->wq, &port->state_machine,
1608 port->delayed_runtime - jiffies);
1609 return true;
1610 }
1611 port->delayed_state = INVALID_STATE;
1612 }
1613 return false;
1614}
1615
1616static int tcpm_pd_check_request(struct tcpm_port *port)
1617{
1618 u32 pdo, rdo = port->sink_request;
1619 unsigned int max, op, pdo_max, index;
1620 enum pd_pdo_type type;
1621
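	/*
	 * Validate the sink's request (RDO) against the source PDO it refers
	 * to: the operating current/power must fit within the PDO, and the
	 * maximum may only exceed it when the capability mismatch bit is set.
	 */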
1622 index = rdo_index(rdo);
1623 if (!index || index > port->nr_src_pdo)
1624 return -EINVAL;
1625
1626 pdo = port->src_pdo[index - 1];
1627 type = pdo_type(pdo);
1628 switch (type) {
1629 case PDO_TYPE_FIXED:
1630 case PDO_TYPE_VAR:
1631 max = rdo_max_current(rdo);
1632 op = rdo_op_current(rdo);
1633 pdo_max = pdo_max_current(pdo);
1634
1635 if (op > pdo_max)
1636 return -EINVAL;
1637 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1638 return -EINVAL;
1639
1640 if (type == PDO_TYPE_FIXED)
1641 tcpm_log(port,
1642 "Requested %u mV, %u mA for %u / %u mA",
1643 pdo_fixed_voltage(pdo), pdo_max, op, max);
1644 else
1645 tcpm_log(port,
1646 "Requested %u -> %u mV, %u mA for %u / %u mA",
1647 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1648 pdo_max, op, max);
1649 break;
1650 case PDO_TYPE_BATT:
1651 max = rdo_max_power(rdo);
1652 op = rdo_op_power(rdo);
1653 pdo_max = pdo_max_power(pdo);
1654
1655 if (op > pdo_max)
1656 return -EINVAL;
1657 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1658 return -EINVAL;
1659 tcpm_log(port,
1660 "Requested %u -> %u mV, %u mW for %u / %u mW",
1661 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1662 pdo_max, op, max);
1663 break;
1664 default:
1665 return -EINVAL;
1666 }
1667
1668 port->op_vsafe5v = index == 1;
1669
1670 return 0;
1671}
1672
1673static int tcpm_pd_select_pdo(struct tcpm_port *port)
1674{
1675 unsigned int i, max_mw = 0, max_mv = 0;
1676 int ret = -EINVAL;
1677
1678 /*
1679 * Select the source PDO providing the most power while staying within
 1680	 * the board's voltage limits. Prefer higher voltages when power is equal.
1681 */
1682 for (i = 0; i < port->nr_source_caps; i++) {
1683 u32 pdo = port->source_caps[i];
1684 enum pd_pdo_type type = pdo_type(pdo);
1685 unsigned int mv, ma, mw;
1686
1687 if (type == PDO_TYPE_FIXED)
1688 mv = pdo_fixed_voltage(pdo);
1689 else
1690 mv = pdo_min_voltage(pdo);
1691
1692 if (type == PDO_TYPE_BATT) {
1693 mw = pdo_max_power(pdo);
1694 } else {
1695 ma = min(pdo_max_current(pdo),
1696 port->max_snk_ma);
1697 mw = ma * mv / 1000;
1698 }
1699
 1700		/* Prefer higher voltages if available */
1701 if ((mw > max_mw || (mw == max_mw && mv > max_mv)) &&
1702 mv <= port->max_snk_mv) {
1703 ret = i;
1704 max_mw = mw;
1705 max_mv = mv;
1706 }
1707 }
1708
1709 return ret;
1710}
1711
1712static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1713{
1714 unsigned int mv, ma, mw, flags;
1715 unsigned int max_ma, max_mw;
1716 enum pd_pdo_type type;
1717 int index;
1718 u32 pdo;
1719
1720 index = tcpm_pd_select_pdo(port);
1721 if (index < 0)
1722 return -EINVAL;
1723 pdo = port->source_caps[index];
1724 type = pdo_type(pdo);
1725
1726 if (type == PDO_TYPE_FIXED)
1727 mv = pdo_fixed_voltage(pdo);
1728 else
1729 mv = pdo_min_voltage(pdo);
1730
1731 /* Select maximum available current within the board's power limit */
1732 if (type == PDO_TYPE_BATT) {
1733 mw = pdo_max_power(pdo);
1734 ma = 1000 * min(mw, port->max_snk_mw) / mv;
1735 } else {
1736 ma = min(pdo_max_current(pdo),
1737 1000 * port->max_snk_mw / mv);
1738 }
1739 ma = min(ma, port->max_snk_ma);
1740
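	/*
	 * Every request advertises USB communications capability
	 * (RDO_USB_COMM) and asks the source not to apply USB suspend
	 * (RDO_NO_SUSPEND).
	 */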
 1741	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
1742
1743 /* Set mismatch bit if offered power is less than operating power */
1744 mw = ma * mv / 1000;
1745 max_ma = ma;
1746 max_mw = mw;
1747 if (mw < port->operating_snk_mw) {
1748 flags |= RDO_CAP_MISMATCH;
1749 max_mw = port->operating_snk_mw;
1750 max_ma = max_mw * 1000 / mv;
1751 }
1752
1753 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
1754 port->cc_req, port->cc1, port->cc2, port->vbus_source,
1755 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
1756 port->polarity);
1757
1758 if (type == PDO_TYPE_BATT) {
1759 *rdo = RDO_BATT(index + 1, mw, max_mw, flags);
1760
1761 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
1762 index, mv, mw,
1763 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1764 } else {
1765 *rdo = RDO_FIXED(index + 1, ma, max_ma, flags);
1766
1767 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
1768 index, mv, ma,
1769 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1770 }
1771
1772 port->current_limit = ma;
1773 port->supply_voltage = mv;
1774
1775 return 0;
1776}
1777
1778static int tcpm_pd_send_request(struct tcpm_port *port)
1779{
1780 struct pd_message msg;
1781 int ret;
1782 u32 rdo;
1783
1784 ret = tcpm_pd_build_request(port, &rdo);
1785 if (ret < 0)
1786 return ret;
1787
1788 memset(&msg, 0, sizeof(msg));
1789 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
1790 port->pwr_role,
1791 port->data_role,
1792 port->message_id, 1);
1793 msg.payload[0] = cpu_to_le32(rdo);
1794
1795 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1796}
1797
1798static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
1799{
1800 int ret;
1801
1802 if (enable && port->vbus_charge)
1803 return -EINVAL;
1804
1805 tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
1806
1807 ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
1808 if (ret < 0)
1809 return ret;
1810
1811 port->vbus_source = enable;
1812 return 0;
1813}
1814
1815static int tcpm_set_charge(struct tcpm_port *port, bool charge)
1816{
1817 int ret;
1818
1819 if (charge && port->vbus_source)
1820 return -EINVAL;
1821
1822 if (charge != port->vbus_charge) {
1823 tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
1824 ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
1825 charge);
1826 if (ret < 0)
1827 return ret;
1828 }
1829 port->vbus_charge = charge;
1830 return 0;
1831}
1832
1833static bool tcpm_start_drp_toggling(struct tcpm_port *port)
1834{
1835 int ret;
1836
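	/*
	 * Let the TCPC perform DRP toggling autonomously if it can; returns
	 * true when hardware toggling was started, in which case the caller
	 * parks the port in DRP_TOGGLING.
	 */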
1837 if (port->tcpc->start_drp_toggling &&
1838 port->typec_caps.type == TYPEC_PORT_DRP) {
1839 tcpm_log_force(port, "Start DRP toggling");
1840 ret = port->tcpc->start_drp_toggling(port->tcpc,
1841 tcpm_rp_cc(port));
1842 if (!ret)
1843 return true;
1844 }
1845
1846 return false;
1847}
1848
1849static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
1850{
1851 tcpm_log(port, "cc:=%d", cc);
1852 port->cc_req = cc;
1853 port->tcpc->set_cc(port->tcpc, cc);
1854}
1855
1856static int tcpm_init_vbus(struct tcpm_port *port)
1857{
1858 int ret;
1859
1860 ret = port->tcpc->set_vbus(port->tcpc, false, false);
1861 port->vbus_source = false;
1862 port->vbus_charge = false;
1863 return ret;
1864}
1865
1866static int tcpm_init_vconn(struct tcpm_port *port)
1867{
1868 int ret;
1869
1870 ret = port->tcpc->set_vconn(port->tcpc, false);
1871 port->vconn_role = TYPEC_SINK;
1872 return ret;
1873}
1874
1875static void tcpm_typec_connect(struct tcpm_port *port)
1876{
1877 if (!port->connected) {
1878 /* Make sure we don't report stale identity information */
1879 memset(&port->partner_ident, 0, sizeof(port->partner_ident));
1880 port->partner_desc.usb_pd = port->pd_capable;
1881 if (tcpm_port_is_debug(port))
1882 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
1883 else if (tcpm_port_is_audio(port))
1884 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
1885 else
1886 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
1887 port->partner = typec_register_partner(port->typec_port,
1888 &port->partner_desc);
1889 port->connected = true;
1890 }
1891}
1892
1893static int tcpm_src_attach(struct tcpm_port *port)
1894{
1895 enum typec_cc_polarity polarity =
1896 port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
1897 : TYPEC_POLARITY_CC1;
1898 int ret;
1899
1900 if (port->attached)
1901 return 0;
1902
1903 ret = tcpm_set_polarity(port, polarity);
1904 if (ret < 0)
1905 return ret;
1906
1907 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
1908 if (ret < 0)
1909 return ret;
1910
1911 ret = port->tcpc->set_pd_rx(port->tcpc, true);
1912 if (ret < 0)
1913 goto out_disable_mux;
1914
1915 /*
1916 * USB Type-C specification, version 1.2,
1917 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
1918 * Enable VCONN only if the non-RD port is set to RA.
1919 */
1920 if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
1921 (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
1922 ret = tcpm_set_vconn(port, true);
1923 if (ret < 0)
1924 goto out_disable_pd;
1925 }
1926
1927 ret = tcpm_set_vbus(port, true);
1928 if (ret < 0)
1929 goto out_disable_vconn;
1930
1931 port->pd_capable = false;
1932
1933 port->partner = NULL;
1934
1935 port->attached = true;
1936 port->send_discover = true;
1937
1938 return 0;
1939
1940out_disable_vconn:
1941 tcpm_set_vconn(port, false);
1942out_disable_pd:
1943 port->tcpc->set_pd_rx(port->tcpc, false);
1944out_disable_mux:
1945 tcpm_mux_set(port, TYPEC_MUX_NONE, TCPC_USB_SWITCH_DISCONNECT);
1946 return ret;
1947}
1948
1949static void tcpm_typec_disconnect(struct tcpm_port *port)
1950{
1951 if (port->connected) {
1952 typec_unregister_partner(port->partner);
1953 port->partner = NULL;
1954 port->connected = false;
1955 }
1956}
1957
1958static void tcpm_unregister_altmodes(struct tcpm_port *port)
1959{
1960 struct pd_mode_data *modep = &port->mode_data;
1961 int i;
1962
1963 for (i = 0; i < modep->altmodes; i++) {
1964 typec_unregister_altmode(port->partner_altmode[i]);
1965 port->partner_altmode[i] = NULL;
1966 }
1967
1968 memset(modep, 0, sizeof(*modep));
1969}
1970
1971static void tcpm_reset_port(struct tcpm_port *port)
1972{
1973 tcpm_unregister_altmodes(port);
1974 tcpm_typec_disconnect(port);
1975 port->attached = false;
1976 port->pd_capable = false;
1977
1978 /*
1979 * First Rx ID should be 0; set this to a sentinel of -1 so that
 1980	 * tcpm_pd_rx_handler() can check whether we have seen it before.
1981 */
1982 port->rx_msgid = -1;
1983
1984 port->tcpc->set_pd_rx(port->tcpc, false);
1985 tcpm_init_vbus(port); /* also disables charging */
1986 tcpm_init_vconn(port);
1987 tcpm_set_current_limit(port, 0, 0);
1988 tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
1989 tcpm_set_attached_state(port, false);
1990 port->try_src_count = 0;
1991 port->try_snk_count = 0;
1992}
1993
1994static void tcpm_detach(struct tcpm_port *port)
1995{
1996 if (!port->attached)
1997 return;
1998
1999 if (tcpm_port_is_disconnected(port))
2000 port->hard_reset_count = 0;
2001
2002 tcpm_reset_port(port);
2003}
2004
2005static void tcpm_src_detach(struct tcpm_port *port)
2006{
2007 tcpm_detach(port);
2008}
2009
2010static int tcpm_snk_attach(struct tcpm_port *port)
2011{
2012 int ret;
2013
2014 if (port->attached)
2015 return 0;
2016
2017 ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
2018 TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
2019 if (ret < 0)
2020 return ret;
2021
2022 ret = tcpm_set_roles(port, true, TYPEC_SINK, TYPEC_DEVICE);
2023 if (ret < 0)
2024 return ret;
2025
2026 port->pd_capable = false;
2027
2028 port->partner = NULL;
2029
2030 port->attached = true;
2031 port->send_discover = true;
2032
2033 return 0;
2034}
2035
2036static void tcpm_snk_detach(struct tcpm_port *port)
2037{
2038 tcpm_detach(port);
2039
2040 /* XXX: (Dis)connect SuperSpeed mux? */
2041}
2042
2043static int tcpm_acc_attach(struct tcpm_port *port)
2044{
2045 int ret;
2046
2047 if (port->attached)
2048 return 0;
2049
2050 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
2051 if (ret < 0)
2052 return ret;
2053
2054 port->partner = NULL;
2055
2056 tcpm_typec_connect(port);
2057
2058 port->attached = true;
2059
2060 return 0;
2061}
2062
2063static void tcpm_acc_detach(struct tcpm_port *port)
2064{
2065 tcpm_detach(port);
2066}
2067
2068static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
2069{
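	/*
	 * Pick the follow-up state after a failed PD exchange: keep sending
	 * hard resets while retries remain, escalate to error recovery if the
	 * partner has proven PD capable, and otherwise give up on PD and fall
	 * back to a plain Type-C state.
	 */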
2070 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
2071 return HARD_RESET_SEND;
2072 if (port->pd_capable)
2073 return ERROR_RECOVERY;
2074 if (port->pwr_role == TYPEC_SOURCE)
2075 return SRC_UNATTACHED;
2076 if (port->state == SNK_WAIT_CAPABILITIES)
2077 return SNK_READY;
2078 return SNK_UNATTACHED;
2079}
2080
2081static inline enum tcpm_state ready_state(struct tcpm_port *port)
2082{
2083 if (port->pwr_role == TYPEC_SOURCE)
2084 return SRC_READY;
2085 else
2086 return SNK_READY;
2087}
2088
2089static inline enum tcpm_state unattached_state(struct tcpm_port *port)
2090{
2091 if (port->pwr_role == TYPEC_SOURCE)
2092 return SRC_UNATTACHED;
2093 else
2094 return SNK_UNATTACHED;
2095}
2096
2097static void tcpm_check_send_discover(struct tcpm_port *port)
2098{
2099 if (port->data_role == TYPEC_HOST && port->send_discover &&
2100 port->pd_capable) {
2101 tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
2102 port->send_discover = false;
2103 }
2104}
2105
2106static void tcpm_swap_complete(struct tcpm_port *port, int result)
2107{
2108 if (port->swap_pending) {
2109 port->swap_status = result;
2110 port->swap_pending = false;
2111 complete(&port->swap_complete);
2112 }
2113}
2114
2115static void run_state_machine(struct tcpm_port *port)
2116{
2117 int ret;
2118
2119 port->enter_state = port->state;
2120 switch (port->state) {
2121 case DRP_TOGGLING:
2122 break;
2123 /* SRC states */
2124 case SRC_UNATTACHED:
2125 tcpm_swap_complete(port, -ENOTCONN);
2126 tcpm_src_detach(port);
2127 if (tcpm_start_drp_toggling(port)) {
2128 tcpm_set_state(port, DRP_TOGGLING, 0);
2129 break;
2130 }
2131 tcpm_set_cc(port, tcpm_rp_cc(port));
2132 if (port->typec_caps.type == TYPEC_PORT_DRP)
2133 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
2134 break;
2135 case SRC_ATTACH_WAIT:
2136 if (tcpm_port_is_debug(port))
2137 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
2138 PD_T_CC_DEBOUNCE);
2139 else if (tcpm_port_is_audio(port))
2140 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
2141 PD_T_CC_DEBOUNCE);
2142 else if (tcpm_port_is_source(port))
2143 tcpm_set_state(port,
2144 tcpm_try_snk(port) ? SNK_TRY
2145 : SRC_ATTACHED,
2146 PD_T_CC_DEBOUNCE);
2147 break;
2148
2149 case SNK_TRY:
2150 port->try_snk_count++;
2151 /*
2152 * Requirements:
2153 * - Do not drive vconn or vbus
2154 * - Terminate CC pins (both) to Rd
2155 * Action:
2156 * - Wait for tDRPTry (PD_T_DRP_TRY).
2157 * Until then, ignore any state changes.
2158 */
2159 tcpm_set_cc(port, TYPEC_CC_RD);
2160 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
2161 break;
2162 case SNK_TRY_WAIT:
2163 if (port->vbus_present && tcpm_port_is_sink(port)) {
2164 tcpm_set_state(port, SNK_ATTACHED, 0);
2165 break;
2166 }
2167 if (!tcpm_port_is_sink(port)) {
2168 tcpm_set_state(port, SRC_TRYWAIT,
2169 PD_T_PD_DEBOUNCE);
2170 break;
2171 }
2172 /* No vbus, cc state is sink or open */
2173 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED, PD_T_DRP_TRYWAIT);
2174 break;
2175 case SRC_TRYWAIT:
2176 tcpm_set_cc(port, tcpm_rp_cc(port));
2177 if (!port->vbus_present && tcpm_port_is_source(port))
2178 tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
2179 else
2180 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
2181 PD_T_DRP_TRY);
2182 break;
2183 case SRC_TRYWAIT_UNATTACHED:
2184 tcpm_set_state(port, SNK_UNATTACHED, 0);
2185 break;
2186
2187 case SRC_ATTACHED:
2188 ret = tcpm_src_attach(port);
2189 tcpm_set_state(port, SRC_UNATTACHED,
2190 ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
2191 break;
2192 case SRC_STARTUP:
2193 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB);
2194 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2195 port->caps_count = 0;
2196 port->message_id = 0;
 2197 port->rx_msgid = -1;
2198 port->explicit_contract = false;
2199 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2200 break;
2201 case SRC_SEND_CAPABILITIES:
2202 port->caps_count++;
2203 if (port->caps_count > PD_N_CAPS_COUNT) {
2204 tcpm_set_state(port, SRC_READY, 0);
2205 break;
2206 }
2207 ret = tcpm_pd_send_source_caps(port);
2208 if (ret < 0) {
2209 tcpm_set_state(port, SRC_SEND_CAPABILITIES,
2210 PD_T_SEND_SOURCE_CAP);
2211 } else {
2212 /*
2213 * Per standard, we should clear the reset counter here.
2214 * However, that can result in state machine hang-ups.
2215 * Reset it only in READY state to improve stability.
2216 */
2217 /* port->hard_reset_count = 0; */
2218 port->caps_count = 0;
2219 port->pd_capable = true;
2220 tcpm_set_state_cond(port, hard_reset_state(port),
2221 PD_T_SEND_SOURCE_CAP);
2222 }
2223 break;
2224 case SRC_NEGOTIATE_CAPABILITIES:
2225 ret = tcpm_pd_check_request(port);
2226 if (ret < 0) {
2227 tcpm_pd_send_control(port, PD_CTRL_REJECT);
2228 if (!port->explicit_contract) {
2229 tcpm_set_state(port,
2230 SRC_WAIT_NEW_CAPABILITIES, 0);
2231 } else {
2232 tcpm_set_state(port, SRC_READY, 0);
2233 }
2234 } else {
2235 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2236 tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
2237 PD_T_SRC_TRANSITION);
2238 }
2239 break;
2240 case SRC_TRANSITION_SUPPLY:
2241 /* XXX: regulator_set_voltage(vbus, ...) */
2242 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2243 port->explicit_contract = true;
2244 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
2245 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2246 tcpm_set_state_cond(port, SRC_READY, 0);
2247 break;
2248 case SRC_READY:
2249#if 1
2250 port->hard_reset_count = 0;
2251#endif
2252 port->try_src_count = 0;
2253
2254 tcpm_typec_connect(port);
2255
2256 tcpm_check_send_discover(port);
2257 /*
2258 * 6.3.5
2259 * Sending ping messages is not necessary if
2260 * - the source operates at vSafe5V
2261 * or
2262 * - The system is not operating in PD mode
2263 * or
2264 * - Both partners are connected using a Type-C connector
 2265 * XXX How do we know that?
2266 */
2267 if (port->pwr_opmode == TYPEC_PWR_MODE_PD &&
2268 !port->op_vsafe5v) {
2269 tcpm_pd_send_control(port, PD_CTRL_PING);
2270 tcpm_set_state_cond(port, SRC_READY,
2271 PD_T_SOURCE_ACTIVITY);
2272 }
2273 break;
2274 case SRC_WAIT_NEW_CAPABILITIES:
2275 /* Nothing to do... */
2276 break;
2277
2278 /* SNK states */
2279 case SNK_UNATTACHED:
2280 tcpm_swap_complete(port, -ENOTCONN);
2281 tcpm_snk_detach(port);
2282 if (tcpm_start_drp_toggling(port)) {
2283 tcpm_set_state(port, DRP_TOGGLING, 0);
2284 break;
2285 }
2286 tcpm_set_cc(port, TYPEC_CC_RD);
2287 if (port->typec_caps.type == TYPEC_PORT_DRP)
2288 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
2289 break;
2290 case SNK_ATTACH_WAIT:
2291 if ((port->cc1 == TYPEC_CC_OPEN &&
2292 port->cc2 != TYPEC_CC_OPEN) ||
2293 (port->cc1 != TYPEC_CC_OPEN &&
2294 port->cc2 == TYPEC_CC_OPEN))
2295 tcpm_set_state(port, SNK_DEBOUNCED,
2296 PD_T_CC_DEBOUNCE);
2297 else if (tcpm_port_is_disconnected(port))
2298 tcpm_set_state(port, SNK_UNATTACHED,
2299 PD_T_PD_DEBOUNCE);
2300 break;
2301 case SNK_DEBOUNCED:
2302 if (tcpm_port_is_disconnected(port))
2303 tcpm_set_state(port, SNK_UNATTACHED,
2304 PD_T_PD_DEBOUNCE);
2305 else if (port->vbus_present)
2306 tcpm_set_state(port,
2307 tcpm_try_src(port) ? SRC_TRY
2308 : SNK_ATTACHED,
2309 0);
2310 else
2311 /* Wait for VBUS, but not forever */
2312 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
2313 break;
2314
2315 case SRC_TRY:
2316 port->try_src_count++;
2317 tcpm_set_cc(port, tcpm_rp_cc(port));
2318 tcpm_set_state(port, SNK_TRYWAIT, PD_T_DRP_TRY);
2319 break;
2320 case SRC_TRY_DEBOUNCE:
2321 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
2322 break;
2323 case SNK_TRYWAIT:
2324 tcpm_set_cc(port, TYPEC_CC_RD);
2325 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, PD_T_CC_DEBOUNCE);
2326 break;
2327 case SNK_TRYWAIT_DEBOUNCE:
2328 if (port->vbus_present) {
2329 tcpm_set_state(port, SNK_ATTACHED, 0);
2330 break;
2331 }
2332 if (tcpm_port_is_disconnected(port)) {
2333 tcpm_set_state(port, SNK_UNATTACHED,
2334 PD_T_PD_DEBOUNCE);
2335 break;
2336 }
2337 if (tcpm_port_is_source(port))
2338 tcpm_set_state(port, SRC_ATTACHED, 0);
 2339 /* XXX Are we supposed to stay in this state? */
2340 break;
2341 case SNK_TRYWAIT_VBUS:
2342 tcpm_set_state(port, SNK_ATTACHED, PD_T_CC_DEBOUNCE);
2343 break;
2344
2345 case SNK_ATTACHED:
2346 ret = tcpm_snk_attach(port);
2347 if (ret < 0)
2348 tcpm_set_state(port, SNK_UNATTACHED, 0);
2349 else
2350 tcpm_set_state(port, SNK_STARTUP, 0);
2351 break;
2352 case SNK_STARTUP:
2353 /* XXX: callback into infrastructure */
2354 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB);
2355 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2356 port->message_id = 0;
 2357 port->rx_msgid = -1;
2358 port->explicit_contract = false;
2359 tcpm_set_state(port, SNK_DISCOVERY, 0);
2360 break;
2361 case SNK_DISCOVERY:
2362 if (port->vbus_present) {
2363 tcpm_set_current_limit(port,
2364 tcpm_get_current_limit(port),
2365 5000);
2366 tcpm_set_charge(port, true);
2367 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2368 break;
2369 }
2370 /*
2371 * For DRP, timeouts differ. Also, handling is supposed to be
2372 * different and much more complex (dead battery detection;
2373 * see USB power delivery specification, section 8.3.3.6.1.5.1).
2374 */
2375 tcpm_set_state(port, hard_reset_state(port),
2376 port->typec_caps.type == TYPEC_PORT_DRP ?
2377 PD_T_DB_DETECT : PD_T_NO_RESPONSE);
2378 break;
2379 case SNK_DISCOVERY_DEBOUNCE:
2380 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
2381 PD_T_CC_DEBOUNCE);
2382 break;
2383 case SNK_DISCOVERY_DEBOUNCE_DONE:
2384 if (!tcpm_port_is_disconnected(port) &&
2385 tcpm_port_is_sink(port) &&
2386 time_is_after_jiffies(port->delayed_runtime)) {
2387 tcpm_set_state(port, SNK_DISCOVERY,
2388 port->delayed_runtime - jiffies);
2389 break;
2390 }
2391 tcpm_set_state(port, unattached_state(port), 0);
2392 break;
2393 case SNK_WAIT_CAPABILITIES:
2394 ret = port->tcpc->set_pd_rx(port->tcpc, true);
2395 if (ret < 0) {
2396 tcpm_set_state(port, SNK_READY, 0);
2397 break;
2398 }
2399 /*
2400 * If VBUS has never been low, and we time out waiting
2401 * for source cap, try a soft reset first, in case we
2402 * were already in a stable contract before this boot.
2403 * Do this only once.
2404 */
2405 if (port->vbus_never_low) {
2406 port->vbus_never_low = false;
2407 tcpm_set_state(port, SOFT_RESET_SEND,
2408 PD_T_SINK_WAIT_CAP);
2409 } else {
2410 tcpm_set_state(port, hard_reset_state(port),
2411 PD_T_SINK_WAIT_CAP);
2412 }
2413 break;
2414 case SNK_NEGOTIATE_CAPABILITIES:
2415 port->pd_capable = true;
2416 port->hard_reset_count = 0;
2417 ret = tcpm_pd_send_request(port);
2418 if (ret < 0) {
2419 /* Let the Source send capabilities again. */
2420 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2421 } else {
2422 tcpm_set_state_cond(port, hard_reset_state(port),
2423 PD_T_SENDER_RESPONSE);
2424 }
2425 break;
2426 case SNK_TRANSITION_SINK:
2427 case SNK_TRANSITION_SINK_VBUS:
2428 tcpm_set_state(port, hard_reset_state(port),
2429 PD_T_PS_TRANSITION);
2430 break;
2431 case SNK_READY:
2432 port->try_snk_count = 0;
2433 port->explicit_contract = true;
2434 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
2435 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2436
2437 tcpm_typec_connect(port);
2438
2439 tcpm_check_send_discover(port);
2440 break;
2441
2442 /* Accessory states */
2443 case ACC_UNATTACHED:
2444 tcpm_acc_detach(port);
2445 tcpm_set_state(port, SRC_UNATTACHED, 0);
2446 break;
2447 case DEBUG_ACC_ATTACHED:
2448 case AUDIO_ACC_ATTACHED:
2449 ret = tcpm_acc_attach(port);
2450 if (ret < 0)
2451 tcpm_set_state(port, ACC_UNATTACHED, 0);
2452 break;
2453 case AUDIO_ACC_DEBOUNCE:
2454 tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
2455 break;
2456
2457 /* Hard_Reset states */
2458 case HARD_RESET_SEND:
2459 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
2460 tcpm_set_state(port, HARD_RESET_START, 0);
2461 break;
2462 case HARD_RESET_START:
2463 port->hard_reset_count++;
2464 port->tcpc->set_pd_rx(port->tcpc, false);
2465 tcpm_unregister_altmodes(port);
2466 port->send_discover = true;
2467 if (port->pwr_role == TYPEC_SOURCE)
2468 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
2469 PD_T_PS_HARD_RESET);
2470 else
2471 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
2472 break;
2473 case SRC_HARD_RESET_VBUS_OFF:
2474 tcpm_set_vconn(port, true);
2475 tcpm_set_vbus(port, false);
2476 tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
2477 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
2478 break;
2479 case SRC_HARD_RESET_VBUS_ON:
2480 tcpm_set_vbus(port, true);
2481 port->tcpc->set_pd_rx(port->tcpc, true);
2482 tcpm_set_attached_state(port, true);
2483 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
2484 break;
2485 case SNK_HARD_RESET_SINK_OFF:
2486 tcpm_set_vconn(port, false);
2487 tcpm_set_charge(port, false);
2488 tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
2489 /*
2490 * VBUS may or may not toggle, depending on the adapter.
2491 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
2492 * directly after timeout.
2493 */
2494 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
2495 break;
2496 case SNK_HARD_RESET_WAIT_VBUS:
2497 /* Assume we're disconnected if VBUS doesn't come back. */
2498 tcpm_set_state(port, SNK_UNATTACHED,
2499 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
2500 break;
2501 case SNK_HARD_RESET_SINK_ON:
2502 /* Note: There is no guarantee that VBUS is on in this state */
2503 /*
2504 * XXX:
2505 * The specification suggests that dual mode ports in sink
2506 * mode should transition to state PE_SRC_Transition_to_default.
2507 * See USB power delivery specification chapter 8.3.3.6.1.3.
 2508 * This would mean to
2509 * - turn off VCONN, reset power supply
2510 * - request hardware reset
2511 * - turn on VCONN
2512 * - Transition to state PE_Src_Startup
2513 * SNK only ports shall transition to state Snk_Startup
2514 * (see chapter 8.3.3.3.8).
 2515 * Similarly, dual-mode ports in source mode should transition
2516 * to PE_SNK_Transition_to_default.
2517 */
2518 tcpm_set_attached_state(port, true);
2519 tcpm_set_state(port, SNK_STARTUP, 0);
2520 break;
2521
2522 /* Soft_Reset states */
2523 case SOFT_RESET:
2524 port->message_id = 0;
 2525 port->rx_msgid = -1;
2526 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2527 if (port->pwr_role == TYPEC_SOURCE)
2528 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2529 else
2530 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2531 break;
2532 case SOFT_RESET_SEND:
2533 port->message_id = 0;
 2534 port->rx_msgid = -1;
2535 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
2536 tcpm_set_state_cond(port, hard_reset_state(port), 0);
2537 else
2538 tcpm_set_state_cond(port, hard_reset_state(port),
2539 PD_T_SENDER_RESPONSE);
2540 break;
2541
2542 /* DR_Swap states */
2543 case DR_SWAP_SEND:
2544 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
2545 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
2546 PD_T_SENDER_RESPONSE);
2547 break;
2548 case DR_SWAP_ACCEPT:
2549 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2550 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
2551 break;
2552 case DR_SWAP_SEND_TIMEOUT:
2553 tcpm_swap_complete(port, -ETIMEDOUT);
2554 tcpm_set_state(port, ready_state(port), 0);
2555 break;
2556 case DR_SWAP_CHANGE_DR:
2557 if (port->data_role == TYPEC_HOST) {
2558 tcpm_unregister_altmodes(port);
2559 tcpm_set_roles(port, true, port->pwr_role,
2560 TYPEC_DEVICE);
2561 } else {
2562 tcpm_set_roles(port, true, port->pwr_role,
2563 TYPEC_HOST);
2564 port->send_discover = true;
2565 }
2566 tcpm_swap_complete(port, 0);
2567 tcpm_set_state(port, ready_state(port), 0);
2568 break;
2569
2570 /* PR_Swap states */
2571 case PR_SWAP_ACCEPT:
2572 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2573 tcpm_set_state(port, PR_SWAP_START, 0);
2574 break;
2575 case PR_SWAP_SEND:
2576 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
2577 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
2578 PD_T_SENDER_RESPONSE);
2579 break;
2580 case PR_SWAP_SEND_TIMEOUT:
2581 tcpm_swap_complete(port, -ETIMEDOUT);
2582 tcpm_set_state(port, ready_state(port), 0);
2583 break;
2584 case PR_SWAP_START:
2585 if (port->pwr_role == TYPEC_SOURCE)
2586 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
2587 PD_T_SRC_TRANSITION);
2588 else
2589 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
2590 break;
2591 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
2592 tcpm_set_vbus(port, false);
2593 port->explicit_contract = false;
2594 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
2595 PD_T_PS_SOURCE_OFF);
2596 break;
2597 case PR_SWAP_SRC_SNK_SOURCE_OFF:
2598 tcpm_set_cc(port, TYPEC_CC_RD);
2599 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
2600 tcpm_set_state(port, ERROR_RECOVERY, 0);
2601 break;
2602 }
2603 tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
2604 break;
2605 case PR_SWAP_SRC_SNK_SINK_ON:
2606 tcpm_set_pwr_role(port, TYPEC_SINK);
2607 tcpm_swap_complete(port, 0);
2608 tcpm_set_state(port, SNK_STARTUP, 0);
2609 break;
2610 case PR_SWAP_SNK_SRC_SINK_OFF:
2611 tcpm_set_charge(port, false);
2612 tcpm_set_state(port, hard_reset_state(port),
2613 PD_T_PS_SOURCE_OFF);
2614 break;
2615 case PR_SWAP_SNK_SRC_SOURCE_ON:
2616 tcpm_set_cc(port, tcpm_rp_cc(port));
2617 tcpm_set_vbus(port, true);
2618 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2619 tcpm_set_pwr_role(port, TYPEC_SOURCE);
2620 tcpm_swap_complete(port, 0);
2621 tcpm_set_state(port, SRC_STARTUP, 0);
2622 break;
2623
2624 case VCONN_SWAP_ACCEPT:
2625 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2626 tcpm_set_state(port, VCONN_SWAP_START, 0);
2627 break;
2628 case VCONN_SWAP_SEND:
2629 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
2630 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
2631 PD_T_SENDER_RESPONSE);
2632 break;
2633 case VCONN_SWAP_SEND_TIMEOUT:
2634 tcpm_swap_complete(port, -ETIMEDOUT);
2635 tcpm_set_state(port, ready_state(port), 0);
2636 break;
2637 case VCONN_SWAP_START:
2638 if (port->vconn_role == TYPEC_SOURCE)
2639 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
2640 else
2641 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
2642 break;
2643 case VCONN_SWAP_WAIT_FOR_VCONN:
2644 tcpm_set_state(port, hard_reset_state(port),
2645 PD_T_VCONN_SOURCE_ON);
2646 break;
2647 case VCONN_SWAP_TURN_ON_VCONN:
2648 tcpm_set_vconn(port, true);
2649 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2650 tcpm_swap_complete(port, 0);
2651 tcpm_set_state(port, ready_state(port), 0);
2652 break;
2653 case VCONN_SWAP_TURN_OFF_VCONN:
2654 tcpm_set_vconn(port, false);
2655 tcpm_swap_complete(port, 0);
2656 tcpm_set_state(port, ready_state(port), 0);
2657 break;
2658
2659 case DR_SWAP_CANCEL:
2660 case PR_SWAP_CANCEL:
2661 case VCONN_SWAP_CANCEL:
2662 tcpm_swap_complete(port, port->swap_status);
2663 if (port->pwr_role == TYPEC_SOURCE)
2664 tcpm_set_state(port, SRC_READY, 0);
2665 else
2666 tcpm_set_state(port, SNK_READY, 0);
2667 break;
2668
2669 case BIST_RX:
2670 switch (BDO_MODE_MASK(port->bist_request)) {
2671 case BDO_MODE_CARRIER2:
2672 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
2673 break;
2674 default:
2675 break;
2676 }
2677 /* Always switch to unattached state */
2678 tcpm_set_state(port, unattached_state(port), 0);
2679 break;
2680 case ERROR_RECOVERY:
2681 tcpm_swap_complete(port, -EPROTO);
2682 tcpm_reset_port(port);
2683
2684 tcpm_set_cc(port, TYPEC_CC_OPEN);
2685 tcpm_set_state(port, ERROR_RECOVERY_WAIT_OFF,
2686 PD_T_ERROR_RECOVERY);
2687 break;
2688 case ERROR_RECOVERY_WAIT_OFF:
2689 tcpm_set_state(port,
2690 tcpm_default_state(port),
2691 port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
2692 break;
2693 default:
2694 WARN(1, "Unexpected port state %d\n", port->state);
2695 break;
2696 }
2697}
2698
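/*
 * State machine work handler: apply any pending delayed state change, then
 * keep running the state machine until the state stops changing or the next
 * change is delayed.
 */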
2699static void tcpm_state_machine_work(struct work_struct *work)
2700{
2701 struct tcpm_port *port = container_of(work, struct tcpm_port,
2702 state_machine.work);
2703 enum tcpm_state prev_state;
2704
2705 mutex_lock(&port->lock);
2706 port->state_machine_running = true;
2707
2708 if (port->queued_message && tcpm_send_queued_message(port))
2709 goto done;
2710
2711 /* If we were queued due to a delayed state change, update it now */
2712 if (port->delayed_state) {
2713 tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
2714 tcpm_states[port->state],
2715 tcpm_states[port->delayed_state], port->delay_ms);
2716 port->prev_state = port->state;
2717 port->state = port->delayed_state;
2718 port->delayed_state = INVALID_STATE;
2719 }
2720
2721 /*
2722 * Continue running as long as we have (non-delayed) state changes
2723 * to make.
2724 */
2725 do {
2726 prev_state = port->state;
2727 run_state_machine(port);
2728 if (port->queued_message)
2729 tcpm_send_queued_message(port);
2730 } while (port->state != prev_state && !port->delayed_state);
2731
2732done:
2733 port->state_machine_running = false;
2734 mutex_unlock(&port->lock);
2735}
2736
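/* Handle a CC status change from the TCPC; called with port->lock held */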
2737static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
2738 enum typec_cc_status cc2)
2739{
2740 enum typec_cc_status old_cc1, old_cc2;
2741 enum tcpm_state new_state;
2742
2743 old_cc1 = port->cc1;
2744 old_cc2 = port->cc2;
2745 port->cc1 = cc1;
2746 port->cc2 = cc2;
2747
2748 tcpm_log_force(port,
2749 "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
2750 old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
2751 port->polarity,
2752 tcpm_port_is_disconnected(port) ? "disconnected"
2753 : "connected");
2754
2755 switch (port->state) {
2756 case DRP_TOGGLING:
2757 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
2758 tcpm_port_is_source(port))
2759 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2760 else if (tcpm_port_is_sink(port))
2761 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2762 break;
2763 case SRC_UNATTACHED:
2764 case ACC_UNATTACHED:
2765 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
2766 tcpm_port_is_source(port))
2767 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2768 break;
2769 case SRC_ATTACH_WAIT:
2770 if (tcpm_port_is_disconnected(port) ||
2771 tcpm_port_is_audio_detached(port))
2772 tcpm_set_state(port, SRC_UNATTACHED, 0);
2773 else if (cc1 != old_cc1 || cc2 != old_cc2)
2774 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2775 break;
2776 case SRC_ATTACHED:
2777 if (tcpm_port_is_disconnected(port))
2778 tcpm_set_state(port, SRC_UNATTACHED, 0);
2779 break;
2780
2781 case SNK_UNATTACHED:
2782 if (tcpm_port_is_sink(port))
2783 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2784 break;
2785 case SNK_ATTACH_WAIT:
2786 if ((port->cc1 == TYPEC_CC_OPEN &&
2787 port->cc2 != TYPEC_CC_OPEN) ||
2788 (port->cc1 != TYPEC_CC_OPEN &&
2789 port->cc2 == TYPEC_CC_OPEN))
2790 new_state = SNK_DEBOUNCED;
2791 else if (tcpm_port_is_disconnected(port))
2792 new_state = SNK_UNATTACHED;
2793 else
2794 break;
2795 if (new_state != port->delayed_state)
2796 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2797 break;
2798 case SNK_DEBOUNCED:
2799 if (tcpm_port_is_disconnected(port))
2800 new_state = SNK_UNATTACHED;
2801 else if (port->vbus_present)
2802 new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
2803 else
2804 new_state = SNK_UNATTACHED;
2805 if (new_state != port->delayed_state)
2806 tcpm_set_state(port, SNK_DEBOUNCED, 0);
2807 break;
2808 case SNK_READY:
2809 if (tcpm_port_is_disconnected(port))
2810 tcpm_set_state(port, unattached_state(port), 0);
2811 else if (!port->pd_capable &&
2812 (cc1 != old_cc1 || cc2 != old_cc2))
2813 tcpm_set_current_limit(port,
2814 tcpm_get_current_limit(port),
2815 5000);
2816 break;
2817
2818 case AUDIO_ACC_ATTACHED:
2819 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
2820 tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
2821 break;
2822 case AUDIO_ACC_DEBOUNCE:
2823 if (tcpm_port_is_audio(port))
2824 tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
2825 break;
2826
2827 case DEBUG_ACC_ATTACHED:
2828 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
2829 tcpm_set_state(port, ACC_UNATTACHED, 0);
2830 break;
2831
2832 case SNK_TRY:
2833 /* Do nothing, waiting for timeout */
2834 break;
2835
2836 case SNK_DISCOVERY:
2837 /* CC line is unstable, wait for debounce */
2838 if (tcpm_port_is_disconnected(port))
2839 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
2840 break;
2841 case SNK_DISCOVERY_DEBOUNCE:
2842 break;
2843
2844 case SRC_TRYWAIT:
2845 /* Hand over to state machine if needed */
2846 if (!port->vbus_present && tcpm_port_is_source(port))
2847 new_state = SRC_ATTACHED;
2848 else
2849 new_state = SRC_TRYWAIT_UNATTACHED;
2850
2851 if (new_state != port->delayed_state)
2852 tcpm_set_state(port, SRC_TRYWAIT, 0);
2853 break;
2854 case SNK_TRY_WAIT:
2855 if (port->vbus_present && tcpm_port_is_sink(port)) {
2856 tcpm_set_state(port, SNK_ATTACHED, 0);
2857 break;
2858 }
2859 if (!tcpm_port_is_sink(port))
2860 new_state = SRC_TRYWAIT;
2861 else
2862 new_state = SRC_TRYWAIT_UNATTACHED;
2863
2864 if (new_state != port->delayed_state)
2865 tcpm_set_state(port, SNK_TRY_WAIT, 0);
2866 break;
2867
2868 case SRC_TRY:
2869 tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
2870 break;
2871 case SRC_TRY_DEBOUNCE:
2872 tcpm_set_state(port, SRC_TRY, 0);
2873 break;
2874 case SNK_TRYWAIT_DEBOUNCE:
2875 if (port->vbus_present) {
2876 tcpm_set_state(port, SNK_ATTACHED, 0);
2877 break;
2878 }
2879 if (tcpm_port_is_source(port)) {
2880 tcpm_set_state(port, SRC_ATTACHED, 0);
2881 break;
2882 }
2883 if (tcpm_port_is_disconnected(port) &&
2884 port->delayed_state != SNK_UNATTACHED)
2885 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
2886 break;
2887
2888 case PR_SWAP_SNK_SRC_SINK_OFF:
2889 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
2890 case PR_SWAP_SRC_SNK_SOURCE_OFF:
2891 /*
2892 * CC state change is expected here; we just turned off power.
2893 * Ignore it.
2894 */
2895 break;
2896
2897 default:
2898 if (tcpm_port_is_disconnected(port))
2899 tcpm_set_state(port, unattached_state(port), 0);
2900 break;
2901 }
2902}
2903
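/* Handle VBUS becoming present; called with port->lock held */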
2904static void _tcpm_pd_vbus_on(struct tcpm_port *port)
2905{
2906 enum tcpm_state new_state;
2907
2908 tcpm_log_force(port, "VBUS on");
2909 port->vbus_present = true;
2910 switch (port->state) {
2911 case SNK_TRANSITION_SINK_VBUS:
2912 tcpm_set_state(port, SNK_READY, 0);
2913 break;
2914 case SNK_DISCOVERY:
2915 tcpm_set_state(port, SNK_DISCOVERY, 0);
2916 break;
2917
2918 case SNK_DEBOUNCED:
2919 tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
2920 : SNK_ATTACHED,
2921 0);
2922 break;
2923 case SNK_HARD_RESET_WAIT_VBUS:
2924 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
2925 break;
2926 case SRC_ATTACHED:
2927 tcpm_set_state(port, SRC_STARTUP, 0);
2928 break;
2929 case SRC_HARD_RESET_VBUS_ON:
2930 tcpm_set_state(port, SRC_STARTUP, 0);
2931 break;
2932
2933 case SNK_TRY:
2934 /* Do nothing, waiting for timeout */
2935 break;
2936 case SRC_TRYWAIT:
2937 /* Hand over to state machine if needed */
2938 if (port->delayed_state != SRC_TRYWAIT_UNATTACHED)
2939 tcpm_set_state(port, SRC_TRYWAIT, 0);
2940 break;
2941 case SNK_TRY_WAIT:
2942 if (tcpm_port_is_sink(port)) {
2943 tcpm_set_state(port, SNK_ATTACHED, 0);
2944 break;
2945 }
2946 if (!tcpm_port_is_sink(port))
2947 new_state = SRC_TRYWAIT;
2948 else
2949 new_state = SRC_TRYWAIT_UNATTACHED;
2950
2951 if (new_state != port->delayed_state)
2952 tcpm_set_state(port, SNK_TRY_WAIT, 0);
2953 break;
2954 case SNK_TRYWAIT:
2955 tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
2956 break;
2957
2958 default:
2959 break;
2960 }
2961}
2962
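/* Handle VBUS going away; called with port->lock held */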
2963static void _tcpm_pd_vbus_off(struct tcpm_port *port)
2964{
2965 enum tcpm_state new_state;
2966
2967 tcpm_log_force(port, "VBUS off");
2968 port->vbus_present = false;
2969 port->vbus_never_low = false;
2970 switch (port->state) {
2971 case SNK_HARD_RESET_SINK_OFF:
2972 tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
2973 break;
2974 case SRC_HARD_RESET_VBUS_OFF:
2975 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, 0);
2976 break;
2977 case HARD_RESET_SEND:
2978 break;
2979
2980 case SNK_TRY:
2981 /* Do nothing, waiting for timeout */
2982 break;
2983 case SRC_TRYWAIT:
2984 /* Hand over to state machine if needed */
2985 if (tcpm_port_is_source(port))
2986 new_state = SRC_ATTACHED;
2987 else
2988 new_state = SRC_TRYWAIT_UNATTACHED;
2989 if (new_state != port->delayed_state)
2990 tcpm_set_state(port, SRC_TRYWAIT, 0);
2991 break;
2992 case SNK_TRY_WAIT:
2993 if (!tcpm_port_is_sink(port))
2994 new_state = SRC_TRYWAIT;
2995 else
2996 new_state = SRC_TRYWAIT_UNATTACHED;
2997
2998 if (new_state != port->delayed_state)
2999 tcpm_set_state(port, SNK_TRY_WAIT, 0);
3000 break;
3001 case SNK_TRYWAIT_VBUS:
3002 tcpm_set_state(port, SNK_TRYWAIT, 0);
3003 break;
3004
3005 case SNK_ATTACH_WAIT:
3006 tcpm_set_state(port, SNK_UNATTACHED, 0);
3007 break;
3008
3009 case SNK_NEGOTIATE_CAPABILITIES:
3010 break;
3011
3012 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
3013 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
3014 break;
3015
3016 case PR_SWAP_SNK_SRC_SINK_OFF:
3017 /* Do nothing, expected */
3018 break;
3019
3020 case ERROR_RECOVERY_WAIT_OFF:
3021 tcpm_set_state(port,
3022 port->pwr_role == TYPEC_SOURCE ?
3023 SRC_UNATTACHED : SNK_UNATTACHED,
3024 0);
3025 break;
3026
3027 default:
3028 if (port->pwr_role == TYPEC_SINK &&
3029 port->attached)
3030 tcpm_set_state(port, SNK_UNATTACHED, 0);
3031 break;
3032 }
3033}
3034
3035static void _tcpm_pd_hard_reset(struct tcpm_port *port)
3036{
3037 tcpm_log_force(port, "Received hard reset");
3038 /*
3039 * If we keep receiving hard reset requests, executing the hard reset
3040 * must have failed. Revert to error recovery if that happens.
3041 */
3042 tcpm_set_state(port,
3043 port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
3044 HARD_RESET_START : ERROR_RECOVERY,
3045 0);
3046}
3047
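/*
 * Event work handler: drain the events queued by the low-level driver
 * (hard reset, VBUS change, CC change) and feed them to the state machine
 * under port->lock.
 */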
3048static void tcpm_pd_event_handler(struct work_struct *work)
3049{
3050 struct tcpm_port *port = container_of(work, struct tcpm_port,
3051 event_work);
3052 u32 events;
3053
3054 mutex_lock(&port->lock);
3055
3056 spin_lock(&port->pd_event_lock);
3057 while (port->pd_events) {
3058 events = port->pd_events;
3059 port->pd_events = 0;
3060 spin_unlock(&port->pd_event_lock);
3061 if (events & TCPM_RESET_EVENT)
3062 _tcpm_pd_hard_reset(port);
3063 if (events & TCPM_VBUS_EVENT) {
3064 bool vbus;
3065
3066 vbus = port->tcpc->get_vbus(port->tcpc);
3067 if (vbus)
3068 _tcpm_pd_vbus_on(port);
3069 else
3070 _tcpm_pd_vbus_off(port);
3071 }
3072 if (events & TCPM_CC_EVENT) {
3073 enum typec_cc_status cc1, cc2;
3074
3075 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
3076 _tcpm_cc_change(port, cc1, cc2);
3077 }
3078 spin_lock(&port->pd_event_lock);
3079 }
3080 spin_unlock(&port->pd_event_lock);
3081 mutex_unlock(&port->lock);
3082}
3083
3084void tcpm_cc_change(struct tcpm_port *port)
3085{
3086 spin_lock(&port->pd_event_lock);
3087 port->pd_events |= TCPM_CC_EVENT;
3088 spin_unlock(&port->pd_event_lock);
3089 queue_work(port->wq, &port->event_work);
3090}
3091EXPORT_SYMBOL_GPL(tcpm_cc_change);
3092
3093void tcpm_vbus_change(struct tcpm_port *port)
3094{
3095 spin_lock(&port->pd_event_lock);
3096 port->pd_events |= TCPM_VBUS_EVENT;
3097 spin_unlock(&port->pd_event_lock);
3098 queue_work(port->wq, &port->event_work);
3099}
3100EXPORT_SYMBOL_GPL(tcpm_vbus_change);
3101
3102void tcpm_pd_hard_reset(struct tcpm_port *port)
3103{
3104 spin_lock(&port->pd_event_lock);
3105 port->pd_events = TCPM_RESET_EVENT;
3106 spin_unlock(&port->pd_event_lock);
3107 queue_work(port->wq, &port->event_work);
3108}
3109EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
3110
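/* typec class callback: request a data role swap and wait for completion */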
3111static int tcpm_dr_set(const struct typec_capability *cap,
3112 enum typec_data_role data)
3113{
3114 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3115 int ret;
3116
3117 mutex_lock(&port->swap_lock);
3118 mutex_lock(&port->lock);
3119
3120 if (port->typec_caps.type != TYPEC_PORT_DRP || !port->pd_capable) {
3121 ret = -EINVAL;
3122 goto port_unlock;
3123 }
3124 if (port->state != SRC_READY && port->state != SNK_READY) {
3125 ret = -EAGAIN;
3126 goto port_unlock;
3127 }
3128
3129 if (port->data_role == data) {
3130 ret = 0;
3131 goto port_unlock;
3132 }
3133
3134 /*
3135 * XXX
3136 * 6.3.9: If an alternate mode is active, a request to swap
3137 * alternate modes shall trigger a port reset.
3138 * Reject data role swap request in this case.
3139 */
3140
3141 port->swap_status = 0;
3142 port->swap_pending = true;
3143 reinit_completion(&port->swap_complete);
3144 tcpm_set_state(port, DR_SWAP_SEND, 0);
3145 mutex_unlock(&port->lock);
3146
3147 wait_for_completion(&port->swap_complete);
3148
3149 ret = port->swap_status;
3150 goto swap_unlock;
3151
3152port_unlock:
3153 mutex_unlock(&port->lock);
3154swap_unlock:
3155 mutex_unlock(&port->swap_lock);
3156 return ret;
3157}
3158
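/* typec class callback: request a power role swap and wait for completion */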
3159static int tcpm_pr_set(const struct typec_capability *cap,
3160 enum typec_role role)
3161{
3162 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3163 int ret;
3164
3165 mutex_lock(&port->swap_lock);
3166 mutex_lock(&port->lock);
3167
3168 if (port->typec_caps.type != TYPEC_PORT_DRP) {
3169 ret = -EINVAL;
3170 goto port_unlock;
3171 }
3172 if (port->state != SRC_READY && port->state != SNK_READY) {
3173 ret = -EAGAIN;
3174 goto port_unlock;
3175 }
3176
3177 if (role == port->pwr_role) {
3178 ret = 0;
3179 goto port_unlock;
3180 }
3181
3182 if (!port->pd_capable) {
3183 /*
3184 * If the partner is not PD capable, reset the port to
3185 * trigger a role change. This can only work if a preferred
3186 * role is configured, and if it matches the requested role.
3187 */
3188 if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
3189 port->try_role == port->pwr_role) {
3190 ret = -EINVAL;
3191 goto port_unlock;
3192 }
3193 tcpm_set_state(port, HARD_RESET_SEND, 0);
3194 ret = 0;
3195 goto port_unlock;
3196 }
3197
3198 port->swap_status = 0;
3199 port->swap_pending = true;
3200 reinit_completion(&port->swap_complete);
3201 tcpm_set_state(port, PR_SWAP_SEND, 0);
3202 mutex_unlock(&port->lock);
3203
3204 wait_for_completion(&port->swap_complete);
3205
3206 ret = port->swap_status;
3207 goto swap_unlock;
3208
3209port_unlock:
3210 mutex_unlock(&port->lock);
3211swap_unlock:
3212 mutex_unlock(&port->swap_lock);
3213 return ret;
3214}
3215
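/* typec class callback: request a VCONN swap and wait for completion */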
3216static int tcpm_vconn_set(const struct typec_capability *cap,
3217 enum typec_role role)
3218{
3219 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3220 int ret;
3221
3222 mutex_lock(&port->swap_lock);
3223 mutex_lock(&port->lock);
3224
3225 if (port->state != SRC_READY && port->state != SNK_READY) {
3226 ret = -EAGAIN;
3227 goto port_unlock;
3228 }
3229
3230 if (role == port->vconn_role) {
3231 ret = 0;
3232 goto port_unlock;
3233 }
3234
3235 port->swap_status = 0;
3236 port->swap_pending = true;
3237 reinit_completion(&port->swap_complete);
3238 tcpm_set_state(port, VCONN_SWAP_SEND, 0);
3239 mutex_unlock(&port->lock);
3240
3241 wait_for_completion(&port->swap_complete);
3242
3243 ret = port->swap_status;
3244 goto swap_unlock;
3245
3246port_unlock:
3247 mutex_unlock(&port->lock);
3248swap_unlock:
3249 mutex_unlock(&port->swap_lock);
3250 return ret;
3251}
3252
3253static int tcpm_try_role(const struct typec_capability *cap, int role)
3254{
3255 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3256 struct tcpc_dev *tcpc = port->tcpc;
3257 int ret = 0;
3258
3259 mutex_lock(&port->lock);
3260 if (tcpc->try_role)
3261 ret = tcpc->try_role(tcpc, role);
3262 if (!ret && !tcpc->config->try_role_hw)
3263 port->try_role = role;
3264 port->try_src_count = 0;
3265 port->try_snk_count = 0;
3266 mutex_unlock(&port->lock);
3267
3268 return ret;
3269}
3270
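/* Initialize the TCPC and start the port state machine from a clean state */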
3271static void tcpm_init(struct tcpm_port *port)
3272{
3273 enum typec_cc_status cc1, cc2;
3274
3275 port->tcpc->init(port->tcpc);
3276
3277 tcpm_reset_port(port);
3278
3279 /*
3280 * XXX
3281 * Should possibly wait for VBUS to settle if it was enabled locally
3282 * since tcpm_reset_port() will disable VBUS.
3283 */
3284 port->vbus_present = port->tcpc->get_vbus(port->tcpc);
3285 if (port->vbus_present)
3286 port->vbus_never_low = true;
3287
3288 tcpm_set_state(port, tcpm_default_state(port), 0);
3289
3290 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
3291 _tcpm_cc_change(port, cc1, cc2);
3292
3293 /*
3294 * Some adapters need a clean slate at startup, and won't recover
 3295 * otherwise. So do not try to be fancy; just force a clean disconnect.
3296 */
3297 tcpm_set_state(port, ERROR_RECOVERY, 0);
3298}
3299
3300void tcpm_tcpc_reset(struct tcpm_port *port)
3301{
3302 mutex_lock(&port->lock);
3303 /* XXX: Maintain PD connection if possible? */
3304 tcpm_init(port);
3305 mutex_unlock(&port->lock);
3306}
3307EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
3308
3309static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
3310 unsigned int nr_pdo)
3311{
3312 unsigned int i;
3313
3314 if (nr_pdo > PDO_MAX_OBJECTS)
3315 nr_pdo = PDO_MAX_OBJECTS;
3316
3317 for (i = 0; i < nr_pdo; i++)
3318 dest_pdo[i] = src_pdo[i];
3319
3320 return nr_pdo;
3321}
3322
3323void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
3324 unsigned int nr_pdo)
3325{
3326 mutex_lock(&port->lock);
3327 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, pdo, nr_pdo);
3328 switch (port->state) {
3329 case SRC_UNATTACHED:
3330 case SRC_ATTACH_WAIT:
3331 case SRC_TRYWAIT:
3332 tcpm_set_cc(port, tcpm_rp_cc(port));
3333 break;
3334 case SRC_SEND_CAPABILITIES:
3335 case SRC_NEGOTIATE_CAPABILITIES:
3336 case SRC_READY:
3337 case SRC_WAIT_NEW_CAPABILITIES:
3338 tcpm_set_cc(port, tcpm_rp_cc(port));
3339 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
3340 break;
3341 default:
3342 break;
3343 }
3344 mutex_unlock(&port->lock);
3345}
3346EXPORT_SYMBOL_GPL(tcpm_update_source_capabilities);
3347
3348void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
3349 unsigned int nr_pdo,
3350 unsigned int max_snk_mv,
3351 unsigned int max_snk_ma,
3352 unsigned int max_snk_mw,
3353 unsigned int operating_snk_mw)
3354{
3355 mutex_lock(&port->lock);
3356 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo);
3357 port->max_snk_mv = max_snk_mv;
3358 port->max_snk_ma = max_snk_ma;
3359 port->max_snk_mw = max_snk_mw;
3360 port->operating_snk_mw = operating_snk_mw;
3361
3362 switch (port->state) {
3363 case SNK_NEGOTIATE_CAPABILITIES:
3364 case SNK_READY:
3365 case SNK_TRANSITION_SINK:
3366 case SNK_TRANSITION_SINK_VBUS:
3367 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
3368 break;
3369 default:
3370 break;
3371 }
3372 mutex_unlock(&port->lock);
3373}
3374EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
3375
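/*
 * Register a port with the Type-C class: validate the low-level driver
 * callbacks, set up the work queues, copy the default capabilities, and
 * register any configured alternate modes.
 */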
3376struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
3377{
3378 struct tcpm_port *port;
3379 int i, err;
3380
3381 if (!dev || !tcpc || !tcpc->config ||
3382 !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
3383 !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
3384 !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
3385 return ERR_PTR(-EINVAL);
3386
3387 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
3388 if (!port)
3389 return ERR_PTR(-ENOMEM);
3390
3391 port->dev = dev;
3392 port->tcpc = tcpc;
3393
3394 mutex_init(&port->lock);
3395 mutex_init(&port->swap_lock);
3396
3397 port->wq = create_singlethread_workqueue(dev_name(dev));
3398 if (!port->wq)
3399 return ERR_PTR(-ENOMEM);
3400 INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work);
3401 INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work);
3402 INIT_WORK(&port->event_work, tcpm_pd_event_handler);
3403
3404 spin_lock_init(&port->pd_event_lock);
3405
3406 init_completion(&port->tx_complete);
3407 init_completion(&port->swap_complete);
3408
3409 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcpc->config->src_pdo,
3410 tcpc->config->nr_src_pdo);
3411 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
3412 tcpc->config->nr_snk_pdo);
3413
3414 port->max_snk_mv = tcpc->config->max_snk_mv;
3415 port->max_snk_ma = tcpc->config->max_snk_ma;
3416 port->max_snk_mw = tcpc->config->max_snk_mw;
3417 port->operating_snk_mw = tcpc->config->operating_snk_mw;
3418 if (!tcpc->config->try_role_hw)
3419 port->try_role = tcpc->config->default_role;
3420 else
3421 port->try_role = TYPEC_NO_PREFERRED_ROLE;
3422
3423 port->typec_caps.prefer_role = tcpc->config->default_role;
3424 port->typec_caps.type = tcpc->config->type;
3425 port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
3426 port->typec_caps.pd_revision = 0x0200; /* USB-PD spec release 2.0 */
3427 port->typec_caps.dr_set = tcpm_dr_set;
3428 port->typec_caps.pr_set = tcpm_pr_set;
3429 port->typec_caps.vconn_set = tcpm_vconn_set;
3430 port->typec_caps.try_role = tcpm_try_role;
3431
3432 port->partner_desc.identity = &port->partner_ident;
3433
3434 /*
3435 * TODO:
3436 * - alt_modes, set_alt_mode
3437 * - {debug,audio}_accessory
3438 */
3439
3440 port->typec_port = typec_register_port(port->dev, &port->typec_caps);
3441 if (!port->typec_port) {
3442 err = -ENOMEM;
3443 goto out_destroy_wq;
3444 }
3445
3446 if (tcpc->config->alt_modes) {
3447 struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;
3448
3449 i = 0;
3450 while (paltmode->svid && i < ARRAY_SIZE(port->port_altmode)) {
3451 port->port_altmode[i] =
3452 typec_port_register_altmode(port->typec_port,
3453 paltmode);
3454 if (!port->port_altmode[i]) {
3455 tcpm_log(port,
3456 "%s: failed to register port alternate mode 0x%x",
3457 dev_name(dev), paltmode->svid);
3458 break;
3459 }
3460 i++;
3461 paltmode++;
3462 }
3463 }
3464
3465 tcpm_debugfs_init(port);
3466 mutex_lock(&port->lock);
3467 tcpm_init(port);
3468 mutex_unlock(&port->lock);
3469
3470 tcpm_log(port, "%s: registered", dev_name(dev));
3471 return port;
3472
3473out_destroy_wq:
3474 destroy_workqueue(port->wq);
3475 return ERR_PTR(err);
3476}
3477EXPORT_SYMBOL_GPL(tcpm_register_port);
3478
3479void tcpm_unregister_port(struct tcpm_port *port)
3480{
3481 int i;
3482
3483 for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
3484 typec_unregister_altmode(port->port_altmode[i]);
3485 typec_unregister_port(port->typec_port);
3486 tcpm_debugfs_exit(port);
3487 destroy_workqueue(port->wq);
3488}
3489EXPORT_SYMBOL_GPL(tcpm_unregister_port);
3490
3491MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
3492MODULE_DESCRIPTION("USB Type-C Port Manager");
3493MODULE_LICENSE("GPL");