staging: typec: tcpm: Report role swap complete after entering READY state
[linux-2.6-block.git] / drivers / staging / typec / tcpm.c
1/*
2 * Copyright 2015-2017 Google, Inc
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * USB Power Delivery protocol stack.
15 */
16
17#include <linux/completion.h>
18#include <linux/debugfs.h>
19#include <linux/device.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/mutex.h>
23#include <linux/proc_fs.h>
24#include <linux/sched/clock.h>
25#include <linux/seq_file.h>
26#include <linux/slab.h>
27#include <linux/spinlock.h>
28#include <linux/usb/typec.h>
29#include <linux/workqueue.h>
30
31#include "pd.h"
32#include "pd_vdo.h"
33#include "pd_bdo.h"
34#include "tcpm.h"
35
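/*
 * All state machine states are listed once in FOREACH_STATE() and expanded
 * twice below: once to generate the tcpm_state enum and once to generate
 * the matching state name strings used for logging.
 */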
36#define FOREACH_STATE(S) \
37 S(INVALID_STATE), \
38 S(DRP_TOGGLING), \
39 S(SRC_UNATTACHED), \
40 S(SRC_ATTACH_WAIT), \
41 S(SRC_ATTACHED), \
42 S(SRC_STARTUP), \
43 S(SRC_SEND_CAPABILITIES), \
44 S(SRC_NEGOTIATE_CAPABILITIES), \
45 S(SRC_TRANSITION_SUPPLY), \
46 S(SRC_READY), \
47 S(SRC_WAIT_NEW_CAPABILITIES), \
48 \
49 S(SNK_UNATTACHED), \
50 S(SNK_ATTACH_WAIT), \
51 S(SNK_DEBOUNCED), \
52 S(SNK_ATTACHED), \
53 S(SNK_STARTUP), \
54 S(SNK_DISCOVERY), \
55 S(SNK_DISCOVERY_DEBOUNCE), \
56 S(SNK_DISCOVERY_DEBOUNCE_DONE), \
57 S(SNK_WAIT_CAPABILITIES), \
58 S(SNK_NEGOTIATE_CAPABILITIES), \
59 S(SNK_TRANSITION_SINK), \
60 S(SNK_TRANSITION_SINK_VBUS), \
61 S(SNK_READY), \
62 \
63 S(ACC_UNATTACHED), \
64 S(DEBUG_ACC_ATTACHED), \
65 S(AUDIO_ACC_ATTACHED), \
66 S(AUDIO_ACC_DEBOUNCE), \
67 \
68 S(HARD_RESET_SEND), \
69 S(HARD_RESET_START), \
70 S(SRC_HARD_RESET_VBUS_OFF), \
71 S(SRC_HARD_RESET_VBUS_ON), \
72 S(SNK_HARD_RESET_SINK_OFF), \
73 S(SNK_HARD_RESET_WAIT_VBUS), \
74 S(SNK_HARD_RESET_SINK_ON), \
75 \
76 S(SOFT_RESET), \
77 S(SOFT_RESET_SEND), \
78 \
79 S(DR_SWAP_ACCEPT), \
80 S(DR_SWAP_SEND), \
81 S(DR_SWAP_SEND_TIMEOUT), \
82 S(DR_SWAP_CANCEL), \
83 S(DR_SWAP_CHANGE_DR), \
84 \
85 S(PR_SWAP_ACCEPT), \
86 S(PR_SWAP_SEND), \
87 S(PR_SWAP_SEND_TIMEOUT), \
88 S(PR_SWAP_CANCEL), \
89 S(PR_SWAP_START), \
90 S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \
91 S(PR_SWAP_SRC_SNK_SOURCE_OFF), \
92 S(PR_SWAP_SRC_SNK_SINK_ON), \
93 S(PR_SWAP_SNK_SRC_SINK_OFF), \
94 S(PR_SWAP_SNK_SRC_SOURCE_ON), \
95 \
96 S(VCONN_SWAP_ACCEPT), \
97 S(VCONN_SWAP_SEND), \
98 S(VCONN_SWAP_SEND_TIMEOUT), \
99 S(VCONN_SWAP_CANCEL), \
100 S(VCONN_SWAP_START), \
101 S(VCONN_SWAP_WAIT_FOR_VCONN), \
102 S(VCONN_SWAP_TURN_ON_VCONN), \
103 S(VCONN_SWAP_TURN_OFF_VCONN), \
104 \
105 S(SNK_TRY), \
106 S(SNK_TRY_WAIT), \
107 S(SRC_TRYWAIT), \
108 S(SRC_TRYWAIT_UNATTACHED), \
109 \
110 S(SRC_TRY), \
111 S(SRC_TRY_DEBOUNCE), \
112 S(SNK_TRYWAIT), \
113 S(SNK_TRYWAIT_DEBOUNCE), \
114 S(SNK_TRYWAIT_VBUS), \
115 S(BIST_RX), \
116 \
117 S(ERROR_RECOVERY), \
118 S(ERROR_RECOVERY_WAIT_OFF)
119
120#define GENERATE_ENUM(e) e
121#define GENERATE_STRING(s) #s
122
123enum tcpm_state {
124 FOREACH_STATE(GENERATE_ENUM)
125};
126
127static const char * const tcpm_states[] = {
128 FOREACH_STATE(GENERATE_STRING)
129};
130
131enum vdm_states {
132 VDM_STATE_ERR_BUSY = -3,
133 VDM_STATE_ERR_SEND = -2,
134 VDM_STATE_ERR_TMOUT = -1,
135 VDM_STATE_DONE = 0,
136 /* Anything >0 represents an active state */
137 VDM_STATE_READY = 1,
138 VDM_STATE_BUSY = 2,
139 VDM_STATE_WAIT_RSP_BUSY = 3,
140};
141
142enum pd_msg_request {
143 PD_MSG_NONE = 0,
144 PD_MSG_CTRL_REJECT,
145 PD_MSG_CTRL_WAIT,
146 PD_MSG_DATA_SINK_CAP,
147 PD_MSG_DATA_SOURCE_CAP,
148};
149
150/* Events from low level driver */
151
152#define TCPM_CC_EVENT BIT(0)
153#define TCPM_VBUS_EVENT BIT(1)
154#define TCPM_RESET_EVENT BIT(2)
155
156#define LOG_BUFFER_ENTRIES 1024
157#define LOG_BUFFER_ENTRY_SIZE 128
158
159/* Alternate mode support */
160
161#define SVID_DISCOVERY_MAX 16
162
163struct pd_mode_data {
164 int svid_index; /* current SVID index */
165 int nsvids;
166 u16 svids[SVID_DISCOVERY_MAX];
167 int altmodes; /* number of alternate modes */
168 struct typec_altmode_desc altmode_desc[SVID_DISCOVERY_MAX];
169};
170
171struct tcpm_port {
172 struct device *dev;
173
174 struct mutex lock; /* tcpm state machine lock */
175 struct workqueue_struct *wq;
176
177 struct typec_capability typec_caps;
178 struct typec_port *typec_port;
179
180 struct tcpc_dev *tcpc;
181
182 enum typec_role vconn_role;
183 enum typec_role pwr_role;
184 enum typec_data_role data_role;
185 enum typec_pwr_opmode pwr_opmode;
186
187 struct usb_pd_identity partner_ident;
188 struct typec_partner_desc partner_desc;
189 struct typec_partner *partner;
190
191 enum typec_cc_status cc_req;
192
193 enum typec_cc_status cc1;
194 enum typec_cc_status cc2;
195 enum typec_cc_polarity polarity;
196
197 bool attached;
198 bool connected;
199 bool vbus_present;
200 bool vbus_never_low;
201 bool vbus_source;
202 bool vbus_charge;
203
204 bool send_discover;
205 bool op_vsafe5v;
206
207 int try_role;
208 int try_snk_count;
209 int try_src_count;
210
211 enum pd_msg_request queued_message;
212
213 enum tcpm_state enter_state;
214 enum tcpm_state prev_state;
215 enum tcpm_state state;
216 enum tcpm_state delayed_state;
217 unsigned long delayed_runtime;
218 unsigned long delay_ms;
219
220 spinlock_t pd_event_lock;
221 u32 pd_events;
222
223 struct work_struct event_work;
224 struct delayed_work state_machine;
225 struct delayed_work vdm_state_machine;
226 bool state_machine_running;
227
228 struct completion tx_complete;
229 enum tcpm_transmit_status tx_status;
230
231 struct mutex swap_lock; /* swap command lock */
232 bool swap_pending;
233 struct completion swap_complete;
234 int swap_status;
235
236 unsigned int message_id;
237 unsigned int caps_count;
238 unsigned int hard_reset_count;
239 bool pd_capable;
240 bool explicit_contract;
 241 unsigned int rx_msgid;
 242
243 /* Partner capabilities/requests */
244 u32 sink_request;
245 u32 source_caps[PDO_MAX_OBJECTS];
246 unsigned int nr_source_caps;
247 u32 sink_caps[PDO_MAX_OBJECTS];
248 unsigned int nr_sink_caps;
249
250 /* Local capabilities */
251 u32 src_pdo[PDO_MAX_OBJECTS];
252 unsigned int nr_src_pdo;
253 u32 snk_pdo[PDO_MAX_OBJECTS];
254 unsigned int nr_snk_pdo;
255 u32 snk_vdo[VDO_MAX_OBJECTS];
256 unsigned int nr_snk_vdo;
257
258 unsigned int max_snk_mv;
259 unsigned int max_snk_ma;
260 unsigned int max_snk_mw;
261 unsigned int operating_snk_mw;
262
263 /* Requested current / voltage */
264 u32 current_limit;
265 u32 supply_voltage;
266
267 u32 bist_request;
268
269 /* PD state for Vendor Defined Messages */
270 enum vdm_states vdm_state;
271 u32 vdm_retries;
272 /* next Vendor Defined Message to send */
273 u32 vdo_data[VDO_MAX_SIZE];
274 u8 vdo_count;
275 /* VDO to retry if UFP responder replied busy */
276 u32 vdo_retry;
277
278 /* Alternate mode data */
279
280 struct pd_mode_data mode_data;
281 struct typec_altmode *partner_altmode[SVID_DISCOVERY_MAX];
282 struct typec_altmode *port_altmode[SVID_DISCOVERY_MAX];
283
284#ifdef CONFIG_DEBUG_FS
285 struct dentry *dentry;
286 struct mutex logbuffer_lock; /* log buffer access lock */
287 int logbuffer_head;
288 int logbuffer_tail;
289 u8 *logbuffer[LOG_BUFFER_ENTRIES];
290#endif
291};
292
293struct pd_rx_event {
294 struct work_struct work;
295 struct tcpm_port *port;
296 struct pd_message msg;
297};
298
299#define tcpm_cc_is_sink(cc) \
300 ((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
301 (cc) == TYPEC_CC_RP_3_0)
302
303#define tcpm_port_is_sink(port) \
304 ((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \
305 (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1)))
306
307#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
308#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
309#define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
310
311#define tcpm_port_is_source(port) \
312 ((tcpm_cc_is_source((port)->cc1) && \
313 !tcpm_cc_is_source((port)->cc2)) || \
314 (tcpm_cc_is_source((port)->cc2) && \
315 !tcpm_cc_is_source((port)->cc1)))
316
317#define tcpm_port_is_debug(port) \
318 (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
319
320#define tcpm_port_is_audio(port) \
321 (tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
322
323#define tcpm_port_is_audio_detached(port) \
324 ((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
325 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
326
327#define tcpm_try_snk(port) \
328 ((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK)
329
330#define tcpm_try_src(port) \
331 ((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE)
332
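/* Initial unattached state, based on the preferred role or the TCPC default */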
333static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
334{
335 if (port->try_role == TYPEC_SINK)
336 return SNK_UNATTACHED;
337 else if (port->try_role == TYPEC_SOURCE)
338 return SRC_UNATTACHED;
339 else if (port->tcpc->config->default_role == TYPEC_SINK)
340 return SNK_UNATTACHED;
341 return SRC_UNATTACHED;
342}
343
344static inline
345struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap)
346{
347 return container_of(cap, struct tcpm_port, typec_caps);
348}
349
350static bool tcpm_port_is_disconnected(struct tcpm_port *port)
351{
352 return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
353 port->cc2 == TYPEC_CC_OPEN) ||
354 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
355 port->cc1 == TYPEC_CC_OPEN) ||
356 (port->polarity == TYPEC_POLARITY_CC2 &&
357 port->cc2 == TYPEC_CC_OPEN)));
358}
359
360/*
361 * Logging
362 */
363
364#ifdef CONFIG_DEBUG_FS
365
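/*
 * Log messages are stored in a ring buffer of lazily allocated entries and
 * exposed through a debugfs file; when the buffer is full, the most recent
 * entry is replaced with an "overflow" marker.
 */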
366static bool tcpm_log_full(struct tcpm_port *port)
367{
368 return port->logbuffer_tail ==
369 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
370}
371
372static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
373{
374 char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
375 u64 ts_nsec = local_clock();
376 unsigned long rem_nsec;
377
378 if (!port->logbuffer[port->logbuffer_head]) {
379 port->logbuffer[port->logbuffer_head] =
380 kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
381 if (!port->logbuffer[port->logbuffer_head])
382 return;
383 }
384
385 vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
386
387 mutex_lock(&port->logbuffer_lock);
388
389 if (tcpm_log_full(port)) {
390 port->logbuffer_head = max(port->logbuffer_head - 1, 0);
391 strcpy(tmpbuffer, "overflow");
392 }
393
394 if (port->logbuffer_head < 0 ||
395 port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
396 dev_warn(port->dev,
397 "Bad log buffer index %d\n", port->logbuffer_head);
398 goto abort;
399 }
400
401 if (!port->logbuffer[port->logbuffer_head]) {
402 dev_warn(port->dev,
403 "Log buffer index %d is NULL\n", port->logbuffer_head);
404 goto abort;
405 }
406
407 rem_nsec = do_div(ts_nsec, 1000000000);
408 scnprintf(port->logbuffer[port->logbuffer_head],
409 LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
410 (unsigned long)ts_nsec, rem_nsec / 1000,
411 tmpbuffer);
412 port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
413
414abort:
415 mutex_unlock(&port->logbuffer_lock);
416}
417
418static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
419{
420 va_list args;
421
422 /* Do not log while disconnected and unattached */
423 if (tcpm_port_is_disconnected(port) &&
424 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
425 port->state == DRP_TOGGLING))
426 return;
427
428 va_start(args, fmt);
429 _tcpm_log(port, fmt, args);
430 va_end(args);
431}
432
433static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
434{
435 va_list args;
436
437 va_start(args, fmt);
438 _tcpm_log(port, fmt, args);
439 va_end(args);
440}
441
442static void tcpm_log_source_caps(struct tcpm_port *port)
443{
444 int i;
445
446 for (i = 0; i < port->nr_source_caps; i++) {
447 u32 pdo = port->source_caps[i];
448 enum pd_pdo_type type = pdo_type(pdo);
449 char msg[64];
450
451 switch (type) {
452 case PDO_TYPE_FIXED:
453 scnprintf(msg, sizeof(msg),
454 "%u mV, %u mA [%s%s%s%s%s%s]",
455 pdo_fixed_voltage(pdo),
456 pdo_max_current(pdo),
457 (pdo & PDO_FIXED_DUAL_ROLE) ?
458 "R" : "",
459 (pdo & PDO_FIXED_SUSPEND) ?
460 "S" : "",
461 (pdo & PDO_FIXED_HIGHER_CAP) ?
462 "H" : "",
463 (pdo & PDO_FIXED_USB_COMM) ?
464 "U" : "",
465 (pdo & PDO_FIXED_DATA_SWAP) ?
466 "D" : "",
467 (pdo & PDO_FIXED_EXTPOWER) ?
468 "E" : "");
469 break;
470 case PDO_TYPE_VAR:
471 scnprintf(msg, sizeof(msg),
472 "%u-%u mV, %u mA",
473 pdo_min_voltage(pdo),
474 pdo_max_voltage(pdo),
475 pdo_max_current(pdo));
476 break;
477 case PDO_TYPE_BATT:
478 scnprintf(msg, sizeof(msg),
479 "%u-%u mV, %u mW",
480 pdo_min_voltage(pdo),
481 pdo_max_voltage(pdo),
482 pdo_max_power(pdo));
483 break;
484 default:
485 strcpy(msg, "undefined");
486 break;
487 }
488 tcpm_log(port, " PDO %d: type %d, %s",
489 i, type, msg);
490 }
491}
492
493static int tcpm_seq_show(struct seq_file *s, void *v)
494{
495 struct tcpm_port *port = (struct tcpm_port *)s->private;
496 int tail;
497
498 mutex_lock(&port->logbuffer_lock);
499 tail = port->logbuffer_tail;
500 while (tail != port->logbuffer_head) {
501 seq_printf(s, "%s\n", port->logbuffer[tail]);
502 tail = (tail + 1) % LOG_BUFFER_ENTRIES;
503 }
504 if (!seq_has_overflowed(s))
505 port->logbuffer_tail = tail;
506 mutex_unlock(&port->logbuffer_lock);
507
508 return 0;
509}
510
511static int tcpm_debug_open(struct inode *inode, struct file *file)
512{
513 return single_open(file, tcpm_seq_show, inode->i_private);
514}
515
516static const struct file_operations tcpm_debug_operations = {
517 .open = tcpm_debug_open,
518 .llseek = seq_lseek,
519 .read = seq_read,
520 .release = single_release,
521};
522
523static struct dentry *rootdir;
524
525static int tcpm_debugfs_init(struct tcpm_port *port)
526{
527 mutex_init(&port->logbuffer_lock);
528 /* /sys/kernel/debug/tcpm/usbcX */
529 if (!rootdir) {
530 rootdir = debugfs_create_dir("tcpm", NULL);
531 if (!rootdir)
532 return -ENOMEM;
533 }
534
535 port->dentry = debugfs_create_file(dev_name(port->dev),
536 S_IFREG | 0444, rootdir,
537 port, &tcpm_debug_operations);
538
539 return 0;
540}
541
542static void tcpm_debugfs_exit(struct tcpm_port *port)
543{
544 debugfs_remove(port->dentry);
545}
546
547#else
548
549static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
550static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
551static void tcpm_log_source_caps(struct tcpm_port *port) { }
552static int tcpm_debugfs_init(const struct tcpm_port *port) { return 0; }
553static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
554
555#endif
556
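/*
 * Hand a PD message (or a message-less transmission such as a hard reset)
 * over to the low level driver and wait for the transmit status. The port
 * lock is released while waiting for completion. The MessageID counter is
 * only incremented on successful transmission.
 */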
557static int tcpm_pd_transmit(struct tcpm_port *port,
558 enum tcpm_transmit_type type,
559 const struct pd_message *msg)
560{
561 unsigned long timeout;
562 int ret;
563
564 if (msg)
565 tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
566 else
567 tcpm_log(port, "PD TX, type: %#x", type);
568
569 reinit_completion(&port->tx_complete);
570 ret = port->tcpc->pd_transmit(port->tcpc, type, msg);
571 if (ret < 0)
572 return ret;
573
574 mutex_unlock(&port->lock);
575 timeout = wait_for_completion_timeout(&port->tx_complete,
576 msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
577 mutex_lock(&port->lock);
578 if (!timeout)
579 return -ETIMEDOUT;
580
581 switch (port->tx_status) {
582 case TCPC_TX_SUCCESS:
583 port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
584 return 0;
585 case TCPC_TX_DISCARDED:
586 return -EAGAIN;
587 case TCPC_TX_FAILED:
588 default:
589 return -EIO;
590 }
591}
592
593void tcpm_pd_transmit_complete(struct tcpm_port *port,
594 enum tcpm_transmit_status status)
595{
596 tcpm_log(port, "PD TX complete, status: %u", status);
597 port->tx_status = status;
598 complete(&port->tx_complete);
599}
600EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
601
602static int tcpm_mux_set(struct tcpm_port *port, enum tcpc_mux_mode mode,
603 enum tcpc_usb_switch config)
604{
605 int ret = 0;
606
607 tcpm_log(port, "Requesting mux mode %d, config %d, polarity %d",
608 mode, config, port->polarity);
609
610 if (port->tcpc->mux)
611 ret = port->tcpc->mux->set(port->tcpc->mux, mode, config,
612 port->polarity);
613
614 return ret;
615}
616
617static int tcpm_set_polarity(struct tcpm_port *port,
618 enum typec_cc_polarity polarity)
619{
620 int ret;
621
622 tcpm_log(port, "polarity %d", polarity);
623
624 ret = port->tcpc->set_polarity(port->tcpc, polarity);
625 if (ret < 0)
626 return ret;
627
628 port->polarity = polarity;
629
630 return 0;
631}
632
633static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
634{
635 int ret;
636
637 tcpm_log(port, "vconn:=%d", enable);
638
639 ret = port->tcpc->set_vconn(port->tcpc, enable);
640 if (!ret) {
641 port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
642 typec_set_vconn_role(port->typec_port, port->vconn_role);
643 }
644
645 return ret;
646}
647
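/* Current limit in mA derived from the Rp advertised on the active CC line */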
648static u32 tcpm_get_current_limit(struct tcpm_port *port)
649{
650 enum typec_cc_status cc;
651 u32 limit;
652
653 cc = port->polarity ? port->cc2 : port->cc1;
654 switch (cc) {
655 case TYPEC_CC_RP_1_5:
656 limit = 1500;
657 break;
658 case TYPEC_CC_RP_3_0:
659 limit = 3000;
660 break;
661 case TYPEC_CC_RP_DEF:
662 default:
663 limit = 0;
664 break;
665 }
666
667 return limit;
668}
669
670static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
671{
672 int ret = -EOPNOTSUPP;
673
674 tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
675
676 if (port->tcpc->set_current_limit)
677 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
678
679 return ret;
680}
681
682/*
683 * Determine RP value to set based on maximum current supported
684 * by a port if configured as source.
685 * Returns CC value to report to link partner.
686 */
687static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
688{
689 const u32 *src_pdo = port->src_pdo;
690 int nr_pdo = port->nr_src_pdo;
691 int i;
692
693 /*
694 * Search for first entry with matching voltage.
695 * It should report the maximum supported current.
696 */
697 for (i = 0; i < nr_pdo; i++) {
698 const u32 pdo = src_pdo[i];
699
700 if (pdo_type(pdo) == PDO_TYPE_FIXED &&
701 pdo_fixed_voltage(pdo) == 5000) {
702 unsigned int curr = pdo_max_current(pdo);
703
704 if (curr >= 3000)
705 return TYPEC_CC_RP_3_0;
706 else if (curr >= 1500)
707 return TYPEC_CC_RP_1_5;
708 return TYPEC_CC_RP_DEF;
709 }
710 }
711
712 return TYPEC_CC_RP_DEF;
713}
714
715static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
716{
717 return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
718 port->data_role);
719}
720
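/*
 * Apply new power and data roles: route the mux to USB when acting as host
 * (and disconnect it otherwise), program the TCPC, and report the roles to
 * the typec class.
 */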
721static int tcpm_set_roles(struct tcpm_port *port, bool attached,
722 enum typec_role role, enum typec_data_role data)
723{
724 int ret;
725
726 if (data == TYPEC_HOST)
727 ret = tcpm_mux_set(port, TYPEC_MUX_USB,
728 TCPC_USB_SWITCH_CONNECT);
729 else
730 ret = tcpm_mux_set(port, TYPEC_MUX_NONE,
731 TCPC_USB_SWITCH_DISCONNECT);
732 if (ret < 0)
733 return ret;
734
735 ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
736 if (ret < 0)
737 return ret;
738
739 port->pwr_role = role;
740 port->data_role = data;
741 typec_set_data_role(port->typec_port, data);
742 typec_set_pwr_role(port->typec_port, role);
743
744 return 0;
745}
746
747static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
748{
749 int ret;
750
751 ret = port->tcpc->set_roles(port->tcpc, true, role,
752 port->data_role);
753 if (ret < 0)
754 return ret;
755
756 port->pwr_role = role;
757 typec_set_pwr_role(port->typec_port, role);
758
759 return 0;
760}
761
762static int tcpm_pd_send_source_caps(struct tcpm_port *port)
763{
764 struct pd_message msg;
765 int i;
766
767 memset(&msg, 0, sizeof(msg));
768 if (!port->nr_src_pdo) {
769 /* No source capabilities defined, sink only */
770 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
771 port->pwr_role,
772 port->data_role,
773 port->message_id, 0);
774 } else {
775 msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
776 port->pwr_role,
777 port->data_role,
778 port->message_id,
779 port->nr_src_pdo);
780 }
781 for (i = 0; i < port->nr_src_pdo; i++)
782 msg.payload[i] = cpu_to_le32(port->src_pdo[i]);
783
784 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
785}
786
787static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
788{
789 struct pd_message msg;
790 int i;
791
792 memset(&msg, 0, sizeof(msg));
793 if (!port->nr_snk_pdo) {
794 /* No sink capabilities defined, source only */
795 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
796 port->pwr_role,
797 port->data_role,
798 port->message_id, 0);
799 } else {
800 msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
801 port->pwr_role,
802 port->data_role,
803 port->message_id,
804 port->nr_snk_pdo);
805 }
806 for (i = 0; i < port->nr_snk_pdo; i++)
807 msg.payload[i] = cpu_to_le32(port->snk_pdo[i]);
808
809 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
810}
811
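/*
 * Request a state transition, either immediately or after delay_ms. Delayed
 * transitions are stored in delayed_state and carried out by the state
 * machine work item; immediate transitions only re-queue the work if the
 * state machine is not already running.
 */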
812static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
813 unsigned int delay_ms)
814{
815 if (delay_ms) {
816 tcpm_log(port, "pending state change %s -> %s @ %u ms",
817 tcpm_states[port->state], tcpm_states[state],
818 delay_ms);
819 port->delayed_state = state;
820 mod_delayed_work(port->wq, &port->state_machine,
821 msecs_to_jiffies(delay_ms));
822 port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms);
823 port->delay_ms = delay_ms;
824 } else {
825 tcpm_log(port, "state change %s -> %s",
826 tcpm_states[port->state], tcpm_states[state]);
827 port->delayed_state = INVALID_STATE;
828 port->prev_state = port->state;
829 port->state = state;
830 /*
831 * Don't re-queue the state machine work item if we're currently
832 * in the state machine and we're immediately changing states.
833 * tcpm_state_machine_work() will continue running the state
834 * machine.
835 */
836 if (!port->state_machine_running)
837 mod_delayed_work(port->wq, &port->state_machine, 0);
838 }
839}
840
841static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
842 unsigned int delay_ms)
843{
844 if (port->enter_state == port->state)
845 tcpm_set_state(port, state, delay_ms);
846 else
847 tcpm_log(port,
848 "skipped %sstate change %s -> %s [%u ms], context state %s",
849 delay_ms ? "delayed " : "",
850 tcpm_states[port->state], tcpm_states[state],
851 delay_ms, tcpm_states[port->enter_state]);
852}
853
854static void tcpm_queue_message(struct tcpm_port *port,
855 enum pd_msg_request message)
856{
857 port->queued_message = message;
858 mod_delayed_work(port->wq, &port->state_machine, 0);
859}
860
861/*
862 * VDM/VDO handling functions
863 */
864static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
865 const u32 *data, int cnt)
866{
867 port->vdo_count = cnt + 1;
868 port->vdo_data[0] = header;
869 memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
870 /* Set ready, vdm state machine will actually send */
871 port->vdm_retries = 0;
872 port->vdm_state = VDM_STATE_READY;
873}
874
875static void svdm_consume_identity(struct tcpm_port *port, const __le32 *payload,
876 int cnt)
877{
878 u32 vdo = le32_to_cpu(payload[VDO_INDEX_IDH]);
879 u32 product = le32_to_cpu(payload[VDO_INDEX_PRODUCT]);
880
881 memset(&port->mode_data, 0, sizeof(port->mode_data));
882
883#if 0 /* Not really a match */
884 switch (PD_IDH_PTYPE(vdo)) {
885 case IDH_PTYPE_UNDEF:
886 port->partner.type = TYPEC_PARTNER_NONE; /* no longer exists */
887 break;
888 case IDH_PTYPE_HUB:
889 break;
890 case IDH_PTYPE_PERIPH:
891 break;
892 case IDH_PTYPE_PCABLE:
893 break;
894 case IDH_PTYPE_ACABLE:
895 break;
896 case IDH_PTYPE_AMA:
897 port->partner.type = TYPEC_PARTNER_ALTMODE;
898 break;
899 default:
900 break;
901 }
902#endif
903
904 port->partner_ident.id_header = vdo;
905 port->partner_ident.cert_stat = le32_to_cpu(payload[VDO_INDEX_CSTAT]);
906 port->partner_ident.product = product;
907
908 typec_partner_set_identity(port->partner);
909
910 tcpm_log(port, "Identity: %04x:%04x.%04x",
911 PD_IDH_VID(vdo),
912 PD_PRODUCT_PID(product), product & 0xffff);
913}
914
915static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
916 int cnt)
917{
918 struct pd_mode_data *pmdata = &port->mode_data;
919 int i;
920
921 for (i = 1; i < cnt; i++) {
922 u32 p = le32_to_cpu(payload[i]);
923 u16 svid;
924
925 svid = (p >> 16) & 0xffff;
926 if (!svid)
927 return false;
928
929 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
930 goto abort;
931
932 pmdata->svids[pmdata->nsvids++] = svid;
933 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
934
935 svid = p & 0xffff;
936 if (!svid)
937 return false;
938
939 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
940 goto abort;
941
942 pmdata->svids[pmdata->nsvids++] = svid;
943 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
944 }
945 return true;
946abort:
947 tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
948 return false;
949}
950
951static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
952 int cnt)
953{
954 struct pd_mode_data *pmdata = &port->mode_data;
955 struct typec_altmode_desc *paltmode;
956 struct typec_mode_desc *pmode;
957 int i;
958
959 if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
960 /* Already logged in svdm_consume_svids() */
961 return;
962 }
963
964 paltmode = &pmdata->altmode_desc[pmdata->altmodes];
965 memset(paltmode, 0, sizeof(*paltmode));
966
967 paltmode->svid = pmdata->svids[pmdata->svid_index];
968
969 tcpm_log(port, " Alternate mode %d: SVID 0x%04x",
970 pmdata->altmodes, paltmode->svid);
971
972 for (i = 1; i < cnt && paltmode->n_modes < ALTMODE_MAX_MODES; i++) {
973 pmode = &paltmode->modes[paltmode->n_modes];
974 memset(pmode, 0, sizeof(*pmode));
975 pmode->vdo = le32_to_cpu(payload[i]);
976 pmode->index = i - 1;
977 paltmode->n_modes++;
978 tcpm_log(port, " VDO %d: 0x%08x",
979 pmode->index, pmode->vdo);
980 }
981 port->partner_altmode[pmdata->altmodes] =
982 typec_partner_register_altmode(port->partner, paltmode);
983 if (port->partner_altmode[pmdata->altmodes] == NULL) {
984 tcpm_log(port,
985 "Failed to register alternate modes for SVID 0x%04x",
986 paltmode->svid);
987 return;
988 }
989 pmdata->altmodes++;
990}
991
992#define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
993
994static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
995 u32 *response)
996{
997 u32 p0 = le32_to_cpu(payload[0]);
998 int cmd_type = PD_VDO_CMDT(p0);
999 int cmd = PD_VDO_CMD(p0);
1000 struct pd_mode_data *modep;
1001 int rlen = 0;
1002 u16 svid;
 1003 int i;
1004
1005 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1006 p0, cmd_type, cmd, cnt);
1007
1008 modep = &port->mode_data;
1009
1010 switch (cmd_type) {
1011 case CMDT_INIT:
1012 switch (cmd) {
1013 case CMD_DISCOVER_IDENT:
1014 /* 6.4.4.3.1: Only respond as UFP (device) */
1015 if (port->data_role == TYPEC_DEVICE &&
1016 port->nr_snk_vdo) {
1017 for (i = 0; i < port->nr_snk_vdo; i++)
 1018 response[i + 1] = port->snk_vdo[i];
1019 rlen = port->nr_snk_vdo + 1;
1020 }
1021 break;
1022 case CMD_DISCOVER_SVID:
1023 break;
1024 case CMD_DISCOVER_MODES:
1025 break;
1026 case CMD_ENTER_MODE:
1027 break;
1028 case CMD_EXIT_MODE:
1029 break;
1030 case CMD_ATTENTION:
1031 break;
1032 default:
1033 break;
1034 }
1035 if (rlen >= 1) {
1036 response[0] = p0 | VDO_CMDT(CMDT_RSP_ACK);
1037 } else if (rlen == 0) {
1038 response[0] = p0 | VDO_CMDT(CMDT_RSP_NAK);
1039 rlen = 1;
1040 } else {
1041 response[0] = p0 | VDO_CMDT(CMDT_RSP_BUSY);
1042 rlen = 1;
1043 }
1044 break;
1045 case CMDT_RSP_ACK:
1046 /* silently drop message if we are not connected */
1047 if (!port->partner)
1048 break;
1049
1050 switch (cmd) {
1051 case CMD_DISCOVER_IDENT:
1052 /* 6.4.4.3.1 */
1053 svdm_consume_identity(port, payload, cnt);
1054 response[0] = VDO(USB_SID_PD, 1, CMD_DISCOVER_SVID);
1055 rlen = 1;
1056 break;
1057 case CMD_DISCOVER_SVID:
1058 /* 6.4.4.3.2 */
1059 if (svdm_consume_svids(port, payload, cnt)) {
1060 response[0] = VDO(USB_SID_PD, 1,
1061 CMD_DISCOVER_SVID);
1062 rlen = 1;
1063 } else if (modep->nsvids && supports_modal(port)) {
1064 response[0] = VDO(modep->svids[0], 1,
1065 CMD_DISCOVER_MODES);
1066 rlen = 1;
1067 }
1068 break;
1069 case CMD_DISCOVER_MODES:
1070 /* 6.4.4.3.3 */
1071 svdm_consume_modes(port, payload, cnt);
1072 modep->svid_index++;
1073 if (modep->svid_index < modep->nsvids) {
1074 svid = modep->svids[modep->svid_index];
1075 response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
1076 rlen = 1;
1077 } else {
1078#if 0
1079 response[0] = pd_dfp_enter_mode(port, 0, 0);
1080 if (response[0])
1081 rlen = 1;
1082#endif
1083 }
1084 break;
1085 case CMD_ENTER_MODE:
1086 break;
1087 default:
1088 break;
1089 }
1090 break;
1091 default:
1092 break;
1093 }
1094
1095 return rlen;
1096}
1097
1098static void tcpm_handle_vdm_request(struct tcpm_port *port,
1099 const __le32 *payload, int cnt)
1100{
1101 int rlen = 0;
1102 u32 response[8] = { };
1103 u32 p0 = le32_to_cpu(payload[0]);
1104
1105 if (port->vdm_state == VDM_STATE_BUSY) {
1106 /* If UFP responded busy retry after timeout */
1107 if (PD_VDO_CMDT(p0) == CMDT_RSP_BUSY) {
1108 port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
1109 port->vdo_retry = (p0 & ~VDO_CMDT_MASK) |
1110 CMDT_INIT;
1111 mod_delayed_work(port->wq, &port->vdm_state_machine,
1112 msecs_to_jiffies(PD_T_VDM_BUSY));
1113 return;
1114 }
1115 port->vdm_state = VDM_STATE_DONE;
1116 }
1117
1118 if (PD_VDO_SVDM(p0))
1119 rlen = tcpm_pd_svdm(port, payload, cnt, response);
1120#if 0
1121 else
1122 rlen = tcpm_pd_custom_vdm(port, cnt, payload, response);
1123#endif
1124
1125 if (rlen > 0) {
1126 tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
1127 mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
1128 }
1129}
1130
1131static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
1132 const u32 *data, int count)
1133{
1134 u32 header;
1135
1136 if (WARN_ON(count > VDO_MAX_SIZE - 1))
1137 count = VDO_MAX_SIZE - 1;
1138
1139 /* set VDM header with VID & CMD */
1140 header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
1141 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), cmd);
1142 tcpm_queue_vdm(port, header, data, count);
1143
1144 mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
1145}
1146
1147static unsigned int vdm_ready_timeout(u32 vdm_hdr)
1148{
1149 unsigned int timeout;
1150 int cmd = PD_VDO_CMD(vdm_hdr);
1151
 1152 /* it's not a structured VDM command */
1153 if (!PD_VDO_SVDM(vdm_hdr))
1154 return PD_T_VDM_UNSTRUCTURED;
1155
1156 switch (PD_VDO_CMDT(vdm_hdr)) {
1157 case CMDT_INIT:
1158 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1159 timeout = PD_T_VDM_WAIT_MODE_E;
1160 else
1161 timeout = PD_T_VDM_SNDR_RSP;
1162 break;
1163 default:
1164 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1165 timeout = PD_T_VDM_E_MODE;
1166 else
1167 timeout = PD_T_VDM_RCVR_RSP;
1168 break;
1169 }
1170 return timeout;
1171}
1172
1173static void vdm_run_state_machine(struct tcpm_port *port)
1174{
1175 struct pd_message msg;
1176 int i, res;
1177
1178 switch (port->vdm_state) {
1179 case VDM_STATE_READY:
1180 /* Only transmit VDM if attached */
1181 if (!port->attached) {
1182 port->vdm_state = VDM_STATE_ERR_BUSY;
1183 break;
1184 }
1185
1186 /*
 1187 * If there's traffic, or we're not in the PD ready state, don't send
 1188 * a VDM.
1189 */
1190 if (port->state != SRC_READY && port->state != SNK_READY)
1191 break;
1192
1193 /* Prepare and send VDM */
1194 memset(&msg, 0, sizeof(msg));
1195 msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
1196 port->pwr_role,
1197 port->data_role,
1198 port->message_id, port->vdo_count);
1199 for (i = 0; i < port->vdo_count; i++)
1200 msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
1201 res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1202 if (res < 0) {
1203 port->vdm_state = VDM_STATE_ERR_SEND;
1204 } else {
1205 unsigned long timeout;
1206
1207 port->vdm_retries = 0;
1208 port->vdm_state = VDM_STATE_BUSY;
1209 timeout = vdm_ready_timeout(port->vdo_data[0]);
1210 mod_delayed_work(port->wq, &port->vdm_state_machine,
 1211 msecs_to_jiffies(timeout));
1212 }
1213 break;
1214 case VDM_STATE_WAIT_RSP_BUSY:
1215 port->vdo_data[0] = port->vdo_retry;
1216 port->vdo_count = 1;
1217 port->vdm_state = VDM_STATE_READY;
1218 break;
1219 case VDM_STATE_BUSY:
1220 port->vdm_state = VDM_STATE_ERR_TMOUT;
1221 break;
1222 case VDM_STATE_ERR_SEND:
1223 /*
1224 * A partner which does not support USB PD will not reply,
1225 * so this is not a fatal error. At the same time, some
1226 * devices may not return GoodCRC under some circumstances,
1227 * so we need to retry.
1228 */
1229 if (port->vdm_retries < 3) {
1230 tcpm_log(port, "VDM Tx error, retry");
1231 port->vdm_retries++;
1232 port->vdm_state = VDM_STATE_READY;
1233 }
1234 break;
1235 default:
1236 break;
1237 }
1238}
1239
1240static void vdm_state_machine_work(struct work_struct *work)
1241{
1242 struct tcpm_port *port = container_of(work, struct tcpm_port,
1243 vdm_state_machine.work);
1244 enum vdm_states prev_state;
1245
1246 mutex_lock(&port->lock);
1247
1248 /*
1249 * Continue running as long as the port is not busy and there was
1250 * a state change.
1251 */
1252 do {
1253 prev_state = port->vdm_state;
1254 vdm_run_state_machine(port);
1255 } while (port->vdm_state != prev_state &&
1256 port->vdm_state != VDM_STATE_BUSY);
1257
1258 mutex_unlock(&port->lock);
1259}
1260
1261/*
1262 * PD (data, control) command handling functions
1263 */
1264static void tcpm_pd_data_request(struct tcpm_port *port,
1265 const struct pd_message *msg)
1266{
1267 enum pd_data_msg_type type = pd_header_type_le(msg->header);
1268 unsigned int cnt = pd_header_cnt_le(msg->header);
1269 unsigned int i;
1270
1271 switch (type) {
1272 case PD_DATA_SOURCE_CAP:
1273 if (port->pwr_role != TYPEC_SINK)
1274 break;
1275
1276 for (i = 0; i < cnt; i++)
1277 port->source_caps[i] = le32_to_cpu(msg->payload[i]);
1278
1279 port->nr_source_caps = cnt;
1280
1281 tcpm_log_source_caps(port);
1282
1283 /*
1284 * This message may be received even if VBUS is not
1285 * present. This is quite unexpected; see USB PD
1286 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
1287 * However, at the same time, we must be ready to
1288 * receive this message and respond to it 15ms after
1289 * receiving PS_RDY during power swap operations, no matter
1290 * if VBUS is available or not (USB PD specification,
1291 * section 6.5.9.2).
1292 * So we need to accept the message either way,
1293 * but be prepared to keep waiting for VBUS after it was
1294 * handled.
1295 */
1296 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
1297 break;
1298 case PD_DATA_REQUEST:
1299 if (port->pwr_role != TYPEC_SOURCE ||
1300 cnt != 1) {
1301 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1302 break;
1303 }
1304 port->sink_request = le32_to_cpu(msg->payload[0]);
1305 tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
1306 break;
1307 case PD_DATA_SINK_CAP:
1308 /* We don't do anything with this at the moment... */
1309 for (i = 0; i < cnt; i++)
1310 port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
1311 port->nr_sink_caps = cnt;
1312 break;
1313 case PD_DATA_VENDOR_DEF:
1314 tcpm_handle_vdm_request(port, msg->payload, cnt);
1315 break;
1316 case PD_DATA_BIST:
1317 if (port->state == SRC_READY || port->state == SNK_READY) {
1318 port->bist_request = le32_to_cpu(msg->payload[0]);
1319 tcpm_set_state(port, BIST_RX, 0);
1320 }
1321 break;
1322 default:
1323 tcpm_log(port, "Unhandled data message type %#x", type);
1324 break;
1325 }
1326}
1327
1328static void tcpm_pd_ctrl_request(struct tcpm_port *port,
1329 const struct pd_message *msg)
1330{
1331 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1332 enum tcpm_state next_state;
1333
1334 switch (type) {
1335 case PD_CTRL_GOOD_CRC:
1336 case PD_CTRL_PING:
1337 break;
1338 case PD_CTRL_GET_SOURCE_CAP:
1339 switch (port->state) {
1340 case SRC_READY:
1341 case SNK_READY:
1342 tcpm_queue_message(port, PD_MSG_DATA_SOURCE_CAP);
1343 break;
1344 default:
1345 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1346 break;
1347 }
1348 break;
1349 case PD_CTRL_GET_SINK_CAP:
1350 switch (port->state) {
1351 case SRC_READY:
1352 case SNK_READY:
1353 tcpm_queue_message(port, PD_MSG_DATA_SINK_CAP);
1354 break;
1355 default:
1356 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1357 break;
1358 }
1359 break;
1360 case PD_CTRL_GOTO_MIN:
1361 break;
1362 case PD_CTRL_PS_RDY:
1363 switch (port->state) {
1364 case SNK_TRANSITION_SINK:
1365 if (port->vbus_present) {
1366 tcpm_set_current_limit(port,
1367 port->current_limit,
1368 port->supply_voltage);
1369 tcpm_set_state(port, SNK_READY, 0);
1370 } else {
1371 /*
1372 * Seen after power swap. Keep waiting for VBUS
1373 * in a transitional state.
1374 */
1375 tcpm_set_state(port,
1376 SNK_TRANSITION_SINK_VBUS, 0);
1377 }
1378 break;
1379 case PR_SWAP_SRC_SNK_SOURCE_OFF:
1380 tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
1381 break;
1382 case PR_SWAP_SNK_SRC_SINK_OFF:
1383 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
1384 break;
1385 case VCONN_SWAP_WAIT_FOR_VCONN:
1386 tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
1387 break;
1388 default:
1389 break;
1390 }
1391 break;
1392 case PD_CTRL_REJECT:
1393 case PD_CTRL_WAIT:
1394 switch (port->state) {
1395 case SNK_NEGOTIATE_CAPABILITIES:
1396 /* USB PD specification, Figure 8-43 */
1397 if (port->explicit_contract)
1398 next_state = SNK_READY;
1399 else
1400 next_state = SNK_WAIT_CAPABILITIES;
1401 tcpm_set_state(port, next_state, 0);
1402 break;
1403 case DR_SWAP_SEND:
1404 port->swap_status = (type == PD_CTRL_WAIT ?
1405 -EAGAIN : -EOPNOTSUPP);
1406 tcpm_set_state(port, DR_SWAP_CANCEL, 0);
1407 break;
1408 case PR_SWAP_SEND:
1409 port->swap_status = (type == PD_CTRL_WAIT ?
1410 -EAGAIN : -EOPNOTSUPP);
1411 tcpm_set_state(port, PR_SWAP_CANCEL, 0);
1412 break;
1413 case VCONN_SWAP_SEND:
1414 port->swap_status = (type == PD_CTRL_WAIT ?
1415 -EAGAIN : -EOPNOTSUPP);
1416 tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
1417 break;
1418 default:
1419 break;
1420 }
1421 break;
1422 case PD_CTRL_ACCEPT:
1423 switch (port->state) {
1424 case SNK_NEGOTIATE_CAPABILITIES:
1425 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
1426 break;
1427 case SOFT_RESET_SEND:
1428 port->message_id = 0;
 1429 port->rx_msgid = -1;
1430 if (port->pwr_role == TYPEC_SOURCE)
1431 next_state = SRC_SEND_CAPABILITIES;
1432 else
1433 next_state = SNK_WAIT_CAPABILITIES;
1434 tcpm_set_state(port, next_state, 0);
1435 break;
1436 case DR_SWAP_SEND:
1437 tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
1438 break;
1439 case PR_SWAP_SEND:
1440 tcpm_set_state(port, PR_SWAP_START, 0);
1441 break;
1442 case VCONN_SWAP_SEND:
1443 tcpm_set_state(port, VCONN_SWAP_START, 0);
1444 break;
1445 default:
1446 break;
1447 }
1448 break;
1449 case PD_CTRL_SOFT_RESET:
1450 tcpm_set_state(port, SOFT_RESET, 0);
1451 break;
1452 case PD_CTRL_DR_SWAP:
1453 if (port->typec_caps.type != TYPEC_PORT_DRP) {
1454 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1455 break;
1456 }
1457 /*
1458 * XXX
1459 * 6.3.9: If an alternate mode is active, a request to swap
1460 * alternate modes shall trigger a port reset.
1461 */
1462 switch (port->state) {
1463 case SRC_READY:
1464 case SNK_READY:
1465 tcpm_set_state(port, DR_SWAP_ACCEPT, 0);
1466 break;
1467 default:
1468 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1469 break;
1470 }
1471 break;
1472 case PD_CTRL_PR_SWAP:
1473 if (port->typec_caps.type != TYPEC_PORT_DRP) {
1474 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1475 break;
1476 }
1477 switch (port->state) {
1478 case SRC_READY:
1479 case SNK_READY:
1480 tcpm_set_state(port, PR_SWAP_ACCEPT, 0);
1481 break;
1482 default:
1483 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1484 break;
1485 }
1486 break;
1487 case PD_CTRL_VCONN_SWAP:
1488 switch (port->state) {
1489 case SRC_READY:
1490 case SNK_READY:
1491 tcpm_set_state(port, VCONN_SWAP_ACCEPT, 0);
1492 break;
1493 default:
1494 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1495 break;
1496 }
1497 break;
1498 default:
1499 tcpm_log(port, "Unhandled ctrl message type %#x", type);
1500 break;
1501 }
1502}
1503
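/*
 * Work item handling a received PD message: drop retransmissions by
 * comparing the MessageID against the last one seen (except for Soft_Reset),
 * start error recovery on a data role mismatch, and otherwise dispatch to
 * the data or control message handlers.
 */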
1504static void tcpm_pd_rx_handler(struct work_struct *work)
1505{
1506 struct pd_rx_event *event = container_of(work,
1507 struct pd_rx_event, work);
1508 const struct pd_message *msg = &event->msg;
1509 unsigned int cnt = pd_header_cnt_le(msg->header);
1510 struct tcpm_port *port = event->port;
1511
1512 mutex_lock(&port->lock);
1513
1514 tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
1515 port->attached);
1516
1517 if (port->attached) {
1518 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1519 unsigned int msgid = pd_header_msgid_le(msg->header);
1520
1521 /*
1522 * USB PD standard, 6.6.1.2:
1523 * "... if MessageID value in a received Message is the
1524 * same as the stored value, the receiver shall return a
1525 * GoodCRC Message with that MessageID value and drop
1526 * the Message (this is a retry of an already received
1527 * Message). Note: this shall not apply to the Soft_Reset
1528 * Message which always has a MessageID value of zero."
1529 */
1530 if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
1531 goto done;
1532 port->rx_msgid = msgid;
1533
1534 /*
1535 * If both ends believe to be DFP/host, we have a data role
1536 * mismatch.
1537 */
1538 if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
1539 (port->data_role == TYPEC_HOST)) {
1540 tcpm_log(port,
1541 "Data role mismatch, initiating error recovery");
1542 tcpm_set_state(port, ERROR_RECOVERY, 0);
1543 } else {
1544 if (cnt)
1545 tcpm_pd_data_request(port, msg);
1546 else
1547 tcpm_pd_ctrl_request(port, msg);
1548 }
1549 }
1550
1551done:
1552 mutex_unlock(&port->lock);
1553 kfree(event);
1554}
1555
1556void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
1557{
1558 struct pd_rx_event *event;
1559
1560 event = kzalloc(sizeof(*event), GFP_ATOMIC);
1561 if (!event)
1562 return;
1563
1564 INIT_WORK(&event->work, tcpm_pd_rx_handler);
1565 event->port = port;
1566 memcpy(&event->msg, msg, sizeof(*msg));
1567 queue_work(port->wq, &event->work);
1568}
1569EXPORT_SYMBOL_GPL(tcpm_pd_receive);
1570
1571static int tcpm_pd_send_control(struct tcpm_port *port,
1572 enum pd_ctrl_msg_type type)
1573{
1574 struct pd_message msg;
1575
1576 memset(&msg, 0, sizeof(msg));
1577 msg.header = PD_HEADER_LE(type, port->pwr_role,
1578 port->data_role,
1579 port->message_id, 0);
1580
1581 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1582}
1583
1584/*
1585 * Send queued message without affecting state.
1586 * Return true if state machine should go back to sleep,
1587 * false otherwise.
1588 */
1589static bool tcpm_send_queued_message(struct tcpm_port *port)
1590{
1591 enum pd_msg_request queued_message;
1592
1593 do {
1594 queued_message = port->queued_message;
1595 port->queued_message = PD_MSG_NONE;
1596
1597 switch (queued_message) {
1598 case PD_MSG_CTRL_WAIT:
1599 tcpm_pd_send_control(port, PD_CTRL_WAIT);
1600 break;
1601 case PD_MSG_CTRL_REJECT:
1602 tcpm_pd_send_control(port, PD_CTRL_REJECT);
1603 break;
1604 case PD_MSG_DATA_SINK_CAP:
1605 tcpm_pd_send_sink_caps(port);
1606 break;
1607 case PD_MSG_DATA_SOURCE_CAP:
1608 tcpm_pd_send_source_caps(port);
1609 break;
1610 default:
1611 break;
1612 }
1613 } while (port->queued_message != PD_MSG_NONE);
1614
1615 if (port->delayed_state != INVALID_STATE) {
1616 if (time_is_after_jiffies(port->delayed_runtime)) {
1617 mod_delayed_work(port->wq, &port->state_machine,
1618 port->delayed_runtime - jiffies);
1619 return true;
1620 }
1621 port->delayed_state = INVALID_STATE;
1622 }
1623 return false;
1624}
1625
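/*
 * Validate the Request (RDO) received from the attached sink against the
 * referenced local source PDO: the object position must be valid, the
 * operating current/power must not exceed the offer, and the maximum
 * current/power may only exceed it if the capability mismatch bit is set.
 */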
1626static int tcpm_pd_check_request(struct tcpm_port *port)
1627{
1628 u32 pdo, rdo = port->sink_request;
1629 unsigned int max, op, pdo_max, index;
1630 enum pd_pdo_type type;
1631
1632 index = rdo_index(rdo);
1633 if (!index || index > port->nr_src_pdo)
1634 return -EINVAL;
1635
1636 pdo = port->src_pdo[index - 1];
1637 type = pdo_type(pdo);
1638 switch (type) {
1639 case PDO_TYPE_FIXED:
1640 case PDO_TYPE_VAR:
1641 max = rdo_max_current(rdo);
1642 op = rdo_op_current(rdo);
1643 pdo_max = pdo_max_current(pdo);
1644
1645 if (op > pdo_max)
1646 return -EINVAL;
1647 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1648 return -EINVAL;
1649
1650 if (type == PDO_TYPE_FIXED)
1651 tcpm_log(port,
1652 "Requested %u mV, %u mA for %u / %u mA",
1653 pdo_fixed_voltage(pdo), pdo_max, op, max);
1654 else
1655 tcpm_log(port,
1656 "Requested %u -> %u mV, %u mA for %u / %u mA",
1657 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1658 pdo_max, op, max);
1659 break;
1660 case PDO_TYPE_BATT:
1661 max = rdo_max_power(rdo);
1662 op = rdo_op_power(rdo);
1663 pdo_max = pdo_max_power(pdo);
1664
1665 if (op > pdo_max)
1666 return -EINVAL;
1667 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1668 return -EINVAL;
1669 tcpm_log(port,
1670 "Requested %u -> %u mV, %u mW for %u / %u mW",
1671 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1672 pdo_max, op, max);
1673 break;
1674 default:
1675 return -EINVAL;
1676 }
1677
1678 port->op_vsafe5v = index == 1;
1679
1680 return 0;
1681}
1682
1683static int tcpm_pd_select_pdo(struct tcpm_port *port)
1684{
1685 unsigned int i, max_mw = 0, max_mv = 0;
1686 int ret = -EINVAL;
1687
1688 /*
1689 * Select the source PDO providing the most power while staying within
 1690 * the board's voltage limits.
1691 */
1692 for (i = 0; i < port->nr_source_caps; i++) {
1693 u32 pdo = port->source_caps[i];
1694 enum pd_pdo_type type = pdo_type(pdo);
1695 unsigned int mv, ma, mw;
1696
1697 if (type == PDO_TYPE_FIXED)
1698 mv = pdo_fixed_voltage(pdo);
1699 else
1700 mv = pdo_min_voltage(pdo);
1701
1702 if (type == PDO_TYPE_BATT) {
1703 mw = pdo_max_power(pdo);
1704 } else {
1705 ma = min(pdo_max_current(pdo),
1706 port->max_snk_ma);
1707 mw = ma * mv / 1000;
1708 }
1709
 1710 /* Prefer higher voltages if available */
1711 if ((mw > max_mw || (mw == max_mw && mv > max_mv)) &&
1712 mv <= port->max_snk_mv) {
1713 ret = i;
1714 max_mw = mw;
1715 max_mv = mv;
1716 }
1717 }
1718
1719 return ret;
1720}
1721
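/*
 * Build the Request (RDO) to send as a sink: select the best source PDO,
 * cap the requested current at the board's current and power limits, and
 * set the capability mismatch flag if the resulting power is below the
 * operating power the board needs.
 */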
1722static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1723{
1724 unsigned int mv, ma, mw, flags;
1725 unsigned int max_ma, max_mw;
1726 enum pd_pdo_type type;
1727 int index;
1728 u32 pdo;
1729
1730 index = tcpm_pd_select_pdo(port);
1731 if (index < 0)
1732 return -EINVAL;
1733 pdo = port->source_caps[index];
1734 type = pdo_type(pdo);
1735
1736 if (type == PDO_TYPE_FIXED)
1737 mv = pdo_fixed_voltage(pdo);
1738 else
1739 mv = pdo_min_voltage(pdo);
1740
1741 /* Select maximum available current within the board's power limit */
1742 if (type == PDO_TYPE_BATT) {
1743 mw = pdo_max_power(pdo);
1744 ma = 1000 * min(mw, port->max_snk_mw) / mv;
1745 } else {
1746 ma = min(pdo_max_current(pdo),
1747 1000 * port->max_snk_mw / mv);
1748 }
1749 ma = min(ma, port->max_snk_ma);
1750
 1751 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
1752
1753 /* Set mismatch bit if offered power is less than operating power */
1754 mw = ma * mv / 1000;
1755 max_ma = ma;
1756 max_mw = mw;
1757 if (mw < port->operating_snk_mw) {
1758 flags |= RDO_CAP_MISMATCH;
1759 max_mw = port->operating_snk_mw;
1760 max_ma = max_mw * 1000 / mv;
1761 }
1762
1763 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
1764 port->cc_req, port->cc1, port->cc2, port->vbus_source,
1765 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
1766 port->polarity);
1767
1768 if (type == PDO_TYPE_BATT) {
1769 *rdo = RDO_BATT(index + 1, mw, max_mw, flags);
1770
1771 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
1772 index, mv, mw,
1773 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1774 } else {
1775 *rdo = RDO_FIXED(index + 1, ma, max_ma, flags);
1776
1777 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
1778 index, mv, ma,
1779 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1780 }
1781
1782 port->current_limit = ma;
1783 port->supply_voltage = mv;
1784
1785 return 0;
1786}
1787
1788static int tcpm_pd_send_request(struct tcpm_port *port)
1789{
1790 struct pd_message msg;
1791 int ret;
1792 u32 rdo;
1793
1794 ret = tcpm_pd_build_request(port, &rdo);
1795 if (ret < 0)
1796 return ret;
1797
1798 memset(&msg, 0, sizeof(msg));
1799 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
1800 port->pwr_role,
1801 port->data_role,
1802 port->message_id, 1);
1803 msg.payload[0] = cpu_to_le32(rdo);
1804
1805 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1806}
1807
1808static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
1809{
1810 int ret;
1811
1812 if (enable && port->vbus_charge)
1813 return -EINVAL;
1814
1815 tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
1816
1817 ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
1818 if (ret < 0)
1819 return ret;
1820
1821 port->vbus_source = enable;
1822 return 0;
1823}
1824
1825static int tcpm_set_charge(struct tcpm_port *port, bool charge)
1826{
1827 int ret;
1828
1829 if (charge && port->vbus_source)
1830 return -EINVAL;
1831
1832 if (charge != port->vbus_charge) {
1833 tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
1834 ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
1835 charge);
1836 if (ret < 0)
1837 return ret;
1838 }
1839 port->vbus_charge = charge;
1840 return 0;
1841}
1842
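/*
 * Let the TCPC hardware perform DRP toggling while unattached, if supported.
 * Returns true if hardware toggling was started.
 */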
1843static bool tcpm_start_drp_toggling(struct tcpm_port *port)
1844{
1845 int ret;
1846
1847 if (port->tcpc->start_drp_toggling &&
1848 port->typec_caps.type == TYPEC_PORT_DRP) {
1849 tcpm_log_force(port, "Start DRP toggling");
1850 ret = port->tcpc->start_drp_toggling(port->tcpc,
1851 tcpm_rp_cc(port));
1852 if (!ret)
1853 return true;
1854 }
1855
1856 return false;
1857}
1858
1859static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
1860{
1861 tcpm_log(port, "cc:=%d", cc);
1862 port->cc_req = cc;
1863 port->tcpc->set_cc(port->tcpc, cc);
1864}
1865
1866static int tcpm_init_vbus(struct tcpm_port *port)
1867{
1868 int ret;
1869
1870 ret = port->tcpc->set_vbus(port->tcpc, false, false);
1871 port->vbus_source = false;
1872 port->vbus_charge = false;
1873 return ret;
1874}
1875
1876static int tcpm_init_vconn(struct tcpm_port *port)
1877{
1878 int ret;
1879
1880 ret = port->tcpc->set_vconn(port->tcpc, false);
1881 port->vconn_role = TYPEC_SINK;
1882 return ret;
1883}
1884
1885static void tcpm_typec_connect(struct tcpm_port *port)
1886{
1887 if (!port->connected) {
1888 /* Make sure we don't report stale identity information */
1889 memset(&port->partner_ident, 0, sizeof(port->partner_ident));
1890 port->partner_desc.usb_pd = port->pd_capable;
1891 if (tcpm_port_is_debug(port))
1892 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
1893 else if (tcpm_port_is_audio(port))
1894 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
1895 else
1896 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
1897 port->partner = typec_register_partner(port->typec_port,
1898 &port->partner_desc);
1899 port->connected = true;
1900 }
1901}
1902
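/*
 * Attach as source: set CC polarity, switch to source/host roles, enable
 * PD message reception, source VCONN if the other CC pin presents Ra, and
 * turn on VBUS.
 */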
1903static int tcpm_src_attach(struct tcpm_port *port)
1904{
1905 enum typec_cc_polarity polarity =
1906 port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
1907 : TYPEC_POLARITY_CC1;
1908 int ret;
1909
1910 if (port->attached)
1911 return 0;
1912
1913 ret = tcpm_set_polarity(port, polarity);
1914 if (ret < 0)
1915 return ret;
1916
1917 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
1918 if (ret < 0)
1919 return ret;
1920
1921 ret = port->tcpc->set_pd_rx(port->tcpc, true);
1922 if (ret < 0)
1923 goto out_disable_mux;
1924
1925 /*
1926 * USB Type-C specification, version 1.2,
1927 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
1928 * Enable VCONN only if the non-RD port is set to RA.
1929 */
1930 if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
1931 (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
1932 ret = tcpm_set_vconn(port, true);
1933 if (ret < 0)
1934 goto out_disable_pd;
1935 }
1936
1937 ret = tcpm_set_vbus(port, true);
1938 if (ret < 0)
1939 goto out_disable_vconn;
1940
1941 port->pd_capable = false;
1942
1943 port->partner = NULL;
1944
1945 port->attached = true;
1946 port->send_discover = true;
1947
1948 return 0;
1949
1950out_disable_vconn:
1951 tcpm_set_vconn(port, false);
1952out_disable_pd:
1953 port->tcpc->set_pd_rx(port->tcpc, false);
1954out_disable_mux:
1955 tcpm_mux_set(port, TYPEC_MUX_NONE, TCPC_USB_SWITCH_DISCONNECT);
1956 return ret;
1957}
1958
1959static void tcpm_typec_disconnect(struct tcpm_port *port)
1960{
1961 if (port->connected) {
1962 typec_unregister_partner(port->partner);
1963 port->partner = NULL;
1964 port->connected = false;
1965 }
1966}
1967
1968static void tcpm_unregister_altmodes(struct tcpm_port *port)
1969{
1970 struct pd_mode_data *modep = &port->mode_data;
1971 int i;
1972
1973 for (i = 0; i < modep->altmodes; i++) {
1974 typec_unregister_altmode(port->partner_altmode[i]);
1975 port->partner_altmode[i] = NULL;
1976 }
1977
1978 memset(modep, 0, sizeof(*modep));
1979}
1980
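/*
 * Return the port to its disconnected defaults: unregister the partner and
 * its alternate modes, disable PD reception, VBUS, charging and VCONN, and
 * reset polarity and message bookkeeping.
 */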
1981static void tcpm_reset_port(struct tcpm_port *port)
1982{
1983 tcpm_unregister_altmodes(port);
1984 tcpm_typec_disconnect(port);
1985 port->attached = false;
1986 port->pd_capable = false;
1987
1988 /*
1989 * First Rx ID should be 0; set this to a sentinel of -1 so that
 1990 * tcpm_pd_rx_handler() can tell whether it has seen a message before.
1991 */
1992 port->rx_msgid = -1;
1993
1994 port->tcpc->set_pd_rx(port->tcpc, false);
1995 tcpm_init_vbus(port); /* also disables charging */
1996 tcpm_init_vconn(port);
1997 tcpm_set_current_limit(port, 0, 0);
1998 tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
1999 tcpm_set_attached_state(port, false);
2000 port->try_src_count = 0;
2001 port->try_snk_count = 0;
2002}
2003
2004static void tcpm_detach(struct tcpm_port *port)
2005{
2006 if (!port->attached)
2007 return;
2008
2009 if (tcpm_port_is_disconnected(port))
2010 port->hard_reset_count = 0;
2011
2012 tcpm_reset_port(port);
2013}
2014
2015static void tcpm_src_detach(struct tcpm_port *port)
2016{
2017 tcpm_detach(port);
2018}
2019
2020static int tcpm_snk_attach(struct tcpm_port *port)
2021{
2022 int ret;
2023
2024 if (port->attached)
2025 return 0;
2026
2027 ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
2028 TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
2029 if (ret < 0)
2030 return ret;
2031
2032 ret = tcpm_set_roles(port, true, TYPEC_SINK, TYPEC_DEVICE);
2033 if (ret < 0)
2034 return ret;
2035
2036 port->pd_capable = false;
2037
2038 port->partner = NULL;
2039
2040 port->attached = true;
2041 port->send_discover = true;
2042
2043 return 0;
2044}
2045
2046static void tcpm_snk_detach(struct tcpm_port *port)
2047{
2048 tcpm_detach(port);
2049
2050 /* XXX: (Dis)connect SuperSpeed mux? */
2051}
2052
2053static int tcpm_acc_attach(struct tcpm_port *port)
2054{
2055 int ret;
2056
2057 if (port->attached)
2058 return 0;
2059
2060 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
2061 if (ret < 0)
2062 return ret;
2063
2064 port->partner = NULL;
2065
2066 tcpm_typec_connect(port);
2067
2068 port->attached = true;
2069
2070 return 0;
2071}
2072
2073static void tcpm_acc_detach(struct tcpm_port *port)
2074{
2075 tcpm_detach(port);
2076}
2077
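/*
 * State to enter after a protocol error or response timeout: send another
 * hard reset while retries remain, otherwise fall back to error recovery
 * (if PD capable) or an unattached/ready state depending on the role.
 */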
2078static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
2079{
2080 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
2081 return HARD_RESET_SEND;
2082 if (port->pd_capable)
2083 return ERROR_RECOVERY;
2084 if (port->pwr_role == TYPEC_SOURCE)
2085 return SRC_UNATTACHED;
2086 if (port->state == SNK_WAIT_CAPABILITIES)
2087 return SNK_READY;
2088 return SNK_UNATTACHED;
2089}
2090
2091static inline enum tcpm_state ready_state(struct tcpm_port *port)
2092{
2093 if (port->pwr_role == TYPEC_SOURCE)
2094 return SRC_READY;
2095 else
2096 return SNK_READY;
2097}
2098
2099static inline enum tcpm_state unattached_state(struct tcpm_port *port)
2100{
2101 if (port->pwr_role == TYPEC_SOURCE)
2102 return SRC_UNATTACHED;
2103 else
2104 return SNK_UNATTACHED;
2105}
2106
2107static void tcpm_check_send_discover(struct tcpm_port *port)
2108{
2109 if (port->data_role == TYPEC_HOST && port->send_discover &&
2110 port->pd_capable) {
2111 tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
2112 port->send_discover = false;
2113 }
2114}
2115
2116static void tcpm_swap_complete(struct tcpm_port *port, int result)
2117{
2118 if (port->swap_pending) {
2119 port->swap_status = result;
2120 port->swap_pending = false;
2121 complete(&port->swap_complete);
2122 }
2123}
2124
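/*
 * Execute the entry actions of the current state. Called from
 * tcpm_state_machine_work() with port->lock held; further transitions
 * are requested with tcpm_set_state()/tcpm_set_state_cond(), either
 * immediately or after a delay.
 */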
2125static void run_state_machine(struct tcpm_port *port)
2126{
2127 int ret;
2128
2129 port->enter_state = port->state;
2130 switch (port->state) {
2131 case DRP_TOGGLING:
2132 break;
2133 /* SRC states */
2134 case SRC_UNATTACHED:
2135 tcpm_swap_complete(port, -ENOTCONN);
2136 tcpm_src_detach(port);
2137 if (tcpm_start_drp_toggling(port)) {
2138 tcpm_set_state(port, DRP_TOGGLING, 0);
2139 break;
2140 }
2141 tcpm_set_cc(port, tcpm_rp_cc(port));
2142 if (port->typec_caps.type == TYPEC_PORT_DRP)
2143 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
2144 break;
2145 case SRC_ATTACH_WAIT:
2146 if (tcpm_port_is_debug(port))
2147 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
2148 PD_T_CC_DEBOUNCE);
2149 else if (tcpm_port_is_audio(port))
2150 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
2151 PD_T_CC_DEBOUNCE);
2152 else if (tcpm_port_is_source(port))
2153 tcpm_set_state(port,
2154 tcpm_try_snk(port) ? SNK_TRY
2155 : SRC_ATTACHED,
2156 PD_T_CC_DEBOUNCE);
2157 break;
2158
2159 case SNK_TRY:
2160 port->try_snk_count++;
2161 /*
2162 * Requirements:
2163 * - Do not drive vconn or vbus
2164 * - Terminate CC pins (both) to Rd
2165 * Action:
2166 * - Wait for tDRPTry (PD_T_DRP_TRY).
2167 * Until then, ignore any state changes.
2168 */
2169 tcpm_set_cc(port, TYPEC_CC_RD);
2170 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
2171 break;
2172 case SNK_TRY_WAIT:
2173 if (port->vbus_present && tcpm_port_is_sink(port)) {
2174 tcpm_set_state(port, SNK_ATTACHED, 0);
2175 break;
2176 }
2177 if (!tcpm_port_is_sink(port)) {
2178 tcpm_set_state(port, SRC_TRYWAIT,
2179 PD_T_PD_DEBOUNCE);
2180 break;
2181 }
2182 /* No vbus, cc state is sink or open */
2183 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED, PD_T_DRP_TRYWAIT);
2184 break;
2185 case SRC_TRYWAIT:
2186 tcpm_set_cc(port, tcpm_rp_cc(port));
2187 if (!port->vbus_present && tcpm_port_is_source(port))
2188 tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
2189 else
2190 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
2191 PD_T_DRP_TRY);
2192 break;
2193 case SRC_TRYWAIT_UNATTACHED:
2194 tcpm_set_state(port, SNK_UNATTACHED, 0);
2195 break;
2196
2197 case SRC_ATTACHED:
2198 ret = tcpm_src_attach(port);
2199 tcpm_set_state(port, SRC_UNATTACHED,
2200 ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
2201 break;
2202 case SRC_STARTUP:
2203 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB);
2204 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2205 port->caps_count = 0;
2206 port->message_id = 0;
 2207		port->rx_msgid = -1;
2208 port->explicit_contract = false;
2209 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2210 break;
2211 case SRC_SEND_CAPABILITIES:
2212 port->caps_count++;
2213 if (port->caps_count > PD_N_CAPS_COUNT) {
2214 tcpm_set_state(port, SRC_READY, 0);
2215 break;
2216 }
2217 ret = tcpm_pd_send_source_caps(port);
2218 if (ret < 0) {
2219 tcpm_set_state(port, SRC_SEND_CAPABILITIES,
2220 PD_T_SEND_SOURCE_CAP);
2221 } else {
2222 /*
2223 * Per standard, we should clear the reset counter here.
2224 * However, that can result in state machine hang-ups.
2225 * Reset it only in READY state to improve stability.
2226 */
2227 /* port->hard_reset_count = 0; */
2228 port->caps_count = 0;
2229 port->pd_capable = true;
2230 tcpm_set_state_cond(port, hard_reset_state(port),
2231 PD_T_SEND_SOURCE_CAP);
2232 }
2233 break;
2234 case SRC_NEGOTIATE_CAPABILITIES:
2235 ret = tcpm_pd_check_request(port);
2236 if (ret < 0) {
2237 tcpm_pd_send_control(port, PD_CTRL_REJECT);
2238 if (!port->explicit_contract) {
2239 tcpm_set_state(port,
2240 SRC_WAIT_NEW_CAPABILITIES, 0);
2241 } else {
2242 tcpm_set_state(port, SRC_READY, 0);
2243 }
2244 } else {
2245 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2246 tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
2247 PD_T_SRC_TRANSITION);
2248 }
2249 break;
2250 case SRC_TRANSITION_SUPPLY:
2251 /* XXX: regulator_set_voltage(vbus, ...) */
2252 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2253 port->explicit_contract = true;
2254 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
2255 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2256 tcpm_set_state_cond(port, SRC_READY, 0);
2257 break;
2258 case SRC_READY:
2259#if 1
2260 port->hard_reset_count = 0;
2261#endif
2262 port->try_src_count = 0;
2263
 2264		tcpm_swap_complete(port, 0);
 2265		tcpm_typec_connect(port);
2266 tcpm_check_send_discover(port);
2267 /*
2268 * 6.3.5
2269 * Sending ping messages is not necessary if
2270 * - the source operates at vSafe5V
2271 * or
2272 * - The system is not operating in PD mode
2273 * or
2274 * - Both partners are connected using a Type-C connector
 2275		 * XXX How do we know that?
2276 */
2277 if (port->pwr_opmode == TYPEC_PWR_MODE_PD &&
2278 !port->op_vsafe5v) {
2279 tcpm_pd_send_control(port, PD_CTRL_PING);
2280 tcpm_set_state_cond(port, SRC_READY,
2281 PD_T_SOURCE_ACTIVITY);
2282 }
2283 break;
2284 case SRC_WAIT_NEW_CAPABILITIES:
2285 /* Nothing to do... */
2286 break;
2287
2288 /* SNK states */
2289 case SNK_UNATTACHED:
2290 tcpm_swap_complete(port, -ENOTCONN);
2291 tcpm_snk_detach(port);
2292 if (tcpm_start_drp_toggling(port)) {
2293 tcpm_set_state(port, DRP_TOGGLING, 0);
2294 break;
2295 }
2296 tcpm_set_cc(port, TYPEC_CC_RD);
2297 if (port->typec_caps.type == TYPEC_PORT_DRP)
2298 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
2299 break;
2300 case SNK_ATTACH_WAIT:
2301 if ((port->cc1 == TYPEC_CC_OPEN &&
2302 port->cc2 != TYPEC_CC_OPEN) ||
2303 (port->cc1 != TYPEC_CC_OPEN &&
2304 port->cc2 == TYPEC_CC_OPEN))
2305 tcpm_set_state(port, SNK_DEBOUNCED,
2306 PD_T_CC_DEBOUNCE);
2307 else if (tcpm_port_is_disconnected(port))
2308 tcpm_set_state(port, SNK_UNATTACHED,
2309 PD_T_PD_DEBOUNCE);
2310 break;
2311 case SNK_DEBOUNCED:
2312 if (tcpm_port_is_disconnected(port))
2313 tcpm_set_state(port, SNK_UNATTACHED,
2314 PD_T_PD_DEBOUNCE);
2315 else if (port->vbus_present)
2316 tcpm_set_state(port,
2317 tcpm_try_src(port) ? SRC_TRY
2318 : SNK_ATTACHED,
2319 0);
2320 else
2321 /* Wait for VBUS, but not forever */
2322 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
2323 break;
2324
2325 case SRC_TRY:
2326 port->try_src_count++;
2327 tcpm_set_cc(port, tcpm_rp_cc(port));
2328 tcpm_set_state(port, SNK_TRYWAIT, PD_T_DRP_TRY);
2329 break;
2330 case SRC_TRY_DEBOUNCE:
2331 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
2332 break;
2333 case SNK_TRYWAIT:
2334 tcpm_set_cc(port, TYPEC_CC_RD);
2335 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, PD_T_CC_DEBOUNCE);
2336 break;
2337 case SNK_TRYWAIT_DEBOUNCE:
2338 if (port->vbus_present) {
2339 tcpm_set_state(port, SNK_ATTACHED, 0);
2340 break;
2341 }
2342 if (tcpm_port_is_disconnected(port)) {
2343 tcpm_set_state(port, SNK_UNATTACHED,
2344 PD_T_PD_DEBOUNCE);
2345 break;
2346 }
2347 if (tcpm_port_is_source(port))
2348 tcpm_set_state(port, SRC_ATTACHED, 0);
2349 /* XXX Are we supposed to stay in this state ? */
2350 break;
2351 case SNK_TRYWAIT_VBUS:
2352 tcpm_set_state(port, SNK_ATTACHED, PD_T_CC_DEBOUNCE);
2353 break;
2354
2355 case SNK_ATTACHED:
2356 ret = tcpm_snk_attach(port);
2357 if (ret < 0)
2358 tcpm_set_state(port, SNK_UNATTACHED, 0);
2359 else
2360 tcpm_set_state(port, SNK_STARTUP, 0);
2361 break;
2362 case SNK_STARTUP:
2363 /* XXX: callback into infrastructure */
2364 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB);
2365 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2366 port->message_id = 0;
 2367		port->rx_msgid = -1;
2368 port->explicit_contract = false;
2369 tcpm_set_state(port, SNK_DISCOVERY, 0);
2370 break;
2371 case SNK_DISCOVERY:
2372 if (port->vbus_present) {
2373 tcpm_set_current_limit(port,
2374 tcpm_get_current_limit(port),
2375 5000);
2376 tcpm_set_charge(port, true);
2377 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2378 break;
2379 }
2380 /*
2381 * For DRP, timeouts differ. Also, handling is supposed to be
2382 * different and much more complex (dead battery detection;
2383 * see USB power delivery specification, section 8.3.3.6.1.5.1).
2384 */
2385 tcpm_set_state(port, hard_reset_state(port),
2386 port->typec_caps.type == TYPEC_PORT_DRP ?
2387 PD_T_DB_DETECT : PD_T_NO_RESPONSE);
2388 break;
2389 case SNK_DISCOVERY_DEBOUNCE:
2390 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
2391 PD_T_CC_DEBOUNCE);
2392 break;
2393 case SNK_DISCOVERY_DEBOUNCE_DONE:
2394 if (!tcpm_port_is_disconnected(port) &&
2395 tcpm_port_is_sink(port) &&
2396 time_is_after_jiffies(port->delayed_runtime)) {
2397 tcpm_set_state(port, SNK_DISCOVERY,
2398 port->delayed_runtime - jiffies);
2399 break;
2400 }
2401 tcpm_set_state(port, unattached_state(port), 0);
2402 break;
2403 case SNK_WAIT_CAPABILITIES:
2404 ret = port->tcpc->set_pd_rx(port->tcpc, true);
2405 if (ret < 0) {
2406 tcpm_set_state(port, SNK_READY, 0);
2407 break;
2408 }
2409 /*
2410 * If VBUS has never been low, and we time out waiting
2411 * for source cap, try a soft reset first, in case we
2412 * were already in a stable contract before this boot.
2413 * Do this only once.
2414 */
2415 if (port->vbus_never_low) {
2416 port->vbus_never_low = false;
2417 tcpm_set_state(port, SOFT_RESET_SEND,
2418 PD_T_SINK_WAIT_CAP);
2419 } else {
2420 tcpm_set_state(port, hard_reset_state(port),
2421 PD_T_SINK_WAIT_CAP);
2422 }
2423 break;
2424 case SNK_NEGOTIATE_CAPABILITIES:
2425 port->pd_capable = true;
2426 port->hard_reset_count = 0;
2427 ret = tcpm_pd_send_request(port);
2428 if (ret < 0) {
2429 /* Let the Source send capabilities again. */
2430 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2431 } else {
2432 tcpm_set_state_cond(port, hard_reset_state(port),
2433 PD_T_SENDER_RESPONSE);
2434 }
2435 break;
2436 case SNK_TRANSITION_SINK:
2437 case SNK_TRANSITION_SINK_VBUS:
2438 tcpm_set_state(port, hard_reset_state(port),
2439 PD_T_PS_TRANSITION);
2440 break;
2441 case SNK_READY:
2442 port->try_snk_count = 0;
2443 port->explicit_contract = true;
2444 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
2445 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2446
 2447		tcpm_swap_complete(port, 0);
 2448		tcpm_typec_connect(port);
2449 tcpm_check_send_discover(port);
2450 break;
2451
2452 /* Accessory states */
2453 case ACC_UNATTACHED:
2454 tcpm_acc_detach(port);
2455 tcpm_set_state(port, SRC_UNATTACHED, 0);
2456 break;
2457 case DEBUG_ACC_ATTACHED:
2458 case AUDIO_ACC_ATTACHED:
2459 ret = tcpm_acc_attach(port);
2460 if (ret < 0)
2461 tcpm_set_state(port, ACC_UNATTACHED, 0);
2462 break;
2463 case AUDIO_ACC_DEBOUNCE:
2464 tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
2465 break;
2466
2467 /* Hard_Reset states */
2468 case HARD_RESET_SEND:
2469 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
2470 tcpm_set_state(port, HARD_RESET_START, 0);
2471 break;
2472 case HARD_RESET_START:
2473 port->hard_reset_count++;
2474 port->tcpc->set_pd_rx(port->tcpc, false);
2475 tcpm_unregister_altmodes(port);
2476 port->send_discover = true;
2477 if (port->pwr_role == TYPEC_SOURCE)
2478 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
2479 PD_T_PS_HARD_RESET);
2480 else
2481 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
2482 break;
2483 case SRC_HARD_RESET_VBUS_OFF:
2484 tcpm_set_vconn(port, true);
2485 tcpm_set_vbus(port, false);
2486 tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
2487 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
2488 break;
2489 case SRC_HARD_RESET_VBUS_ON:
2490 tcpm_set_vbus(port, true);
2491 port->tcpc->set_pd_rx(port->tcpc, true);
2492 tcpm_set_attached_state(port, true);
2493 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
2494 break;
2495 case SNK_HARD_RESET_SINK_OFF:
2496 tcpm_set_vconn(port, false);
2497 tcpm_set_charge(port, false);
2498 tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
2499 /*
2500 * VBUS may or may not toggle, depending on the adapter.
2501 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
2502 * directly after timeout.
2503 */
2504 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
2505 break;
2506 case SNK_HARD_RESET_WAIT_VBUS:
2507 /* Assume we're disconnected if VBUS doesn't come back. */
2508 tcpm_set_state(port, SNK_UNATTACHED,
2509 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
2510 break;
2511 case SNK_HARD_RESET_SINK_ON:
2512 /* Note: There is no guarantee that VBUS is on in this state */
2513 /*
2514 * XXX:
 2515		 * The specification suggests that dual-mode ports in sink
 2516		 * mode should transition to state PE_SRC_Transition_to_default.
 2517		 * See USB power delivery specification chapter 8.3.3.6.1.3.
 2518		 * This would mean to
2519 * - turn off VCONN, reset power supply
2520 * - request hardware reset
2521 * - turn on VCONN
2522 * - Transition to state PE_Src_Startup
2523 * SNK only ports shall transition to state Snk_Startup
2524 * (see chapter 8.3.3.3.8).
 2525		 * Similarly, dual-mode ports in source mode should transition
2526 * to PE_SNK_Transition_to_default.
2527 */
2528 tcpm_set_attached_state(port, true);
2529 tcpm_set_state(port, SNK_STARTUP, 0);
2530 break;
2531
2532 /* Soft_Reset states */
2533 case SOFT_RESET:
2534 port->message_id = 0;
 2535		port->rx_msgid = -1;
2536 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2537 if (port->pwr_role == TYPEC_SOURCE)
2538 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2539 else
2540 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2541 break;
2542 case SOFT_RESET_SEND:
2543 port->message_id = 0;
 2544		port->rx_msgid = -1;
2545 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
2546 tcpm_set_state_cond(port, hard_reset_state(port), 0);
2547 else
2548 tcpm_set_state_cond(port, hard_reset_state(port),
2549 PD_T_SENDER_RESPONSE);
2550 break;
2551
2552 /* DR_Swap states */
2553 case DR_SWAP_SEND:
2554 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
2555 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
2556 PD_T_SENDER_RESPONSE);
2557 break;
2558 case DR_SWAP_ACCEPT:
2559 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2560 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
2561 break;
2562 case DR_SWAP_SEND_TIMEOUT:
2563 tcpm_swap_complete(port, -ETIMEDOUT);
2564 tcpm_set_state(port, ready_state(port), 0);
2565 break;
2566 case DR_SWAP_CHANGE_DR:
2567 if (port->data_role == TYPEC_HOST) {
2568 tcpm_unregister_altmodes(port);
2569 tcpm_set_roles(port, true, port->pwr_role,
2570 TYPEC_DEVICE);
2571 } else {
2572 tcpm_set_roles(port, true, port->pwr_role,
2573 TYPEC_HOST);
2574 port->send_discover = true;
2575 }
2576 tcpm_set_state(port, ready_state(port), 0);
2577 break;
2578
2579 /* PR_Swap states */
2580 case PR_SWAP_ACCEPT:
2581 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2582 tcpm_set_state(port, PR_SWAP_START, 0);
2583 break;
2584 case PR_SWAP_SEND:
2585 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
2586 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
2587 PD_T_SENDER_RESPONSE);
2588 break;
2589 case PR_SWAP_SEND_TIMEOUT:
2590 tcpm_swap_complete(port, -ETIMEDOUT);
2591 tcpm_set_state(port, ready_state(port), 0);
2592 break;
2593 case PR_SWAP_START:
2594 if (port->pwr_role == TYPEC_SOURCE)
2595 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
2596 PD_T_SRC_TRANSITION);
2597 else
2598 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
2599 break;
2600 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
2601 tcpm_set_vbus(port, false);
2602 port->explicit_contract = false;
2603 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
2604 PD_T_PS_SOURCE_OFF);
2605 break;
2606 case PR_SWAP_SRC_SNK_SOURCE_OFF:
2607 tcpm_set_cc(port, TYPEC_CC_RD);
2608 /*
2609 * USB-PD standard, 6.2.1.4, Port Power Role:
2610 * "During the Power Role Swap Sequence, for the initial Source
2611 * Port, the Port Power Role field shall be set to Sink in the
2612 * PS_RDY Message indicating that the initial Source’s power
2613 * supply is turned off"
2614 */
2615 tcpm_set_pwr_role(port, TYPEC_SINK);
2616 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
2617 tcpm_set_state(port, ERROR_RECOVERY, 0);
2618 break;
2619 }
2620 tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
2621 break;
2622 case PR_SWAP_SRC_SNK_SINK_ON:
2623 tcpm_set_state(port, SNK_STARTUP, 0);
2624 break;
2625 case PR_SWAP_SNK_SRC_SINK_OFF:
2626 tcpm_set_charge(port, false);
2627 tcpm_set_state(port, hard_reset_state(port),
2628 PD_T_PS_SOURCE_OFF);
2629 break;
2630 case PR_SWAP_SNK_SRC_SOURCE_ON:
2631 tcpm_set_cc(port, tcpm_rp_cc(port));
2632 tcpm_set_vbus(port, true);
2633 /*
2634 * USB PD standard, 6.2.1.4:
2635 * "Subsequent Messages initiated by the Policy Engine,
2636 * such as the PS_RDY Message sent to indicate that Vbus
2637 * is ready, will have the Port Power Role field set to
2638 * Source."
2639 */
 2640		tcpm_set_pwr_role(port, TYPEC_SOURCE);
 2641		tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2642 tcpm_set_state(port, SRC_STARTUP, 0);
2643 break;
2644
2645 case VCONN_SWAP_ACCEPT:
2646 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2647 tcpm_set_state(port, VCONN_SWAP_START, 0);
2648 break;
2649 case VCONN_SWAP_SEND:
2650 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
2651 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
2652 PD_T_SENDER_RESPONSE);
2653 break;
2654 case VCONN_SWAP_SEND_TIMEOUT:
2655 tcpm_swap_complete(port, -ETIMEDOUT);
2656 tcpm_set_state(port, ready_state(port), 0);
2657 break;
2658 case VCONN_SWAP_START:
2659 if (port->vconn_role == TYPEC_SOURCE)
2660 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
2661 else
2662 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
2663 break;
2664 case VCONN_SWAP_WAIT_FOR_VCONN:
2665 tcpm_set_state(port, hard_reset_state(port),
2666 PD_T_VCONN_SOURCE_ON);
2667 break;
2668 case VCONN_SWAP_TURN_ON_VCONN:
2669 tcpm_set_vconn(port, true);
2670 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2671 tcpm_set_state(port, ready_state(port), 0);
2672 break;
2673 case VCONN_SWAP_TURN_OFF_VCONN:
2674 tcpm_set_vconn(port, false);
2675 tcpm_set_state(port, ready_state(port), 0);
2676 break;
2677
2678 case DR_SWAP_CANCEL:
2679 case PR_SWAP_CANCEL:
2680 case VCONN_SWAP_CANCEL:
2681 tcpm_swap_complete(port, port->swap_status);
2682 if (port->pwr_role == TYPEC_SOURCE)
2683 tcpm_set_state(port, SRC_READY, 0);
2684 else
2685 tcpm_set_state(port, SNK_READY, 0);
2686 break;
2687
2688 case BIST_RX:
2689 switch (BDO_MODE_MASK(port->bist_request)) {
2690 case BDO_MODE_CARRIER2:
2691 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
2692 break;
2693 default:
2694 break;
2695 }
2696 /* Always switch to unattached state */
2697 tcpm_set_state(port, unattached_state(port), 0);
2698 break;
2699 case ERROR_RECOVERY:
2700 tcpm_swap_complete(port, -EPROTO);
2701 tcpm_reset_port(port);
2702
2703 tcpm_set_cc(port, TYPEC_CC_OPEN);
2704 tcpm_set_state(port, ERROR_RECOVERY_WAIT_OFF,
2705 PD_T_ERROR_RECOVERY);
2706 break;
2707 case ERROR_RECOVERY_WAIT_OFF:
2708 tcpm_set_state(port,
2709 tcpm_default_state(port),
2710 port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
2711 break;
2712 default:
2713 WARN(1, "Unexpected port state %d\n", port->state);
2714 break;
2715 }
2716}
2717
2718static void tcpm_state_machine_work(struct work_struct *work)
2719{
2720 struct tcpm_port *port = container_of(work, struct tcpm_port,
2721 state_machine.work);
2722 enum tcpm_state prev_state;
2723
2724 mutex_lock(&port->lock);
2725 port->state_machine_running = true;
2726
2727 if (port->queued_message && tcpm_send_queued_message(port))
2728 goto done;
2729
2730 /* If we were queued due to a delayed state change, update it now */
2731 if (port->delayed_state) {
2732 tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
2733 tcpm_states[port->state],
2734 tcpm_states[port->delayed_state], port->delay_ms);
2735 port->prev_state = port->state;
2736 port->state = port->delayed_state;
2737 port->delayed_state = INVALID_STATE;
2738 }
2739
2740 /*
2741 * Continue running as long as we have (non-delayed) state changes
2742 * to make.
2743 */
2744 do {
2745 prev_state = port->state;
2746 run_state_machine(port);
2747 if (port->queued_message)
2748 tcpm_send_queued_message(port);
2749 } while (port->state != prev_state && !port->delayed_state);
2750
2751done:
2752 port->state_machine_running = false;
2753 mutex_unlock(&port->lock);
2754}
2755
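/*
 * Handle a CC status change reported by the low-level driver. Called
 * with port->lock held; depending on the current state this either
 * kicks the state machine or avoids re-arming an already pending
 * delayed transition.
 */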
2756static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
2757 enum typec_cc_status cc2)
2758{
2759 enum typec_cc_status old_cc1, old_cc2;
2760 enum tcpm_state new_state;
2761
2762 old_cc1 = port->cc1;
2763 old_cc2 = port->cc2;
2764 port->cc1 = cc1;
2765 port->cc2 = cc2;
2766
2767 tcpm_log_force(port,
2768 "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
2769 old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
2770 port->polarity,
2771 tcpm_port_is_disconnected(port) ? "disconnected"
2772 : "connected");
2773
2774 switch (port->state) {
2775 case DRP_TOGGLING:
2776 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
2777 tcpm_port_is_source(port))
2778 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2779 else if (tcpm_port_is_sink(port))
2780 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2781 break;
2782 case SRC_UNATTACHED:
2783 case ACC_UNATTACHED:
2784 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
2785 tcpm_port_is_source(port))
2786 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2787 break;
2788 case SRC_ATTACH_WAIT:
2789 if (tcpm_port_is_disconnected(port) ||
2790 tcpm_port_is_audio_detached(port))
2791 tcpm_set_state(port, SRC_UNATTACHED, 0);
2792 else if (cc1 != old_cc1 || cc2 != old_cc2)
2793 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2794 break;
2795 case SRC_ATTACHED:
2796 if (tcpm_port_is_disconnected(port))
2797 tcpm_set_state(port, SRC_UNATTACHED, 0);
2798 break;
2799
2800 case SNK_UNATTACHED:
2801 if (tcpm_port_is_sink(port))
2802 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2803 break;
2804 case SNK_ATTACH_WAIT:
2805 if ((port->cc1 == TYPEC_CC_OPEN &&
2806 port->cc2 != TYPEC_CC_OPEN) ||
2807 (port->cc1 != TYPEC_CC_OPEN &&
2808 port->cc2 == TYPEC_CC_OPEN))
2809 new_state = SNK_DEBOUNCED;
2810 else if (tcpm_port_is_disconnected(port))
2811 new_state = SNK_UNATTACHED;
2812 else
2813 break;
2814 if (new_state != port->delayed_state)
2815 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2816 break;
2817 case SNK_DEBOUNCED:
2818 if (tcpm_port_is_disconnected(port))
2819 new_state = SNK_UNATTACHED;
2820 else if (port->vbus_present)
2821 new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
2822 else
2823 new_state = SNK_UNATTACHED;
2824 if (new_state != port->delayed_state)
2825 tcpm_set_state(port, SNK_DEBOUNCED, 0);
2826 break;
2827 case SNK_READY:
2828 if (tcpm_port_is_disconnected(port))
2829 tcpm_set_state(port, unattached_state(port), 0);
2830 else if (!port->pd_capable &&
2831 (cc1 != old_cc1 || cc2 != old_cc2))
2832 tcpm_set_current_limit(port,
2833 tcpm_get_current_limit(port),
2834 5000);
2835 break;
2836
2837 case AUDIO_ACC_ATTACHED:
2838 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
2839 tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
2840 break;
2841 case AUDIO_ACC_DEBOUNCE:
2842 if (tcpm_port_is_audio(port))
2843 tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
2844 break;
2845
2846 case DEBUG_ACC_ATTACHED:
2847 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
2848 tcpm_set_state(port, ACC_UNATTACHED, 0);
2849 break;
2850
2851 case SNK_TRY:
2852 /* Do nothing, waiting for timeout */
2853 break;
2854
2855 case SNK_DISCOVERY:
2856 /* CC line is unstable, wait for debounce */
2857 if (tcpm_port_is_disconnected(port))
2858 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
2859 break;
2860 case SNK_DISCOVERY_DEBOUNCE:
2861 break;
2862
2863 case SRC_TRYWAIT:
2864 /* Hand over to state machine if needed */
2865 if (!port->vbus_present && tcpm_port_is_source(port))
2866 new_state = SRC_ATTACHED;
2867 else
2868 new_state = SRC_TRYWAIT_UNATTACHED;
2869
2870 if (new_state != port->delayed_state)
2871 tcpm_set_state(port, SRC_TRYWAIT, 0);
2872 break;
2873 case SNK_TRY_WAIT:
2874 if (port->vbus_present && tcpm_port_is_sink(port)) {
2875 tcpm_set_state(port, SNK_ATTACHED, 0);
2876 break;
2877 }
2878 if (!tcpm_port_is_sink(port))
2879 new_state = SRC_TRYWAIT;
2880 else
2881 new_state = SRC_TRYWAIT_UNATTACHED;
2882
2883 if (new_state != port->delayed_state)
2884 tcpm_set_state(port, SNK_TRY_WAIT, 0);
2885 break;
2886
2887 case SRC_TRY:
2888 tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
2889 break;
2890 case SRC_TRY_DEBOUNCE:
2891 tcpm_set_state(port, SRC_TRY, 0);
2892 break;
2893 case SNK_TRYWAIT_DEBOUNCE:
2894 if (port->vbus_present) {
2895 tcpm_set_state(port, SNK_ATTACHED, 0);
2896 break;
2897 }
2898 if (tcpm_port_is_source(port)) {
2899 tcpm_set_state(port, SRC_ATTACHED, 0);
2900 break;
2901 }
2902 if (tcpm_port_is_disconnected(port) &&
2903 port->delayed_state != SNK_UNATTACHED)
2904 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
2905 break;
2906
2907 case PR_SWAP_SNK_SRC_SINK_OFF:
2908 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
2909 case PR_SWAP_SRC_SNK_SOURCE_OFF:
2910 /*
2911 * CC state change is expected here; we just turned off power.
2912 * Ignore it.
2913 */
2914 break;
2915
2916 default:
2917 if (tcpm_port_is_disconnected(port))
2918 tcpm_set_state(port, unattached_state(port), 0);
2919 break;
2920 }
2921}
2922
2923static void _tcpm_pd_vbus_on(struct tcpm_port *port)
2924{
2925 enum tcpm_state new_state;
2926
2927 tcpm_log_force(port, "VBUS on");
2928 port->vbus_present = true;
2929 switch (port->state) {
2930 case SNK_TRANSITION_SINK_VBUS:
2931 tcpm_set_state(port, SNK_READY, 0);
2932 break;
2933 case SNK_DISCOVERY:
2934 tcpm_set_state(port, SNK_DISCOVERY, 0);
2935 break;
2936
2937 case SNK_DEBOUNCED:
2938 tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
2939 : SNK_ATTACHED,
2940 0);
2941 break;
2942 case SNK_HARD_RESET_WAIT_VBUS:
2943 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
2944 break;
2945 case SRC_ATTACHED:
2946 tcpm_set_state(port, SRC_STARTUP, 0);
2947 break;
2948 case SRC_HARD_RESET_VBUS_ON:
2949 tcpm_set_state(port, SRC_STARTUP, 0);
2950 break;
2951
2952 case SNK_TRY:
2953 /* Do nothing, waiting for timeout */
2954 break;
2955 case SRC_TRYWAIT:
2956 /* Hand over to state machine if needed */
2957 if (port->delayed_state != SRC_TRYWAIT_UNATTACHED)
2958 tcpm_set_state(port, SRC_TRYWAIT, 0);
2959 break;
2960 case SNK_TRY_WAIT:
2961 if (tcpm_port_is_sink(port)) {
2962 tcpm_set_state(port, SNK_ATTACHED, 0);
2963 break;
2964 }
2965 if (!tcpm_port_is_sink(port))
2966 new_state = SRC_TRYWAIT;
2967 else
2968 new_state = SRC_TRYWAIT_UNATTACHED;
2969
2970 if (new_state != port->delayed_state)
2971 tcpm_set_state(port, SNK_TRY_WAIT, 0);
2972 break;
2973 case SNK_TRYWAIT:
2974 tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
2975 break;
2976
2977 default:
2978 break;
2979 }
2980}
2981
2982static void _tcpm_pd_vbus_off(struct tcpm_port *port)
2983{
2984 enum tcpm_state new_state;
2985
2986 tcpm_log_force(port, "VBUS off");
2987 port->vbus_present = false;
2988 port->vbus_never_low = false;
2989 switch (port->state) {
2990 case SNK_HARD_RESET_SINK_OFF:
2991 tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
2992 break;
2993 case SRC_HARD_RESET_VBUS_OFF:
2994 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, 0);
2995 break;
2996 case HARD_RESET_SEND:
2997 break;
2998
2999 case SNK_TRY:
3000 /* Do nothing, waiting for timeout */
3001 break;
3002 case SRC_TRYWAIT:
3003 /* Hand over to state machine if needed */
3004 if (tcpm_port_is_source(port))
3005 new_state = SRC_ATTACHED;
3006 else
3007 new_state = SRC_TRYWAIT_UNATTACHED;
3008 if (new_state != port->delayed_state)
3009 tcpm_set_state(port, SRC_TRYWAIT, 0);
3010 break;
3011 case SNK_TRY_WAIT:
3012 if (!tcpm_port_is_sink(port))
3013 new_state = SRC_TRYWAIT;
3014 else
3015 new_state = SRC_TRYWAIT_UNATTACHED;
3016
3017 if (new_state != port->delayed_state)
3018 tcpm_set_state(port, SNK_TRY_WAIT, 0);
3019 break;
3020 case SNK_TRYWAIT_VBUS:
3021 tcpm_set_state(port, SNK_TRYWAIT, 0);
3022 break;
3023
3024 case SNK_ATTACH_WAIT:
3025 tcpm_set_state(port, SNK_UNATTACHED, 0);
3026 break;
3027
3028 case SNK_NEGOTIATE_CAPABILITIES:
3029 break;
3030
3031 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
3032 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
3033 break;
3034
3035 case PR_SWAP_SNK_SRC_SINK_OFF:
3036 /* Do nothing, expected */
3037 break;
3038
3039 case ERROR_RECOVERY_WAIT_OFF:
3040 tcpm_set_state(port,
3041 port->pwr_role == TYPEC_SOURCE ?
3042 SRC_UNATTACHED : SNK_UNATTACHED,
3043 0);
3044 break;
3045
3046 default:
3047 if (port->pwr_role == TYPEC_SINK &&
3048 port->attached)
3049 tcpm_set_state(port, SNK_UNATTACHED, 0);
3050 break;
3051 }
3052}
3053
3054static void _tcpm_pd_hard_reset(struct tcpm_port *port)
3055{
3056 tcpm_log_force(port, "Received hard reset");
3057 /*
3058 * If we keep receiving hard reset requests, executing the hard reset
3059 * must have failed. Revert to error recovery if that happens.
3060 */
3061 tcpm_set_state(port,
3062 port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
3063 HARD_RESET_START : ERROR_RECOVERY,
3064 0);
3065}
3066
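/*
 * Dispatch events signalled by the low-level TCPC driver via
 * tcpm_cc_change(), tcpm_vbus_change() and tcpm_pd_hard_reset().
 * Events are latched in port->pd_events under pd_event_lock and
 * handled here under port->lock.
 */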
3067static void tcpm_pd_event_handler(struct work_struct *work)
3068{
3069 struct tcpm_port *port = container_of(work, struct tcpm_port,
3070 event_work);
3071 u32 events;
3072
3073 mutex_lock(&port->lock);
3074
3075 spin_lock(&port->pd_event_lock);
3076 while (port->pd_events) {
3077 events = port->pd_events;
3078 port->pd_events = 0;
3079 spin_unlock(&port->pd_event_lock);
3080 if (events & TCPM_RESET_EVENT)
3081 _tcpm_pd_hard_reset(port);
3082 if (events & TCPM_VBUS_EVENT) {
3083 bool vbus;
3084
3085 vbus = port->tcpc->get_vbus(port->tcpc);
3086 if (vbus)
3087 _tcpm_pd_vbus_on(port);
3088 else
3089 _tcpm_pd_vbus_off(port);
3090 }
3091 if (events & TCPM_CC_EVENT) {
3092 enum typec_cc_status cc1, cc2;
3093
3094 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
3095 _tcpm_cc_change(port, cc1, cc2);
3096 }
3097 spin_lock(&port->pd_event_lock);
3098 }
3099 spin_unlock(&port->pd_event_lock);
3100 mutex_unlock(&port->lock);
3101}
3102
3103void tcpm_cc_change(struct tcpm_port *port)
3104{
3105 spin_lock(&port->pd_event_lock);
3106 port->pd_events |= TCPM_CC_EVENT;
3107 spin_unlock(&port->pd_event_lock);
3108 queue_work(port->wq, &port->event_work);
3109}
3110EXPORT_SYMBOL_GPL(tcpm_cc_change);
3111
3112void tcpm_vbus_change(struct tcpm_port *port)
3113{
3114 spin_lock(&port->pd_event_lock);
3115 port->pd_events |= TCPM_VBUS_EVENT;
3116 spin_unlock(&port->pd_event_lock);
3117 queue_work(port->wq, &port->event_work);
3118}
3119EXPORT_SYMBOL_GPL(tcpm_vbus_change);
3120
3121void tcpm_pd_hard_reset(struct tcpm_port *port)
3122{
3123 spin_lock(&port->pd_event_lock);
3124 port->pd_events = TCPM_RESET_EVENT;
3125 spin_unlock(&port->pd_event_lock);
3126 queue_work(port->wq, &port->event_work);
3127}
3128EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
3129
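/*
 * Data role swap requested through the typec class. Only supported for
 * PD-capable DRP ports in a ready state; blocks until the swap succeeds,
 * fails or times out (see tcpm_swap_complete()).
 */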
3130static int tcpm_dr_set(const struct typec_capability *cap,
3131 enum typec_data_role data)
3132{
3133 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3134 int ret;
3135
3136 mutex_lock(&port->swap_lock);
3137 mutex_lock(&port->lock);
3138
3139 if (port->typec_caps.type != TYPEC_PORT_DRP || !port->pd_capable) {
3140 ret = -EINVAL;
3141 goto port_unlock;
3142 }
3143 if (port->state != SRC_READY && port->state != SNK_READY) {
3144 ret = -EAGAIN;
3145 goto port_unlock;
3146 }
3147
3148 if (port->data_role == data) {
3149 ret = 0;
3150 goto port_unlock;
3151 }
3152
3153 /*
3154 * XXX
3155 * 6.3.9: If an alternate mode is active, a request to swap
3156 * alternate modes shall trigger a port reset.
3157 * Reject data role swap request in this case.
3158 */
3159
3160 port->swap_status = 0;
3161 port->swap_pending = true;
3162 reinit_completion(&port->swap_complete);
3163 tcpm_set_state(port, DR_SWAP_SEND, 0);
3164 mutex_unlock(&port->lock);
3165
3166 wait_for_completion(&port->swap_complete);
3167
3168 ret = port->swap_status;
3169 goto swap_unlock;
3170
3171port_unlock:
3172 mutex_unlock(&port->lock);
3173swap_unlock:
3174 mutex_unlock(&port->swap_lock);
3175 return ret;
3176}
3177
3178static int tcpm_pr_set(const struct typec_capability *cap,
3179 enum typec_role role)
3180{
3181 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3182 int ret;
3183
3184 mutex_lock(&port->swap_lock);
3185 mutex_lock(&port->lock);
3186
3187 if (port->typec_caps.type != TYPEC_PORT_DRP) {
3188 ret = -EINVAL;
3189 goto port_unlock;
3190 }
3191 if (port->state != SRC_READY && port->state != SNK_READY) {
3192 ret = -EAGAIN;
3193 goto port_unlock;
3194 }
3195
3196 if (role == port->pwr_role) {
3197 ret = 0;
3198 goto port_unlock;
3199 }
3200
3201 if (!port->pd_capable) {
3202 /*
3203 * If the partner is not PD capable, reset the port to
3204 * trigger a role change. This can only work if a preferred
3205 * role is configured, and if it matches the requested role.
3206 */
3207 if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
3208 port->try_role == port->pwr_role) {
3209 ret = -EINVAL;
3210 goto port_unlock;
3211 }
3212 tcpm_set_state(port, HARD_RESET_SEND, 0);
3213 ret = 0;
3214 goto port_unlock;
3215 }
3216
3217 port->swap_status = 0;
3218 port->swap_pending = true;
3219 reinit_completion(&port->swap_complete);
3220 tcpm_set_state(port, PR_SWAP_SEND, 0);
3221 mutex_unlock(&port->lock);
3222
3223 wait_for_completion(&port->swap_complete);
3224
3225 ret = port->swap_status;
3226 goto swap_unlock;
3227
3228port_unlock:
3229 mutex_unlock(&port->lock);
3230swap_unlock:
3231 mutex_unlock(&port->swap_lock);
3232 return ret;
3233}
3234
3235static int tcpm_vconn_set(const struct typec_capability *cap,
3236 enum typec_role role)
3237{
3238 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3239 int ret;
3240
3241 mutex_lock(&port->swap_lock);
3242 mutex_lock(&port->lock);
3243
3244 if (port->state != SRC_READY && port->state != SNK_READY) {
3245 ret = -EAGAIN;
3246 goto port_unlock;
3247 }
3248
3249 if (role == port->vconn_role) {
3250 ret = 0;
3251 goto port_unlock;
3252 }
3253
3254 port->swap_status = 0;
3255 port->swap_pending = true;
3256 reinit_completion(&port->swap_complete);
3257 tcpm_set_state(port, VCONN_SWAP_SEND, 0);
3258 mutex_unlock(&port->lock);
3259
3260 wait_for_completion(&port->swap_complete);
3261
3262 ret = port->swap_status;
3263 goto swap_unlock;
3264
3265port_unlock:
3266 mutex_unlock(&port->lock);
3267swap_unlock:
3268 mutex_unlock(&port->swap_lock);
3269 return ret;
3270}
3271
3272static int tcpm_try_role(const struct typec_capability *cap, int role)
3273{
3274 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3275 struct tcpc_dev *tcpc = port->tcpc;
3276 int ret = 0;
3277
3278 mutex_lock(&port->lock);
3279 if (tcpc->try_role)
3280 ret = tcpc->try_role(tcpc, role);
3281 if (!ret && !tcpc->config->try_role_hw)
3282 port->try_role = role;
3283 port->try_src_count = 0;
3284 port->try_snk_count = 0;
3285 mutex_unlock(&port->lock);
3286
3287 return ret;
3288}
3289
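/*
 * Bring the port to a known state: (re)initialize the TCPC, reset the
 * local port state, seed the VBUS status, and finally force an error
 * recovery cycle so that we always start from a clean disconnect.
 */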
3290static void tcpm_init(struct tcpm_port *port)
3291{
3292 enum typec_cc_status cc1, cc2;
3293
3294 port->tcpc->init(port->tcpc);
3295
3296 tcpm_reset_port(port);
3297
3298 /*
3299 * XXX
3300 * Should possibly wait for VBUS to settle if it was enabled locally
3301 * since tcpm_reset_port() will disable VBUS.
3302 */
3303 port->vbus_present = port->tcpc->get_vbus(port->tcpc);
3304 if (port->vbus_present)
3305 port->vbus_never_low = true;
3306
3307 tcpm_set_state(port, tcpm_default_state(port), 0);
3308
3309 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
3310 _tcpm_cc_change(port, cc1, cc2);
3311
3312 /*
3313 * Some adapters need a clean slate at startup, and won't recover
3314 * otherwise. So do not try to be fancy and force a clean disconnect.
3315 */
3316 tcpm_set_state(port, ERROR_RECOVERY, 0);
3317}
3318
3319void tcpm_tcpc_reset(struct tcpm_port *port)
3320{
3321 mutex_lock(&port->lock);
3322 /* XXX: Maintain PD connection if possible? */
3323 tcpm_init(port);
3324 mutex_unlock(&port->lock);
3325}
3326EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
3327
3328static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
3329 unsigned int nr_pdo)
3330{
3331 unsigned int i;
3332
3333 if (nr_pdo > PDO_MAX_OBJECTS)
3334 nr_pdo = PDO_MAX_OBJECTS;
3335
3336 for (i = 0; i < nr_pdo; i++)
3337 dest_pdo[i] = src_pdo[i];
3338
3339 return nr_pdo;
3340}
3341
3342static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
3343 unsigned int nr_vdo)
3344{
3345 unsigned int i;
3346
3347 if (nr_vdo > VDO_MAX_OBJECTS)
3348 nr_vdo = VDO_MAX_OBJECTS;
3349
3350 for (i = 0; i < nr_vdo; i++)
3351 dest_vdo[i] = src_vdo[i];
3352
3353 return nr_vdo;
3354}
3355
3356void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
3357 unsigned int nr_pdo)
3358{
3359 mutex_lock(&port->lock);
3360 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, pdo, nr_pdo);
3361 switch (port->state) {
3362 case SRC_UNATTACHED:
3363 case SRC_ATTACH_WAIT:
3364 case SRC_TRYWAIT:
3365 tcpm_set_cc(port, tcpm_rp_cc(port));
3366 break;
3367 case SRC_SEND_CAPABILITIES:
3368 case SRC_NEGOTIATE_CAPABILITIES:
3369 case SRC_READY:
3370 case SRC_WAIT_NEW_CAPABILITIES:
3371 tcpm_set_cc(port, tcpm_rp_cc(port));
3372 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
3373 break;
3374 default:
3375 break;
3376 }
3377 mutex_unlock(&port->lock);
3378}
3379EXPORT_SYMBOL_GPL(tcpm_update_source_capabilities);
3380
3381void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
3382 unsigned int nr_pdo,
3383 unsigned int max_snk_mv,
3384 unsigned int max_snk_ma,
3385 unsigned int max_snk_mw,
3386 unsigned int operating_snk_mw)
3387{
3388 mutex_lock(&port->lock);
3389 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo);
3390 port->max_snk_mv = max_snk_mv;
3391 port->max_snk_ma = max_snk_ma;
3392 port->max_snk_mw = max_snk_mw;
3393 port->operating_snk_mw = operating_snk_mw;
3394
3395 switch (port->state) {
3396 case SNK_NEGOTIATE_CAPABILITIES:
3397 case SNK_READY:
3398 case SNK_TRANSITION_SINK:
3399 case SNK_TRANSITION_SINK_VBUS:
3400 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
3401 break;
3402 default:
3403 break;
3404 }
3405 mutex_unlock(&port->lock);
3406}
3407EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
3408
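/*
 * Illustrative usage sketch only (not taken from an in-tree driver):
 * a low-level TCPC driver fills in a struct tcpc_dev with the mandatory
 * callbacks checked below plus a valid config, registers the port, and
 * forwards chip interrupts through the notification helpers. Names such
 * as "chip" and "chip->tcpc" are hypothetical.
 *
 *	port = tcpm_register_port(dev, &chip->tcpc);
 *	if (IS_ERR(port))
 *		return PTR_ERR(port);
 *
 *	From the chip interrupt handler, as applicable:
 *	tcpm_cc_change(port);		(CC status changed)
 *	tcpm_vbus_change(port);		(VBUS presence changed)
 *	tcpm_pd_hard_reset(port);	(Hard Reset signaling received)
 *
 *	On driver removal:
 *	tcpm_unregister_port(port);
 */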
3409struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
3410{
3411 struct tcpm_port *port;
3412 int i, err;
3413
3414 if (!dev || !tcpc || !tcpc->config ||
3415 !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
3416 !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
3417 !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
3418 return ERR_PTR(-EINVAL);
3419
3420 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
3421 if (!port)
3422 return ERR_PTR(-ENOMEM);
3423
3424 port->dev = dev;
3425 port->tcpc = tcpc;
3426
3427 mutex_init(&port->lock);
3428 mutex_init(&port->swap_lock);
3429
3430 port->wq = create_singlethread_workqueue(dev_name(dev));
3431 if (!port->wq)
3432 return ERR_PTR(-ENOMEM);
3433 INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work);
3434 INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work);
3435 INIT_WORK(&port->event_work, tcpm_pd_event_handler);
3436
3437 spin_lock_init(&port->pd_event_lock);
3438
3439 init_completion(&port->tx_complete);
3440 init_completion(&port->swap_complete);
3441
3442 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcpc->config->src_pdo,
3443 tcpc->config->nr_src_pdo);
3444 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
3445 tcpc->config->nr_snk_pdo);
3446 port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,
3447 tcpc->config->nr_snk_vdo);
3448
3449 port->max_snk_mv = tcpc->config->max_snk_mv;
3450 port->max_snk_ma = tcpc->config->max_snk_ma;
3451 port->max_snk_mw = tcpc->config->max_snk_mw;
3452 port->operating_snk_mw = tcpc->config->operating_snk_mw;
3453 if (!tcpc->config->try_role_hw)
3454 port->try_role = tcpc->config->default_role;
3455 else
3456 port->try_role = TYPEC_NO_PREFERRED_ROLE;
3457
3458 port->typec_caps.prefer_role = tcpc->config->default_role;
3459 port->typec_caps.type = tcpc->config->type;
3460 port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
3461 port->typec_caps.pd_revision = 0x0200; /* USB-PD spec release 2.0 */
3462 port->typec_caps.dr_set = tcpm_dr_set;
3463 port->typec_caps.pr_set = tcpm_pr_set;
3464 port->typec_caps.vconn_set = tcpm_vconn_set;
3465 port->typec_caps.try_role = tcpm_try_role;
3466
3467 port->partner_desc.identity = &port->partner_ident;
3468
3469 /*
3470 * TODO:
3471 * - alt_modes, set_alt_mode
3472 * - {debug,audio}_accessory
3473 */
3474
3475 port->typec_port = typec_register_port(port->dev, &port->typec_caps);
3476 if (!port->typec_port) {
3477 err = -ENOMEM;
3478 goto out_destroy_wq;
3479 }
3480
3481 if (tcpc->config->alt_modes) {
 3482		const struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;
3483
3484 i = 0;
3485 while (paltmode->svid && i < ARRAY_SIZE(port->port_altmode)) {
3486 port->port_altmode[i] =
3487 typec_port_register_altmode(port->typec_port,
3488 paltmode);
3489 if (!port->port_altmode[i]) {
3490 tcpm_log(port,
3491 "%s: failed to register port alternate mode 0x%x",
3492 dev_name(dev), paltmode->svid);
3493 break;
3494 }
3495 i++;
3496 paltmode++;
3497 }
3498 }
3499
3500 tcpm_debugfs_init(port);
3501 mutex_lock(&port->lock);
3502 tcpm_init(port);
3503 mutex_unlock(&port->lock);
3504
3505 tcpm_log(port, "%s: registered", dev_name(dev));
3506 return port;
3507
3508out_destroy_wq:
3509 destroy_workqueue(port->wq);
3510 return ERR_PTR(err);
3511}
3512EXPORT_SYMBOL_GPL(tcpm_register_port);
3513
3514void tcpm_unregister_port(struct tcpm_port *port)
3515{
3516 int i;
3517
3518 for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
3519 typec_unregister_altmode(port->port_altmode[i]);
3520 typec_unregister_port(port->typec_port);
3521 tcpm_debugfs_exit(port);
3522 destroy_workqueue(port->wq);
3523}
3524EXPORT_SYMBOL_GPL(tcpm_unregister_port);
3525
3526MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
3527MODULE_DESCRIPTION("USB Type-C Port Manager");
3528MODULE_LICENSE("GPL");