drm/amd/display: Ignore Transitional Invalid Link Rate Error Message
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/i2c.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>

#include "dm_services.h"
#include "amdgpu.h"
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_mst_types.h"

#include "dm_helpers.h"
#include "ddc_service_types.h"

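/*
 * Per-monitor EDID quirk table: entries are matched against the EDID
 * manufacturer and product IDs, and the associated patch_func is applied
 * with patch_param when a match is found.
 */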
struct monitor_patch_info {
	unsigned int manufacturer_id;
	unsigned int product_id;
	void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param);
	unsigned int patch_param;
};
static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param);

static const struct monitor_patch_info monitor_patch_table[] = {
	{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15},
	{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15},
};

static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param)
{
	if (edid_caps)
		edid_caps->panel_patch.max_dsc_target_bpp_limit = param;
}

static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++)
		if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id)
			&& (edid_caps->product_id == monitor_patch_table[i].product_id)) {
			monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param);
			ret++;
		}

	return ret;
}

/* dm_helpers_parse_edid_caps
 *
 * Parse edid caps
 *
 * @link: [in] pointer to dc link
 * @edid: [in] pointer to edid
 * @edid_caps: [out] pointer to edid caps
 *
 * @return: enum dc_edid_status
 */
enum dc_edid_status dm_helpers_parse_edid_caps(
		struct dc_link *link,
		const struct dc_edid *edid,
		struct dc_edid_caps *edid_caps)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct edid *edid_buf = (struct edid *) edid->raw_edid;
	struct cea_sad *sads;
	int sad_count = -1;
	int sadb_count = -1;
	int i = 0;
	uint8_t *sadb = NULL;

	enum dc_edid_status result = EDID_OK;

	if (!edid_caps || !edid)
		return EDID_BAD_INPUT;

	if (!drm_edid_is_valid(edid_buf))
		result = EDID_BAD_CHECKSUM;

	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
		((uint16_t) edid_buf->mfg_id[1])<<8;
	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
		((uint16_t) edid_buf->prod_code[1])<<8;
	edid_caps->serial_number = edid_buf->serial;
	edid_caps->manufacture_week = edid_buf->mfg_week;
	edid_caps->manufacture_year = edid_buf->mfg_year;

	drm_edid_get_monitor_name(edid_buf,
				  edid_caps->display_name,
				  AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	edid_caps->edid_hdmi = connector->display_info.is_hdmi;

	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
	if (sad_count <= 0)
		return result;

	edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
		struct cea_sad *sad = &sads[i];

		edid_caps->audio_modes[i].format_code = sad->format;
		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
		edid_caps->audio_modes[i].sample_rate = sad->freq;
		edid_caps->audio_modes[i].sample_size = sad->byte2;
	}

	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);

	if (sadb_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
		sadb_count = 0;
	}

	if (sadb_count)
		edid_caps->speaker_flags = sadb[0];
	else
		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;

	kfree(sads);
	kfree(sadb);

	amdgpu_dm_patch_edid_caps(edid_caps);

	return result;
}

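/*
 * Copy the DRM MST payload table for this connector's topology manager into
 * DC's proposed dp_mst_stream_allocation_table (slot count and VCP ID for
 * each active stream).
 */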
static void get_payload_table(
		struct amdgpu_dm_connector *aconnector,
		struct dp_mst_stream_allocation_table *proposed_table)
{
	int i;
	struct drm_dp_mst_topology_mgr *mst_mgr =
			&aconnector->mst_port->mst_mgr;

	mutex_lock(&mst_mgr->payload_lock);

	proposed_table->stream_count = 0;

	/* number of active streams */
	for (i = 0; i < mst_mgr->max_payloads; i++) {
		if (mst_mgr->payloads[i].num_slots == 0)
			break; /* end of vcp_id table */

		ASSERT(mst_mgr->payloads[i].payload_state !=
				DP_PAYLOAD_DELETE_LOCAL);

		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
			mst_mgr->payloads[i].payload_state ==
					DP_PAYLOAD_REMOTE) {

			struct dp_mst_stream_allocation *sa =
					&proposed_table->stream_allocations[
						proposed_table->stream_count];

			sa->slot_count = mst_mgr->payloads[i].num_slots;
			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
			proposed_table->stream_count++;
		}
	}

	mutex_unlock(&mst_mgr->payload_lock);
}

void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Writes payload allocation table in immediate downstream device.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		struct dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	bool ret;
	u8 link_coding_cap = DP_8b_10b_ENCODING;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	/* Accessing the connector state is required for vcpi_slots allocation
	 * and directly relies on behaviour in commit check
	 * that blocks before commit, guaranteeing that the state
	 * is not going to be swapped while still in use in commit tail.
	 */

	if (!aconnector || !aconnector->mst_port)
		return false;

	dm_conn_state = to_dm_connector_state(aconnector->base.state);

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	mst_port = aconnector->port;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
#endif

	if (enable) {

		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
					       dm_conn_state->pbn,
					       dm_conn_state->vcpi_slots);
		if (!ret)
			return false;

	} else {
		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
	}

	/* It's OK for this to fail */
	drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0 : 1);

	/* mst_mgr->payloads holds the VC payload allocations; the MST branch is
	 * notified using DPCD or AUX messages. Slots 1-63 are allocated in
	 * sequence for each stream, and the AMD ASIC stream slot allocation
	 * must follow the same sequence, so copy the DRM MST allocation to dc.
	 */

	get_payload_table(aconnector, proposed_table);

	return true;
}

/*
 * poll pending down reply
 */
void dm_helpers_dp_mst_poll_pending_down_reply(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Clear payload allocation table before enable MST DP link.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Polls for ACT (allocation change trigger) handled and sends
 * ALLOCATE_PAYLOAD message.
 */
enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	int ret;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_port)
		return ACT_FAILED;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return ACT_FAILED;

	ret = drm_dp_check_act_status(mst_mgr);

	if (ret)
		return ACT_FAILED;

	return ACT_SUCCESS;
}

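/*
 * Step 2 of the MST payload update: send the payload allocation to the branch
 * device via drm_dp_update_payload_part2(); on disable, the port's VCPI is
 * also released.
 */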
bool dm_helpers_dp_mst_send_payload_allocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_port)
		return false;

	mst_port = aconnector->port;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	/* It's OK for this to fail */
	drm_dp_update_payload_part2(mst_mgr);

	if (!enable)
		drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);

	return true;
}

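/*
 * DTN log helpers: dm_dtn_log_begin/append_v/end write into the provided
 * dc_log_buffer_ctx when one is given, growing its buffer as needed, and fall
 * back to dmesg (pr_info) when log_ctx is NULL.
 */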
void dm_dtn_log_begin(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

__printf(3, 4)
void dm_dtn_log_append_v(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx,
	const char *msg, ...)
{
	va_list args;
	size_t total;
	int n;

	if (!log_ctx) {
		/* No context, redirect to dmesg. */
		struct va_format vaf;

		vaf.fmt = msg;
		vaf.va = &args;

		va_start(args, msg);
		pr_info("%pV", &vaf);
		va_end(args);

		return;
	}

	/* Measure the output. */
	va_start(args, msg);
	n = vsnprintf(NULL, 0, msg, args);
	va_end(args);

	if (n <= 0)
		return;

	/* Reallocate the string buffer as needed. */
	total = log_ctx->pos + n + 1;

	if (total > log_ctx->size) {
		char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL);

		if (buf) {
			memcpy(buf, log_ctx->buf, log_ctx->pos);
			kfree(log_ctx->buf);

			log_ctx->buf = buf;
			log_ctx->size = total;
		}
	}

	if (!log_ctx->buf)
		return;

	/* Write the formatted string to the log buffer. */
	va_start(args, msg);
	n = vscnprintf(
		log_ctx->buf + log_ctx->pos,
		log_ctx->size - log_ctx->pos,
		msg,
		args);
	va_end(args);

	if (n > 0)
		log_ctx->pos += n;
}

void dm_dtn_log_end(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

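/*
 * Enable the DP MST topology manager for the link's connector. During boot
 * the actual MST start is deferred; otherwise drm_dp_mst_topology_mgr_set_mst()
 * is called immediately.
 */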
bool dm_helpers_dp_mst_start_top_mgr(
		struct dc_context *ctx,
		const struct dc_link *link,
		bool boot)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	if (boot) {
		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
			 aconnector, aconnector->base.base.id);
		return true;
	}

	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
}

bool dm_helpers_dp_mst_stop_top_mgr(
		struct dc_context *ctx,
		struct dc_link *link)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	if (aconnector->mst_mgr.mst_state == true)
		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);

	return false;
}

bool dm_helpers_dp_read_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DC_LOG_DC("Failed to find connector for link!\n");
		return false;
	}

	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
				data, size) > 0;
}

bool dm_helpers_dp_write_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		const uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
			address, (uint8_t *)data, size) > 0;
}

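/*
 * Translate a dc i2c_command into an array of struct i2c_msg and submit it on
 * the connector's I2C adapter; returns true only if every payload transferred.
 */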
bool dm_helpers_submit_i2c(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct i2c_command *cmd)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_msg *msgs;
	int i = 0;
	int num = cmd->number_of_payloads;
	bool result;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);

	if (!msgs)
		return false;

	for (i = 0; i < num; i++) {
		msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
		msgs[i].addr = cmd->payloads[i].address;
		msgs[i].len = cmd->payloads[i].length;
		msgs[i].buf = cmd->payloads[i].data;
	}

	result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;

	kfree(msgs);

	return result;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
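/*
 * Execute a Synaptics "RC" (remote command) over DPCD: optionally write the
 * command data, then program offset, length and the command byte (bit 7 set
 * to trigger execution), poll until the command byte reads back as idle, and
 * finally check SYNAPTICS_RC_RESULT (reading back data for read commands).
 */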
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
		bool is_write_cmd,
		unsigned char cmd,
		unsigned int length,
		unsigned int offset,
		unsigned char *data)
{
	bool success = false;
	unsigned char rc_data[16] = {0};
	unsigned char rc_offset[4] = {0};
	unsigned char rc_length[2] = {0};
	unsigned char rc_cmd = 0;
	unsigned char rc_result = 0xFF;
	unsigned char i = 0;
	ssize_t ret = 0; /* drm_dp_dpcd_write() returns a negative errno on failure */

	if (is_write_cmd) {
		// write rc data
		memmove(rc_data, data, length);
		ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data));
	}

	// write rc offset
	rc_offset[0] = (unsigned char) offset & 0xFF;
	rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF;
	rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF;
	rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset));

	// write rc length
	rc_length[0] = (unsigned char) length & 0xFF;
	rc_length[1] = (unsigned char) (length >> 8) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length));

	// write rc cmd
	rc_cmd = cmd | 0x80;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));

	if (ret < 0) {
		DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %zd\n", ret);
		return false;
	}

	// poll until active is 0
	for (i = 0; i < 10; i++) {
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
		if (rc_cmd == cmd)
			// active is 0
			break;
		msleep(10);
	}

	// read rc result
	drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result));
	success = (rc_result == 0);

	if (success && !is_write_cmd) {
		// read rc data
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
	}

	DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success);

	return success;
}

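/*
 * Workaround for a Synaptics MST hub whose SDP FIFO can be left stuck after a
 * reboot with DSC enabled: a fixed sequence of RC reads/writes clears and then
 * sets bit 1 in three hub registers before the first stream is enabled.
 */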
static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
{
	unsigned char data[16] = {0};

	DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n");

	// Step 2
	data[0] = 'P';
	data[1] = 'R';
	data[2] = 'I';
	data[3] = 'U';
	data[4] = 'S';

	if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data))
		return;

	// Step 3 and 4
	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
		return;

	// Step 3 and 5
	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
		return;

	// Step 6
	if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL))
		return;

	DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n");
}

static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
		struct drm_dp_aux *aux,
		const struct dc_stream_state *stream,
		bool enable)
{
	uint8_t ret = 0;

	DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n");

	if (enable) {
		/* When DSC was enabled on a previous boot and the system reboots
		 * with the hub attached, there is a chance that the Synaptics hub
		 * gets stuck during the reboot sequence. Apply a workaround to
		 * reset the Synaptics SDP fifo before enabling the first stream.
		 */
		if (!stream->link->link_status.link_active &&
			memcmp(stream->link->dpcd_caps.branch_dev_name,
				(int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0)
			apply_synaptics_fifo_reset_wa(aux);

		ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
		DRM_INFO("Send DSC enable to synaptics\n");

	} else {
		/* The Synaptics hub does not support virtual DPCD, and external
		 * monitors show corruption if DSC is disabled while the link is
		 * still active, so only disable DSC once the entire link is down.
		 */
		if (!stream->link->link_status.link_active) {
			ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
			DRM_INFO("Send DSC disable to synaptics\n");
		}
	}

	return ret;
}
#endif

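/*
 * Write DP_DSC_ENABLE for the stream's sink. For MST streams the write goes
 * through the connector's DSC AUX (with the Synaptics non-virtual-DPCD path
 * above as a workaround); for SST/eDP it is written directly to the link, and
 * DP-to-HDMI PCONs are handled the same way.
 */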
bool dm_helpers_dp_write_dsc_enable(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	uint8_t enable_dsc = enable ? 1 : 0;
	struct amdgpu_dm_connector *aconnector;
	uint8_t ret = 0;

	if (!stream)
		return false;

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector->dsc_aux)
			return false;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		// apply w/a to synaptics
		if (needs_dsc_aux_workaround(aconnector->dc_link) &&
		    (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3)
			return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
				aconnector->dsc_aux, stream, enable_dsc);
#endif

		ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
		DC_LOG_DC("Send DSC %s to MST RX\n", enable_dsc ? "enable" : "disable");
	}

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
#endif
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable");
#if defined(CONFIG_DRM_AMD_DC_DCN)
		} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable");
		}
#endif
	}

	return (ret > 0);
}

bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
	bool dp_sink_present;
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		BUG_ON("Failed to find connector for link!");
		return true;
	}

	mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
	dp_sink_present = dc_link_is_dp_sink_present(link);
	mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
	return dp_sink_present;
}

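/*
 * Read the sink's EDID over AUX DDC (or the native I2C adapter for non-AUX
 * links), retrying up to three times on checksum failures, then parse it into
 * the sink's edid_caps. The real EDID checksum is also reported for DP
 * compliance tests 4.2.2.3 / 4.2.2.6.
 */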
enum dc_edid_status dm_helpers_read_local_edid(
		struct dc_context *ctx,
		struct dc_link *link,
		struct dc_sink *sink)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct i2c_adapter *ddc;
	int retry = 3;
	enum dc_edid_status edid_status;
	struct edid *edid;

	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	/* some dongles read edid incorrectly the first time,
	 * do check sum and retry to make sure read correct edid.
	 */
	do {

		edid = drm_get_edid(&aconnector->base, ddc);

		/* DP Compliance Test 4.2.2.6 */
		if (link->aux_mode && connector->edid_corrupt)
			drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);

		if (!edid && connector->edid_corrupt) {
			connector->edid_corrupt = false;
			return EDID_BAD_CHECKSUM;
		}

		if (!edid)
			return EDID_NO_RESPONSE;

		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);

		/* We don't need the original edid anymore */
		kfree(edid);

		edid_status = dm_helpers_parse_edid_caps(
						link,
						&sink->dc_edid,
						&sink->edid_caps);

	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);

	if (edid_status != EDID_OK)
		DRM_ERROR("EDID err: %d, on connector: %s",
				edid_status,
				aconnector->base.name);

	/* DP Compliance Test 4.2.2.3 */
	if (link->aux_mode)
		drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);

	return edid_status;
}

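/*
 * AUX and SET_CONFIG transfers that go through DMUB firmware: both helpers
 * forward the payload to amdgpu_dm_process_dmub_aux_transfer_sync() with the
 * link index and a pointer for the operation result.
 */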
int dm_helper_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx,
			link->link_index, (void *)payload,
			(void *)operation_result);
}

int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
		const struct dc_link *link,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx,
			link->link_index, (void *)payload,
			(void *)operation_result);
}

void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* TODO: something */
}

void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us)
{
	// TODO:
	//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
}

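/*
 * Allocate a kernel BO in GTT or VRAM for DC, returning its CPU pointer and
 * GPU address; the allocation is tracked on adev->dm.da_list so that
 * dm_helpers_free_gpu_mem() can later find and release it by CPU pointer.
 */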
void *dm_helpers_allocate_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dal_allocation *da;
	u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
		AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
	int ret;

	da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
	if (!da)
		return NULL;

	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				      domain, &da->bo,
				      &da->gpu_addr, &da->cpu_ptr);

	*addr = da->gpu_addr;

	if (ret) {
		kfree(da);
		return NULL;
	}

	/* add da to list in dm */
	list_add(&da->list, &adev->dm.da_list);

	return da->cpu_ptr;
}

void dm_helpers_free_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		void *pvMem)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dal_allocation *da;

	/* walk the da list in DM */
	list_for_each_entry(da, &adev->dm.da_list, list) {
		if (pvMem == da->cpu_ptr) {
			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
			list_del(&da->list);
			kfree(da);
			break;
		}
	}
}

81927e28 918bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
70732504 919{
a08f16cf
LHM
920 enum dc_irq_source irq_source;
921 bool ret;
922
81927e28 923 irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;
a08f16cf
LHM
924
925 ret = dc_interrupt_set(ctx->dc, irq_source, enable);
926
927 DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
928 enable ? "en" : "dis", ret);
929 return ret;
70732504 930}
6016cd9d
BG
931
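/*
 * Propagate the stream's ignore_msa_timing_param setting to the sink by
 * updating the IGNORE_MSA_TIMING_PARAM bit in DP_DOWNSPREAD_CTRL, writing the
 * register only when the value actually changes.
 */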
void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
{
	/* TODO: virtual DPCD */
	struct dc_link *link = stream->link;
	union down_spread_ctrl old_downspread;
	union down_spread_ctrl new_downspread;

	if (link->aux_access_disabled)
		return;

	if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
			&old_downspread.raw,
			sizeof(old_downspread)))
		return;

	new_downspread.raw = old_downspread.raw;
	new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
		(stream->ignore_msa_timing_param) ? 1 : 0;

	if (new_downspread.raw != old_downspread.raw)
		dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
				&new_downspread.raw,
				sizeof(new_downspread));
}

void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
	// TODO
}

void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
	/* TODO: add periodic detection implementation */
}