drm/amd/display: Fix gpio port mapping issue
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/i2c.h>

#include <drm/drm_atomic.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>

#include "dm_services.h"
#include "amdgpu.h"
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_mst_types.h"

#include "dm_helpers.h"
#include "ddc_service_types.h"

/* dm_helpers_parse_edid_caps
 *
 * Parse edid caps
 *
 * @link:      [in] pointer to the dc link the EDID belongs to
 * @edid:      [in] pointer to edid
 * @edid_caps: [out] pointer to edid caps to be filled in
 * @return
 *	enum dc_edid_status describing the parse result
 */
enum dc_edid_status dm_helpers_parse_edid_caps(
		struct dc_link *link,
		const struct dc_edid *edid,
		struct dc_edid_caps *edid_caps)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
	struct cea_sad *sads;
	int sad_count = -1;
	int sadb_count = -1;
	int i = 0;
	uint8_t *sadb = NULL;

	enum dc_edid_status result = EDID_OK;

	if (!edid_caps || !edid)
		return EDID_BAD_INPUT;

	if (!drm_edid_is_valid(edid_buf))
		result = EDID_BAD_CHECKSUM;

	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
			((uint16_t) edid_buf->mfg_id[1])<<8;
	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
			((uint16_t) edid_buf->prod_code[1])<<8;
	edid_caps->serial_number = edid_buf->serial;
	edid_caps->manufacture_week = edid_buf->mfg_week;
	edid_caps->manufacture_year = edid_buf->mfg_year;

	drm_edid_get_monitor_name(edid_buf,
				  edid_caps->display_name,
				  AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	edid_caps->edid_hdmi = connector->display_info.is_hdmi;

	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
	if (sad_count <= 0)
		return result;

	edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
		struct cea_sad *sad = &sads[i];

		edid_caps->audio_modes[i].format_code = sad->format;
		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
		edid_caps->audio_modes[i].sample_rate = sad->freq;
		edid_caps->audio_modes[i].sample_size = sad->byte2;
	}

	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);

	if (sadb_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
		sadb_count = 0;
	}

	if (sadb_count)
		edid_caps->speaker_flags = sadb[0];
	else
		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;

	kfree(sads);
	kfree(sadb);

	return result;
}

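/*
 * Build a fresh DC stream allocation table from the DRM MST atomic state:
 * payloads marked for deletion are skipped and the result overwrites the
 * previous table.
 */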
static void
fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
				   struct amdgpu_dm_connector *aconnector,
				   struct dc_dp_mst_stream_allocation_table *table)
{
	struct dc_dp_mst_stream_allocation_table new_table = { 0 };
	struct dc_dp_mst_stream_allocation *sa;
	struct drm_dp_mst_atomic_payload *payload;

	/* Fill payload info */
	list_for_each_entry(payload, &mst_state->payloads, next) {
		if (payload->delete)
			continue;

		sa = &new_table.stream_allocations[new_table.stream_count];
		sa->slot_count = payload->time_slots;
		sa->vcp_id = payload->vcpi;
		new_table.stream_count++;
	}

	/* Overwrite the old table */
	*table = new_table;
}

void dm_helpers_dp_update_branch_info(
		struct dc_context *ctx,
		const struct dc_link *link)
{}

/*
 * Writes payload allocation table in immediate downstream device.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		struct dc_dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_atomic_payload *payload;
	struct drm_dp_mst_topology_mgr *mst_mgr;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	/* Accessing the connector state is required for vcpi_slots allocation
	 * and directly relies on behaviour in commit check
	 * that blocks before commit, guaranteeing that the state
	 * is not going to be swapped while still in use in commit tail
	 */

	if (!aconnector || !aconnector->mst_port)
		return false;

	mst_mgr = &aconnector->mst_port->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);

	/* It's OK for this to fail */
	payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
	if (enable)
		drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
	else
		drm_dp_remove_payload(mst_mgr, mst_state, payload);

	/* mst_mgr->payloads holds the VC payloads; the MST branch is notified
	 * via DPCD or AUX messages. Slots 1-63 are allocated sequentially for
	 * each stream, and the AMD ASIC stream slot allocation should follow
	 * the same sequence. Copy the DRM MST allocation to dc. */
	fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);

	return true;
}

/*
 * poll pending down reply
 */
void dm_helpers_dp_mst_poll_pending_down_reply(
		struct dc_context *ctx,
		const struct dc_link *link)
{}

/*
 * Clear payload allocation table before enable MST DP link.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_link *link)
{}

/*
 * Polls for ACT (allocation change trigger) handled and sends
 * ALLOCATE_PAYLOAD message.
 */
enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	int ret;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_port)
		return ACT_FAILED;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return ACT_FAILED;

	ret = drm_dp_check_act_status(mst_mgr);

	if (ret)
		return ACT_FAILED;

	return ACT_SUCCESS;
}

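/*
 * Second step of the DRM MST payload sequence: calls drm_dp_add_payload_part2()
 * when enabling and records the outcome in the connector's MST progress
 * status flags.
 */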
bool dm_helpers_dp_mst_send_payload_allocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_atomic_payload *payload;
	enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
	enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_port)
		return false;

	mst_mgr = &aconnector->mst_port->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);

	payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
	if (!enable) {
		set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
		clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
	}

	if (enable && drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload)) {
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 set_flag, false);
	} else {
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 set_flag, true);
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 clr_flag, false);
	}

	return true;
}

void dm_dtn_log_begin(struct dc_context *ctx,
		      struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

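/*
 * Appends a formatted message to the DTN log buffer, growing the buffer as
 * needed; when no log context is supplied the message is redirected to dmesg.
 */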
__printf(3, 4)
void dm_dtn_log_append_v(struct dc_context *ctx,
			 struct dc_log_buffer_ctx *log_ctx,
			 const char *msg, ...)
{
	va_list args;
	size_t total;
	int n;

	if (!log_ctx) {
		/* No context, redirect to dmesg. */
		struct va_format vaf;

		vaf.fmt = msg;
		vaf.va = &args;

		va_start(args, msg);
		pr_info("%pV", &vaf);
		va_end(args);

		return;
	}

	/* Measure the output. */
	va_start(args, msg);
	n = vsnprintf(NULL, 0, msg, args);
	va_end(args);

	if (n <= 0)
		return;

	/* Reallocate the string buffer as needed. */
	total = log_ctx->pos + n + 1;

	if (total > log_ctx->size) {
		char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL);

		if (buf) {
			memcpy(buf, log_ctx->buf, log_ctx->pos);
			kfree(log_ctx->buf);

			log_ctx->buf = buf;
			log_ctx->size = total;
		}
	}

	if (!log_ctx->buf)
		return;

	/* Write the formatted string to the log buffer. */
	va_start(args, msg);
	n = vscnprintf(
		log_ctx->buf + log_ctx->pos,
		log_ctx->size - log_ctx->pos,
		msg,
		args);
	va_end(args);

	if (n > 0)
		log_ctx->pos += n;
}

void dm_dtn_log_end(struct dc_context *ctx,
		    struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

bool dm_helpers_dp_mst_start_top_mgr(
		struct dc_context *ctx,
		const struct dc_link *link,
		bool boot)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	if (boot) {
		DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n",
			 aconnector, aconnector->base.base.id);
		return true;
	}

	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
}

bool dm_helpers_dp_mst_stop_top_mgr(
		struct dc_context *ctx,
		struct dc_link *link)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	if (aconnector->mst_mgr.mst_state == true) {
		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
		link->cur_link_settings.lane_count = 0;
	}

	return false;
}

bool dm_helpers_dp_read_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DC_LOG_DC("Failed to find connector for link!\n");
		return false;
	}

	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
				data, size) > 0;
}

bool dm_helpers_dp_write_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		const uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
				 address, (uint8_t *)data, size) > 0;
}

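/*
 * Translates a DC i2c_command into an array of struct i2c_msg and submits it
 * to the connector's I2C adapter in a single i2c_transfer() call.
 */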
bool dm_helpers_submit_i2c(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct i2c_command *cmd)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_msg *msgs;
	int i = 0;
	int num = cmd->number_of_payloads;
	bool result;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);

	if (!msgs)
		return false;

	for (i = 0; i < num; i++) {
		msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
		msgs[i].addr = cmd->payloads[i].address;
		msgs[i].len = cmd->payloads[i].length;
		msgs[i].buf = cmd->payloads[i].data;
	}

	result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;

	kfree(msgs);

	return result;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
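/*
 * Issues one Synaptics "remote command" (RC) over DPCD: programs the RC data,
 * offset and length registers, writes the command register with the top bit
 * set, polls until the command value reads back without it, then reads the
 * result register (and, for read commands, the returned data).
 */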
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
		bool is_write_cmd,
		unsigned char cmd,
		unsigned int length,
		unsigned int offset,
		unsigned char *data)
{
	bool success = false;
	unsigned char rc_data[16] = {0};
	unsigned char rc_offset[4] = {0};
	unsigned char rc_length[2] = {0};
	unsigned char rc_cmd = 0;
	unsigned char rc_result = 0xFF;
	unsigned char i = 0;
	int ret;

	if (is_write_cmd) {
		// write rc data
		memmove(rc_data, data, length);
		ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data));
	}

	// write rc offset
	rc_offset[0] = (unsigned char) offset & 0xFF;
	rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF;
	rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF;
	rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset));

	// write rc length
	rc_length[0] = (unsigned char) length & 0xFF;
	rc_length[1] = (unsigned char) (length >> 8) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length));

	// write rc cmd
	rc_cmd = cmd | 0x80;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));

	if (ret < 0) {
		DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret);
		return false;
	}

	// poll until active is 0
	for (i = 0; i < 10; i++) {
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
		if (rc_cmd == cmd)
			// active is 0
			break;
		msleep(10);
	}

	// read rc result
	drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result));
	success = (rc_result == 0);

	if (success && !is_write_cmd) {
		// read rc data
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
	}

	DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success);

	return success;
}

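/*
 * Workaround for a Synaptics hub SDP FIFO issue: the numbered steps below
 * clear and then set bit 1 at several firmware offsets via remote commands,
 * followed by a final step-6 command with no payload.
 */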
static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
{
	unsigned char data[16] = {0};

	DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n");

	// Step 2
	data[0] = 'P';
	data[1] = 'R';
	data[2] = 'I';
	data[3] = 'U';
	data[4] = 'S';

	if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data))
		return;

	// Step 3 and 4
	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
		return;

	// Step 3 and 5
	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
		return;

	// Step 6
	if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL))
		return;

	DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n");
}

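/*
 * DSC enable/disable path for Synaptics MST hubs that lack virtual DPCD
 * support: DP_DSC_ENABLE is written on the given AUX channel, the SDP FIFO
 * reset workaround is applied before the first stream comes up, and DSC is
 * only disabled once the link is no longer active.
 */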
static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
		struct drm_dp_aux *aux,
		const struct dc_stream_state *stream,
		bool enable)
{
	uint8_t ret = 0;

	DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n");

	if (enable) {
		/* If DSC was enabled on a previous boot and the system reboots
		 * with the hub attached, the Synaptics hub can get stuck during
		 * the reboot sequence. Apply a workaround to reset the Synaptics
		 * SDP fifo before enabling the first stream.
		 */
		if (!stream->link->link_status.link_active &&
			memcmp(stream->link->dpcd_caps.branch_dev_name,
				(int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0)
			apply_synaptics_fifo_reset_wa(aux);

		ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
		DRM_INFO("Send DSC enable to synaptics\n");

	} else {
		/* The Synaptics hub does not support virtual DPCD, and the
		 * external monitor shows garbage if DSC is disabled while the
		 * link is still up, so only disable DSC once the entire link
		 * status has gone inactive.
		 */
		if (!stream->link->link_status.link_active) {
			ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
			DRM_INFO("Send DSC disable to synaptics\n");
		}
	}

	return ret;
}
#endif

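/*
 * Writes DP_DSC_ENABLE for a stream: MST streams go through the connector's
 * DSC AUX (with pass-through handling and the Synaptics workaround above),
 * while SST and eDP streams write to the sink directly, or to a DP-to-HDMI
 * PCON when one is attached.
 */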
bool dm_helpers_dp_write_dsc_enable(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	static const uint8_t DSC_DISABLE;
	static const uint8_t DSC_DECODING = 0x01;
	static const uint8_t DSC_PASSTHROUGH = 0x02;

	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_port *port;
	uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE;
	uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE;
	uint8_t ret = 0;

	if (!stream)
		return false;

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector->dsc_aux)
			return false;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		// apply w/a to synaptics
		if (needs_dsc_aux_workaround(aconnector->dc_link) &&
		    (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3)
			return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
				aconnector->dsc_aux, stream, enable_dsc);
#endif

		port = aconnector->port;

		if (enable) {
			if (port->passthrough_aux) {
				ret = drm_dp_dpcd_write(port->passthrough_aux,
							DP_DSC_ENABLE,
							&enable_passthrough, 1);
				DC_LOG_DC("Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
					  ret);
			}

			ret = drm_dp_dpcd_write(aconnector->dsc_aux,
						DP_DSC_ENABLE, &enable_dsc, 1);
			DC_LOG_DC("Sent DSC decoding enable to %s port, ret = %u\n",
				  (port->passthrough_aux) ? "remote RX" :
				  "virtual dpcd",
				  ret);
		} else {
			ret = drm_dp_dpcd_write(aconnector->dsc_aux,
						DP_DSC_ENABLE, &enable_dsc, 1);
			DC_LOG_DC("Sent DSC decoding disable to %s port, ret = %u\n",
				  (port->passthrough_aux) ? "remote RX" :
				  "virtual dpcd",
				  ret);

			if (port->passthrough_aux) {
				ret = drm_dp_dpcd_write(port->passthrough_aux,
							DP_DSC_ENABLE,
							&enable_passthrough, 1);
				DC_LOG_DC("Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
					  ret);
			}
		}
	}

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
#endif
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable");
#if defined(CONFIG_DRM_AMD_DC_DCN)
		} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable");
		}
#endif
	}

	return ret;
}

bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
	bool dp_sink_present;
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		BUG_ON("Failed to find connector for link!");
		return true;
	}

	mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
	dp_sink_present = dc_link_is_dp_sink_present(link);
	mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
	return dp_sink_present;
}

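/*
 * Reads the local EDID over AUX or I2C depending on the link, retrying on bad
 * checksums, then copies it into the sink and parses its capabilities. Also
 * sends the checksum replies required by the DP compliance tests.
 */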
enum dc_edid_status dm_helpers_read_local_edid(
		struct dc_context *ctx,
		struct dc_link *link,
		struct dc_sink *sink)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct i2c_adapter *ddc;
	int retry = 3;
	enum dc_edid_status edid_status;
	struct edid *edid;

	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	/* some dongles read edid incorrectly the first time,
	 * do check sum and retry to make sure read correct edid.
	 */
	do {

		edid = drm_get_edid(&aconnector->base, ddc);

		/* DP Compliance Test 4.2.2.6 */
		if (link->aux_mode && connector->edid_corrupt)
			drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);

		if (!edid && connector->edid_corrupt) {
			connector->edid_corrupt = false;
			return EDID_BAD_CHECKSUM;
		}

		if (!edid)
			return EDID_NO_RESPONSE;

		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);

		/* We don't need the original edid anymore */
		kfree(edid);

		edid_status = dm_helpers_parse_edid_caps(
						link,
						&sink->dc_edid,
						&sink->edid_caps);

	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);

	if (edid_status != EDID_OK)
		DRM_ERROR("EDID err: %d, on connector: %s",
			  edid_status,
			  aconnector->base.name);

	/* DP Compliance Test 4.2.2.3 */
	if (link->aux_mode)
		drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);

	return edid_status;
}
int dm_helper_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx,
			link->link_index, (void *)payload,
			(void *)operation_result);
}

int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
		const struct dc_link *link,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx,
			link->link_index, (void *)payload,
			(void *)operation_result);
}

void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* TODO: something */
}

void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us)
{
	// TODO:
	//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
}

void dm_helpers_init_panel_settings(
		struct dc_context *ctx,
		struct dc_panel_config *panel_config,
		struct dc_sink *sink)
{
	// Extra Panel Power Sequence
	panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms;
	panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms;
	panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off;
	panel_config->pps.extra_post_t7_ms = 0;
	panel_config->pps.extra_pre_t11_ms = 0;
	panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms;
	panel_config->pps.extra_post_OUI_ms = 0;
	// Feature DSC
	panel_config->dsc.disable_dsc_edp = false;
	panel_config->dsc.force_dsc_edp_policy = 0;
}

void dm_helpers_override_panel_settings(
		struct dc_context *ctx,
		struct dc_panel_config *panel_config)
{
	// Feature DSC
	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
		panel_config->dsc.disable_dsc_edp = true;
	}
}

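/*
 * Allocates a kernel BO in GTT or VRAM for DC, tracks it on the DM
 * dal_allocation list and returns its CPU pointer; the GPU address is
 * returned through @addr.
 */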
void *dm_helpers_allocate_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dal_allocation *da;
	u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
		AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
	int ret;

	da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
	if (!da)
		return NULL;

	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				      domain, &da->bo,
				      &da->gpu_addr, &da->cpu_ptr);

	*addr = da->gpu_addr;

	if (ret) {
		kfree(da);
		return NULL;
	}

	/* add da to list in dm */
	list_add(&da->list, &adev->dm.da_list);

	return da->cpu_ptr;
}

void dm_helpers_free_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		void *pvMem)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dal_allocation *da;

	/* walk the da list in DM */
	list_for_each_entry(da, &adev->dm.da_list, list) {
		if (pvMem == da->cpu_ptr) {
			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
			list_del(&da->list);
			kfree(da);
			break;
		}
	}
}

bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
{
	enum dc_irq_source irq_source;
	bool ret;

	irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;

	ret = dc_interrupt_set(ctx->dc, irq_source, enable);

	DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
			 enable ? "en" : "dis", ret);
	return ret;
}

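/*
 * Propagates the stream's ignore-MSA-timing setting to the sink by updating
 * the IGNORE_MSA_TIMING_PARAM bit in DP_DOWNSPREAD_CTRL, writing it back only
 * when the value actually changed.
 */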
void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
{
	/* TODO: virtual DPCD */
	struct dc_link *link = stream->link;
	union down_spread_ctrl old_downspread;
	union down_spread_ctrl new_downspread;

	if (link->aux_access_disabled)
		return;

	if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
				     &old_downspread.raw,
				     sizeof(old_downspread)))
		return;

	new_downspread.raw = old_downspread.raw;
	new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
		(stream->ignore_msa_timing_param) ? 1 : 0;

	if (new_downspread.raw != old_downspread.raw)
		dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
					 &new_downspread.raw,
					 sizeof(new_downspread));
}

void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
	// TODO
}

void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
	/* TODO: add periodic detection implementation */
}