Commit | Line | Data |
---|---|---|
7c008829 NK |
1 | /* |
2 | * Copyright 2019 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: AMD | |
23 | * | |
24 | */ | |
25 | ||
cdca3f21 | 26 | #include "../dmub_srv.h" |
7c008829 NK |
27 | #include "dmub_dcn20.h" |
28 | #include "dmub_dcn21.h" | |
84034ad4 | 29 | #include "dmub_cmd.h" |
5baebf61 BL |
30 | #ifdef CONFIG_DRM_AMD_DC_DCN3_0 |
31 | #include "dmub_dcn30.h" | |
32 | #endif | |
b9e9f11c | 33 | #include "os_types.h" |
7c008829 NK |
34 | /* |
35 | * Note: the DMUB service is standalone. No additional headers should be | |
36 | * added below or above this line unless they reside within the DMUB | |
37 | * folder. | |
38 | */ | |
39 | ||
40 | /* Alignment for framebuffer memory. */ | |
41 | #define DMUB_FB_ALIGNMENT (1024 * 1024) | |
42 | ||
43 | /* Stack size. */ | |
44 | #define DMUB_STACK_SIZE (128 * 1024) | |
45 | ||
46 | /* Context size. */ | |
47 | #define DMUB_CONTEXT_SIZE (512 * 1024) | |
48 | ||
49 | /* Mailbox size */ | |
50 | #define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE) | |
51 | ||
1f0674fd | 52 | /* Default state size if meta is absent. */ |
891f016d | 53 | #define DMUB_FW_STATE_SIZE (64 * 1024) |
1f0674fd NK |
54 | |
55 | /* Default tracebuffer size if meta is absent. */ | |
891f016d | 56 | #define DMUB_TRACE_BUFFER_SIZE (64 * 1024) |
7c008829 | 57 | |
2277f01d WW |
58 | /* Default scratch mem size. */ |
59 | #define DMUB_SCRATCH_MEM_SIZE (256) | |
60 | ||
7c008829 | 61 | /* Number of windows in use. */ |
2277f01d | 62 | #define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL) |
7c008829 NK |
63 | /* Base addresses. */ |
64 | ||
65 | #define DMUB_CW0_BASE (0x60000000) | |
66 | #define DMUB_CW1_BASE (0x61000000) | |
b9e9f11c | 67 | #define DMUB_CW3_BASE (0x63000000) |
562c805f | 68 | #define DMUB_CW4_BASE (0x64000000) |
7c008829 | 69 | #define DMUB_CW5_BASE (0x65000000) |
2f39835c | 70 | #define DMUB_CW6_BASE (0x66000000) |
7c008829 NK |
71 | |
/* Round val up to the next multiple of factor (returns val if already aligned). */
static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
{
	const uint32_t remainder = val % factor;

	return remainder ? val + (factor - remainder) : val;
}
76 | ||
c5d5b0ec | 77 | void dmub_flush_buffer_mem(const struct dmub_fb *fb) |
dee5d542 NK |
78 | { |
79 | const uint8_t *base = (const uint8_t *)fb->cpu_addr; | |
80 | uint8_t buf[64]; | |
81 | uint32_t pos, end; | |
82 | ||
83 | /** | |
84 | * Read 64-byte chunks since we don't want to store a | |
85 | * large temporary buffer for this purpose. | |
86 | */ | |
87 | end = fb->size / sizeof(buf) * sizeof(buf); | |
88 | ||
89 | for (pos = 0; pos < end; pos += sizeof(buf)) | |
90 | dmub_memcpy(buf, base + pos, sizeof(buf)); | |
91 | ||
92 | /* Read anything leftover into the buffer. */ | |
93 | if (end < fb->size) | |
94 | dmub_memcpy(buf, base + pos, fb->size - end); | |
95 | } | |
96 | ||
1f0674fd | 97 | static const struct dmub_fw_meta_info * |
a576b345 | 98 | dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) |
1f0674fd NK |
99 | { |
100 | const union dmub_fw_meta *meta; | |
a576b345 NK |
101 | const uint8_t *blob = NULL; |
102 | uint32_t blob_size = 0; | |
d5617541 | 103 | uint32_t meta_offset = 0; |
a576b345 | 104 | |
08a512d4 | 105 | if (params->fw_bss_data && params->bss_data_size) { |
a576b345 NK |
106 | /* Legacy metadata region. */ |
107 | blob = params->fw_bss_data; | |
108 | blob_size = params->bss_data_size; | |
d5617541 | 109 | meta_offset = DMUB_FW_META_OFFSET; |
08a512d4 | 110 | } else if (params->fw_inst_const && params->inst_const_size) { |
a576b345 NK |
111 | /* Combined metadata region. */ |
112 | blob = params->fw_inst_const; | |
113 | blob_size = params->inst_const_size; | |
d5617541 | 114 | meta_offset = 0; |
a576b345 | 115 | } |
1f0674fd | 116 | |
a576b345 | 117 | if (!blob || !blob_size) |
1f0674fd NK |
118 | return NULL; |
119 | ||
d5617541 | 120 | if (blob_size < sizeof(union dmub_fw_meta) + meta_offset) |
1f0674fd NK |
121 | return NULL; |
122 | ||
d5617541 | 123 | meta = (const union dmub_fw_meta *)(blob + blob_size - meta_offset - |
1f0674fd NK |
124 | sizeof(union dmub_fw_meta)); |
125 | ||
126 | if (meta->info.magic_value != DMUB_FW_META_MAGIC) | |
127 | return NULL; | |
128 | ||
129 | return &meta->info; | |
130 | } | |
131 | ||
7c008829 NK |
/*
 * dmub_srv_hw_setup - bind the ASIC-specific register map and hardware
 * function table for the given ASIC.
 *
 * DCN20 serves as the baseline implementation; DCN21 and DCN30 start from
 * the DCN20 hooks and then override only what differs. Returns false for
 * unrecognized ASICs, true otherwise.
 */
static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
	struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;

	switch (asic) {
	case DMUB_ASIC_DCN20:
	case DMUB_ASIC_DCN21:
#ifdef CONFIG_DRM_AMD_DC_DCN3_0
	case DMUB_ASIC_DCN30:
#endif
		/* Baseline: DCN20 registers and hooks for all supported ASICs. */
		dmub->regs = &dmub_srv_dcn20_regs;

		funcs->reset = dmub_dcn20_reset;
		funcs->reset_release = dmub_dcn20_reset_release;
		funcs->backdoor_load = dmub_dcn20_backdoor_load;
		funcs->setup_windows = dmub_dcn20_setup_windows;
		funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
		funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
		funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
		funcs->is_supported = dmub_dcn20_is_supported;
		funcs->is_hw_init = dmub_dcn20_is_hw_init;
		funcs->set_gpint = dmub_dcn20_set_gpint;
		funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked;
		funcs->get_gpint_response = dmub_dcn20_get_gpint_response;
		funcs->get_fw_status = dmub_dcn20_get_fw_boot_status;
		funcs->enable_dmub_boot_options = dmub_dcn20_enable_dmub_boot_options;

		/* DCN21 differs from DCN20 only in its register map. */
		if (asic == DMUB_ASIC_DCN21)
			dmub->regs = &dmub_srv_dcn21_regs;

#ifdef CONFIG_DRM_AMD_DC_DCN3_0
		if (asic == DMUB_ASIC_DCN30) {
			dmub->regs = &dmub_srv_dcn30_regs;

			/* DCN30 overrides backdoor load and window setup. */
			funcs->backdoor_load = dmub_dcn30_backdoor_load;
			funcs->setup_windows = dmub_dcn30_setup_windows;
		}
#endif
		break;

	default:
		return false;
	}

	return true;
}
178 | ||
179 | enum dmub_status dmub_srv_create(struct dmub_srv *dmub, | |
180 | const struct dmub_srv_create_params *params) | |
181 | { | |
182 | enum dmub_status status = DMUB_STATUS_OK; | |
183 | ||
184 | dmub_memset(dmub, 0, sizeof(*dmub)); | |
185 | ||
186 | dmub->funcs = params->funcs; | |
187 | dmub->user_ctx = params->user_ctx; | |
188 | dmub->asic = params->asic; | |
455802c7 | 189 | dmub->fw_version = params->fw_version; |
7c008829 NK |
190 | dmub->is_virtual = params->is_virtual; |
191 | ||
192 | /* Setup asic dependent hardware funcs. */ | |
193 | if (!dmub_srv_hw_setup(dmub, params->asic)) { | |
194 | status = DMUB_STATUS_INVALID; | |
195 | goto cleanup; | |
196 | } | |
197 | ||
198 | /* Override (some) hardware funcs based on user params. */ | |
199 | if (params->hw_funcs) { | |
37ffa7a1 YS |
200 | if (params->hw_funcs->emul_get_inbox1_rptr) |
201 | dmub->hw_funcs.emul_get_inbox1_rptr = | |
202 | params->hw_funcs->emul_get_inbox1_rptr; | |
7c008829 | 203 | |
37ffa7a1 YS |
204 | if (params->hw_funcs->emul_set_inbox1_wptr) |
205 | dmub->hw_funcs.emul_set_inbox1_wptr = | |
206 | params->hw_funcs->emul_set_inbox1_wptr; | |
7c008829 NK |
207 | |
208 | if (params->hw_funcs->is_supported) | |
209 | dmub->hw_funcs.is_supported = | |
210 | params->hw_funcs->is_supported; | |
211 | } | |
212 | ||
213 | /* Sanity checks for required hw func pointers. */ | |
214 | if (!dmub->hw_funcs.get_inbox1_rptr || | |
215 | !dmub->hw_funcs.set_inbox1_wptr) { | |
216 | status = DMUB_STATUS_INVALID; | |
217 | goto cleanup; | |
218 | } | |
219 | ||
220 | cleanup: | |
221 | if (status == DMUB_STATUS_OK) | |
222 | dmub->sw_init = true; | |
223 | else | |
224 | dmub_srv_destroy(dmub); | |
225 | ||
226 | return status; | |
227 | } | |
228 | ||
229 | void dmub_srv_destroy(struct dmub_srv *dmub) | |
230 | { | |
231 | dmub_memset(dmub, 0, sizeof(*dmub)); | |
232 | } | |
233 | ||
234 | enum dmub_status | |
235 | dmub_srv_calc_region_info(struct dmub_srv *dmub, | |
236 | const struct dmub_srv_region_params *params, | |
237 | struct dmub_srv_region_info *out) | |
238 | { | |
239 | struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST]; | |
240 | struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK]; | |
241 | struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA]; | |
242 | struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS]; | |
243 | struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX]; | |
244 | struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF]; | |
2f39835c | 245 | struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE]; |
2277f01d | 246 | struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM]; |
1f0674fd NK |
247 | const struct dmub_fw_meta_info *fw_info; |
248 | uint32_t fw_state_size = DMUB_FW_STATE_SIZE; | |
249 | uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE; | |
2277f01d | 250 | uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE; |
7c008829 NK |
251 | |
252 | if (!dmub->sw_init) | |
253 | return DMUB_STATUS_INVALID; | |
254 | ||
255 | memset(out, 0, sizeof(*out)); | |
256 | ||
257 | out->num_regions = DMUB_NUM_WINDOWS; | |
258 | ||
259 | inst->base = 0x0; | |
260 | inst->top = inst->base + params->inst_const_size; | |
261 | ||
262 | data->base = dmub_align(inst->top, 256); | |
263 | data->top = data->base + params->bss_data_size; | |
264 | ||
1f0674fd NK |
265 | /* |
266 | * All cache windows below should be aligned to the size | |
267 | * of the DMCUB cache line, 64 bytes. | |
268 | */ | |
269 | ||
7c008829 NK |
270 | stack->base = dmub_align(data->top, 256); |
271 | stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE; | |
272 | ||
273 | bios->base = dmub_align(stack->top, 256); | |
274 | bios->top = bios->base + params->vbios_size; | |
275 | ||
276 | mail->base = dmub_align(bios->top, 256); | |
277 | mail->top = mail->base + DMUB_MAILBOX_SIZE; | |
278 | ||
a576b345 | 279 | fw_info = dmub_get_fw_meta_info(params); |
1f0674fd NK |
280 | |
281 | if (fw_info) { | |
282 | fw_state_size = fw_info->fw_region_size; | |
283 | trace_buffer_size = fw_info->trace_buffer_size; | |
284 | } | |
285 | ||
7c008829 | 286 | trace_buff->base = dmub_align(mail->top, 256); |
1f0674fd | 287 | trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64); |
7c008829 | 288 | |
2f39835c | 289 | fw_state->base = dmub_align(trace_buff->top, 256); |
1f0674fd | 290 | fw_state->top = fw_state->base + dmub_align(fw_state_size, 64); |
2f39835c | 291 | |
2277f01d WW |
292 | scratch_mem->base = dmub_align(fw_state->top, 256); |
293 | scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64); | |
294 | ||
295 | out->fb_size = dmub_align(scratch_mem->top, 4096); | |
7c008829 NK |
296 | |
297 | return DMUB_STATUS_OK; | |
298 | } | |
299 | ||
300 | enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, | |
301 | const struct dmub_srv_fb_params *params, | |
302 | struct dmub_srv_fb_info *out) | |
303 | { | |
304 | uint8_t *cpu_base; | |
305 | uint64_t gpu_base; | |
306 | uint32_t i; | |
307 | ||
308 | if (!dmub->sw_init) | |
309 | return DMUB_STATUS_INVALID; | |
310 | ||
311 | memset(out, 0, sizeof(*out)); | |
312 | ||
313 | if (params->region_info->num_regions != DMUB_NUM_WINDOWS) | |
314 | return DMUB_STATUS_INVALID; | |
315 | ||
316 | cpu_base = (uint8_t *)params->cpu_addr; | |
317 | gpu_base = params->gpu_addr; | |
318 | ||
319 | for (i = 0; i < DMUB_NUM_WINDOWS; ++i) { | |
320 | const struct dmub_region *reg = | |
321 | ¶ms->region_info->regions[i]; | |
322 | ||
323 | out->fb[i].cpu_addr = cpu_base + reg->base; | |
324 | out->fb[i].gpu_addr = gpu_base + reg->base; | |
325 | out->fb[i].size = reg->top - reg->base; | |
326 | } | |
327 | ||
328 | out->num_fb = DMUB_NUM_WINDOWS; | |
329 | ||
330 | return DMUB_STATUS_OK; | |
331 | } | |
332 | ||
333 | enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub, | |
334 | bool *is_supported) | |
335 | { | |
336 | *is_supported = false; | |
337 | ||
338 | if (!dmub->sw_init) | |
339 | return DMUB_STATUS_INVALID; | |
340 | ||
341 | if (dmub->hw_funcs.is_supported) | |
342 | *is_supported = dmub->hw_funcs.is_supported(dmub); | |
343 | ||
344 | return DMUB_STATUS_OK; | |
345 | } | |
346 | ||
c09eeee4 NK |
347 | enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init) |
348 | { | |
349 | *is_hw_init = false; | |
350 | ||
351 | if (!dmub->sw_init) | |
352 | return DMUB_STATUS_INVALID; | |
353 | ||
e5f0b521 NK |
354 | if (!dmub->hw_init) |
355 | return DMUB_STATUS_OK; | |
356 | ||
c09eeee4 NK |
357 | if (dmub->hw_funcs.is_hw_init) |
358 | *is_hw_init = dmub->hw_funcs.is_hw_init(dmub); | |
359 | ||
360 | return DMUB_STATUS_OK; | |
361 | } | |
362 | ||
7c008829 NK |
363 | enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, |
364 | const struct dmub_srv_hw_params *params) | |
365 | { | |
366 | struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST]; | |
367 | struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK]; | |
368 | struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA]; | |
369 | struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS]; | |
370 | struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX]; | |
371 | struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; | |
2f39835c | 372 | struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; |
2277f01d | 373 | struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; |
7c008829 NK |
374 | |
375 | struct dmub_rb_init_params rb_params; | |
2f39835c | 376 | struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6; |
7c008829 NK |
377 | struct dmub_region inbox1; |
378 | ||
379 | if (!dmub->sw_init) | |
380 | return DMUB_STATUS_INVALID; | |
381 | ||
382 | dmub->fb_base = params->fb_base; | |
383 | dmub->fb_offset = params->fb_offset; | |
384 | dmub->psp_version = params->psp_version; | |
385 | ||
386 | if (inst_fb && data_fb) { | |
387 | cw0.offset.quad_part = inst_fb->gpu_addr; | |
388 | cw0.region.base = DMUB_CW0_BASE; | |
389 | cw0.region.top = cw0.region.base + inst_fb->size - 1; | |
390 | ||
391 | cw1.offset.quad_part = stack_fb->gpu_addr; | |
392 | cw1.region.base = DMUB_CW1_BASE; | |
393 | cw1.region.top = cw1.region.base + stack_fb->size - 1; | |
394 | ||
dee5d542 NK |
395 | /** |
396 | * Read back all the instruction memory so we don't hang the | |
397 | * DMCUB when backdoor loading if the write from x86 hasn't been | |
398 | * flushed yet. This only occurs in backdoor loading. | |
399 | */ | |
400 | dmub_flush_buffer_mem(inst_fb); | |
401 | ||
ab16c736 | 402 | if (params->load_inst_const && dmub->hw_funcs.backdoor_load) |
7c008829 NK |
403 | dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1); |
404 | } | |
405 | ||
406 | if (dmub->hw_funcs.reset) | |
407 | dmub->hw_funcs.reset(dmub); | |
408 | ||
2f39835c | 409 | if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb && |
2277f01d | 410 | fw_state_fb && scratch_mem_fb) { |
7c008829 NK |
411 | cw2.offset.quad_part = data_fb->gpu_addr; |
412 | cw2.region.base = DMUB_CW0_BASE + inst_fb->size; | |
413 | cw2.region.top = cw2.region.base + data_fb->size; | |
414 | ||
415 | cw3.offset.quad_part = bios_fb->gpu_addr; | |
b9e9f11c | 416 | cw3.region.base = DMUB_CW3_BASE; |
7c008829 NK |
417 | cw3.region.top = cw3.region.base + bios_fb->size; |
418 | ||
419 | cw4.offset.quad_part = mail_fb->gpu_addr; | |
562c805f | 420 | cw4.region.base = DMUB_CW4_BASE; |
7c008829 NK |
421 | cw4.region.top = cw4.region.base + mail_fb->size; |
422 | ||
423 | inbox1.base = cw4.region.base; | |
424 | inbox1.top = cw4.region.top; | |
425 | ||
426 | cw5.offset.quad_part = tracebuff_fb->gpu_addr; | |
427 | cw5.region.base = DMUB_CW5_BASE; | |
428 | cw5.region.top = cw5.region.base + tracebuff_fb->size; | |
429 | ||
2f39835c NK |
430 | cw6.offset.quad_part = fw_state_fb->gpu_addr; |
431 | cw6.region.base = DMUB_CW6_BASE; | |
432 | cw6.region.top = cw6.region.base + fw_state_fb->size; | |
433 | ||
434 | dmub->fw_state = fw_state_fb->cpu_addr; | |
435 | ||
2277f01d WW |
436 | dmub->scratch_mem_fb = *scratch_mem_fb; |
437 | ||
7c008829 | 438 | if (dmub->hw_funcs.setup_windows) |
2f39835c NK |
439 | dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, |
440 | &cw5, &cw6); | |
7c008829 NK |
441 | |
442 | if (dmub->hw_funcs.setup_mailbox) | |
443 | dmub->hw_funcs.setup_mailbox(dmub, &inbox1); | |
444 | } | |
445 | ||
446 | if (mail_fb) { | |
447 | dmub_memset(&rb_params, 0, sizeof(rb_params)); | |
448 | rb_params.ctx = dmub; | |
449 | rb_params.base_address = mail_fb->cpu_addr; | |
450 | rb_params.capacity = DMUB_RB_SIZE; | |
451 | ||
452 | dmub_rb_init(&dmub->inbox1_rb, &rb_params); | |
453 | } | |
454 | ||
8f95ff28 EY |
455 | /* Report to DMUB what features are supported by current driver */ |
456 | if (dmub->hw_funcs.enable_dmub_boot_options) | |
457 | dmub->hw_funcs.enable_dmub_boot_options(dmub); | |
458 | ||
7c008829 NK |
459 | if (dmub->hw_funcs.reset_release) |
460 | dmub->hw_funcs.reset_release(dmub); | |
461 | ||
462 | dmub->hw_init = true; | |
463 | ||
464 | return DMUB_STATUS_OK; | |
465 | } | |
466 | ||
0167da49 NK |
467 | enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) |
468 | { | |
469 | if (!dmub->sw_init) | |
470 | return DMUB_STATUS_INVALID; | |
471 | ||
472 | if (dmub->hw_init == false) | |
473 | return DMUB_STATUS_OK; | |
474 | ||
475 | if (dmub->hw_funcs.reset) | |
476 | dmub->hw_funcs.reset(dmub); | |
477 | ||
478 | dmub->hw_init = false; | |
479 | ||
480 | return DMUB_STATUS_OK; | |
481 | } | |
482 | ||
7c008829 | 483 | enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, |
0ed3bcc4 | 484 | const union dmub_rb_cmd *cmd) |
7c008829 NK |
485 | { |
486 | if (!dmub->hw_init) | |
487 | return DMUB_STATUS_INVALID; | |
488 | ||
489 | if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) | |
490 | return DMUB_STATUS_OK; | |
491 | ||
492 | return DMUB_STATUS_QUEUE_FULL; | |
493 | } | |
494 | ||
/*
 * dmub_srv_cmd_execute - publish all queued inbox1 commands to the
 * firmware.
 *
 * Flushes pending ring-buffer writes to memory, then updates the inbox1
 * write pointer so the DMCUB starts consuming. The flush must happen
 * before the wptr update. Returns DMUB_STATUS_INVALID when hardware is
 * not initialized.
 */
enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
{
	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	/**
	 * Read back all the queued commands to ensure that they've
	 * been flushed to framebuffer memory. Otherwise DMCUB might
	 * read back stale, fully invalid or partially invalid data.
	 */
	dmub_rb_flush_pending(&dmub->inbox1_rb);

	/* Kick the firmware by publishing the new write pointer. */
	dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
	return DMUB_STATUS_OK;
}
510 | ||
7c008829 NK |
511 | enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, |
512 | uint32_t timeout_us) | |
513 | { | |
514 | uint32_t i; | |
515 | ||
a4942118 | 516 | if (!dmub->hw_init) |
7c008829 NK |
517 | return DMUB_STATUS_INVALID; |
518 | ||
519 | for (i = 0; i <= timeout_us; i += 100) { | |
8f95ff28 | 520 | union dmub_fw_boot_status status = dmub->hw_funcs.get_fw_status(dmub); |
7c008829 | 521 | |
8f95ff28 | 522 | if (status.bits.dal_fw && status.bits.mailbox_rdy) |
7c008829 NK |
523 | return DMUB_STATUS_OK; |
524 | ||
8f95ff28 | 525 | udelay(100); |
7c008829 NK |
526 | } |
527 | ||
56fc13fe | 528 | return DMUB_STATUS_TIMEOUT; |
7c008829 NK |
529 | } |
530 | ||
531 | enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, | |
532 | uint32_t timeout_us) | |
533 | { | |
534 | uint32_t i; | |
535 | ||
536 | if (!dmub->hw_init) | |
537 | return DMUB_STATUS_INVALID; | |
538 | ||
539 | for (i = 0; i <= timeout_us; ++i) { | |
37ffa7a1 | 540 | dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); |
7c008829 NK |
541 | if (dmub_rb_empty(&dmub->inbox1_rb)) |
542 | return DMUB_STATUS_OK; | |
543 | ||
544 | udelay(1); | |
545 | } | |
546 | ||
547 | return DMUB_STATUS_TIMEOUT; | |
548 | } | |
fbbd3f8f NK |
549 | |
550 | enum dmub_status | |
551 | dmub_srv_send_gpint_command(struct dmub_srv *dmub, | |
552 | enum dmub_gpint_command command_code, | |
553 | uint16_t param, uint32_t timeout_us) | |
554 | { | |
555 | union dmub_gpint_data_register reg; | |
556 | uint32_t i; | |
557 | ||
558 | if (!dmub->sw_init) | |
559 | return DMUB_STATUS_INVALID; | |
560 | ||
561 | if (!dmub->hw_funcs.set_gpint) | |
562 | return DMUB_STATUS_INVALID; | |
563 | ||
564 | if (!dmub->hw_funcs.is_gpint_acked) | |
565 | return DMUB_STATUS_INVALID; | |
566 | ||
567 | reg.bits.status = 1; | |
568 | reg.bits.command_code = command_code; | |
569 | reg.bits.param = param; | |
570 | ||
571 | dmub->hw_funcs.set_gpint(dmub, reg); | |
572 | ||
573 | for (i = 0; i < timeout_us; ++i) { | |
574 | if (dmub->hw_funcs.is_gpint_acked(dmub, reg)) | |
575 | return DMUB_STATUS_OK; | |
576 | } | |
577 | ||
578 | return DMUB_STATUS_TIMEOUT; | |
579 | } | |
580 | ||
581 | enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub, | |
582 | uint32_t *response) | |
583 | { | |
584 | *response = 0; | |
585 | ||
586 | if (!dmub->sw_init) | |
587 | return DMUB_STATUS_INVALID; | |
588 | ||
589 | if (!dmub->hw_funcs.get_gpint_response) | |
590 | return DMUB_STATUS_INVALID; | |
591 | ||
592 | *response = dmub->hw_funcs.get_gpint_response(dmub); | |
593 | ||
594 | return DMUB_STATUS_OK; | |
595 | } |