/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

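/* Ack the message the host placed in the RCV mailbox by writing the ack
 * bit (0x2) of the VF's mailbox control RCV byte.
 */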
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

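/* Raise or drop TRN_MSG_VALID (bit 0 of the mailbox control TRN byte) to
 * tell the host whether the contents of TRN_DW0..DW3 are valid.
 */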
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return the
 * correct value, since it reads RCV_DW0 without checking whether RCV_MSG_VALID
 * has been set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

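/* Read RCV_DW0 and compare it with the expected event; on a match, ack
 * the message and return 0, otherwise return -ENOENT.
 */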
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

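/* Peek at TRN_MSG_ACK (bit 1 of the mailbox control TRN byte) without
 * consuming it.
 */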
static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

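/* Wait for the host to ack our last TRN message, polling in 5 msec steps
 * up to AI_MAILBOX_POLL_ACK_TIMEDOUT msec.
 */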
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Didn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

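/* Wait for a specific event from the host, polling in 10 msec steps up
 * to AI_MAILBOX_POLL_MSG_TIMEDOUT msec.
 */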
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

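/* Compose a VF-to-host request in TRN_DW0..DW3 and raise TRN_MSG_VALID;
 * the handshake loop below first makes sure any stale ack is cleared.
 */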
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_VALID; with that
	 * cleared, hw automatically clears the host's RCV_MSG_ACK, which in
	 * turn clears the VF's TRN_MSG_ACK. Otherwise the xgpu_ai_poll_ack()
	 * below would return immediately.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not be asserted! waiting again!\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continuing\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

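/* Query the available SCLK or MCLK DPM levels from the PF. On success the
 * host returns, in RCV_DW1, an offset into the pf2vf reserve region where
 * a NUL-terminated clock-table string was placed; copy it into buf and
 * return its length.
 */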
static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
{
	int r = 0;
	u32 req, val, size;

	if (!amdgim_is_hwperf(adev) || buf == NULL)
		return -EBADRQC;

	switch (type) {
	case PP_SCLK:
		req = IDH_IRQ_GET_PP_SCLK;
		break;
	case PP_MCLK:
		req = IDH_IRQ_GET_PP_MCLK;
		break;
	default:
		return -EBADRQC;
	}

	mutex_lock(&adev->virt.dpm_mutex);

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
	if (!r && adev->fw_vram_usage.va != NULL) {
		val = RREG32_NO_KIQ(
			SOC15_REG_OFFSET(NBIO, 0,
					 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
		size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
				val), PAGE_SIZE);

		if (size < PAGE_SIZE)
			strcpy(buf, ((char *)adev->virt.fw_reserve.p_pf2vf + val));
		else
			size = 0;

		r = size;
		goto out;
	}

	r = xgpu_ai_poll_msg(adev, IDH_FAIL);
	if (r)
		pr_info("%s DPM request failed\n",
			(type == PP_SCLK) ? "SCLK" : "MCLK");

out:
	mutex_unlock(&adev->virt.dpm_mutex);
	return r;
}

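/* Ask the PF to force the given DPM level on behalf of this VF. */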
static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
{
	int r = 0;
	u32 req = IDH_IRQ_FORCE_DPM_LEVEL;

	if (!amdgim_is_hwperf(adev))
		return -EBADRQC;

	mutex_lock(&adev->virt.dpm_mutex);
	xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);

	r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
	if (!r)
		goto out;

	r = xgpu_ai_poll_msg(adev, IDH_FAIL);
	if (!r)
		pr_info("DPM request failed\n");
	else
		pr_info("Mailbox is broken\n");

out:
	mutex_unlock(&adev->virt.dpm_mutex);
	return r;
}

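/* Send an exclusive-access request to the host; for init/fini/reset
 * requests, also wait for READY_TO_ACCESS_GPU before returning.
 */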
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* poll for READY_TO_ACCESS_GPU for init/fini/reset access requests */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Didn't get READY_TO_ACCESS_GPU from pf, giving up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

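/* TRN_MSG_ACK interrupt handler; the ack itself is consumed by the
 * polling in xgpu_ai_poll_ack(), so there is nothing to do here.
 */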
static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

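/* Worker scheduled on IDH_FLR_NOTIFICATION: hold off GPU recovery until
 * the host signals FLR_NOTIFICATION_CMPL or the poll times out.
 */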
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

	/* Block amdgpu_gpu_recover till the FLR COMPLETE msg is received,
	 * otherwise the mailbox msg will be ruined/reset by the VF FLR.
	 *
	 * We can unlock lock_reset to allow "amdgpu_job_timedout" to run
	 * gpu_recover() after FLR_NOTIFICATION_CMPL is received, which means
	 * the host side has finished this VF's FLR.
	 */
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = 1;

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = 0;
		mutex_unlock(&adev->lock_reset);
	}

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev) &&
	    amdgpu_lockup_timeout == MAX_SCHEDULE_TIMEOUT)
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

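/* RCV_MSG_VALID interrupt handler: dispatch incoming host-to-VF messages. */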
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	case IDH_QUERY_ALIVE:
		xgpu_ai_mailbox_send_ack(adev);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * ignore it here since the polling thread will handle it. Other
	 * messages, such as FLR complete, are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

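/* Register the BIF mailbox interrupt sources; per the bindings below,
 * src id 135 is the rcv (msg valid) interrupt and 138 is the ack interrupt.
 */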
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
	.get_pp_clk = xgpu_ai_get_pp_clk,
	.force_dpm_level = xgpu_ai_force_dpm_level,
};