drm/amdgpu: SRIOV flr_work should use down_write
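
flr_work must be serialized against in-flight GPU resets: it claims
adev->in_gpu_reset via atomic_cmpxchg() and then takes adev->reset_sem
with down_write(), so the FLR handshake holds the reset lock exclusively
until FLR_NOTIFICATION_CMPL arrives from the host (or the poll times out).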
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

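/*
 * Mailbox control byte layout, as used by the helpers below: in the TRN
 * control byte, bit 0 is TRN_MSG_VALID and bit 1 is TRN_MSG_ACK; writing
 * 2 to the RCV control byte acks the message received from the host.
 */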
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg cannot be guaranteed to
 * return the correct value, since RCV_DW0 may be read while RCV_MSG_VALID
 * is not set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

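/*
 * Send one request (plus up to three data words) to the host: drain any
 * stale ack, write MSGBUF_TRN_DW0..DW3, raise TRN_MSG_VALID, then poll
 * for the host's TRN_MSG_ACK before dropping the valid bit again.
 */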
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * clear TRN_MSG_VALID to make the host clear its RCV_MSG_VALID;
	 * with that cleared, hw automatically clears the host's RCV_MSG_ACK,
	 * which in turn clears the VF's TRN_MSG_ACK. Otherwise
	 * xgpu_ai_poll_ack() below would return immediately on a stale ack.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again !\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

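/*
 * For the three ACCESS requests (init/fini/reset) the host replies with
 * READY_TO_ACCESS_GPU, which is collected here by polling; for init and
 * reset the firmware checksum key is then read back from MSGBUF_RCV_DW2.
 */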
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start to check msg if request is an access request */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

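/* Retry the reset request up to AI_MAILBOX_POLL_MSG_REP_MAX times. */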
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < AI_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

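/*
 * FLR handler, run from process context. It serializes against
 * amdgpu_device_gpu_recover(): it wins the in_gpu_reset flag via
 * atomic_cmpxchg() and holds reset_sem as a writer across the whole FLR
 * handshake, so no reset can run concurrently while it notifies the host
 * that the VF is ready and polls for FLR_NOTIFICATION_CMPL.
 */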
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;

	/* block amdgpu_device_gpu_recover() until the FLR COMPLETE msg is
	 * received, otherwise the mailbox msg will be clobbered/reset by
	 * the VF FLR.
	 */
	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
		return;

	down_write(&adev->reset_sem);

	amdgpu_virt_fini_data_exchange(adev);

	xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->in_gpu_reset, 0);
	up_write(&adev->reset_sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
	    && (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

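/*
 * IRQ handler for host->VF messages. An FLR cannot be handled in IRQ
 * context, so IDH_FLR_NOTIFICATION only schedules flr_work here.
 */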
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	case IDH_QUERY_ALIVE:
		xgpu_ai_mailbox_send_ack(adev);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ
	 * handler can ignore it since the polling thread will handle it;
	 * other msgs such as FLR complete are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

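/*
 * Register the two BIF mailbox interrupt sources: src id 135 for the
 * msg-valid (rcv) interrupt and 138 for the ack interrupt.
 */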
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

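/* Enable both mailbox interrupts and set up the FLR worker. */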
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

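/* Virtualization hooks wired into the amdgpu virt layer. */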
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu	= xgpu_ai_request_full_gpu_access,
	.rel_full_gpu	= xgpu_ai_release_full_gpu_access,
	.reset_gpu	= xgpu_ai_request_reset,
	.wait_reset	= NULL,
	.trans_msg	= xgpu_ai_mailbox_trans_msg,
};