/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

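/* VF-side mailbox handling for Navi-family SR-IOV: the guest (VF) driver
 * exchanges messages with the host (PF) through the BIF_BX_PF_MAILBOX_*
 * message-buffer registers, while the valid/ack handshake bits are driven
 * by byte accesses at the NV_MAIBOX_CONTROL_*_OFFSET_BYTE offsets.
 */

/* Ack a message from the host by setting RCV_MSG_ACK (bit 1) in the
 * receive-control byte.
 */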
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

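/* Raise or drop TRN_MSG_VALID (bit 0) in the transmit-control byte to tell
 * the host whether a new message is ready in the TRN message buffer.
 */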
static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * peek_msg may *only* be called from the IRQ routine, because within the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL is
 * guaranteed to have already been set to 1 by the host.
 *
 * If not called from the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since RCV_DW0 is only meaningful while RCV_MSG_VALID
 * is set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
}

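/* Consume the pending mailbox message if it matches @event: ack it to the
 * host and return 0; otherwise leave it in place and return -ENOENT.
 */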
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return 0;
}

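/* TRN_MSG_ACK (bit 1) of the transmit-control byte reflects the host's ack
 * of our last transmitted message.
 */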
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

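/* Busy-wait (mdelay) for the host to ack our transmitted message; gives up
 * after NV_MAILBOX_POLL_ACK_TIMEDOUT msec.
 */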
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Didn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

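/* Sleep-poll (msleep) for a specific message from the host; gives up after
 * NV_MAILBOX_POLL_MSG_TIMEDOUT msec.
 */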
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

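/* Transmit one request to the host: wait for the previous ack to clear,
 * write req plus up to three data dwords into the TRN message buffer,
 * raise TRN_MSG_VALID, then poll for the host's ack and drop VALID again.
 */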
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
	enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_VALID; with the
	 * host's RCV_MSG_VALID cleared, hw automatically clears the host's
	 * RCV_MSG_ACK, which in turn clears the VF's TRN_MSG_ACK. Otherwise
	 * the xgpu_nv_poll_ack() below would return immediately.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not be asserted! Waiting again!\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continuing\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

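/* Send an access request to the host and, for the request types that get an
 * explicit reply, wait for IDH_READY_TO_ACCESS_GPU before returning.
 */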
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* poll for the host's reply for init/fini/reset access requests */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
		req == IDH_REQ_GPU_FINI_ACCESS ||
		req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Didn't get READY_TO_ACCESS_GPU from pf, giving up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("Got ack interrupt, nothing to do.\n");
	return 0;
}

static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);

	return 0;
}

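/* Deferred work run when the host signals a function-level reset (FLR) of
 * this VF: hold off GPU recovery until the host reports FLR completion,
 * then trigger recovery ourselves if no per-ring TDR is armed.
 */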
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

	/* Block amdgpu_gpu_recover() until the FLR COMPLETE msg is received,
	 * otherwise the mailbox msg will be ruined/reset by the VF FLR.
	 *
	 * We unlock lock_reset to allow "amdgpu_job_timedout" to run
	 * gpu_recover() only after FLR_NOTIFICATION_CMPL is received, which
	 * means the host side has finished this VF's FLR.
	 */
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = true;

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = false;
		mutex_unlock(&adev->lock_reset);
	}

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		    adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		    adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		    adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);

	return 0;
}

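/* IRQ handler for a valid message from the host: only FLR notifications are
 * acted on here; everything else is left to the polling paths.
 */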
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * safely ignore it here since the polling thread will handle it;
	 * other msgs such as FLR complete are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

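/* Register the BIF mailbox interrupt sources: src_id 135 feeds rcv_irq
 * (message valid) and src_id 138 feeds ack_irq (message ack).
 */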
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

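/* Entry points used by the amdgpu virtualization core; .wait_reset is left
 * NULL for this generation.
 */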
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu = xgpu_nv_request_full_gpu_access,
	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
	.reset_gpu = xgpu_nv_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
};