/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "vega10/soc15ip.h"
#include "vega10/NBIO/nbio_6_1_offset.h"
#include "vega10/NBIO/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

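/*
 * Acknowledge the message in the receive mailbox by setting RCV_MSG_ACK in
 * the mailbox control register, then wait for the host (PF) to clear
 * RCV_MSG_VALID, giving up once the AI_MAILBOX_TIMEDOUT budget is spent.
 */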
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	u32 reg;
	int timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_CONTROL), reg);

	/* Wait for RCV_MSG_VALID to be 0 */
	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (reg & mask) {
		if (timeout <= 0) {
			pr_err("RCV_MSG_VALID is not cleared\n");
			break;
		}
		mdelay(1);
		timeout -= 1;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}
}

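/* Set or clear TRN_MSG_VALID to tell the host whether the transmit mailbox holds a message. */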
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,
			    TRN_MSG_VALID, val ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL),
		      reg);
}

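/*
 * Check whether the message currently in the receive mailbox matches the
 * expected event. For events other than IDH_FLR_NOTIFICATION_CMPL the
 * RCV_MSG_VALID bit must be set first; on a match the message is acked.
 * Returns 0 on a match, -ENOENT otherwise.
 */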
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
		if (!(reg & mask))
			return -ENOENT;
	}

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

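/* Poll TRN_MSG_ACK until the host acknowledges the transmitted message or the timeout budget expires. */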
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK);
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (!(reg & mask)) {
		if (timeout <= 0) {
			pr_err("Doesn't get ack from pf.\n");
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}

	return r;
}

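/* Poll the receive mailbox until the expected event arrives or the timeout budget expires. */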
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;

	r = xgpu_ai_mailbox_rcv_msg(adev, event);
	while (r) {
		if (timeout <= 0) {
			pr_err("Doesn't get msg:%d from pf.\n", event);
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		r = xgpu_ai_mailbox_rcv_msg(adev, event);
	}

	return r;
}

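/*
 * Write a request and up to three data words into the transmit mailbox,
 * raise TRN_MSG_VALID, wait for the host's ack, then drop TRN_MSG_VALID
 * so the next message can be sent.
 */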
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

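/*
 * Send an access request to the host and, for GPU init/fini/reset access
 * requests, wait for the host to reply with IDH_READY_TO_ACCESS_GPU.
 */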
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* wait for the READY_TO_ACCESS_GPU reply for init/fini/reset access requests */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

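/* Enable or disable the mailbox ack interrupt (ACK_INT_EN in BIF_BX_PF0_MAILBOX_INT_CNTL). */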
static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

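/*
 * Deferred FLR handling: wait for the host to signal that the function
 * level reset has completed, then trigger GPU recovery.
 */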
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	/* wait until RCV_MSG becomes 3 */
	if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
		pr_err("failed to receive FLR_CMPL\n");
		return;
	}

	/* Trigger recovery due to world switch failure */
	amdgpu_gpu_recover(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

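/*
 * Interrupt handler for incoming mailbox messages. When the driver's own
 * TDR is disabled (amdgpu_lockup_timeout == 0), an FLR notification from
 * the host schedules flr_work, which performs the recovery once the FLR
 * has completed.
 */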
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	/* trigger gpu-reset by hypervisor only if TDR disabled */
	if (amdgpu_lockup_timeout == 0) {
		/* see what event we get */
		r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);

		/* sometimes injection of the interrupt into the VM is delayed; in
		 * that case the IDH_FLR_NOTIFICATION is overwritten by the VF FLR
		 * from the GIM side, so the receive above can fail. Schedule the
		 * flr_work anyway.
		 */
		if (r) {
			DRM_ERROR("FLR_NOTIFICATION is missed\n");
			xgpu_ai_mailbox_send_ack(adev);
		}

		schedule_work(&adev->virt.flr_work);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

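/* Register the BIF mailbox receive (source id 135) and ack (source id 138) interrupt sources. */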
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

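/* Enable both mailbox interrupts and set up the deferred FLR work item. */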
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
};