Commit | Line | Data |
---|---|---|
75d16923 LL |
1 | /* |
2 | * Copyright 2022 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | */ | |
23 | #include "amdgpu.h" | |
24 | #include "amdgpu_xcp.h" | |
2c1c7ba4 JZ |
25 | #include "amdgpu_drv.h" |
26 | ||
27 | #include <drm/drm_drv.h> | |
9938333a | 28 | #include "../amdxcp/amdgpu_xcp_drv.h" |
75d16923 LL |
29 | |
30 | static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr, | |
31 | struct amdgpu_xcp_ip *xcp_ip, int xcp_state) | |
32 | { | |
33 | int (*run_func)(void *handle, uint32_t inst_mask); | |
34 | int ret = 0; | |
35 | ||
36 | if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs) | |
37 | return 0; | |
38 | ||
39 | run_func = NULL; | |
40 | ||
41 | switch (xcp_state) { | |
42 | case AMDGPU_XCP_PREPARE_SUSPEND: | |
43 | run_func = xcp_ip->ip_funcs->prepare_suspend; | |
44 | break; | |
45 | case AMDGPU_XCP_SUSPEND: | |
46 | run_func = xcp_ip->ip_funcs->suspend; | |
47 | break; | |
48 | case AMDGPU_XCP_PREPARE_RESUME: | |
49 | run_func = xcp_ip->ip_funcs->prepare_resume; | |
50 | break; | |
51 | case AMDGPU_XCP_RESUME: | |
52 | run_func = xcp_ip->ip_funcs->resume; | |
53 | break; | |
54 | } | |
55 | ||
56 | if (run_func) | |
57 | ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask); | |
58 | ||
59 | return ret; | |
60 | } | |
61 | ||
62 | static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, | |
63 | int state) | |
64 | { | |
65 | struct amdgpu_xcp_ip *xcp_ip; | |
66 | struct amdgpu_xcp *xcp; | |
67 | int i, ret; | |
68 | ||
194224a5 | 69 | if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid) |
75d16923 LL |
70 | return -EINVAL; |
71 | ||
72 | xcp = &xcp_mgr->xcp[xcp_id]; | |
73 | for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) { | |
74 | xcp_ip = &xcp->ip[i]; | |
75 | ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state); | |
76 | if (ret) | |
77 | break; | |
78 | } | |
79 | ||
80 | return ret; | |
81 | } | |
82 | ||
/* Run the PREPARE_SUSPEND transition on all IP blocks of @xcp_id. */
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_SUSPEND);
}
88 | ||
/* Run the SUSPEND transition on all IP blocks of @xcp_id. */
int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}
93 | ||
/* Run the PREPARE_RESUME transition on all IP blocks of @xcp_id. */
int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_RESUME);
}
99 | ||
/* Run the RESUME transition on all IP blocks of @xcp_id. */
int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}
104 | ||
105 | static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, | |
106 | struct amdgpu_xcp_ip *ip) | |
107 | { | |
108 | struct amdgpu_xcp *xcp; | |
109 | ||
110 | if (!ip) | |
111 | return; | |
112 | ||
113 | xcp = &xcp_mgr->xcp[xcp_id]; | |
114 | xcp->ip[ip->ip_id] = *ip; | |
115 | xcp->ip[ip->ip_id].valid = true; | |
116 | ||
117 | xcp->valid = true; | |
118 | } | |
119 | ||
/* (Re)build the per-partition state for @num_xcps partitions in @mode.
 *
 * Marks every slot invalid, then for each of the first @num_xcps
 * partitions queries the IP details from the backend callbacks and
 * records them, along with the partition id and (when the backend
 * provides it) the memory partition id.  Finally updates the scheduler
 * partition list.  Returns -EINVAL for a zero or out-of-range
 * @num_xcps, 0 otherwise.
 */
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	struct amdgpu_xcp_ip ip;
	uint8_t mem_id;
	int i, j, ret;

	if (!num_xcps || num_xcps > MAX_XCP)
		return -EINVAL;

	xcp_mgr->mode = mode;

	/* Invalidate everything first; only queried partitions become valid. */
	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].valid = false;

	/* This is needed for figuring out memory id of xcp */
	xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;

	for (i = 0; i < num_xcps; ++i) {
		for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
			/* A failed query just skips that IP block, it is
			 * not treated as a fatal error.
			 */
			ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
							     &ip);
			if (ret)
				continue;

			__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
		}

		xcp_mgr->xcp[i].id = i;

		if (xcp_mgr->funcs->get_xcp_mem_id) {
			ret = xcp_mgr->funcs->get_xcp_mem_id(
				xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
			if (ret)
				/* mem_id left untouched on failure */
				continue;
			else
				xcp_mgr->xcp[i].mem_id = mem_id;
		}
	}

	xcp_mgr->num_xcps = num_xcps;
	amdgpu_xcp_update_partition_sched_list(adev);

	return 0;
}
165 | ||
c45e38f2 LL |
166 | static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, |
167 | int mode) | |
75d16923 | 168 | { |
46d79cbf | 169 | int ret, curr_mode, num_xcps = 0; |
75d16923 | 170 | |
75d16923 LL |
171 | if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode) |
172 | return 0; | |
173 | ||
174 | mutex_lock(&xcp_mgr->xcp_lock); | |
175 | ||
46d79cbf LL |
176 | curr_mode = xcp_mgr->mode; |
177 | /* State set to transient mode */ | |
178 | xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS; | |
179 | ||
75d16923 LL |
180 | ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps); |
181 | ||
46d79cbf LL |
182 | if (ret) { |
183 | /* Failed, get whatever mode it's at now */ | |
184 | if (xcp_mgr->funcs->query_partition_mode) | |
185 | xcp_mgr->mode = amdgpu_xcp_query_partition_mode( | |
186 | xcp_mgr, AMDGPU_XCP_FL_LOCKED); | |
187 | else | |
188 | xcp_mgr->mode = curr_mode; | |
189 | ||
75d16923 | 190 | goto out; |
46d79cbf | 191 | } |
75d16923 | 192 | |
75d16923 LL |
193 | out: |
194 | mutex_unlock(&xcp_mgr->xcp_lock); | |
195 | ||
196 | return ret; | |
197 | } | |
198 | ||
c45e38f2 LL |
/* Public entry for switching partition mode.
 *
 * Rejects a missing manager or AMDGPU_XCP_MODE_NONE with -EINVAL,
 * short-circuits to success when already in @mode, and otherwise
 * defers to __amdgpu_xcp_switch_partition_mode().
 */
int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
		return -EINVAL;

	if (xcp_mgr->mode == mode)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, mode);
}
209 | ||
/* Re-apply the currently cached partition mode (e.g. after a reset).
 * A missing manager or AMDGPU_XCP_MODE_NONE is treated as nothing to
 * restore and returns 0.
 */
int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	if (!xcp_mgr || xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}
217 | ||
ded7d99e | 218 | int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) |
75d16923 LL |
219 | { |
220 | int mode; | |
221 | ||
222 | if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) | |
223 | return xcp_mgr->mode; | |
224 | ||
225 | if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode) | |
226 | return xcp_mgr->mode; | |
227 | ||
ded7d99e LL |
228 | if (!(flags & AMDGPU_XCP_FL_LOCKED)) |
229 | mutex_lock(&xcp_mgr->xcp_lock); | |
75d16923 | 230 | mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr); |
46d79cbf | 231 | if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode) |
75d16923 LL |
232 | dev_WARN( |
233 | xcp_mgr->adev->dev, | |
234 | "Cached partition mode %d not matching with device mode %d", | |
235 | xcp_mgr->mode, mode); | |
236 | ||
ded7d99e LL |
237 | if (!(flags & AMDGPU_XCP_FL_LOCKED)) |
238 | mutex_unlock(&xcp_mgr->xcp_lock); | |
75d16923 LL |
239 | |
240 | return mode; | |
241 | } | |
242 | ||
2c1c7ba4 JZ |
/* Allocate drm device nodes for the secondary partitions.
 *
 * xcp #0 reuses adev's own drm device.  For xcp #1..MAX_XCP-1 a fresh
 * drm device is allocated and its render/primary minors, driver and
 * vma_offset_manager are redirected to the primary device, after the
 * originals are saved so amdgpu_xcp_dev_unplug() can restore them.
 * Running out of drm minor space (-ENOSPC) just truncates the set of
 * partition nodes and is not an error; any other allocation failure is
 * propagated.
 */
static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	struct drm_device *ddev;
	int i, ret;

	ddev = adev_to_drm(adev);

	/* xcp #0 shares drm device setting with adev */
	adev->xcp_mgr->xcp->ddev = ddev;

	for (i = 1; i < MAX_XCP; i++) {
		ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
		if (ret == -ENOSPC) {
			dev_warn(adev->dev,
			"Skip xcp node #%d when out of drm node resource.", i);
			return 0;
		} else if (ret) {
			return ret;
		}

		/* Redirect all IOCTLs to the primary device */
		/* Save the originals first; dev_unplug restores them. */
		adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
		adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
		adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
		adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
		p_ddev->render->dev = ddev;
		p_ddev->primary->dev = ddev;
		p_ddev->vma_offset_manager = ddev->vma_offset_manager;
		p_ddev->driver = &amdgpu_partition_driver;
		adev->xcp_mgr->xcp[i].ddev = p_ddev;
	}

	return 0;
}
278 | ||
75d16923 LL |
/* Create and attach the XCP manager for @adev.
 *
 * @init_mode: initial partition mode (AMDGPU_XCP_MODE_NONE skips the
 *             initial amdgpu_xcp_init() pass).
 * @init_num_xcps: number of partitions for the initial mode.
 * @xcp_funcs: backend callbacks; switch_partition_mode and
 *             get_ip_details are mandatory.
 *
 * Returns -EINVAL on missing callbacks, -ENOMEM on allocation failure,
 * otherwise the result of amdgpu_xcp_dev_alloc().
 *
 * NOTE(review): the amdgpu_xcp_init() return value is ignored here —
 * presumably init_num_xcps is trusted to be in range; confirm.
 */
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
			int init_num_xcps,
			struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
	struct amdgpu_xcp_mgr *xcp_mgr;

	if (!xcp_funcs || !xcp_funcs->switch_partition_mode ||
	    !xcp_funcs->get_ip_details)
		return -EINVAL;

	xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);

	if (!xcp_mgr)
		return -ENOMEM;

	xcp_mgr->adev = adev;
	xcp_mgr->funcs = xcp_funcs;
	xcp_mgr->mode = init_mode;
	mutex_init(&xcp_mgr->xcp_lock);

	if (init_mode != AMDGPU_XCP_MODE_NONE)
		amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);

	/* Publish the manager before allocating partition drm nodes,
	 * which read adev->xcp_mgr.
	 */
	adev->xcp_mgr = xcp_mgr;

	return amdgpu_xcp_dev_alloc(adev);
}
306 | ||
307 | int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr, | |
308 | enum AMDGPU_XCP_IP_BLOCK ip, int instance) | |
309 | { | |
310 | struct amdgpu_xcp *xcp; | |
311 | int i, id_mask = 0; | |
312 | ||
313 | if (ip >= AMDGPU_XCP_MAX_BLOCKS) | |
314 | return -EINVAL; | |
315 | ||
316 | for (i = 0; i < xcp_mgr->num_xcps; ++i) { | |
317 | xcp = &xcp_mgr->xcp[i]; | |
318 | if ((xcp->valid) && (xcp->ip[ip].valid) && | |
319 | (xcp->ip[ip].inst_mask & BIT(instance))) | |
320 | id_mask |= BIT(i); | |
321 | } | |
322 | ||
323 | if (!id_mask) | |
324 | id_mask = -ENXIO; | |
325 | ||
326 | return id_mask; | |
327 | } | |
4bdca205 LL |
328 | |
329 | int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp, | |
330 | enum AMDGPU_XCP_IP_BLOCK ip, | |
331 | uint32_t *inst_mask) | |
332 | { | |
333 | if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid)) | |
334 | return -EINVAL; | |
335 | ||
336 | *inst_mask = xcp->ip[ip].inst_mask; | |
337 | ||
338 | return 0; | |
339 | } | |
2c1c7ba4 JZ |
340 | |
341 | int amdgpu_xcp_dev_register(struct amdgpu_device *adev, | |
342 | const struct pci_device_id *ent) | |
343 | { | |
344 | int i, ret; | |
345 | ||
346 | if (!adev->xcp_mgr) | |
347 | return 0; | |
348 | ||
150c2131 | 349 | for (i = 1; i < MAX_XCP; i++) { |
400a39f1 JZ |
350 | if (!adev->xcp_mgr->xcp[i].ddev) |
351 | break; | |
352 | ||
2c1c7ba4 JZ |
353 | ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data); |
354 | if (ret) | |
355 | return ret; | |
356 | } | |
357 | ||
358 | return 0; | |
359 | } | |
360 | ||
/* Tear down the partition drm devices created by amdgpu_xcp_dev_alloc().
 *
 * For each secondary xcp (index 1.., stopping at the first slot with no
 * ddev) the drm device is unplugged and the render/primary minor
 * back-pointers, driver and vma_offset_manager saved at alloc time are
 * restored.  xcp #0 shares adev's own drm device and needs no teardown.
 */
void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	int i;

	if (!adev->xcp_mgr)
		return;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		p_ddev = adev->xcp_mgr->xcp[i].ddev;
		drm_dev_unplug(p_ddev);
		/* Undo the redirection done in amdgpu_xcp_dev_alloc(). */
		p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
		p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
		p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
		p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
	}
}
381 | ||
be3800f5 JZ |
382 | int amdgpu_xcp_open_device(struct amdgpu_device *adev, |
383 | struct amdgpu_fpriv *fpriv, | |
384 | struct drm_file *file_priv) | |
385 | { | |
386 | int i; | |
387 | ||
388 | if (!adev->xcp_mgr) | |
389 | return 0; | |
390 | ||
18cf073f | 391 | fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION; |
be3800f5 JZ |
392 | for (i = 0; i < MAX_XCP; ++i) { |
393 | if (!adev->xcp_mgr->xcp[i].ddev) | |
394 | break; | |
395 | ||
396 | if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) { | |
397 | if (adev->xcp_mgr->xcp[i].valid == FALSE) { | |
398 | dev_err(adev->dev, "renderD%d partition %d not valid!", | |
399 | file_priv->minor->index, i); | |
400 | return -ENOENT; | |
401 | } | |
9f77af01 | 402 | dev_dbg(adev->dev, "renderD%d partition %d opened!", |
be3800f5 JZ |
403 | file_priv->minor->index, i); |
404 | fpriv->xcp_id = i; | |
405 | break; | |
406 | } | |
407 | } | |
934deb64 | 408 | |
18cf073f | 409 | fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 : |
934deb64 | 410 | adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id; |
be3800f5 JZ |
411 | return 0; |
412 | } | |
413 | ||
3e7c6fe3 JZ |
/* Drop the reference a context entity holds on its partition.
 *
 * Recovers the ring embedding the entity's scheduler and decrements
 * that ring's partition ref_cnt.  No-op without an XCP manager.
 *
 * NOTE(review): the decrement is skipped when sched->ready is false —
 * presumably the reference is only taken on ready schedulers; confirm
 * against the ref-acquisition path.
 */
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
			      struct amdgpu_ctx_entity *entity)
{
	struct drm_gpu_scheduler *sched;
	struct amdgpu_ring *ring;

	if (!adev->xcp_mgr)
		return;

	sched = entity->entity.rq->sched;
	if (sched->ready) {
		ring = to_amdgpu_ring(entity->entity.rq->sched);
		atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
	}
}
429 |