Commit | Line | Data |
---|---|---|
0ad35fed ZW |
1 | /* |
2 | * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
21 | * SOFTWARE. | |
12d14cc4 ZW |
22 | * |
23 | * Authors: | |
24 | * Kevin Tian <kevin.tian@intel.com> | |
25 | * Eddie Dong <eddie.dong@intel.com> | |
26 | * | |
27 | * Contributors: | |
28 | * Niu Bing <bing.niu@intel.com> | |
29 | * Zhi Wang <zhi.a.wang@intel.com> | |
30 | * | |
0ad35fed ZW |
31 | */ |
32 | ||
33 | #ifndef _GVT_H_ | |
34 | #define _GVT_H_ | |
35 | ||
36 | #include "debug.h" | |
37 | #include "hypercall.h" | |
12d14cc4 | 38 | #include "mmio.h" |
82d375d1 | 39 | #include "reg.h" |
c8fe6a68 | 40 | #include "interrupt.h" |
2707e444 | 41 | #include "gtt.h" |
04d348ae ZW |
42 | #include "display.h" |
43 | #include "edid.h" | |
8453d674 | 44 | #include "execlist.h" |
28c4c6ca | 45 | #include "scheduler.h" |
4b63960e | 46 | #include "sched_policy.h" |
1aec75ee | 47 | #include "mmio_context.h" |
be1da707 | 48 | #include "cmd_parser.h" |
9f31d106 | 49 | #include "fb_decoder.h" |
e546e281 | 50 | #include "dmabuf.h" |
e502a2af | 51 | #include "page_track.h" |
0ad35fed ZW |
52 | |
53 | #define GVT_MAX_VGPU 8 | |
54 | ||
/*
 * Host-side GVT singleton shared with the hypervisor mediation (MPT) layer.
 * There is exactly one instance (the extern below), set up at module init.
 */
struct intel_gvt_host {
	struct device *dev;
	bool initialized;		/* set once the host state is ready */
	int hypervisor_type;		/* backend identifier; values defined elsewhere */
	struct intel_gvt_mpt *mpt;	/* hypervisor service callback table */
};

extern struct intel_gvt_host intel_gvt_host;
63 | ||
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;		/* max number of vGPUs on this platform */
	u32 cfg_space_size;		/* PCI config space size, in bytes */
	u32 mmio_size;			/* MMIO region size, in bytes */
	u32 mmio_bar;			/* BAR index holding the MMIO region */
	unsigned long msi_cap_offset;	/* MSI capability offset in cfg space */
	u32 gtt_start_offset;		/* GTT offset within the MMIO BAR */
	u32 gtt_entry_size;		/* size of one GTT entry, in bytes */
	u32 gtt_entry_size_shift;	/* presumably log2(gtt_entry_size) — confirm */
	int gmadr_bytes_in_cmd;		/* bytes a graphics address occupies in a command */
	u32 max_surface_size;		/* upper bound on decoded surface size — confirm units */
};
77 | ||
/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;			/* size of the mappable (low) GM chunk */
	u64 hidden_sz;				/* size of the hidden (high) GM chunk */
	struct drm_mm_node low_gm_node;		/* allocation node in host low GM */
	struct drm_mm_node high_gm_node;	/* allocation node in host high GM */
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;	/* first host fence register index owned by this vGPU */
	u32 size;	/* number of fence registers owned */
};
94 | ||
82d375d1 ZW |
/* Virtual MMIO register space presented to the guest. */
struct intel_vgpu_mmio {
	void *vreg;	/* backing store for guest-visible registers (see vgpu_vreg*) */
};

#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;	/* BAR size in bytes */
	bool tracked;	/* whether this BAR's mapping is currently tracked — confirm */
};

/* Virtual PCI configuration space of a vGPU. */
struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

/* Shorthand for a vGPU's virtual PCI configuration space buffer. */
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
112 | ||
/* Virtual interrupt state of a vGPU. */
struct intel_vgpu_irq {
	/* rate-limits "unhandled event" warnings to once per event type */
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	/* pending flip-done events, one bitmap per display pipe */
	DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
		       INTEL_GVT_EVENT_MAX);
};
118 | ||
4d60c5fd | 119 | struct intel_vgpu_opregion { |
4dff110b | 120 | bool mapped; |
4d60c5fd ZW |
121 | void *va; |
122 | u32 gfn[INTEL_GVT_OPREGION_PAGES]; | |
4d60c5fd ZW |
123 | }; |
124 | ||
125 | #define vgpu_opregion(vgpu) (&(vgpu->opregion)) | |
126 | ||
/* Virtual display state owned by a vGPU. */
struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;		/* emulated I2C/EDID transaction state */
	struct intel_vgpu_port ports[I915_MAX_PORTS];	/* virtual display ports */
	struct intel_vgpu_sbi sbi;			/* emulated sideband interface */
};

/* Per-vGPU scheduling control, protected by sched_lock (see struct intel_vgpu). */
struct vgpu_sched_ctl {
	int weight;	/* relative scheduling weight (cf. intel_vgpu_type.weight) */
};
136 | ||
/* Virtual submission interfaces a guest may drive. */
enum {
	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
	INTEL_VGPU_GUC_SUBMISSION,
};

/* Callbacks implementing one virtual submission interface; each op acts
 * on the engines selected by @engine_mask.
 */
struct intel_vgpu_submission_ops {
	const char *name;
	int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
	void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
	void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
};
148 | ||
/* Per-vGPU workload submission state. */
struct intel_vgpu_submission {
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];	/* queued workloads per engine */
	struct intel_context *shadow[I915_NUM_ENGINES];		/* host shadow contexts */
	struct kmem_cache *workloads;		/* slab cache for workload objects */
	atomic_t running_workload_num;
	/* host context page-directory roots; which member applies
	 * presumably depends on the paging mode — confirm
	 */
	union {
		u64 i915_context_pml4;
		u64 i915_context_pdps[GEN8_3LVL_PDPES];
	};
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	void *ring_scan_buffer[I915_NUM_ENGINES];	/* scratch space for command scanning */
	int ring_scan_buffer_size[I915_NUM_ENGINES];
	const struct intel_vgpu_submission_ops *ops;	/* active submission interface */
	int virtual_submission_interface;	/* an INTEL_VGPU_*_SUBMISSION value — confirm */
	bool active;
	/* last context state observed per engine */
	struct {
		u32 lrca;
		bool valid;
		u64 ring_context_gpa;
	} last_ctx[I915_NUM_ENGINES];
};
172 | ||
/* All per-vGPU device state tracked by GVT-g. */
struct intel_vgpu {
	struct intel_gvt *gvt;		/* owning GVT device */
	struct mutex vgpu_lock;		/* protects this vGPU's state (see exceptions below) */
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;			/* participates in for_each_active_vgpu iteration */
	bool pv_notified;		/* guest has issued a paravirt notification — confirm */
	bool failsafe;			/* vGPU entered failsafe mode (see enter_failsafe_mode) */
	unsigned int resetting_eng;	/* mask of engines currently being reset */

	/* Both sched_data and sched_ctl can be seen a part of the global gvt
	 * scheduler structure. So below 2 vgpu data are protected
	 * by sched_lock, not vgpu_lock.
	 */
	void *sched_data;
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	struct radix_tree_root page_track_tree;	/* write-protected guest pages */
	u32 hws_pga[I915_NUM_ENGINES];	/* hardware status page addresses per engine */

	struct dentry *debugfs;		/* this vGPU's debugfs directory */

	/* Hypervisor-specific device state. */
	void *vdev;

	struct list_head dmabuf_obj_list_head;	/* exported dmabuf objects */
	struct mutex dmabuf_lock;		/* protects the dmabuf list/idr */
	struct idr object_idr;

	u32 scan_nonprivbb;	/* scan control for non-privileged batch buffers — confirm */
};
213 | ||
/* Return the hypervisor-specific per-vGPU state blob (opaque to GVT core). */
static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
{
	return vgpu->vdev;
}
218 | ||
/* validating GM healthy status: -EBADRQC or -EFAULT indicates a
 * guest-caused failure; callers presumably escalate to failsafe mode
 * (see enter_failsafe_mode) — confirm.
 */
#define vgpu_is_vm_unhealthy(ret_val) \
	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
222 | ||
/* Host-wide accounting of GM space handed out to vGPUs. */
struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

/* Host-wide accounting of fence registers handed out to vGPUs. */
struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

/* Special MMIO blocks. */
struct gvt_mmio_block {
	unsigned int device;	/* device/platform selector this block applies to — confirm */
	i915_reg_t offset;	/* first register of the block */
	unsigned int size;	/* block length — confirm units (bytes vs registers) */
	gvt_mmio_func read;	/* read handler covering the whole block */
	gvt_mmio_func write;	/* write handler covering the whole block */
};
240 | ||
#define INTEL_GVT_MMIO_HASH_BITS 11

/* MMIO tracking state: one attribute byte per 4-byte register
 * (helpers below index with offset >> 2), plus handler tables.
 */
struct intel_gvt_mmio {
	u8 *mmio_attribute;
/* Register contains RO bits */
#define F_RO (1 << 0)
/* Register contains graphics address */
#define F_GMADR (1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK (1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS (1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
/* This reg is saved/restored in context */
#define F_IN_CTX (1 << 7)

	struct gvt_mmio_block *mmio_block;	/* ranges with dedicated handlers */
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned long num_tracked_mmio;
};
268 | ||
/* Snapshots of host config space and MMIO; presumably used as pristine
 * defaults for vGPU state — confirm against firmware.c.
 */
struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};
274 | ||
#define NR_MAX_INTEL_VGPU_TYPES 20
/* Description of one creatable vGPU flavor. */
struct intel_vgpu_type {
	char name[16];
	unsigned int avail_instance;	/* how many more instances can be created */
	unsigned int low_gm_size;	/* low (mappable) GM size — confirm units */
	unsigned int high_gm_size;	/* high (hidden) GM size */
	unsigned int fence;		/* number of fence registers */
	unsigned int weight;		/* scheduling weight */
	enum intel_vgpu_edid resolution;	/* virtual display resolution */
};
285 | ||
/* Top-level GVT device state; one instance per mediated physical GPU. */
struct intel_gvt {
	/* GVT scope lock, protect GVT itself, and all resource currently
	 * not yet protected by special locks(vgpu and scheduler lock).
	 */
	struct mutex lock;
	/* scheduler scope lock, protect gvt and vgpu schedule related data */
	struct mutex sched_lock;

	struct intel_gt *gt;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);	/* command-handler lookup */
	struct intel_vgpu_type *types;	/* table built by intel_gvt_init_vgpu_types() */
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;	/* placeholder vGPU (see intel_gvt_create_idle_vgpu) */

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;

	/* service_request is always used in bit operation, we should always
	 * use it with atomic bit ops so that no need to use gvt big lock.
	 */
	unsigned long service_request;

	/* per-engine MMIO save/restore bookkeeping (see mmio_context.c) */
	struct {
		struct engine_mmio *mmio;
		int ctx_mmio_count[I915_NUM_ENGINES];
		u32 *tlb_mmio_offset_list;
		u32 tlb_mmio_offset_list_cnt;
		u32 *mocs_mmio_offset_list;
		u32 mocs_mmio_offset_list_cnt;
	} engine_mmio_list;

	struct dentry *debugfs_root;	/* GVT's top-level debugfs directory */
};
330 | ||
/* Fetch the GVT instance hanging off the i915 device private data. */
static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}
335 | ||
/* Service request bits handled by the GVT service thread. */
enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,

	/* Scheduling trigger by timer */
	INTEL_GVT_REQUEST_SCHED = 1,

	/* Scheduling trigger by event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 2,
};

/* Post a service request bit and wake the service thread to handle it. */
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	/* atomic bit op, so the gvt big lock is not needed here */
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
352 | ||
579cea5f ZW |
353 | void intel_gvt_free_firmware(struct intel_gvt *gvt); |
354 | int intel_gvt_load_firmware(struct intel_gvt *gvt); | |
355 | ||
1f31c829 ZW |
356 | /* Aperture/GM space definitions for GVT device */ |
357 | #define MB_TO_BYTES(mb) ((mb) << 20ULL) | |
358 | #define BYTES_TO_MB(b) ((b) >> 20ULL) | |
359 | ||
360 | #define HOST_LOW_GM_SIZE MB_TO_BYTES(128) | |
361 | #define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) | |
362 | #define HOST_FENCE 4 | |
363 | ||
a61ac1e7 CW |
364 | #define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt) |
365 | ||
28a60dee | 366 | /* Aperture/GM space definitions for GVT device */ |
a61ac1e7 CW |
367 | #define gvt_aperture_sz(gvt) gvt_to_ggtt(gvt)->mappable_end |
368 | #define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start | |
28a60dee | 369 | |
a61ac1e7 CW |
370 | #define gvt_ggtt_gm_sz(gvt) gvt_to_ggtt(gvt)->vm.total |
371 | #define gvt_ggtt_sz(gvt) (gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3) | |
372 | #define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt)) | |
28a60dee ZW |
373 | |
374 | #define gvt_aperture_gmadr_base(gvt) (0) | |
375 | #define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \ | |
376 | + gvt_aperture_sz(gvt) - 1) | |
377 | ||
378 | #define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \ | |
379 | + gvt_aperture_sz(gvt)) | |
380 | #define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \ | |
381 | + gvt_hidden_sz(gvt) - 1) | |
382 | ||
a61ac1e7 | 383 | #define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences) |
28a60dee ZW |
384 | |
385 | /* Aperture/GM space definitions for vGPU */ | |
386 | #define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start) | |
387 | #define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start) | |
388 | #define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz) | |
389 | #define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz) | |
390 | ||
391 | #define vgpu_aperture_pa_base(vgpu) \ | |
392 | (gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu)) | |
393 | ||
394 | #define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz) | |
395 | ||
396 | #define vgpu_aperture_pa_end(vgpu) \ | |
397 | (vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1) | |
398 | ||
399 | #define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu)) | |
400 | #define vgpu_aperture_gmadr_end(vgpu) \ | |
401 | (vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1) | |
402 | ||
403 | #define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu)) | |
404 | #define vgpu_hidden_gmadr_end(vgpu) \ | |
405 | (vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1) | |
406 | ||
407 | #define vgpu_fence_base(vgpu) (vgpu->fence.base) | |
408 | #define vgpu_fence_sz(vgpu) (vgpu->fence.size) | |
409 | ||
/* Parameters describing a vGPU to create; consumed by
 * intel_vgpu_alloc_resource() below.
 */
struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;		/* number of fence registers */
	__u64 resolution;	/* presumably an enum intel_vgpu_edid value — confirm */
	__s32 primary;
	__u64 vgpu_id;

	__u32 weight;		/* scheduling weight (cf. vgpu_sched_ctl) */
};
421 | ||
422 | int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, | |
423 | struct intel_vgpu_creation_params *param); | |
d22a48bf | 424 | void intel_vgpu_reset_resource(struct intel_vgpu *vgpu); |
28a60dee ZW |
425 | void intel_vgpu_free_resource(struct intel_vgpu *vgpu); |
426 | void intel_vgpu_write_fence(struct intel_vgpu *vgpu, | |
427 | u32 fence, u64 value); | |
428 | ||
/* Macros for easily accessing vGPU virtual/shadow registers.
 * Explicitly separate use for typed MMIO reg or real offset.
 * The vgpu argument is parenthesized so any expression may be passed.
 */
#define vgpu_vreg_t(vgpu, reg) \
	(*(u32 *)((vgpu)->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
	(*(u32 *)((vgpu)->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
	(*(u64 *)((vgpu)->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
	(*(u64 *)((vgpu)->mmio.vreg + (offset)))
82d375d1 ZW |
439 | |
/* Iterate over every vGPU in the IDR pool, skipping inactive ones. */
#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if((vgpu)->active)
443 | ||
444 | static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu, | |
445 | u32 offset, u32 val, bool low) | |
446 | { | |
447 | u32 *pval; | |
448 | ||
449 | /* BAR offset should be 32 bits algiend */ | |
450 | offset = rounddown(offset, 4); | |
451 | pval = (u32 *)(vgpu_cfg_space(vgpu) + offset); | |
452 | ||
453 | if (low) { | |
454 | /* | |
455 | * only update bit 31 - bit 4, | |
456 | * leave the bit 3 - bit 0 unchanged. | |
457 | */ | |
458 | *pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0)); | |
550dd77e XC |
459 | } else { |
460 | *pval = val; | |
82d375d1 ZW |
461 | } |
462 | } | |
463 | ||
1f31c829 ZW |
464 | int intel_gvt_init_vgpu_types(struct intel_gvt *gvt); |
465 | void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt); | |
82d375d1 | 466 | |
afe04fbe PG |
467 | struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt); |
468 | void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu); | |
1f31c829 ZW |
469 | struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, |
470 | struct intel_vgpu_type *type); | |
82d375d1 | 471 | void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); |
f9090d4c | 472 | void intel_gvt_release_vgpu(struct intel_vgpu *vgpu); |
cfe65f40 | 473 | void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, |
3a891a62 | 474 | intel_engine_mask_t engine_mask); |
9ec1e66b | 475 | void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); |
b79c52ae ZW |
476 | void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu); |
477 | void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu); | |
1f31c829 | 478 | |
/* validating GM functions.
 * The gmadr argument is parenthesized so any expression may be passed.
 */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	(((gmadr) >= vgpu_aperture_gmadr_base(vgpu)) && \
	 ((gmadr) <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	(((gmadr) >= vgpu_hidden_gmadr_base(vgpu)) && \
	 ((gmadr) <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	(((gmadr) >= gvt_aperture_gmadr_base(gvt)) && \
	 ((gmadr) <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	(((gmadr) >= gvt_hidden_gmadr_base(gvt)) && \
	 ((gmadr) <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))
503 | ||
504 | bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size); | |
505 | int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr); | |
506 | int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr); | |
507 | int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, | |
508 | unsigned long *h_index); | |
509 | int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, | |
510 | unsigned long *g_index); | |
4d60c5fd | 511 | |
536fc234 CD |
512 | void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, |
513 | bool primary); | |
c64ff6c7 CD |
514 | void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu); |
515 | ||
9ec1e66b | 516 | int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, |
4d60c5fd ZW |
517 | void *p_data, unsigned int bytes); |
518 | ||
9ec1e66b | 519 | int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, |
4d60c5fd ZW |
520 | void *p_data, unsigned int bytes); |
521 | ||
1ca20f33 HY |
522 | void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected); |
523 | ||
/* Read the guest physical base address programmed into a BAR at config
 * space offset @bar, masking off the PCI flag bits.
 */
static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
	/* We are 64bit bar. */
	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
}
530 | ||
4d60c5fd | 531 | void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu); |
4dff110b XZ |
532 | int intel_vgpu_init_opregion(struct intel_vgpu *vgpu); |
533 | int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa); | |
4d60c5fd ZW |
534 | |
535 | int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); | |
23736d1b | 536 | void populate_pvinfo_page(struct intel_vgpu *vgpu); |
4d60c5fd | 537 | |
89ea20b9 | 538 | int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload); |
e011c6ce | 539 | void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason); |
89ea20b9 | 540 | |
/* Callback table exported to hypervisor-specific (MPT) modules so they can
 * drive vGPU lifecycle and emulation without linking against GVT internals.
 */
struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
				struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *vgpu);
	void (*vgpu_release)(struct intel_vgpu *vgpu);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
			const char *name);
	bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
				unsigned int);
	void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
};
566 | ||
567 | ||
/* Reasons for putting a vGPU into failsafe mode (see enter_failsafe_mode()). */
enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
	GVT_FAILSAFE_GUEST_ERR,
};
573 | ||
/* Take a runtime-PM reference before touching physical MMIO. */
static inline void mmio_hw_access_pre(struct intel_gt *gt)
{
	intel_runtime_pm_get(gt->uncore->rpm);
}

/* Drop the runtime-PM reference taken by mmio_hw_access_pre(). */
static inline void mmio_hw_access_post(struct intel_gt *gt)
{
	intel_runtime_pm_put_unchecked(gt->uncore->rpm);
}
583 | ||
5c6d4c67 CD |
584 | /** |
585 | * intel_gvt_mmio_set_accessed - mark a MMIO has been accessed | |
586 | * @gvt: a GVT device | |
587 | * @offset: register offset | |
588 | * | |
589 | */ | |
590 | static inline void intel_gvt_mmio_set_accessed( | |
591 | struct intel_gvt *gvt, unsigned int offset) | |
592 | { | |
593 | gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED; | |
594 | } | |
595 | ||
/**
 * intel_gvt_mmio_is_cmd_access - check if a MMIO could be accessed by command
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_cmd_access(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_is_unalign - check if a MMIO could be accessed unaligned
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_unalign(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}

/**
 * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_accessed(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
}
631 | ||
632 | /** | |
633 | * intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask | |
634 | * @gvt: a GVT device | |
635 | * @offset: register offset | |
636 | * | |
637 | * Returns: | |
638 | * True if a MMIO has a mode mask in its higher 16 bits, false if it isn't. | |
639 | * | |
640 | */ | |
641 | static inline bool intel_gvt_mmio_has_mode_mask( | |
642 | struct intel_gvt *gvt, unsigned int offset) | |
643 | { | |
644 | return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK; | |
645 | } | |
646 | ||
/**
 * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if a MMIO has a in-context mask, false if it isn't.
 *
 */
static inline bool intel_gvt_mmio_is_in_ctx(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
}
661 | ||
662 | /** | |
663 | * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context | |
664 | * @gvt: a GVT device | |
665 | * @offset: register offset | |
666 | * | |
667 | */ | |
668 | static inline void intel_gvt_mmio_set_in_ctx( | |
669 | struct intel_gvt *gvt, unsigned int offset) | |
670 | { | |
671 | gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX; | |
672 | } | |
673 | ||
f8871ec8 | 674 | void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); |
bc7b0be3 | 675 | void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); |
f8871ec8 | 676 | void intel_gvt_debugfs_init(struct intel_gvt *gvt); |
bc7b0be3 CD |
677 | void intel_gvt_debugfs_clean(struct intel_gvt *gvt); |
678 | ||
679 | ||
7fb6a7d6 | 680 | #include "trace.h" |
0ad35fed ZW |
681 | #include "mpt.h" |
682 | ||
683 | #endif |