drm/i915/gvt: Use I915_GTT_PAGE_SIZE
drivers/gpu/drm/i915/gvt/cmd_parser.c
1 /*
2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Ke Yu
25  *    Kevin Tian <kevin.tian@intel.com>
26  *    Zhiyuan Lv <zhiyuan.lv@intel.com>
27  *
28  * Contributors:
29  *    Min He <min.he@intel.com>
30  *    Ping Gao <ping.a.gao@intel.com>
31  *    Tina Zhang <tina.zhang@intel.com>
32  *    Yulei Zhang <yulei.zhang@intel.com>
33  *    Zhi Wang <zhi.a.wang@intel.com>
34  *
35  */
36
37 #include <linux/slab.h>
38 #include "i915_drv.h"
39 #include "gvt.h"
40 #include "i915_pvinfo.h"
41 #include "trace.h"
42
43 #define INVALID_OP    (~0U)
44
45 #define OP_LEN_MI           9
46 #define OP_LEN_2D           10
47 #define OP_LEN_3D_MEDIA     16
48 #define OP_LEN_MFX_VC       16
49 #define OP_LEN_VEBOX        16
50
51 #define CMD_TYPE(cmd)   (((cmd) >> 29) & 7)
52
53 struct sub_op_bits {
54         int hi;
55         int low;
56 };
57 struct decode_info {
58         char *name;
59         int op_len;
60         int nr_sub_op;
61         struct sub_op_bits *sub_op;
62 };
63
64 #define   MAX_CMD_BUDGET                        0x7fffffff
65 #define   MI_WAIT_FOR_PLANE_C_FLIP_PENDING      (1<<15)
66 #define   MI_WAIT_FOR_PLANE_B_FLIP_PENDING      (1<<9)
67 #define   MI_WAIT_FOR_PLANE_A_FLIP_PENDING      (1<<1)
68
69 #define   MI_WAIT_FOR_SPRITE_C_FLIP_PENDING      (1<<20)
70 #define   MI_WAIT_FOR_SPRITE_B_FLIP_PENDING      (1<<10)
71 #define   MI_WAIT_FOR_SPRITE_A_FLIP_PENDING      (1<<2)
72
73 /* Render Command Map */
74
75 /* MI_* command Opcode (28:23) */
76 #define OP_MI_NOOP                          0x0
77 #define OP_MI_SET_PREDICATE                 0x1  /* HSW+ */
78 #define OP_MI_USER_INTERRUPT                0x2
79 #define OP_MI_WAIT_FOR_EVENT                0x3
80 #define OP_MI_FLUSH                         0x4
81 #define OP_MI_ARB_CHECK                     0x5
82 #define OP_MI_RS_CONTROL                    0x6  /* HSW+ */
83 #define OP_MI_REPORT_HEAD                   0x7
84 #define OP_MI_ARB_ON_OFF                    0x8
85 #define OP_MI_URB_ATOMIC_ALLOC              0x9  /* HSW+ */
86 #define OP_MI_BATCH_BUFFER_END              0xA
87 #define OP_MI_SUSPEND_FLUSH                 0xB
88 #define OP_MI_PREDICATE                     0xC  /* IVB+ */
89 #define OP_MI_TOPOLOGY_FILTER               0xD  /* IVB+ */
90 #define OP_MI_SET_APPID                     0xE  /* IVB+ */
91 #define OP_MI_RS_CONTEXT                    0xF  /* HSW+ */
92 #define OP_MI_LOAD_SCAN_LINES_INCL          0x12 /* HSW+ */
93 #define OP_MI_DISPLAY_FLIP                  0x14
94 #define OP_MI_SEMAPHORE_MBOX                0x16
95 #define OP_MI_SET_CONTEXT                   0x18
96 #define OP_MI_URB_CLEAR                     0x19
97 #define OP_MI_MATH                          0x1A
98 #define OP_MI_SEMAPHORE_SIGNAL              0x1B  /* BDW+ */
99 #define OP_MI_SEMAPHORE_WAIT                0x1C  /* BDW+ */
100
101 #define OP_MI_STORE_DATA_IMM                0x20
102 #define OP_MI_STORE_DATA_INDEX              0x21
103 #define OP_MI_LOAD_REGISTER_IMM             0x22
104 #define OP_MI_UPDATE_GTT                    0x23
105 #define OP_MI_STORE_REGISTER_MEM            0x24
106 #define OP_MI_FLUSH_DW                      0x26
107 #define OP_MI_CLFLUSH                       0x27
108 #define OP_MI_REPORT_PERF_COUNT             0x28
109 #define OP_MI_LOAD_REGISTER_MEM             0x29  /* HSW+ */
110 #define OP_MI_LOAD_REGISTER_REG             0x2A  /* HSW+ */
111 #define OP_MI_RS_STORE_DATA_IMM             0x2B  /* HSW+ */
112 #define OP_MI_LOAD_URB_MEM                  0x2C  /* HSW+ */
113 #define OP_MI_STORE_URM_MEM                 0x2D  /* HSW+ */
114 #define OP_MI_2E                            0x2E  /* BDW+ */
115 #define OP_MI_2F                            0x2F  /* BDW+ */
116 #define OP_MI_BATCH_BUFFER_START            0x31
117
118 /* Bit definition for dword 0 */
119 #define _CMDBIT_BB_START_IN_PPGTT       (1UL << 8)
120
121 #define OP_MI_CONDITIONAL_BATCH_BUFFER_END  0x36
122
123 #define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
124 #define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
125 #define BATCH_BUFFER_ADR_SPACE_BIT(x)   (((x) >> 8) & 1U)
126 #define BATCH_BUFFER_2ND_LEVEL_BIT(x)   (((x) >> 22) & 1U)
127
128 /* 2D command: Opcode (28:22) */
129 #define OP_2D(x)    ((2<<7) | (x))
130
131 #define OP_XY_SETUP_BLT                             OP_2D(0x1)
132 #define OP_XY_SETUP_CLIP_BLT                        OP_2D(0x3)
133 #define OP_XY_SETUP_MONO_PATTERN_SL_BLT             OP_2D(0x11)
134 #define OP_XY_PIXEL_BLT                             OP_2D(0x24)
135 #define OP_XY_SCANLINES_BLT                         OP_2D(0x25)
136 #define OP_XY_TEXT_BLT                              OP_2D(0x26)
137 #define OP_XY_TEXT_IMMEDIATE_BLT                    OP_2D(0x31)
138 #define OP_XY_COLOR_BLT                             OP_2D(0x50)
139 #define OP_XY_PAT_BLT                               OP_2D(0x51)
140 #define OP_XY_MONO_PAT_BLT                          OP_2D(0x52)
141 #define OP_XY_SRC_COPY_BLT                          OP_2D(0x53)
142 #define OP_XY_MONO_SRC_COPY_BLT                     OP_2D(0x54)
143 #define OP_XY_FULL_BLT                              OP_2D(0x55)
144 #define OP_XY_FULL_MONO_SRC_BLT                     OP_2D(0x56)
145 #define OP_XY_FULL_MONO_PATTERN_BLT                 OP_2D(0x57)
146 #define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT        OP_2D(0x58)
147 #define OP_XY_MONO_PAT_FIXED_BLT                    OP_2D(0x59)
148 #define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT           OP_2D(0x71)
149 #define OP_XY_PAT_BLT_IMMEDIATE                     OP_2D(0x72)
150 #define OP_XY_SRC_COPY_CHROMA_BLT                   OP_2D(0x73)
151 #define OP_XY_FULL_IMMEDIATE_PATTERN_BLT            OP_2D(0x74)
152 #define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT   OP_2D(0x75)
153 #define OP_XY_PAT_CHROMA_BLT                        OP_2D(0x76)
154 #define OP_XY_PAT_CHROMA_BLT_IMMEDIATE              OP_2D(0x77)
155
156 /* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
157 #define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
158         ((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
159
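/*
 * [Editor's note -- worked example, not part of the original file]
 * OP_3D_MEDIA() packs the fields into the same 16-bit layout that
 * get_opcode() below extracts from bits 31:16 of a GFXPIPE command:
 *
 *   OP_PIPE_CONTROL = OP_3D_MEDIA(0x3, 0x2, 0x00)
 *                   = (3 << 13) | (0x3 << 11) | (0x2 << 8) | 0x00
 *                   = 0x7a00
 *
 * A PIPE_CONTROL header DWord such as 0x7a000004 therefore yields
 * 0x7a000004 >> (32 - OP_LEN_3D_MEDIA) == 0x7a00, matching the key.
 */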
160 #define OP_STATE_PREFETCH                       OP_3D_MEDIA(0x0, 0x0, 0x03)
161
162 #define OP_STATE_BASE_ADDRESS                   OP_3D_MEDIA(0x0, 0x1, 0x01)
163 #define OP_STATE_SIP                            OP_3D_MEDIA(0x0, 0x1, 0x02)
164 #define OP_3D_MEDIA_0_1_4                       OP_3D_MEDIA(0x0, 0x1, 0x04)
165
166 #define OP_3DSTATE_VF_STATISTICS_GM45           OP_3D_MEDIA(0x1, 0x0, 0x0B)
167
168 #define OP_PIPELINE_SELECT                      OP_3D_MEDIA(0x1, 0x1, 0x04)
169
170 #define OP_MEDIA_VFE_STATE                      OP_3D_MEDIA(0x2, 0x0, 0x0)
171 #define OP_MEDIA_CURBE_LOAD                     OP_3D_MEDIA(0x2, 0x0, 0x1)
172 #define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
173 #define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
174 #define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)
175
176 #define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
177 #define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
178 #define OP_MEDIA_OBJECT_WALKER                  OP_3D_MEDIA(0x2, 0x1, 0x3)
179 #define OP_GPGPU_WALKER                         OP_3D_MEDIA(0x2, 0x1, 0x5)
180
181 #define OP_3DSTATE_CLEAR_PARAMS                 OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
182 #define OP_3DSTATE_DEPTH_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
183 #define OP_3DSTATE_STENCIL_BUFFER               OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
184 #define OP_3DSTATE_HIER_DEPTH_BUFFER            OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
185 #define OP_3DSTATE_VERTEX_BUFFERS               OP_3D_MEDIA(0x3, 0x0, 0x08)
186 #define OP_3DSTATE_VERTEX_ELEMENTS              OP_3D_MEDIA(0x3, 0x0, 0x09)
187 #define OP_3DSTATE_INDEX_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x0A)
188 #define OP_3DSTATE_VF_STATISTICS                OP_3D_MEDIA(0x3, 0x0, 0x0B)
189 #define OP_3DSTATE_VF                           OP_3D_MEDIA(0x3, 0x0, 0x0C)  /* HSW+ */
190 #define OP_3DSTATE_CC_STATE_POINTERS            OP_3D_MEDIA(0x3, 0x0, 0x0E)
191 #define OP_3DSTATE_SCISSOR_STATE_POINTERS       OP_3D_MEDIA(0x3, 0x0, 0x0F)
192 #define OP_3DSTATE_VS                           OP_3D_MEDIA(0x3, 0x0, 0x10)
193 #define OP_3DSTATE_GS                           OP_3D_MEDIA(0x3, 0x0, 0x11)
194 #define OP_3DSTATE_CLIP                         OP_3D_MEDIA(0x3, 0x0, 0x12)
195 #define OP_3DSTATE_SF                           OP_3D_MEDIA(0x3, 0x0, 0x13)
196 #define OP_3DSTATE_WM                           OP_3D_MEDIA(0x3, 0x0, 0x14)
197 #define OP_3DSTATE_CONSTANT_VS                  OP_3D_MEDIA(0x3, 0x0, 0x15)
198 #define OP_3DSTATE_CONSTANT_GS                  OP_3D_MEDIA(0x3, 0x0, 0x16)
199 #define OP_3DSTATE_CONSTANT_PS                  OP_3D_MEDIA(0x3, 0x0, 0x17)
200 #define OP_3DSTATE_SAMPLE_MASK                  OP_3D_MEDIA(0x3, 0x0, 0x18)
201 #define OP_3DSTATE_CONSTANT_HS                  OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
202 #define OP_3DSTATE_CONSTANT_DS                  OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
203 #define OP_3DSTATE_HS                           OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
204 #define OP_3DSTATE_TE                           OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
205 #define OP_3DSTATE_DS                           OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
206 #define OP_3DSTATE_STREAMOUT                    OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
207 #define OP_3DSTATE_SBE                          OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
208 #define OP_3DSTATE_PS                           OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
209 #define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
210 #define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC   OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
211 #define OP_3DSTATE_BLEND_STATE_POINTERS         OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
212 #define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
213 #define OP_3DSTATE_BINDING_TABLE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
214 #define OP_3DSTATE_BINDING_TABLE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
215 #define OP_3DSTATE_BINDING_TABLE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
216 #define OP_3DSTATE_BINDING_TABLE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
217 #define OP_3DSTATE_BINDING_TABLE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
218 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
219 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
220 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
221 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
222 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
223 #define OP_3DSTATE_URB_VS                       OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
224 #define OP_3DSTATE_URB_HS                       OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
225 #define OP_3DSTATE_URB_DS                       OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
226 #define OP_3DSTATE_URB_GS                       OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
227 #define OP_3DSTATE_GATHER_CONSTANT_VS           OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
228 #define OP_3DSTATE_GATHER_CONSTANT_GS           OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
229 #define OP_3DSTATE_GATHER_CONSTANT_HS           OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
230 #define OP_3DSTATE_GATHER_CONSTANT_DS           OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
231 #define OP_3DSTATE_GATHER_CONSTANT_PS           OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
232 #define OP_3DSTATE_DX9_CONSTANTF_VS             OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
233 #define OP_3DSTATE_DX9_CONSTANTF_PS             OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
234 #define OP_3DSTATE_DX9_CONSTANTI_VS             OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
235 #define OP_3DSTATE_DX9_CONSTANTI_PS             OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
236 #define OP_3DSTATE_DX9_CONSTANTB_VS             OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
237 #define OP_3DSTATE_DX9_CONSTANTB_PS             OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
238 #define OP_3DSTATE_DX9_LOCAL_VALID_VS           OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
239 #define OP_3DSTATE_DX9_LOCAL_VALID_PS           OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
240 #define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS       OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
241 #define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS       OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
242 #define OP_3DSTATE_BINDING_TABLE_EDIT_VS        OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
243 #define OP_3DSTATE_BINDING_TABLE_EDIT_GS        OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
244 #define OP_3DSTATE_BINDING_TABLE_EDIT_HS        OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
245 #define OP_3DSTATE_BINDING_TABLE_EDIT_DS        OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
246 #define OP_3DSTATE_BINDING_TABLE_EDIT_PS        OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
247
248 #define OP_3DSTATE_VF_INSTANCING                OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
249 #define OP_3DSTATE_VF_SGVS                      OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
250 #define OP_3DSTATE_VF_TOPOLOGY                  OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
251 #define OP_3DSTATE_WM_CHROMAKEY                 OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
252 #define OP_3DSTATE_PS_BLEND                     OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
253 #define OP_3DSTATE_WM_DEPTH_STENCIL             OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
254 #define OP_3DSTATE_PS_EXTRA                     OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
255 #define OP_3DSTATE_RASTER                       OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
256 #define OP_3DSTATE_SBE_SWIZ                     OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
257 #define OP_3DSTATE_WM_HZ_OP                     OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
258 #define OP_3DSTATE_COMPONENT_PACKING            OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */
259
260 #define OP_3DSTATE_DRAWING_RECTANGLE            OP_3D_MEDIA(0x3, 0x1, 0x00)
261 #define OP_3DSTATE_SAMPLER_PALETTE_LOAD0        OP_3D_MEDIA(0x3, 0x1, 0x02)
262 #define OP_3DSTATE_CHROMA_KEY                   OP_3D_MEDIA(0x3, 0x1, 0x04)
263 #define OP_SNB_3DSTATE_DEPTH_BUFFER             OP_3D_MEDIA(0x3, 0x1, 0x05)
264 #define OP_3DSTATE_POLY_STIPPLE_OFFSET          OP_3D_MEDIA(0x3, 0x1, 0x06)
265 #define OP_3DSTATE_POLY_STIPPLE_PATTERN         OP_3D_MEDIA(0x3, 0x1, 0x07)
266 #define OP_3DSTATE_LINE_STIPPLE                 OP_3D_MEDIA(0x3, 0x1, 0x08)
267 #define OP_3DSTATE_AA_LINE_PARAMS               OP_3D_MEDIA(0x3, 0x1, 0x0A)
268 #define OP_3DSTATE_GS_SVB_INDEX                 OP_3D_MEDIA(0x3, 0x1, 0x0B)
269 #define OP_3DSTATE_SAMPLER_PALETTE_LOAD1        OP_3D_MEDIA(0x3, 0x1, 0x0C)
270 #define OP_3DSTATE_MULTISAMPLE_BDW              OP_3D_MEDIA(0x3, 0x0, 0x0D)
271 #define OP_SNB_3DSTATE_STENCIL_BUFFER           OP_3D_MEDIA(0x3, 0x1, 0x0E)
272 #define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER        OP_3D_MEDIA(0x3, 0x1, 0x0F)
273 #define OP_SNB_3DSTATE_CLEAR_PARAMS             OP_3D_MEDIA(0x3, 0x1, 0x10)
274 #define OP_3DSTATE_MONOFILTER_SIZE              OP_3D_MEDIA(0x3, 0x1, 0x11)
275 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS       OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
276 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS       OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
277 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS       OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
278 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS       OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
279 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS       OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
280 #define OP_3DSTATE_SO_DECL_LIST                 OP_3D_MEDIA(0x3, 0x1, 0x17)
281 #define OP_3DSTATE_SO_BUFFER                    OP_3D_MEDIA(0x3, 0x1, 0x18)
282 #define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC     OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
283 #define OP_3DSTATE_GATHER_POOL_ALLOC            OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
284 #define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
285 #define OP_3DSTATE_SAMPLE_PATTERN               OP_3D_MEDIA(0x3, 0x1, 0x1C)
286 #define OP_PIPE_CONTROL                         OP_3D_MEDIA(0x3, 0x2, 0x00)
287 #define OP_3DPRIMITIVE                          OP_3D_MEDIA(0x3, 0x3, 0x00)
288
289 /* VCCP Command Parser */
290
291 /*
292  * The MFX and VEB command definitions below are taken from the
293  * vaapi intel-driver project (BSD license):
294  * git://anongit.freedesktop.org/vaapi/intel-driver
295  * src/i965_defines.h
296  */
297
298 #define OP_MFX(pipeline, op, sub_opa, sub_opb)     \
299         (3 << 13 | \
300          (pipeline) << 11 | \
301          (op) << 8 | \
302          (sub_opa) << 5 | \
303          (sub_opb))
304
305 #define OP_MFX_PIPE_MODE_SELECT                    OP_MFX(2, 0, 0, 0)  /* ALL */
306 #define OP_MFX_SURFACE_STATE                       OP_MFX(2, 0, 0, 1)  /* ALL */
307 #define OP_MFX_PIPE_BUF_ADDR_STATE                 OP_MFX(2, 0, 0, 2)  /* ALL */
308 #define OP_MFX_IND_OBJ_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 3)  /* ALL */
309 #define OP_MFX_BSP_BUF_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 4)  /* ALL */
310 #define OP_2_0_0_5                                 OP_MFX(2, 0, 0, 5)  /* ALL */
311 #define OP_MFX_STATE_POINTER                       OP_MFX(2, 0, 0, 6)  /* ALL */
312 #define OP_MFX_QM_STATE                            OP_MFX(2, 0, 0, 7)  /* IVB+ */
313 #define OP_MFX_FQM_STATE                           OP_MFX(2, 0, 0, 8)  /* IVB+ */
314 #define OP_MFX_PAK_INSERT_OBJECT                   OP_MFX(2, 0, 2, 8)  /* IVB+ */
315 #define OP_MFX_STITCH_OBJECT                       OP_MFX(2, 0, 2, 0xA)  /* IVB+ */
316
317 #define OP_MFD_IT_OBJECT                           OP_MFX(2, 0, 1, 9) /* ALL */
318
319 #define OP_MFX_WAIT                                OP_MFX(1, 0, 0, 0) /* IVB+ */
320 #define OP_MFX_AVC_IMG_STATE                       OP_MFX(2, 1, 0, 0) /* ALL */
321 #define OP_MFX_AVC_QM_STATE                        OP_MFX(2, 1, 0, 1) /* ALL */
322 #define OP_MFX_AVC_DIRECTMODE_STATE                OP_MFX(2, 1, 0, 2) /* ALL */
323 #define OP_MFX_AVC_SLICE_STATE                     OP_MFX(2, 1, 0, 3) /* ALL */
324 #define OP_MFX_AVC_REF_IDX_STATE                   OP_MFX(2, 1, 0, 4) /* ALL */
325 #define OP_MFX_AVC_WEIGHTOFFSET_STATE              OP_MFX(2, 1, 0, 5) /* ALL */
326 #define OP_MFD_AVC_PICID_STATE                     OP_MFX(2, 1, 1, 5) /* HSW+ */
327 #define OP_MFD_AVC_DPB_STATE                       OP_MFX(2, 1, 1, 6) /* IVB+ */
328 #define OP_MFD_AVC_SLICEADDR                       OP_MFX(2, 1, 1, 7) /* IVB+ */
329 #define OP_MFD_AVC_BSD_OBJECT                      OP_MFX(2, 1, 1, 8) /* ALL */
330 #define OP_MFC_AVC_PAK_OBJECT                      OP_MFX(2, 1, 2, 9) /* ALL */
331
332 #define OP_MFX_VC1_PRED_PIPE_STATE                 OP_MFX(2, 2, 0, 1) /* ALL */
333 #define OP_MFX_VC1_DIRECTMODE_STATE                OP_MFX(2, 2, 0, 2) /* ALL */
334 #define OP_MFD_VC1_SHORT_PIC_STATE                 OP_MFX(2, 2, 1, 0) /* IVB+ */
335 #define OP_MFD_VC1_LONG_PIC_STATE                  OP_MFX(2, 2, 1, 1) /* IVB+ */
336 #define OP_MFD_VC1_BSD_OBJECT                      OP_MFX(2, 2, 1, 8) /* ALL */
337
338 #define OP_MFX_MPEG2_PIC_STATE                     OP_MFX(2, 3, 0, 0) /* ALL */
339 #define OP_MFX_MPEG2_QM_STATE                      OP_MFX(2, 3, 0, 1) /* ALL */
340 #define OP_MFD_MPEG2_BSD_OBJECT                    OP_MFX(2, 3, 1, 8) /* ALL */
341 #define OP_MFC_MPEG2_SLICEGROUP_STATE              OP_MFX(2, 3, 2, 3) /* ALL */
342 #define OP_MFC_MPEG2_PAK_OBJECT                    OP_MFX(2, 3, 2, 9) /* ALL */
343
344 #define OP_MFX_2_6_0_0                             OP_MFX(2, 6, 0, 0) /* IVB+ */
345 #define OP_MFX_2_6_0_8                             OP_MFX(2, 6, 0, 8) /* IVB+ */
346 #define OP_MFX_2_6_0_9                             OP_MFX(2, 6, 0, 9) /* IVB+ */
347
348 #define OP_MFX_JPEG_PIC_STATE                      OP_MFX(2, 7, 0, 0)
349 #define OP_MFX_JPEG_HUFF_TABLE_STATE               OP_MFX(2, 7, 0, 2)
350 #define OP_MFD_JPEG_BSD_OBJECT                     OP_MFX(2, 7, 1, 8)
351
352 #define OP_VEB(pipeline, op, sub_opa, sub_opb) \
353         (3 << 13 | \
354          (pipeline) << 11 | \
355          (op) << 8 | \
356          (sub_opa) << 5 | \
357          (sub_opb))
358
359 #define OP_VEB_SURFACE_STATE                       OP_VEB(2, 4, 0, 0)
360 #define OP_VEB_STATE                               OP_VEB(2, 4, 0, 2)
361 #define OP_VEB_DNDI_IECP_STATE                     OP_VEB(2, 4, 0, 3)
362
363 struct parser_exec_state;
364
365 typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
366
367 #define GVT_CMD_HASH_BITS   7
368
369 /* which DWords need address fix */
370 #define ADDR_FIX_1(x1)                  (1 << (x1))
371 #define ADDR_FIX_2(x1, x2)              (ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
372 #define ADDR_FIX_3(x1, x2, x3)          (ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
373 #define ADDR_FIX_4(x1, x2, x3, x4)      (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
374 #define ADDR_FIX_5(x1, x2, x3, x4, x5)  (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
375
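/*
 * [Editor's note -- worked example, not part of the original file]
 * ADDR_FIX_n() builds the addr_bitmap below: one bit per command DWord
 * that carries a graphics memory address. For instance:
 *
 *   ADDR_FIX_2(1, 2) == (1 << 1) | (1 << 2) == 0x6
 *
 * i.e. DWords 1 and 2 of the command hold addresses that the scanner
 * must audit (and, for shadowed buffers, possibly patch).
 */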
376 struct cmd_info {
377         char *name;
378         u32 opcode;
379
380 #define F_LEN_MASK      (1U<<0)
381 #define F_LEN_CONST  1U
382 #define F_LEN_VAR    0U
383
384 /*
385  * the command has its own IP advance logic,
386  * e.g. MI_BATCH_BUFFER_START, MI_BATCH_BUFFER_END
387  */
388 #define F_IP_ADVANCE_CUSTOM (1<<1)
389
390 #define F_POST_HANDLE   (1<<2)
391         u32 flag;
392
393 #define R_RCS   (1 << RCS)
394 #define R_VCS1  (1 << VCS)
395 #define R_VCS2  (1 << VCS2)
396 #define R_VCS   (R_VCS1 | R_VCS2)
397 #define R_BCS   (1 << BCS)
398 #define R_VECS  (1 << VECS)
399 #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
400         /* rings that support this cmd: BLT/RCS/VCS/VECS */
401         uint16_t rings;
402
403         /* devices that support this cmd: SNB/IVB/HSW/... */
404         uint16_t devices;
405
406         /* which DWords are addresses that need fixing up.
407          * bit == 0 means a 32-bit non-address operand in the command;
408          * bit == 1 means an address operand, which can be 32-bit or
409          * 64-bit depending on the architecture (defined by
410          * "gmadr_bytes_in_cmd" in intel_gvt). Regardless of the
411          * address length, each address takes only one bit in the
412          * bitmap.
413          */
414         uint16_t addr_bitmap;
415
416         /* flag == F_LEN_CONST : command length
417          * flag == F_LEN_VAR   : number of length bias bits
418          * Note: length is counted in DWords
419          */
420         uint8_t len;
421
422         parser_cmd_handler handler;
423 };
424
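/*
 * [Editor's sketch -- hypothetical entry, not taken from this file's
 * command table; the .devices value is a placeholder where the real
 * table uses D_* device masks]
 * A fixed-length MI command would be described roughly like this:
 */
static struct cmd_info example_cmd_noop = {
	.name = "MI_NOOP",
	.opcode = OP_MI_NOOP,
	.flag = F_LEN_CONST,
	.rings = R_ALL,
	.devices = 0xffff,	/* placeholder for a D_* device mask */
	.addr_bitmap = 0,	/* no address operands to fix up */
	.len = 1,		/* fixed length: 1 DWord */
	.handler = NULL,	/* no special handling needed */
};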
425 struct cmd_entry {
426         struct hlist_node hlist;
427         struct cmd_info *info;
428 };
429
430 enum {
431         RING_BUFFER_INSTRUCTION,
432         BATCH_BUFFER_INSTRUCTION,
433         BATCH_BUFFER_2ND_LEVEL,
434 };
435
436 enum {
437         GTT_BUFFER,
438         PPGTT_BUFFER
439 };
440
441 struct parser_exec_state {
442         struct intel_vgpu *vgpu;
443         int ring_id;
444
445         int buf_type;
446
447         /* batch buffer address type */
448         int buf_addr_type;
449
450         /* graphics memory address of ring buffer start */
451         unsigned long ring_start;
452         unsigned long ring_size;
453         unsigned long ring_head;
454         unsigned long ring_tail;
455
456         /* instruction graphics memory address */
457         unsigned long ip_gma;
458
459         /* mapped va of ip_gma */
460         void *ip_va;
461         void *rb_va;
462
463         void *ret_bb_va;
464         /* next instruction when returning from batch buffer to ring buffer */
465         unsigned long ret_ip_gma_ring;
466
467         /* next instruction when returning from 2nd level batch buffer to batch buffer */
468         unsigned long ret_ip_gma_bb;
469
470         /* batch buffer address type (GTT or PPGTT),
471          * restored when returning from a 2nd level batch buffer
472          */
473         int saved_buf_addr_type;
474
475         struct cmd_info *info;
476
477         struct intel_vgpu_workload *workload;
478 };
479
480 #define gmadr_dw_number(s)      \
481         (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
482
483 static unsigned long bypass_scan_mask;
484
485 /* ring ALL, type = 0 */
486 static struct sub_op_bits sub_op_mi[] = {
487         {31, 29},
488         {28, 23},
489 };
490
491 static struct decode_info decode_info_mi = {
492         "MI",
493         OP_LEN_MI,
494         ARRAY_SIZE(sub_op_mi),
495         sub_op_mi,
496 };
497
498 /* ring BCS, command type 2 */
499 static struct sub_op_bits sub_op_2d[] = {
500         {31, 29},
501         {28, 22},
502 };
503
504 static struct decode_info decode_info_2d = {
505         "2D",
506         OP_LEN_2D,
507         ARRAY_SIZE(sub_op_2d),
508         sub_op_2d,
509 };
510
511 /* ring RCS, command type 3 */
512 static struct sub_op_bits sub_op_3d_media[] = {
513         {31, 29},
514         {28, 27},
515         {26, 24},
516         {23, 16},
517 };
518
519 static struct decode_info decode_info_3d_media = {
520         "3D_Media",
521         OP_LEN_3D_MEDIA,
522         ARRAY_SIZE(sub_op_3d_media),
523         sub_op_3d_media,
524 };
525
526 /* ring VCS, command type 3 */
527 static struct sub_op_bits sub_op_mfx_vc[] = {
528         {31, 29},
529         {28, 27},
530         {26, 24},
531         {23, 21},
532         {20, 16},
533 };
534
535 static struct decode_info decode_info_mfx_vc = {
536         "MFX_VC",
537         OP_LEN_MFX_VC,
538         ARRAY_SIZE(sub_op_mfx_vc),
539         sub_op_mfx_vc,
540 };
541
542 /* ring VECS, command type 3 */
543 static struct sub_op_bits sub_op_vebox[] = {
544         {31, 29},
545         {28, 27},
546         {26, 24},
547         {23, 21},
548         {20, 16},
549 };
550
551 static struct decode_info decode_info_vebox = {
552         "VEBOX",
553         OP_LEN_VEBOX,
554         ARRAY_SIZE(sub_op_vebox),
555         sub_op_vebox,
556 };
557
558 static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
559         [RCS] = {
560                 &decode_info_mi,
561                 NULL,
562                 NULL,
563                 &decode_info_3d_media,
564                 NULL,
565                 NULL,
566                 NULL,
567                 NULL,
568         },
569
570         [VCS] = {
571                 &decode_info_mi,
572                 NULL,
573                 NULL,
574                 &decode_info_mfx_vc,
575                 NULL,
576                 NULL,
577                 NULL,
578                 NULL,
579         },
580
581         [BCS] = {
582                 &decode_info_mi,
583                 NULL,
584                 &decode_info_2d,
585                 NULL,
586                 NULL,
587                 NULL,
588                 NULL,
589                 NULL,
590         },
591
592         [VECS] = {
593                 &decode_info_mi,
594                 NULL,
595                 NULL,
596                 &decode_info_vebox,
597                 NULL,
598                 NULL,
599                 NULL,
600                 NULL,
601         },
602
603         [VCS2] = {
604                 &decode_info_mi,
605                 NULL,
606                 NULL,
607                 &decode_info_mfx_vc,
608                 NULL,
609                 NULL,
610                 NULL,
611                 NULL,
612         },
613 };
614
615 static inline u32 get_opcode(u32 cmd, int ring_id)
616 {
617         struct decode_info *d_info;
618
619         d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
620         if (d_info == NULL)
621                 return INVALID_OP;
622
623         return cmd >> (32 - d_info->op_len);
624 }
625
626 static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
627                 unsigned int opcode, int ring_id)
628 {
629         struct cmd_entry *e;
630
631         hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
632                 if ((opcode == e->info->opcode) &&
633                                 (e->info->rings & (1 << ring_id)))
634                         return e->info;
635         }
636         return NULL;
637 }
638
639 static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
640                 u32 cmd, int ring_id)
641 {
642         u32 opcode;
643
644         opcode = get_opcode(cmd, ring_id);
645         if (opcode == INVALID_OP)
646                 return NULL;
647
648         return find_cmd_entry(gvt, opcode, ring_id);
649 }
650
651 static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
652 {
653         return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
654 }
655
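/*
 * [Editor's note -- worked example, not part of the original file]
 * sub_op_val() extracts an inclusive bit-field. For the PIPE_CONTROL
 * header 0x7a000004 and the 3D_Media sub-op table above:
 *
 *   sub_op_val(0x7a000004, 31, 29) == 0x3   (command type: GFXPIPE)
 *   sub_op_val(0x7a000004, 28, 27) == 0x3   (pipeline: 3D)
 *   sub_op_val(0x7a000004, 26, 24) == 0x2   (opcode: PIPE_CONTROL)
 */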
656 static inline void print_opcode(u32 cmd, int ring_id)
657 {
658         struct decode_info *d_info;
659         int i;
660
661         d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
662         if (d_info == NULL)
663                 return;
664
665         gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
666                         cmd >> (32 - d_info->op_len), d_info->name);
667
668         for (i = 0; i < d_info->nr_sub_op; i++)
669                 pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
670                                         d_info->sub_op[i].low));
671
672         pr_err("\n");
673 }
674
675 static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
676 {
677         return s->ip_va + (index << 2);
678 }
679
680 static inline u32 cmd_val(struct parser_exec_state *s, int index)
681 {
682         return *cmd_ptr(s, index);
683 }
684
685 static void parser_exec_state_dump(struct parser_exec_state *s)
686 {
687         int cnt = 0;
688         int i;
689
690         gvt_dbg_cmd("  vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
691                         " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
692                         s->ring_id, s->ring_start, s->ring_start + s->ring_size,
693                         s->ring_head, s->ring_tail);
694
695         gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
696                         s->buf_type == RING_BUFFER_INSTRUCTION ?
697                         "RING_BUFFER" : "BATCH_BUFFER",
698                         s->buf_addr_type == GTT_BUFFER ?
699                         "GTT" : "PPGTT", s->ip_gma);
700
701         if (s->ip_va == NULL) {
702                 gvt_dbg_cmd(" ip_va(NULL)");
703                 return;
704         }
705
706         gvt_dbg_cmd("  ip_va=%p: %08x %08x %08x %08x\n",
707                         s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
708                         cmd_val(s, 2), cmd_val(s, 3));
709
710         print_opcode(cmd_val(s, 0), s->ring_id);
711
712         /* dump the whole page to the kernel log */
713         pr_err("    ip_va=%p: %08x %08x %08x %08x\n",
714                         s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
715                         cmd_val(s, 2), cmd_val(s, 3));
716
717         s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12); /* align down to the 4K page */
718
719         while (cnt < 1024) {
720                 pr_err("ip_va=%p: ", s->ip_va);
721                 for (i = 0; i < 8; i++)
722                         pr_err("%08x ", cmd_val(s, i));
723                 pr_err("\n");
724
725                 s->ip_va += 8 * sizeof(u32);
726                 cnt += 8;
727         }
728 }
729
730 static inline void update_ip_va(struct parser_exec_state *s)
731 {
732         unsigned long len = 0;
733
734         if (WARN_ON(s->ring_head == s->ring_tail))
735                 return;
736
737         if (s->buf_type == RING_BUFFER_INSTRUCTION) {
738                 unsigned long ring_top = s->ring_start + s->ring_size;
739
740                 if (s->ring_head > s->ring_tail) {
741                         if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
742                                 len = (s->ip_gma - s->ring_head);
743                         else if (s->ip_gma >= s->ring_start &&
744                                         s->ip_gma <= s->ring_tail)
745                                 len = (ring_top - s->ring_head) +
746                                         (s->ip_gma - s->ring_start);
747                 } else
748                         len = (s->ip_gma - s->ring_head);
749
750                 s->ip_va = s->rb_va + len;
751         } else {/* shadow batch buffer */
752                 s->ip_va = s->ret_bb_va;
753         }
754 }
755
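/*
 * [Editor's note -- worked example, not part of the original file]
 * update_ip_va() turns ip_gma into an offset from ring_head, handling
 * the ring wrap. With ring_start = 0x1000, ring_size = 0x1000
 * (so ring_top = 0x2000), ring_head = 0x1f00 and ring_tail = 0x1100:
 *
 *   ip_gma = 0x1f80 (before the wrap): len = 0x1f80 - 0x1f00 = 0x80
 *   ip_gma = 0x1040 (after the wrap):  len = (0x2000 - 0x1f00)
 *                                          + (0x1040 - 0x1000) = 0x140
 *
 * and ip_va = rb_va + len in both cases.
 */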
756 static inline int ip_gma_set(struct parser_exec_state *s,
757                 unsigned long ip_gma)
758 {
759         WARN_ON(!IS_ALIGNED(ip_gma, 4));
760
761         s->ip_gma = ip_gma;
762         update_ip_va(s);
763         return 0;
764 }
765
766 static inline int ip_gma_advance(struct parser_exec_state *s,
767                 unsigned int dw_len)
768 {
769         s->ip_gma += (dw_len << 2);
770
771         if (s->buf_type == RING_BUFFER_INSTRUCTION) {
772                 if (s->ip_gma >= s->ring_start + s->ring_size)
773                         s->ip_gma -= s->ring_size;
774                 update_ip_va(s);
775         } else {
776                 s->ip_va += (dw_len << 2);
777         }
778
779         return 0;
780 }
781
782 static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
783 {
784         if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
785                 return info->len;
786         else
787                 return (cmd & ((1U << info->len) - 1)) + 2;
789 }
790
791 static inline int cmd_length(struct parser_exec_state *s)
792 {
793         return get_cmd_length(s->info, cmd_val(s, 0));
794 }
795
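/*
 * [Editor's note -- worked example, not part of the original file]
 * For F_LEN_VAR commands, info->len is the number of low header bits
 * that hold "DWord Length", and the +2 is the length bias. E.g. with
 * info->len == 8 and a header whose low byte is 0x04:
 *
 *   get_cmd_length() == (0x04 & 0xff) + 2 == 6 DWords total.
 */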
796 /* do not remove this, some platforms may need clflush here */
797 #define patch_value(s, addr, val) do { \
798         *addr = val; \
799 } while (0)
800
801 static bool is_shadowed_mmio(unsigned int offset)
802 {
803         bool ret = false;
804
805         if ((offset == 0x2168) || /* BB current head register UDW */
806             (offset == 0x2140) || /* BB current head register */
807             (offset == 0x211c) || /* second BB head register UDW */
808             (offset == 0x2114)) { /* second BB head register */
809                 ret = true;
810         }
811         return ret;
812 }
813
814 static inline bool is_force_nonpriv_mmio(unsigned int offset)
815 {
816         return (offset >= 0x24d0 && offset < 0x2500);
817 }
818
819 static int force_nonpriv_reg_handler(struct parser_exec_state *s,
820                                      unsigned int offset, unsigned int index)
821 {
822         struct intel_gvt *gvt = s->vgpu->gvt;
823         unsigned int data = cmd_val(s, index + 1);
824
825         if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
826                 gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
827                         offset, data);
828                 return -EPERM;
829         }
830         return 0;
831 }
832
833 static int cmd_reg_handler(struct parser_exec_state *s,
834         unsigned int offset, unsigned int index, char *cmd)
835 {
836         struct intel_vgpu *vgpu = s->vgpu;
837         struct intel_gvt *gvt = vgpu->gvt;
838
839         if (offset + 4 > gvt->device_info.mmio_size) {
840                 gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
841                                 cmd, offset);
842                 return -EFAULT;
843         }
844
845         if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
846                 gvt_vgpu_err("%s access to non-render register (%x)\n",
847                                 cmd, offset);
848                 return 0;
849         }
850
851         if (is_shadowed_mmio(offset)) {
852                 gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
853                 return 0;
854         }
855
856         if (is_force_nonpriv_mmio(offset) &&
857                 force_nonpriv_reg_handler(s, offset, index))
858                 return -EPERM;
859
860         if (offset == i915_mmio_reg_offset(DERRMR) ||
861                 offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
862                 /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
863                 patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
864         }
865
866         /* TODO: Update the global mask if this MMIO is a masked-MMIO */
867         intel_gvt_mmio_set_cmd_accessed(gvt, offset);
868         return 0;
869 }
870
871 #define cmd_reg(s, i) \
872         (cmd_val(s, i) & GENMASK(22, 2))
873
874 #define cmd_reg_inhibit(s, i) \
875         (cmd_val(s, i) & GENMASK(22, 18))
876
877 #define cmd_gma(s, i) \
878         (cmd_val(s, i) & GENMASK(31, 2))
879
880 #define cmd_gma_hi(s, i) \
881         (cmd_val(s, i) & GENMASK(15, 0))
882
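/*
 * [Editor's note -- illustrative layout, not part of the original file]
 * MI_LOAD_REGISTER_IMM carries (offset, value) pairs after the header,
 * which is why the handlers below step through the payload two DWords
 * at a time:
 *
 *   DW0: header (opcode, DWord length)
 *   DW1: register offset  -> cmd_reg(s, 1)
 *   DW2: value            -> cmd_val(s, 2)
 *   DW3: next register offset, and so on.
 */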
883 static int cmd_handler_lri(struct parser_exec_state *s)
884 {
885         int i, ret = 0;
886         int cmd_len = cmd_length(s);
887         struct intel_gvt *gvt = s->vgpu->gvt;
888
889         for (i = 1; i < cmd_len; i += 2) {
890                 if (IS_BROADWELL(gvt->dev_priv) &&
891                                 (s->ring_id != RCS)) {
892                         if (s->ring_id == BCS &&
893                                         cmd_reg(s, i) ==
894                                         i915_mmio_reg_offset(DERRMR))
895                                 ret |= 0; /* DERRMR write on BCS is allowed */
896                         else
897                                 ret |= (cmd_reg_inhibit(s, i)) ?
898                                         -EBADRQC : 0;
899                 }
900                 if (ret)
901                         break;
902                 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
903                 if (ret)
904                         break;
905         }
906         return ret;
907 }
908
909 static int cmd_handler_lrr(struct parser_exec_state *s)
910 {
911         int i, ret = 0;
912         int cmd_len = cmd_length(s);
913
914         for (i = 1; i < cmd_len; i += 2) {
915                 if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
916                         ret |= ((cmd_reg_inhibit(s, i) ||
917                                         (cmd_reg_inhibit(s, i + 1)))) ?
918                                 -EBADRQC : 0;
919                 if (ret)
920                         break;
921                 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
922                 if (ret)
923                         break;
924                 ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
925                 if (ret)
926                         break;
927         }
928         return ret;
929 }
930
931 static inline int cmd_address_audit(struct parser_exec_state *s,
932                 unsigned long guest_gma, int op_size, bool index_mode);
933
934 static int cmd_handler_lrm(struct parser_exec_state *s)
935 {
936         struct intel_gvt *gvt = s->vgpu->gvt;
937         int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
938         unsigned long gma;
939         int i, ret = 0;
940         int cmd_len = cmd_length(s);
941
942         for (i = 1; i < cmd_len;) {
943                 if (IS_BROADWELL(gvt->dev_priv))
944                         ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
945                 if (ret)
946                         break;
947                 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
948                 if (ret)
949                         break;
950                 if (cmd_val(s, 0) & (1 << 22)) { /* use global GTT */
951                         gma = cmd_gma(s, i + 1);
952                         if (gmadr_bytes == 8)
953                                 gma |= (cmd_gma_hi(s, i + 2)) << 32;
954                         ret |= cmd_address_audit(s, gma, sizeof(u32), false);
955                         if (ret)
956                                 break;
957                 }
958                 i += gmadr_dw_number(s) + 1;
959         }
960         return ret;
961 }
962
963 static int cmd_handler_srm(struct parser_exec_state *s)
964 {
965         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
966         unsigned long gma;
967         int i, ret = 0;
968         int cmd_len = cmd_length(s);
969
970         for (i = 1; i < cmd_len;) {
971                 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
972                 if (ret)
973                         break;
974                 if (cmd_val(s, 0) & (1 << 22)) { /* use global GTT */
975                         gma = cmd_gma(s, i + 1);
976                         if (gmadr_bytes == 8)
977                                 gma |= (cmd_gma_hi(s, i + 2)) << 32;
978                         ret |= cmd_address_audit(s, gma, sizeof(u32), false);
979                         if (ret)
980                                 break;
981                 }
982                 i += gmadr_dw_number(s) + 1;
983         }
984         return ret;
985 }
986
987 struct cmd_interrupt_event {
988         int pipe_control_notify;
989         int mi_flush_dw;
990         int mi_user_interrupt;
991 };
992
993 static struct cmd_interrupt_event cmd_interrupt_events[] = {
994         [RCS] = {
995                 .pipe_control_notify = RCS_PIPE_CONTROL,
996                 .mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
997                 .mi_user_interrupt = RCS_MI_USER_INTERRUPT,
998         },
999         [BCS] = {
1000                 .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1001                 .mi_flush_dw = BCS_MI_FLUSH_DW,
1002                 .mi_user_interrupt = BCS_MI_USER_INTERRUPT,
1003         },
1004         [VCS] = {
1005                 .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1006                 .mi_flush_dw = VCS_MI_FLUSH_DW,
1007                 .mi_user_interrupt = VCS_MI_USER_INTERRUPT,
1008         },
1009         [VCS2] = {
1010                 .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1011                 .mi_flush_dw = VCS2_MI_FLUSH_DW,
1012                 .mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
1013         },
1014         [VECS] = {
1015                 .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1016                 .mi_flush_dw = VECS_MI_FLUSH_DW,
1017                 .mi_user_interrupt = VECS_MI_USER_INTERRUPT,
1018         },
1019 };
1020
1021 static int cmd_handler_pipe_control(struct parser_exec_state *s)
1022 {
1023         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1024         unsigned long gma;
1025         bool index_mode = false;
1026         unsigned int post_sync;
1027         int ret = 0;
1028
1029         post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
1030
1031         /* LRI post sync */
1032         if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
1033                 ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
1034         /* post sync */
1035         else if (post_sync) {
1036                 if (post_sync == 2)
1037                         ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
1038                 else if (post_sync == 3)
1039                         ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
1040                 else if (post_sync == 1) {
1041                         /* check ggtt */
1042                         if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
1043                                 gma = cmd_val(s, 2) & GENMASK(31, 3);
1044                                 if (gmadr_bytes == 8)
1045                                         gma |= (cmd_gma_hi(s, 3)) << 32;
1046                                 /* Store Data Index */
1047                                 if (cmd_val(s, 1) & (1 << 21))
1048                                         index_mode = true;
1049                                 ret |= cmd_address_audit(s, gma, sizeof(u64),
1050                                                 index_mode);
1051                         }
1052                 }
1053         }
1054
1055         if (ret)
1056                 return ret;
1057
1058         if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
1059                 set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
1060                                 s->workload->pending_events);
1061         return 0;
1062 }
1063
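/*
 * [Editor's note -- not part of the original file] DW1 bits 15:14 of
 * PIPE_CONTROL select the post-sync operation: 0 = none, 1 = write
 * immediate data, 2 = write PS depth count (0x2350), 3 = write
 * timestamp (0x2358). Only post_sync == 1 carries a caller-supplied
 * address, so it is the only case that is address-audited; with
 * "Store Data Index" (DW1 bit 21) set, the "address" is instead a
 * QWord index into a 4K page, hence the I915_GTT_PAGE_SIZE /
 * sizeof(u64) bound in cmd_address_audit().
 */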
1064 static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
1065 {
1066         set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
1067                         s->workload->pending_events);
1068         return 0;
1069 }
1070
1071 static int cmd_advance_default(struct parser_exec_state *s)
1072 {
1073         return ip_gma_advance(s, cmd_length(s));
1074 }
1075
1076 static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
1077 {
1078         int ret;
1079
1080         if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1081                 s->buf_type = BATCH_BUFFER_INSTRUCTION;
1082                 ret = ip_gma_set(s, s->ret_ip_gma_bb);
1083                 s->buf_addr_type = s->saved_buf_addr_type;
1084         } else {
1085                 s->buf_type = RING_BUFFER_INSTRUCTION;
1086                 s->buf_addr_type = GTT_BUFFER;
1087                 if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
1088                         s->ret_ip_gma_ring -= s->ring_size;
1089                 ret = ip_gma_set(s, s->ret_ip_gma_ring);
1090         }
1091         return ret;
1092 }
1093
1094 struct mi_display_flip_command_info {
1095         int pipe;
1096         int plane;
1097         int event;
1098         i915_reg_t stride_reg;
1099         i915_reg_t ctrl_reg;
1100         i915_reg_t surf_reg;
1101         u64 stride_val;
1102         u64 tile_val;
1103         u64 surf_val;
1104         bool async_flip;
1105 };
1106
1107 struct plane_code_mapping {
1108         int pipe;
1109         int plane;
1110         int event;
1111 };
1112
1113 static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
1114                 struct mi_display_flip_command_info *info)
1115 {
1116         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1117         struct plane_code_mapping gen8_plane_code[] = {
1118                 [0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
1119                 [1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
1120                 [2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
1121                 [3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
1122                 [4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
1123                 [5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
1124         };
1125         u32 dword0, dword1, dword2;
1126         u32 v;
1127
1128         dword0 = cmd_val(s, 0);
1129         dword1 = cmd_val(s, 1);
1130         dword2 = cmd_val(s, 2);
1131
1132         v = (dword0 & GENMASK(21, 19)) >> 19;
1133         if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
1134                 return -EBADRQC;
1135
1136         info->pipe = gen8_plane_code[v].pipe;
1137         info->plane = gen8_plane_code[v].plane;
1138         info->event = gen8_plane_code[v].event;
1139         info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
1140         info->tile_val = (dword1 & 0x1);
1141         info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
1142         info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
1143
1144         if (info->plane == PLANE_A) {
1145                 info->ctrl_reg = DSPCNTR(info->pipe);
1146                 info->stride_reg = DSPSTRIDE(info->pipe);
1147                 info->surf_reg = DSPSURF(info->pipe);
1148         } else if (info->plane == PLANE_B) {
1149                 info->ctrl_reg = SPRCTL(info->pipe);
1150                 info->stride_reg = SPRSTRIDE(info->pipe);
1151                 info->surf_reg = SPRSURF(info->pipe);
1152         } else {
1153                 WARN_ON(1);
1154                 return -EBADRQC;
1155         }
1156         return 0;
1157 }
1158
1159 static int skl_decode_mi_display_flip(struct parser_exec_state *s,
1160                 struct mi_display_flip_command_info *info)
1161 {
1162         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1163         struct intel_vgpu *vgpu = s->vgpu;
1164         u32 dword0 = cmd_val(s, 0);
1165         u32 dword1 = cmd_val(s, 1);
1166         u32 dword2 = cmd_val(s, 2);
1167         u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
1168
1169         info->plane = PRIMARY_PLANE;
1170
1171         switch (plane) {
1172         case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
1173                 info->pipe = PIPE_A;
1174                 info->event = PRIMARY_A_FLIP_DONE;
1175                 break;
1176         case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
1177                 info->pipe = PIPE_B;
1178                 info->event = PRIMARY_B_FLIP_DONE;
1179                 break;
1180         case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
1181                 info->pipe = PIPE_C;
1182                 info->event = PRIMARY_C_FLIP_DONE;
1183                 break;
1184
1185         case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
1186                 info->pipe = PIPE_A;
1187                 info->event = SPRITE_A_FLIP_DONE;
1188                 info->plane = SPRITE_PLANE;
1189                 break;
1190         case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
1191                 info->pipe = PIPE_B;
1192                 info->event = SPRITE_B_FLIP_DONE;
1193                 info->plane = SPRITE_PLANE;
1194                 break;
1195         case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
1196                 info->pipe = PIPE_C;
1197                 info->event = SPRITE_C_FLIP_DONE;
1198                 info->plane = SPRITE_PLANE;
1199                 break;
1200
1201         default:
1202                 gvt_vgpu_err("unknown plane code %d\n", plane);
1203                 return -EBADRQC;
1204         }
1205
1206         info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
1207         info->tile_val = (dword1 & GENMASK(2, 0));
1208         info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
1209         info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
1210
1211         info->ctrl_reg = DSPCNTR(info->pipe);
1212         info->stride_reg = DSPSTRIDE(info->pipe);
1213         info->surf_reg = DSPSURF(info->pipe);
1214
1215         return 0;
1216 }
1217
1218 static int gen8_check_mi_display_flip(struct parser_exec_state *s,
1219                 struct mi_display_flip_command_info *info)
1220 {
1221         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1222         u32 stride, tile;
1223
1224         if (!info->async_flip)
1225                 return 0;
1226
1227         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
1228                 stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
1229                 tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
1230                                 GENMASK(12, 10)) >> 10;
1231         } else {
1232                 stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
1233                                 GENMASK(15, 6)) >> 6;
1234                 tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
1235         }
1236
1237         if (stride != info->stride_val)
1238                 gvt_dbg_cmd("cannot change stride during async flip\n");
1239
1240         if (tile != info->tile_val)
1241                 gvt_dbg_cmd("cannot change tile during async flip\n");
1242
1243         return 0;
1244 }
1245
1246 static int gen8_update_plane_mmio_from_mi_display_flip(
1247                 struct parser_exec_state *s,
1248                 struct mi_display_flip_command_info *info)
1249 {
1250         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1251         struct intel_vgpu *vgpu = s->vgpu;
1252
1253         set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
1254                       info->surf_val << 12);
1255         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
1256                 set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
1257                               info->stride_val);
1258                 set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
1259                               info->tile_val << 10);
1260         } else {
1261                 set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
1262                               info->stride_val << 6);
1263                 set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
1264                               info->tile_val << 10);
1265         }
1266
1267         vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
1268         intel_vgpu_trigger_virtual_event(vgpu, info->event);
1269         return 0;
1270 }
1271
1272 static int decode_mi_display_flip(struct parser_exec_state *s,
1273                 struct mi_display_flip_command_info *info)
1274 {
1275         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1276
1277         if (IS_BROADWELL(dev_priv))
1278                 return gen8_decode_mi_display_flip(s, info);
1279         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
1280                 return skl_decode_mi_display_flip(s, info);
1281
1282         return -ENODEV;
1283 }
1284
1285 static int check_mi_display_flip(struct parser_exec_state *s,
1286                 struct mi_display_flip_command_info *info)
1287 {
1288         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1289
1290         if (IS_BROADWELL(dev_priv)
1291                 || IS_SKYLAKE(dev_priv)
1292                 || IS_KABYLAKE(dev_priv))
1293                 return gen8_check_mi_display_flip(s, info);
1294         return -ENODEV;
1295 }
1296
1297 static int update_plane_mmio_from_mi_display_flip(
1298                 struct parser_exec_state *s,
1299                 struct mi_display_flip_command_info *info)
1300 {
1301         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1302
1303         if (IS_BROADWELL(dev_priv)
1304                 || IS_SKYLAKE(dev_priv)
1305                 || IS_KABYLAKE(dev_priv))
1306                 return gen8_update_plane_mmio_from_mi_display_flip(s, info);
1307         return -ENODEV;
1308 }
1309
1310 static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
1311 {
1312         struct mi_display_flip_command_info info;
1313         struct intel_vgpu *vgpu = s->vgpu;
1314         int ret;
1315         int i;
1316         int len = cmd_length(s);
1317
1318         ret = decode_mi_display_flip(s, &info);
1319         if (ret) {
1320                 gvt_vgpu_err("fail to decode MI display flip command\n");
1321                 return ret;
1322         }
1323
1324         ret = check_mi_display_flip(s, &info);
1325         if (ret) {
1326                 gvt_vgpu_err("invalid MI display flip command\n");
1327                 return ret;
1328         }
1329
1330         ret = update_plane_mmio_from_mi_display_flip(s, &info);
1331         if (ret) {
1332                 gvt_vgpu_err("fail to update plane mmio\n");
1333                 return ret;
1334         }
1335
1336         for (i = 0; i < len; i++)
1337                 patch_value(s, cmd_ptr(s, i), MI_NOOP);
1338         return 0;
1339 }
1340
1341 static bool is_wait_for_flip_pending(u32 cmd)
1342 {
1343         return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
1344                         MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
1345                         MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
1346                         MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
1347                         MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
1348                         MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
1349 }
1350
1351 static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
1352 {
1353         u32 cmd = cmd_val(s, 0);
1354
1355         if (!is_wait_for_flip_pending(cmd))
1356                 return 0;
1357
1358         patch_value(s, cmd_ptr(s, 0), MI_NOOP);
1359         return 0;
1360 }
1361
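/*
 * Extract the batch buffer graphics memory address from the command,
 * starting at the given dword index, handling both 4-byte and 8-byte
 * command address layouts.
 */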
1362 static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
1363 {
1364         unsigned long addr;
1365         unsigned long gma_high, gma_low;
1366         struct intel_vgpu *vgpu = s->vgpu;
1367         int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1368
1369         if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
1370                 gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
1371                 return INTEL_GVT_INVALID_ADDR;
1372         }
1373
1374         gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
1375         if (gmadr_bytes == 4) {
1376                 addr = gma_low;
1377         } else {
1378                 gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
1379                 addr = (((unsigned long)gma_high) << 32) | gma_low;
1380         }
1381         return addr;
1382 }
1383
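/*
 * Audit a graphics memory address referenced by a command. In index
 * mode the value is an index into a single page of qwords; otherwise
 * the whole [guest_gma, guest_gma + op_size) range must fall inside
 * the vGPU's GGTT. On failure the offending command is dumped.
 */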
1384 static inline int cmd_address_audit(struct parser_exec_state *s,
1385                 unsigned long guest_gma, int op_size, bool index_mode)
1386 {
1387         struct intel_vgpu *vgpu = s->vgpu;
1388         u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
1389         int i;
1390         int ret;
1391
1392         if (op_size > max_surface_size) {
1393                 gvt_vgpu_err("command address audit fail name %s\n",
1394                         s->info->name);
1395                 return -EFAULT;
1396         }
1397
1398         if (index_mode) {
1399                 if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
1400                         ret = -EFAULT;
1401                         goto err;
1402                 }
1403         } else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
1404                 ret = -EFAULT;
1405                 goto err;
1406         }
1407
1408         return 0;
1409
1410 err:
1411         gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
1412                         s->info->name, guest_gma, op_size);
1413
1414         pr_err("cmd dump: ");
1415         for (i = 0; i < cmd_length(s); i++) {
1416                 if (!(i % 4))
1417                         pr_err("\n%08x ", cmd_val(s, i));
1418                 else
1419                         pr_err("%08x ", cmd_val(s, i));
1420         }
1421         pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
1422                         vgpu->id,
1423                         vgpu_aperture_gmadr_base(vgpu),
1424                         vgpu_aperture_gmadr_end(vgpu),
1425                         vgpu_hidden_gmadr_base(vgpu),
1426                         vgpu_hidden_gmadr_end(vgpu));
1427         return ret;
1428 }
1429
1430 static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
1431 {
1432         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1433         int op_size = (cmd_length(s) - 3) * sizeof(u32);
1434         int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
1435         unsigned long gma, gma_low, gma_high;
1436         int ret = 0;
1437
1438         /* check ppgtt */
1439         if (!(cmd_val(s, 0) & (1 << 22)))
1440                 return 0;
1441
1442         gma = cmd_val(s, 2) & GENMASK(31, 2);
1443
1444         if (gmadr_bytes == 8) {
1445                 gma_low = cmd_val(s, 1) & GENMASK(31, 2);
1446                 gma_high = cmd_val(s, 2) & GENMASK(15, 0);
1447                 gma = (gma_high << 32) | gma_low;
1448                 core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
1449         }
1450         ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
1451         return ret;
1452 }
1453
1454 static inline int unexpected_cmd(struct parser_exec_state *s)
1455 {
1456         struct intel_vgpu *vgpu = s->vgpu;
1457
1458         gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
1459
1460         return -EBADRQC;
1461 }
1462
1463 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
1464 {
1465         return unexpected_cmd(s);
1466 }
1467
1468 static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
1469 {
1470         return unexpected_cmd(s);
1471 }
1472
1473 static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
1474 {
1475         return unexpected_cmd(s);
1476 }
1477
1478 static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
1479 {
1480         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1481         int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
1482                         sizeof(u32);
1483         unsigned long gma, gma_high;
1484         int ret = 0;
1485
1486         if (!(cmd_val(s, 0) & (1 << 22)))
1487                 return ret;
1488
1489         gma = cmd_val(s, 1) & GENMASK(31, 2);
1490         if (gmadr_bytes == 8) {
1491                 gma_high = cmd_val(s, 2) & GENMASK(15, 0);
1492                 gma = (gma_high << 32) | gma;
1493         }
1494         ret = cmd_address_audit(s, gma, op_size, false);
1495         return ret;
1496 }
1497
1498 static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
1499 {
1500         return unexpected_cmd(s);
1501 }
1502
1503 static int cmd_handler_mi_clflush(struct parser_exec_state *s)
1504 {
1505         return unexpected_cmd(s);
1506 }
1507
1508 static int cmd_handler_mi_conditional_batch_buffer_end(
1509                 struct parser_exec_state *s)
1510 {
1511         return unexpected_cmd(s);
1512 }
1513
1514 static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
1515 {
1516         return unexpected_cmd(s);
1517 }
1518
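/*
 * MI_FLUSH_DW: audit the post-sync write address when one is present,
 * and when the notify bit is set record the pending mi_flush_dw
 * interrupt event for this ring.
 */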
1519 static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
1520 {
1521         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1522         unsigned long gma;
1523         bool index_mode = false;
1524         int ret = 0;
1525
1526         /* Check post-sync and ppgtt bit */
1527         if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
1528                 gma = cmd_val(s, 1) & GENMASK(31, 3);
1529                 if (gmadr_bytes == 8)
1530                         gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
1531                 /* Store Data Index */
1532                 if (cmd_val(s, 0) & (1 << 21))
1533                         index_mode = true;
1534                 ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
1535         }
1536         /* Check notify bit */
1537         if ((cmd_val(s, 0) & (1 << 8)))
1538                 set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
1539                                 s->workload->pending_events);
1540         return ret;
1541 }
1542
1543 static void addr_type_update_snb(struct parser_exec_state *s)
1544 {
1545         if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
1546                         (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
1547                 s->buf_addr_type = PPGTT_BUFFER;
1548         }
1549 }
1550
1551
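/*
 * Copy a guest graphics memory range [gma, end_gma) into host virtual
 * memory at @va, translating each page through the given mm. Returns
 * the number of bytes copied, or -EFAULT on an invalid gma.
 */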
1552 static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
1553                 unsigned long gma, unsigned long end_gma, void *va)
1554 {
1555         unsigned long copy_len, offset;
1556         unsigned long len = 0;
1557         unsigned long gpa;
1558
1559         while (gma != end_gma) {
1560                 gpa = intel_vgpu_gma_to_gpa(mm, gma);
1561                 if (gpa == INTEL_GVT_INVALID_ADDR) {
1562                         gvt_vgpu_err("invalid gma address: %lx\n", gma);
1563                         return -EFAULT;
1564                 }
1565
1566                 offset = gma & (I915_GTT_PAGE_SIZE - 1);
1567
1568                 copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
1569                         I915_GTT_PAGE_SIZE - offset : end_gma - gma;
1570
1571                 intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
1572
1573                 len += copy_len;
1574                 gma += copy_len;
1575         }
1576         return len;
1577 }
1578
1579
1580 /*
1581  * Check whether a batch buffer needs to be scanned. Currently
1582  * the only criterion is privilege.
1583  */
1584 static int batch_buffer_needs_scan(struct parser_exec_state *s)
1585 {
1586         struct intel_gvt *gvt = s->vgpu->gvt;
1587
1588         if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
1589                 || IS_KABYLAKE(gvt->dev_priv)) {
1590                 /* BDW decides privilege based on address space */
1591                 if (cmd_val(s, 0) & (1 << 8))
1592                         return 0;
1593         }
1594         return 1;
1595 }
1596
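/*
 * Walk the guest batch buffer from its start address, decoding each
 * command's length, until MI_BATCH_BUFFER_END or a chained (non
 * second-level) MI_BATCH_BUFFER_START is found, and return the total
 * size in bytes through @bb_size.
 */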
1597 static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
1598 {
1599         unsigned long gma = 0;
1600         struct cmd_info *info;
1601         uint32_t cmd_len = 0;
1602         bool bb_end = false;
1603         struct intel_vgpu *vgpu = s->vgpu;
1604         u32 cmd;
1605
1606         *bb_size = 0;
1607
1608         /* get the start gm address of the batch buffer */
1609         gma = get_gma_bb_from_cmd(s, 1);
1610         if (gma == INTEL_GVT_INVALID_ADDR)
1611                 return -EFAULT;
1612
1613         cmd = cmd_val(s, 0);
1614         info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1615         if (info == NULL) {
1616                 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1617                                 cmd, get_opcode(cmd, s->ring_id));
1618                 return -EBADRQC;
1619         }
1620         do {
1621                 if (copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
1622                                 gma, gma + 4, &cmd) < 0)
1623                         return -EFAULT;
1624                 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1625                 if (info == NULL) {
1626                         gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1627                                 cmd, get_opcode(cmd, s->ring_id));
1628                         return -EBADRQC;
1629                 }
1630
1631                 if (info->opcode == OP_MI_BATCH_BUFFER_END) {
1632                         bb_end = true;
1633                 } else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
1634                         if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
1635                                 /* chained batch buffer */
1636                                 bb_end = true;
1637                 }
1638                 cmd_len = get_cmd_length(info, cmd) << 2;
1639                 *bb_size += cmd_len;
1640                 gma += cmd_len;
1641         } while (!bb_end);
1642
1643         return 0;
1644 }
1645
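/*
 * Shadow the guest batch buffer: allocate a GEM object large enough to
 * hold it, copy the guest contents into it, queue it on the workload's
 * shadow_bb list, and point the parser at the shadow copy (ip_va)
 * while keeping the original guest address in ip_gma.
 */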
1646 static int perform_bb_shadow(struct parser_exec_state *s)
1647 {
1648         struct intel_vgpu *vgpu = s->vgpu;
1649         struct intel_vgpu_shadow_bb *bb;
1650         unsigned long gma = 0;
1651         unsigned long bb_size;
1652         int ret = 0;
1653
1654         /* get the start gm address of the batch buffer */
1655         gma = get_gma_bb_from_cmd(s, 1);
1656         if (gma == INTEL_GVT_INVALID_ADDR)
1657                 return -EFAULT;
1658
1659         ret = find_bb_size(s, &bb_size);
1660         if (ret)
1661                 return ret;
1662
1663         bb = kzalloc(sizeof(*bb), GFP_KERNEL);
1664         if (!bb)
1665                 return -ENOMEM;
1666
1667         bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
1668                                          roundup(bb_size, PAGE_SIZE));
1669         if (IS_ERR(bb->obj)) {
1670                 ret = PTR_ERR(bb->obj);
1671                 goto err_free_bb;
1672         }
1673
1674         ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
1675         if (ret)
1676                 goto err_free_obj;
1677
1678         bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
1679         if (IS_ERR(bb->va)) {
1680                 ret = PTR_ERR(bb->va);
1681                 goto err_finish_shmem_access;
1682         }
1683
1684         if (bb->clflush & CLFLUSH_BEFORE) {
1685                 drm_clflush_virt_range(bb->va, bb->obj->base.size);
1686                 bb->clflush &= ~CLFLUSH_BEFORE;
1687         }
1688
1689         ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
1690                               gma, gma + bb_size,
1691                               bb->va);
1692         if (ret < 0) {
1693                 gvt_vgpu_err("fail to copy guest batch buffer\n");
1694                 ret = -EFAULT;
1695                 goto err_unmap;
1696         }
1697
1698         INIT_LIST_HEAD(&bb->list);
1699         list_add(&bb->list, &s->workload->shadow_bb);
1700
1701         bb->accessing = true;
1702         bb->bb_start_cmd_va = s->ip_va;
1703
1704         /*
1705          * ip_va saves the virtual address of the shadow batch buffer, while
1706          * ip_gma saves the graphics address of the original batch buffer.
1707          * As the shadow batch buffer is just a copy of the original one,
1708          * it is safe to use the shadow batch buffer's va together with the
1709          * original batch buffer's gma. After all, we don't want to pin the
1710          * shadow buffer here (too early).
1711          */
1712         s->ip_va = bb->va;
1713         s->ip_gma = gma;
1714         return 0;
1715 err_unmap:
1716         i915_gem_object_unpin_map(bb->obj);
1717 err_finish_shmem_access:
1718         i915_gem_obj_finish_shmem_access(bb->obj);
1719 err_free_obj:
1720         i915_gem_object_put(bb->obj);
1721 err_free_bb:
1722         kfree(bb);
1723         return ret;
1724 }
1725
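/*
 * MI_BATCH_BUFFER_START: reject nesting beyond the second level, save
 * the return address for the current buffer, then either shadow the
 * target batch buffer for scanning or, when no scan is needed, emulate
 * a batch buffer end so execution returns correctly.
 */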
1726 static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1727 {
1728         bool second_level;
1729         int ret = 0;
1730         struct intel_vgpu *vgpu = s->vgpu;
1731
1732         if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1733                 gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
1734                 return -EFAULT;
1735         }
1736
1737         second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
1738         if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
1739                 gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
1740                 return -EFAULT;
1741         }
1742
1743         s->saved_buf_addr_type = s->buf_addr_type;
1744         addr_type_update_snb(s);
1745         if (s->buf_type == RING_BUFFER_INSTRUCTION) {
1746                 s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
1747                 s->buf_type = BATCH_BUFFER_INSTRUCTION;
1748         } else if (second_level) {
1749                 s->buf_type = BATCH_BUFFER_2ND_LEVEL;
1750                 s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
1751                 s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
1752         }
1753
1754         if (batch_buffer_needs_scan(s)) {
1755                 ret = perform_bb_shadow(s);
1756                 if (ret < 0)
1757                         gvt_vgpu_err("invalid shadow batch buffer\n");
1758         } else {
1759                 /* emulate a batch buffer end so the return is handled correctly */
1760                 ret = cmd_handler_mi_batch_buffer_end(s);
1761                 if (ret < 0)
1762                         return ret;
1763         }
1764         return ret;
1765 }
1766
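/*
 * Static description of every command the parser understands: name,
 * opcode, flags, ring mask, device mask, address-fixup info, a length
 * in dwords, and an optional handler.
 */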
1767 static struct cmd_info cmd_info[] = {
1768         {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1769
1770         {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
1771                 0, 1, NULL},
1772
1773         {"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
1774                 0, 1, cmd_handler_mi_user_interrupt},
1775
1776         {"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
1777                 D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
1778
1779         {"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1780
1781         {"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1782                 NULL},
1783
1784         {"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1785                 NULL},
1786
1787         {"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1788                 NULL},
1789
1790         {"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1791                 NULL},
1792
1793         {"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
1794                 D_ALL, 0, 1, NULL},
1795
1796         {"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
1797                 F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1798                 cmd_handler_mi_batch_buffer_end},
1799
1800         {"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
1801                 0, 1, NULL},
1802
1803         {"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1804                 NULL},
1805
1806         {"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
1807                 D_ALL, 0, 1, NULL},
1808
1809         {"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1810                 NULL},
1811
1812         {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1813                 NULL},
1814
1815         {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
1816                 R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
1817
1818         {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
1819                 0, 8, NULL},
1820
1821         {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
1822
1823         {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1824
1825         {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
1826                 D_BDW_PLUS, 0, 8, NULL},
1827
1828         {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
1829                 ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
1830
1831         {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
1832                 ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
1833
1834         {"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
1835                 0, 8, cmd_handler_mi_store_data_index},
1836
1837         {"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
1838                 D_ALL, 0, 8, cmd_handler_lri},
1839
1840         {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
1841                 cmd_handler_mi_update_gtt},
1842
1843         {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
1844                 D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
1845
1846         {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
1847                 cmd_handler_mi_flush_dw},
1848
1849         {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
1850                 10, cmd_handler_mi_clflush},
1851
1852         {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
1853                 D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
1854
1855         {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
1856                 D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
1857
1858         {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
1859                 D_ALL, 0, 8, cmd_handler_lrr},
1860
1861         {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
1862                 D_ALL, 0, 8, NULL},
1863
1864         {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
1865                 ADDR_FIX_1(2), 8, NULL},
1866
1867         {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
1868                 ADDR_FIX_1(2), 8, NULL},
1869
1870         {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
1871                 8, cmd_handler_mi_op_2e},
1872
1873         {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
1874                 8, cmd_handler_mi_op_2f},
1875
1876         {"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
1877                 F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
1878                 cmd_handler_mi_batch_buffer_start},
1879
1880         {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
1881                 F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
1882                 cmd_handler_mi_conditional_batch_buffer_end},
1883
1884         {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
1885                 R_RCS | R_BCS, D_ALL, 0, 2, NULL},
1886
1887         {"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
1888                 ADDR_FIX_2(4, 7), 8, NULL},
1889
1890         {"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
1891                 0, 8, NULL},
1892
1893         {"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
1894                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1895
1896         {"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
1897
1898         {"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
1899                 0, 8, NULL},
1900
1901         {"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1902                 ADDR_FIX_1(3), 8, NULL},
1903
1904         {"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
1905                 D_ALL, 0, 8, NULL},
1906
1907         {"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
1908                 ADDR_FIX_1(4), 8, NULL},
1909
1910         {"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1911                 ADDR_FIX_2(4, 5), 8, NULL},
1912
1913         {"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1914                 ADDR_FIX_1(4), 8, NULL},
1915
1916         {"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
1917                 ADDR_FIX_2(4, 7), 8, NULL},
1918
1919         {"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
1920                 D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1921
1922         {"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
1923
1924         {"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
1925                 D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
1926
1927         {"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
1928                 R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1929
1930         {"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
1931                 OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
1932                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1933
1934         {"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
1935                 D_ALL, ADDR_FIX_1(4), 8, NULL},
1936
1937         {"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
1938                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1939
1940         {"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
1941                 D_ALL, ADDR_FIX_1(4), 8, NULL},
1942
1943         {"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
1944                 D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1945
1946         {"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
1947                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1948
1949         {"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
1950                 OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
1951                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1952
1953         {"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
1954                 ADDR_FIX_2(4, 5), 8, NULL},
1955
1956         {"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
1957                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1958
1959         {"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
1960                 OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
1961                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1962
1963         {"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
1964                 OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
1965                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1966
1967         {"3DSTATE_BLEND_STATE_POINTERS",
1968                 OP_3DSTATE_BLEND_STATE_POINTERS,
1969                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1970
1971         {"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
1972                 OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
1973                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1974
1975         {"3DSTATE_BINDING_TABLE_POINTERS_VS",
1976                 OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
1977                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1978
1979         {"3DSTATE_BINDING_TABLE_POINTERS_HS",
1980                 OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
1981                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1982
1983         {"3DSTATE_BINDING_TABLE_POINTERS_DS",
1984                 OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
1985                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1986
1987         {"3DSTATE_BINDING_TABLE_POINTERS_GS",
1988                 OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
1989                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1990
1991         {"3DSTATE_BINDING_TABLE_POINTERS_PS",
1992                 OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
1993                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1994
1995         {"3DSTATE_SAMPLER_STATE_POINTERS_VS",
1996                 OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
1997                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1998
1999         {"3DSTATE_SAMPLER_STATE_POINTERS_HS",
2000                 OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
2001                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2002
2003         {"3DSTATE_SAMPLER_STATE_POINTERS_DS",
2004                 OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
2005                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2006
2007         {"3DSTATE_SAMPLER_STATE_POINTERS_GS",
2008                 OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
2009                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2010
2011         {"3DSTATE_SAMPLER_STATE_POINTERS_PS",
2012                 OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
2013                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2014
2015         {"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
2016                 0, 8, NULL},
2017
2018         {"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
2019                 0, 8, NULL},
2020
2021         {"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
2022                 0, 8, NULL},
2023
2024         {"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
2025                 0, 8, NULL},
2026
2027         {"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
2028                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2029
2030         {"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
2031                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2032
2033         {"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
2034                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2035
2036         {"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
2037                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2038
2039         {"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
2040                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2041
2042         {"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
2043                 F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2044
2045         {"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
2046                 F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2047
2048         {"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
2049                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2050
2051         {"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
2052                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2053
2054         {"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
2055                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2056
2057         {"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
2058                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2059
2060         {"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
2061                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2062
2063         {"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
2064                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2065
2066         {"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
2067                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2068
2069         {"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
2070                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2071
2072         {"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
2073                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2074
2075         {"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
2076                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2077
2078         {"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
2079                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2080
2081         {"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
2082                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2083
2084         {"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
2085                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2086
2087         {"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
2088                 D_BDW_PLUS, 0, 8, NULL},
2089
2090         {"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2091                 NULL},
2092
2093         {"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
2094                 D_BDW_PLUS, 0, 8, NULL},
2095
2096         {"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
2097                 D_BDW_PLUS, 0, 8, NULL},
2098
2099         {"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2100                 8, NULL},
2101
2102         {"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
2103                 R_RCS, D_BDW_PLUS, 0, 8, NULL},
2104
2105         {"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2106                 8, NULL},
2107
2108         {"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2109                 NULL},
2110
2111         {"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2112                 NULL},
2113
2114         {"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2115                 NULL},
2116
2117         {"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
2118                 D_BDW_PLUS, 0, 8, NULL},
2119
2120         {"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
2121                 R_RCS, D_ALL, 0, 8, NULL},
2122
2123         {"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
2124                 D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
2125
2126         {"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
2127                 R_RCS, D_ALL, 0, 1, NULL},
2128
2129         {"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2130
2131         {"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
2132                 R_RCS, D_ALL, 0, 8, NULL},
2133
2134         {"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
2135                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2136
2137         {"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2138
2139         {"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2140
2141         {"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2142
2143         {"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
2144                 D_BDW_PLUS, 0, 8, NULL},
2145
2146         {"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
2147                 D_BDW_PLUS, 0, 8, NULL},
2148
2149         {"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
2150                 D_ALL, 0, 8, NULL},
2151
2152         {"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
2153                 D_BDW_PLUS, 0, 8, NULL},
2154
2155         {"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
2156                 D_BDW_PLUS, 0, 8, NULL},
2157
2158         {"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2159
2160         {"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2161
2162         {"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2163
2164         {"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
2165                 D_ALL, 0, 8, NULL},
2166
2167         {"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2168
2169         {"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2170
2171         {"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
2172                 R_RCS, D_ALL, 0, 8, NULL},
2173
2174         {"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
2175                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2176
2177         {"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
2178                 0, 8, NULL},
2179
2180         {"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
2181                 D_ALL, ADDR_FIX_1(2), 8, NULL},
2182
2183         {"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
2184                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2185
2186         {"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
2187                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2188
2189         {"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
2190                 D_ALL, 0, 8, NULL},
2191
2192         {"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
2193                 D_ALL, 0, 8, NULL},
2194
2195         {"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
2196                 D_ALL, 0, 8, NULL},
2197
2198         {"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
2199                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2200
2201         {"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
2202                 D_BDW_PLUS, 0, 8, NULL},
2203
2204         {"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
2205                 D_ALL, ADDR_FIX_1(2), 8, NULL},
2206
2207         {"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
2208                 R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
2209
2210         {"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
2211                 R_RCS, D_ALL, 0, 8, NULL},
2212
2213         {"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
2214                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2215
2216         {"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
2217                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2218
2219         {"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
2220                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2221
2222         {"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
2223                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2224
2225         {"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
2226                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2227
2228         {"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
2229                 R_RCS, D_ALL, 0, 8, NULL},
2230
2231         {"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
2232                 D_ALL, 0, 9, NULL},
2233
2234         {"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2235                 ADDR_FIX_2(2, 4), 8, NULL},
2236
2237         {"3DSTATE_BINDING_TABLE_POOL_ALLOC",
2238                 OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
2239                 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2240
2241         {"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
2242                 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2243
2244         {"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
2245                 OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
2246                 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2247
2248         {"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
2249                 D_BDW_PLUS, 0, 8, NULL},
2250
2251         {"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
2252                 ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
2253
2254         {"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2255
2256         {"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
2257                 1, NULL},
2258
2259         {"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
2260                 ADDR_FIX_1(1), 8, NULL},
2261
2262         {"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2263
2264         {"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2265                 ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
2266
2267         {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
2268                 ADDR_FIX_1(1), 8, NULL},
2269
2270         {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2271
2272         {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2273
2274         {"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2275                 0, 8, NULL},
2276
2277         {"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
2278                 D_SKL_PLUS, 0, 8, NULL},
2279
2280         {"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
2281                 F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2282
2283         {"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
2284                 0, 16, NULL},
2285
2286         {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
2287                 0, 16, NULL},
2288
2289         {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2290
2291         {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
2292                 0, 16, NULL},
2293
2294         {"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
2295                 0, 16, NULL},
2296
2297         {"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2298                 0, 16, NULL},
2299
2300         {"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2301                 0, 8, NULL},
2302
2303         {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
2304                 NULL},
2305
2306         {"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
2307                 F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2308
2309         {"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
2310                 R_VCS, D_ALL, 0, 12, NULL},
2311
2312         {"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
2313                 R_VCS, D_ALL, 0, 12, NULL},
2314
2315         {"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
2316                 R_VCS, D_BDW_PLUS, 0, 12, NULL},
2317
2318         {"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
2319                 F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2320
2321         {"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
2322                 F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
2323
2324         {"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2325
2326         {"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
2327                 R_VCS, D_ALL, 0, 12, NULL},
2328
2329         {"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
2330                 R_VCS, D_ALL, 0, 12, NULL},
2331
2332         {"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
2333                 R_VCS, D_ALL, 0, 12, NULL},
2334
2335         {"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
2336                 R_VCS, D_ALL, 0, 12, NULL},
2337
2338         {"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
2339                 R_VCS, D_ALL, 0, 12, NULL},
2340
2341         {"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
2342                 R_VCS, D_ALL, 0, 12, NULL},
2343
2344         {"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
2345                 R_VCS, D_ALL, 0, 6, NULL},
2346
2347         {"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
2348                 R_VCS, D_ALL, 0, 12, NULL},
2349
2350         {"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
2351                 R_VCS, D_ALL, 0, 12, NULL},
2352
2353         {"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
2354                 R_VCS, D_ALL, 0, 12, NULL},
2355
2356         {"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
2357                 R_VCS, D_ALL, 0, 12, NULL},
2358
2359         {"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
2360                 R_VCS, D_ALL, 0, 12, NULL},
2361
2362         {"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
2363                 R_VCS, D_ALL, 0, 12, NULL},
2364
2365         {"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
2366                 R_VCS, D_ALL, 0, 12, NULL},
2367         {"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
2368                 R_VCS, D_ALL, 0, 12, NULL},
2369
2370         {"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
2371                 R_VCS, D_ALL, 0, 12, NULL},
2372
2373         {"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
2374                 R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
2375
2376         {"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
2377                 R_VCS, D_ALL, 0, 12, NULL},
2378
2379         {"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
2380                 R_VCS, D_ALL, 0, 12, NULL},
2381
2382         {"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
2383                 R_VCS, D_ALL, 0, 12, NULL},
2384
2385         {"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
2386                 R_VCS, D_ALL, 0, 12, NULL},
2387
2388         {"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
2389                 R_VCS, D_ALL, 0, 12, NULL},
2390
2391         {"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
2392                 R_VCS, D_ALL, 0, 12, NULL},
2393
2394         {"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
2395                 R_VCS, D_ALL, 0, 12, NULL},
2396
2397         {"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
2398                 R_VCS, D_ALL, 0, 12, NULL},
2399
2400         {"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
2401                 R_VCS, D_ALL, 0, 12, NULL},
2402
2403         {"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
2404                 R_VCS, D_ALL, 0, 12, NULL},
2405
2406         {"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
2407                 R_VCS, D_ALL, 0, 12, NULL},
2408
2409         {"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
2410                 0, 16, NULL},
2411
2412         {"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2413
2414         {"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2415
2416         {"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
2417                 R_VCS, D_ALL, 0, 12, NULL},
2418
2419         {"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
2420                 R_VCS, D_ALL, 0, 12, NULL},
2421
2422         {"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
2423                 R_VCS, D_ALL, 0, 12, NULL},
2424
2425         {"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
2426
2427         {"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
2428                 0, 12, NULL},
2429
2430         {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
2431                 0, 20, NULL},
2432 };
2433
2434 static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
2435 {
2436         hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
2437 }
2438
2439 /* call the cmd handler, and advance ip */
2440 static int cmd_parser_exec(struct parser_exec_state *s)
2441 {
2442         struct intel_vgpu *vgpu = s->vgpu;
2443         struct cmd_info *info;
2444         u32 cmd;
2445         int ret = 0;
2446
2447         cmd = cmd_val(s, 0);
2448
2449         info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
2450         if (info == NULL) {
2451                 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
2452                                 cmd, get_opcode(cmd, s->ring_id));
2453                 return -EBADRQC;
2454         }
2455
2456         s->info = info;
2457
2458         trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
2459                           cmd_length(s), s->buf_type);
2460
2461         if (info->handler) {
2462                 ret = info->handler(s);
2463                 if (ret < 0) {
2464                         gvt_vgpu_err("%s handler error\n", info->name);
2465                         return ret;
2466                 }
2467         }
2468
2469         if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2470                 ret = cmd_advance_default(s);
2471                 if (ret) {
2472                         gvt_vgpu_err("%s IP advance error\n", info->name);
2473                         return ret;
2474                 }
2475         }
2476         return 0;
2477 }
2478
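/*
 * Check whether @gma falls outside the scanned window between head and
 * tail, taking ring buffer wrap-around into account.
 */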
2479 static inline bool gma_out_of_range(unsigned long gma,
2480                 unsigned long gma_head, unsigned int gma_tail)
2481 {
2482         if (gma_tail >= gma_head)
2483                 return (gma < gma_head) || (gma > gma_tail);
2484         else
2485                 return (gma > gma_tail) && (gma < gma_head);
2486 }
2487
2488 /* Keep the return type consistent, e.g. EBADRQC for an unknown
2489  * cmd, EFAULT for an invalid address, EPERM for a non-privileged
2490  * access. It is later used as input to the VM health status.
2491  */
2492 static int command_scan(struct parser_exec_state *s,
2493                 unsigned long rb_head, unsigned long rb_tail,
2494                 unsigned long rb_start, unsigned long rb_len)
2495 {
2496
2497         unsigned long gma_head, gma_tail, gma_bottom;
2498         int ret = 0;
2499         struct intel_vgpu *vgpu = s->vgpu;
2500
2501         gma_head = rb_start + rb_head;
2502         gma_tail = rb_start + rb_tail;
2503         gma_bottom = rb_start +  rb_len;
2504
2505         while (s->ip_gma != gma_tail) {
2506                 if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2507                         if (!(s->ip_gma >= rb_start) ||
2508                                 !(s->ip_gma < gma_bottom)) {
2509                                 gvt_vgpu_err("ip_gma %lx out of ring scope."
2510                                         "(base:0x%lx, bottom: 0x%lx)\n",
2511                                         s->ip_gma, rb_start,
2512                                         gma_bottom);
2513                                 parser_exec_state_dump(s);
2514                                 return -EFAULT;
2515                         }
2516                         if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
2517                                 gvt_vgpu_err("ip_gma %lx out of range."
2518                                         "base 0x%lx head 0x%lx tail 0x%lx\n",
2519                                         s->ip_gma, rb_start,
2520                                         rb_head, rb_tail);
2521                                 parser_exec_state_dump(s);
2522                                 break;
2523                         }
2524                 }
2525                 ret = cmd_parser_exec(s);
2526                 if (ret) {
2527                         gvt_vgpu_err("cmd parser error\n");
2528                         parser_exec_state_dump(s);
2529                         break;
2530                 }
2531         }
2532
2533         return ret;
2534 }
2535
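/*
 * Scan the workload's ring buffer commands between head and tail using
 * the shadow copy (shadow_ring_buffer_va), after validating that the
 * ring lies inside the vGPU's GGTT.
 */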
2536 static int scan_workload(struct intel_vgpu_workload *workload)
2537 {
2538         unsigned long gma_head, gma_tail, gma_bottom;
2539         struct parser_exec_state s;
2540         int ret = 0;
2541
2542         /* ring base is page aligned */
2543         if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
2544                 return -EINVAL;
2545
2546         gma_head = workload->rb_start + workload->rb_head;
2547         gma_tail = workload->rb_start + workload->rb_tail;
2548         gma_bottom = workload->rb_start +  _RING_CTL_BUF_SIZE(workload->rb_ctl);
2549
2550         s.buf_type = RING_BUFFER_INSTRUCTION;
2551         s.buf_addr_type = GTT_BUFFER;
2552         s.vgpu = workload->vgpu;
2553         s.ring_id = workload->ring_id;
2554         s.ring_start = workload->rb_start;
2555         s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2556         s.ring_head = gma_head;
2557         s.ring_tail = gma_tail;
2558         s.rb_va = workload->shadow_ring_buffer_va;
2559         s.workload = workload;
2560
2561         if ((bypass_scan_mask & (1 << workload->ring_id)) ||
2562                 gma_head == gma_tail)
2563                 return 0;
2564
2565         if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
2566                 ret = -EINVAL;
2567                 goto out;
2568         }
2569
2570         ret = ip_gma_set(&s, gma_head);
2571         if (ret)
2572                 goto out;
2573
2574         ret = command_scan(&s, workload->rb_head, workload->rb_tail,
2575                 workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
2576
2577 out:
2578         return ret;
2579 }
2580
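/*
 * Scan the shadowed indirect (WA) context buffer. The scan tail covers
 * the indirect ctx plus the batch buffer start appended by
 * combine_wa_ctx(), and the buffer is treated like a small ring.
 */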
2581 static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2582 {
2583
2584         unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
2585         struct parser_exec_state s;
2586         int ret = 0;
2587         struct intel_vgpu_workload *workload = container_of(wa_ctx,
2588                                 struct intel_vgpu_workload,
2589                                 wa_ctx);
2590
2591         /* ring base is page aligned */
2592         if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
2593                                         I915_GTT_PAGE_SIZE)))
2594                 return -EINVAL;
2595
2596         ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
2597         ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
2598                         PAGE_SIZE);
2599         gma_head = wa_ctx->indirect_ctx.guest_gma;
2600         gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
2601         gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
2602
2603         s.buf_type = RING_BUFFER_INSTRUCTION;
2604         s.buf_addr_type = GTT_BUFFER;
2605         s.vgpu = workload->vgpu;
2606         s.ring_id = workload->ring_id;
2607         s.ring_start = wa_ctx->indirect_ctx.guest_gma;
2608         s.ring_size = ring_size;
2609         s.ring_head = gma_head;
2610         s.ring_tail = gma_tail;
2611         s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2612         s.workload = workload;
2613
2614         if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
2615                 ret = -EINVAL;
2616                 goto out;
2617         }
2618
2619         ret = ip_gma_set(&s, gma_head);
2620         if (ret)
2621                 goto out;
2622
2623         ret = command_scan(&s, 0, ring_tail,
2624                 wa_ctx->indirect_ctx.guest_gma, ring_size);
2625 out:
2626         return ret;
2627 }
2628
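/*
 * Copy the guest ring buffer contents between head and tail (handling
 * wrap-around at the top of the ring) into the per-ring scan buffer,
 * growing the scan buffer first if it is too small.
 */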
2629 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2630 {
2631         struct intel_vgpu *vgpu = workload->vgpu;
2632         struct intel_vgpu_submission *s = &vgpu->submission;
2633         unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
2634         void *shadow_ring_buffer_va;
2635         int ring_id = workload->ring_id;
2636         int ret;
2637
2638         guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2639
2640         /* calculate workload ring buffer size */
2641         workload->rb_len = (workload->rb_tail + guest_rb_size -
2642                         workload->rb_head) % guest_rb_size;
2643
2644         gma_head = workload->rb_start + workload->rb_head;
2645         gma_tail = workload->rb_start + workload->rb_tail;
2646         gma_top = workload->rb_start + guest_rb_size;
2647
2648         if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
2649                 void *p;
2650
2651                 /* grow the ring scan buffer if needed */
2652                 p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
2653                                 GFP_KERNEL);
2654                 if (!p) {
2655                         gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
2656                         return -ENOMEM;
2657                 }
2658                 s->ring_scan_buffer[ring_id] = p;
2659                 s->ring_scan_buffer_size[ring_id] = workload->rb_len;
2660         }
2661
2662         shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
2663
2664         /* get shadow ring buffer va */
2665         workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
2666
2667         /* head > tail --> copy from head to the top of the ring */
2668         if (gma_head > gma_tail) {
2669                 ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
2670                                       gma_head, gma_top, shadow_ring_buffer_va);
2671                 if (ret < 0) {
2672                         gvt_vgpu_err("fail to copy guest ring buffer\n");
2673                         return ret;
2674                 }
2675                 shadow_ring_buffer_va += ret;
2676                 gma_head = workload->rb_start;
2677         }
2678
2679         /* copy from head (or ring start, after wrapping) to tail */
2680         ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
2681                                 shadow_ring_buffer_va);
2682         if (ret < 0) {
2683                 gvt_vgpu_err("fail to copy guest ring buffer\n");
2684                 return ret;
2685         }
2686         return 0;
2687 }
2688
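/*
 * Shadow a workload's ring buffer into host memory and scan its
 * commands before submission.
 */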
2689 int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
2690 {
2691         int ret;
2692         struct intel_vgpu *vgpu = workload->vgpu;
2693
2694         ret = shadow_workload_ring_buffer(workload);
2695         if (ret) {
2696                 gvt_vgpu_err("fail to shadow workload ring_buffer\n");
2697                 return ret;
2698         }
2699
2700         ret = scan_workload(workload);
2701         if (ret) {
2702                 gvt_vgpu_err("scan workload error\n");
2703                 return ret;
2704         }
2705         return 0;
2706 }
2707
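/*
 * Shadow the guest indirect (WA) context: allocate a GEM object with
 * an extra cacheline of room, map it, and copy the guest indirect ctx
 * contents into it.
 */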
2708 static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2709 {
2710         int ctx_size = wa_ctx->indirect_ctx.size;
2711         unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
2712         struct intel_vgpu_workload *workload = container_of(wa_ctx,
2713                                         struct intel_vgpu_workload,
2714                                         wa_ctx);
2715         struct intel_vgpu *vgpu = workload->vgpu;
2716         struct drm_i915_gem_object *obj;
2717         int ret = 0;
2718         void *map;
2719
2720         obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
2721                                      roundup(ctx_size + CACHELINE_BYTES,
2722                                              PAGE_SIZE));
2723         if (IS_ERR(obj))
2724                 return PTR_ERR(obj);
2725
2726         /* get the va of the shadow indirect ctx object */
2727         map = i915_gem_object_pin_map(obj, I915_MAP_WB);
2728         if (IS_ERR(map)) {
2729                 gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
2730                 ret = PTR_ERR(map);
2731                 goto put_obj;
2732         }
2733
2734         ret = i915_gem_object_set_to_cpu_domain(obj, false);
2735         if (ret) {
2736                 gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
2737                 goto unmap_src;
2738         }
2739
2740         ret = copy_gma_to_hva(workload->vgpu,
2741                                 workload->vgpu->gtt.ggtt_mm,
2742                                 guest_gma, guest_gma + ctx_size,
2743                                 map);
2744         if (ret < 0) {
2745                 gvt_vgpu_err("failed to copy guest indirect ctx\n");
2746                 goto unmap_src;
2747         }
2748
2749         wa_ctx->indirect_ctx.obj = obj;
2750         wa_ctx->indirect_ctx.shadow_va = map;
2751         return 0;
2752
2753 unmap_src:
2754         i915_gem_object_unpin_map(obj);
2755 put_obj:
2756         i915_gem_object_put(obj);
2757         return ret;
2758 }
2759
2760 static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2761 {
2762         uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
2763         unsigned char *bb_start_sva;
2764
2765         if (!wa_ctx->per_ctx.valid)
2766                 return 0;
2767
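        /*
         * 0x18800001 is an MI_BATCH_BUFFER_START header (opcode 0x31 in
         * bits 28:23); the following DWord carries the graphics memory
         * address of the guest per-ctx buffer, so execution chains from the
         * shadowed indirect context straight into the per-ctx buffer.
         */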
2768         per_ctx_start[0] = 0x18800001;
2769         per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
2770
2771         bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
2772                                 wa_ctx->indirect_ctx.size;
2773
2774         memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
2775
2776         return 0;
2777 }
2778
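/*
 * intel_gvt_scan_and_shadow_wa_ctx - shadow and audit the WA context buffers
 * @wa_ctx: the shadow workaround-context descriptor to process
 *
 * A no-op when the workload carries no indirect context.  Otherwise the guest
 * indirect context is shadowed, the per-ctx buffer is chained onto it by
 * combine_wa_ctx(), and the combined result is handed to the command scanner.
 *
 * Returns 0 on success, or a negative error code on failure.
 */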
2779 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2780 {
2781         int ret;
2782         struct intel_vgpu_workload *workload = container_of(wa_ctx,
2783                                         struct intel_vgpu_workload,
2784                                         wa_ctx);
2785         struct intel_vgpu *vgpu = workload->vgpu;
2786
2787         if (wa_ctx->indirect_ctx.size == 0)
2788                 return 0;
2789
2790         ret = shadow_indirect_ctx(wa_ctx);
2791         if (ret) {
2792                 gvt_vgpu_err("failed to shadow indirect ctx\n");
2793                 return ret;
2794         }
2795
2796         combine_wa_ctx(wa_ctx);
2797
2798         ret = scan_wa_ctx(wa_ctx);
2799         if (ret) {
2800                 gvt_vgpu_err("failed to scan wa ctx\n");
2801                 return ret;
2802         }
2803
2804         return 0;
2805 }
2806
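/*
 * Look up @opcode on every engine set in the @rings bitmask and return the
 * first matching cmd_info, or NULL if the opcode is not registered on any of
 * those engines.  Used by init_cmd_table() to detect duplicate entries.
 */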
2807 static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
2808                 unsigned int opcode, int rings)
2809 {
2810         struct cmd_info *info = NULL;
2811         unsigned int ring;
2812
2813         for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
2814                 info = find_cmd_entry(gvt, opcode, ring);
2815                 if (info)
2816                         break;
2817         }
2818         return info;
2819 }
2820
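/*
 * Populate the per-device command hash table: walk the static cmd_info[]
 * array, skip entries that do not apply to this device generation, reject
 * opcodes already registered for one of the same rings, and hash the rest
 * for fast lookup while scanning command buffers.
 */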
2821 static int init_cmd_table(struct intel_gvt *gvt)
2822 {
2823         int i;
2824         struct cmd_entry *e;
2825         struct cmd_info *info;
2826         unsigned int gen_type;
2827
2828         gen_type = intel_gvt_get_device_type(gvt);
2829
2830         for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
2831                 if (!(cmd_info[i].devices & gen_type))
2832                         continue;
2833
2834                 e = kzalloc(sizeof(*e), GFP_KERNEL);
2835                 if (!e)
2836                         return -ENOMEM;
2837
2838                 e->info = &cmd_info[i];
2839                 info = find_cmd_entry_any_ring(gvt,
2840                                 e->info->opcode, e->info->rings);
2841                 if (info) {
2842                         gvt_err("%s %s duplicated\n", e->info->name,
2843                                         info->name);
                        kfree(e);       /* never added to the table; don't leak it */
2844                         return -EEXIST;
2845                 }
2846
2847                 INIT_HLIST_NODE(&e->hlist);
2848                 add_cmd_entry(gvt, e);
2849                 gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
2850                                 e->info->name, e->info->opcode, e->info->flag,
2851                                 e->info->devices, e->info->rings);
2852         }
2853         return 0;
2854 }
2855
2856 static void clean_cmd_table(struct intel_gvt *gvt)
2857 {
2858         struct hlist_node *tmp;
2859         struct cmd_entry *e;
2860         int i;
2861
2862         hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
2863                 kfree(e);
2864
2865         hash_init(gvt->cmd_table);
2866 }
2867
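/*
 * The exported init/clean entry points below are presumably called once per
 * GVT device from the core gvt setup and teardown paths: init builds the
 * command hash table and clean releases it again.
 */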
2868 void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
2869 {
2870         clean_cmd_table(gvt);
2871 }
2872
2873 int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
2874 {
2875         int ret;
2876
2877         ret = init_cmd_table(gvt);
2878         if (ret) {
2879                 intel_gvt_clean_cmd_parser(gvt);
2880                 return ret;
2881         }
2882         return 0;
2883 }