ia64/pv_ops/xen: preliminary to paravirtualizing fsys.S for xen.
/******************************************************************************
 * arch/ia64/include/asm/xen/inst.h
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <asm/xen/privop.h>

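/*
 * The defines below rename the generic paravirt entry points to their
 * xen_* implementations; the instruction macros that follow are written
 * in terms of the Xen shared register save area (XSI_*) and the
 * hyperprivops declared in asm/xen/privop.h.
 */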
#define ia64_ivt                                xen_ivt
#define DO_SAVE_MIN                             XEN_DO_SAVE_MIN

#define __paravirt_switch_to                    xen_switch_to
#define __paravirt_leave_syscall                xen_leave_syscall
#define __paravirt_work_processed_syscall       xen_work_processed_syscall
#define __paravirt_leave_kernel                 xen_leave_kernel
#define __paravirt_pending_syscall_end          xen_work_pending_syscall_end
#define __paravirt_work_processed_syscall_target \
                                                xen_work_processed_syscall

#define paravirt_fsyscall_table                 xen_fsyscall_table
#define paravirt_fsys_bubble_down               xen_fsys_bubble_down

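/*
 * Reads of the privileged control registers (cr.ifa, cr.itir, cr.isr,
 * cr.iha, cr.ipsr, cr.iim, cr.iip) become plain ld8 loads from the
 * corresponding XSI_* slot of the shared area, so no hyperprivop is
 * needed on these paths.
 */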
#define MOV_FROM_IFA(reg)       \
        movl reg = XSI_IFA;     \
        ;;                      \
        ld8 reg = [reg]

#define MOV_FROM_ITIR(reg)      \
        movl reg = XSI_ITIR;    \
        ;;                      \
        ld8 reg = [reg]

#define MOV_FROM_ISR(reg)       \
        movl reg = XSI_ISR;     \
        ;;                      \
        ld8 reg = [reg]

#define MOV_FROM_IHA(reg)       \
        movl reg = XSI_IHA;     \
        ;;                      \
        ld8 reg = [reg]

#define MOV_FROM_IPSR(pred, reg)        \
(pred)  movl reg = XSI_IPSR;            \
        ;;                              \
(pred)  ld8 reg = [reg]

#define MOV_FROM_IIM(reg)       \
        movl reg = XSI_IIM;     \
        ;;                      \
        ld8 reg = [reg]

#define MOV_FROM_IIP(reg)       \
        movl reg = XSI_IIP;     \
        ;;                      \
        ld8 reg = [reg]

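/*
 * cr.ivr and psr are read via the GET_IVR/GET_PSR hyperprivops, which
 * return their result in r8.  The macros below park the caller's r8 in
 * the clobber register (unless the target or the clobber already is r8)
 * so that the visible register state is preserved.
 */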
.macro __MOV_FROM_IVR reg, clob
        .ifc "\reg", "r8"
                XEN_HYPER_GET_IVR
                .exitm
        .endif
        .ifc "\clob", "r8"
                XEN_HYPER_GET_IVR
                ;;
                mov \reg = r8
                .exitm
        .endif

        mov \clob = r8
        ;;
        XEN_HYPER_GET_IVR
        ;;
        mov \reg = r8
        ;;
        mov r8 = \clob
.endm
#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob

.macro __MOV_FROM_PSR pred, reg, clob
        .ifc "\reg", "r8"
                (\pred) XEN_HYPER_GET_PSR;
                .exitm
        .endif
        .ifc "\clob", "r8"
                (\pred) XEN_HYPER_GET_PSR
                ;;
                (\pred) mov \reg = r8
                .exitm
        .endif

        (\pred) mov \clob = r8
        (\pred) XEN_HYPER_GET_PSR
        ;;
        (\pred) mov \reg = r8
        (\pred) mov r8 = \clob
.endm
#define MOV_FROM_PSR(pred, reg, clob)   __MOV_FROM_PSR pred, reg, clob


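/*
 * The MOV_TO_* counterparts store the new value into the corresponding
 * XSI_* slot of the shared area instead of writing the control register
 * directly; the clobber only ever holds the XSI_* address.
 */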
#define MOV_TO_IFA(reg, clob)   \
        movl clob = XSI_IFA;    \
        ;;                      \
        st8 [clob] = reg        \

#define MOV_TO_ITIR(pred, reg, clob)    \
(pred)  movl clob = XSI_ITIR;           \
        ;;                              \
(pred)  st8 [clob] = reg

#define MOV_TO_IHA(pred, reg, clob)     \
(pred)  movl clob = XSI_IHA;            \
        ;;                              \
(pred)  st8 [clob] = reg

#define MOV_TO_IPSR(pred, reg, clob)    \
(pred)  movl clob = XSI_IPSR;           \
        ;;                              \
(pred)  st8 [clob] = reg;               \
        ;;

#define MOV_TO_IFS(pred, reg, clob)     \
(pred)  movl clob = XSI_IFS;            \
        ;;                              \
(pred)  st8 [clob] = reg;               \
        ;;

#define MOV_TO_IIP(reg, clob)   \
        movl clob = XSI_IIP;    \
        ;;                      \
        st8 [clob] = reg

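/*
 * Kernel registers are written through the SET_KR hyperprivop, which takes
 * the register index in r8 and the new value in r9.  ____MOV_TO_KR saves
 * and restores the caller's r8/r9 via the clobbers and rejects clobber
 * choices that would collide with them; __MOV_TO_KR swaps the clobbers
 * when needed so that such a collision cannot occur.
 */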
.macro ____MOV_TO_KR kr, reg, clob0, clob1
        .ifc "\clob0", "r9"
                .error "clob0 \clob0 must not be r9"
        .endif
        .ifc "\clob1", "r8"
                .error "clob1 \clob1 must not be r8"
        .endif

        .ifnc "\reg", "r9"
                .ifnc "\clob1", "r9"
                        mov \clob1 = r9
                .endif
                mov r9 = \reg
        .endif
        .ifnc "\clob0", "r8"
                mov \clob0 = r8
        .endif
        mov r8 = \kr
        ;;
        XEN_HYPER_SET_KR

        .ifnc "\reg", "r9"
                .ifnc "\clob1", "r9"
                        mov r9 = \clob1
                .endif
        .endif
        .ifnc "\clob0", "r8"
                mov r8 = \clob0
        .endif
.endm

.macro __MOV_TO_KR kr, reg, clob0, clob1
        .ifc "\clob0", "r9"
                ____MOV_TO_KR \kr, \reg, \clob1, \clob0
                .exitm
        .endif
        .ifc "\clob1", "r8"
                ____MOV_TO_KR \kr, \reg, \clob1, \clob0
                .exitm
        .endif

        ____MOV_TO_KR \kr, \reg, \clob0, \clob1
.endm

#define MOV_TO_KR(kr, reg, clob0, clob1) \
        __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1


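/*
 * TLB inserts (itc.i/itc.d) are performed by the ITC_I/ITC_D hyperprivops,
 * which take their operand in r8; again the caller's r8 is preserved
 * through the clobber register.
 */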
.macro __ITC_I pred, reg, clob
        .ifc "\reg", "r8"
                (\pred) XEN_HYPER_ITC_I
                .exitm
        .endif
        .ifc "\clob", "r8"
                (\pred) mov r8 = \reg
                ;;
                (\pred) XEN_HYPER_ITC_I
                .exitm
        .endif

        (\pred) mov \clob = r8
        (\pred) mov r8 = \reg
        ;;
        (\pred) XEN_HYPER_ITC_I
        ;;
        (\pred) mov r8 = \clob
        ;;
.endm
#define ITC_I(pred, reg, clob)  __ITC_I pred, reg, clob

.macro __ITC_D pred, reg, clob
        .ifc "\reg", "r8"
                (\pred) XEN_HYPER_ITC_D
                ;;
                .exitm
        .endif
        .ifc "\clob", "r8"
                (\pred) mov r8 = \reg
                ;;
                (\pred) XEN_HYPER_ITC_D
                ;;
                .exitm
        .endif

        (\pred) mov \clob = r8
        (\pred) mov r8 = \reg
        ;;
        (\pred) XEN_HYPER_ITC_D
        ;;
        (\pred) mov r8 = \clob
        ;;
.endm
#define ITC_D(pred, reg, clob)  __ITC_D pred, reg, clob

.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
        .ifc "\reg", "r8"
                (\pred_i)XEN_HYPER_ITC_I
                ;;
                (\pred_d)XEN_HYPER_ITC_D
                ;;
                .exitm
        .endif
        .ifc "\clob", "r8"
                mov r8 = \reg
                ;;
                (\pred_i)XEN_HYPER_ITC_I
                ;;
                (\pred_d)XEN_HYPER_ITC_D
                ;;
                .exitm
        .endif

        mov \clob = r8
        mov r8 = \reg
        ;;
        (\pred_i)XEN_HYPER_ITC_I
        ;;
        (\pred_d)XEN_HYPER_ITC_D
        ;;
        mov r8 = \clob
        ;;
.endm
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
        __ITC_I_AND_D pred_i, pred_d, reg, clob

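/*
 * thash is emulated with the THASH hyperprivop: the address to hash is
 * passed in r8 and the result comes back in r8.
 */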
.macro __THASH pred, reg0, reg1, clob
        .ifc "\reg0", "r8"
                (\pred) mov r8 = \reg1
                (\pred) XEN_HYPER_THASH
                .exitm
        .endif
        .ifc "\reg1", "r8"
                (\pred) XEN_HYPER_THASH
                ;;
                (\pred) mov \reg0 = r8
                ;;
                .exitm
        .endif
        .ifc "\clob", "r8"
                (\pred) mov r8 = \reg1
                (\pred) XEN_HYPER_THASH
                ;;
                (\pred) mov \reg0 = r8
                ;;
                .exitm
        .endif

        (\pred) mov \clob = r8
        (\pred) mov r8 = \reg1
        (\pred) XEN_HYPER_THASH
        ;;
        (\pred) mov \reg0 = r8
        (\pred) mov r8 = \clob
        ;;
.endm
#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob

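/*
 * Virtual psr.ic is a word in the shared area (XSI_PSR_IC); setting and
 * clearing it reduces to a st4 of 1 or 0.
 */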
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)    \
        mov clob0 = 1;                                          \
        movl clob1 = XSI_PSR_IC;                                \
        ;;                                                      \
        st4 [clob1] = clob0                                     \
        ;;

#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)     \
        ;;                                      \
        srlz.d;                                 \
        mov clob1 = 1;                          \
        movl clob0 = XSI_PSR_IC;                \
        ;;                                      \
        st4 [clob0] = clob1

#define RSM_PSR_IC(clob)        \
        movl clob = XSI_PSR_IC; \
        ;;                      \
        st4 [clob] = r0;        \
        ;;

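/*
 * Virtual psr.i is the vcpu's event-channel upcall mask: XSI_PSR_I_ADDR
 * holds a pointer to evtchn_upcall_mask, with the pending byte at
 * MASK_TO_PEND_OFS from it.  SSM_PSR_I clears the mask and only issues
 * the real SSM_I hyperprivop when an upcall is already pending;
 * RSM_PSR_I and RSM_PSR_I_IC simply set the mask byte again.
 */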
/* pred will be clobbered */
#define MASK_TO_PEND_OFS    (-1)
#define SSM_PSR_I(pred, pred_clob, clob)                                \
(pred)  movl clob = XSI_PSR_I_ADDR                                      \
        ;;                                                              \
(pred)  ld8 clob = [clob]                                               \
        ;;                                                              \
        /* if (pred) vpsr.i = 1 */                                      \
        /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */         \
(pred)  st1 [clob] = r0, MASK_TO_PEND_OFS                               \
        ;;                                                              \
        /* if (vcpu->vcpu_info->evtchn_upcall_pending) */               \
(pred)  ld1 clob = [clob]                                               \
        ;;                                                              \
(pred)  cmp.ne.unc pred_clob, p0 = clob, r0                             \
        ;;                                                              \
(pred_clob)XEN_HYPER_SSM_I      /* do a real ssm psr.i */

#define RSM_PSR_I(pred, clob0, clob1)   \
        movl clob0 = XSI_PSR_I_ADDR;    \
        mov clob1 = 1;                  \
        ;;                              \
        ld8 clob0 = [clob0];            \
        ;;                              \
(pred)  st1 [clob0] = clob1

#define RSM_PSR_I_IC(clob0, clob1, clob2)               \
        movl clob0 = XSI_PSR_I_ADDR;                    \
        movl clob1 = XSI_PSR_IC;                        \
        ;;                                              \
        ld8 clob0 = [clob0];                            \
        mov clob2 = 1;                                  \
        ;;                                              \
        /* note: clears both vpsr.i and vpsr.ic! */     \
        st1 [clob0] = clob2;                            \
        st4 [clob1] = r0;                               \
        ;;

#define RSM_PSR_DT              \
        XEN_HYPER_RSM_PSR_DT

#define SSM_PSR_DT_AND_SRLZ_I   \
        XEN_HYPER_SSM_PSR_DT

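/*
 * Bank switching (bsw.0/bsw.1) is emulated by spilling/filling the bank 1
 * registers r16-r31 to/from the XSI_BANK1_R16 save area, with their unat
 * bits kept in XSI_B1NAT, and by updating XSI_BANKNUM with the currently
 * selected bank.
 */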
#define BSW_0(clob0, clob1, clob2)                      \
        ;;                                              \
        /* r16-r31 all now hold bank1 values */         \
        mov clob2 = ar.unat;                            \
        movl clob0 = XSI_BANK1_R16;                     \
        movl clob1 = XSI_BANK1_R16 + 8;                 \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r16, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r17, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r18, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r19, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r20, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r21, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r22, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r23, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r24, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r25, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r26, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r27, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r28, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r29, 16;          \
        ;;                                              \
.mem.offset 0, 0; st8.spill [clob0] = r30, 16;          \
.mem.offset 8, 0; st8.spill [clob1] = r31, 16;          \
        ;;                                              \
        mov clob1 = ar.unat;                            \
        movl clob0 = XSI_B1NAT;                         \
        ;;                                              \
        st8 [clob0] = clob1;                            \
        mov ar.unat = clob2;                            \
        movl clob0 = XSI_BANKNUM;                       \
        ;;                                              \
        st4 [clob0] = r0


        /* FIXME: THIS CODE IS NOT NaT SAFE! */
#define XEN_BSW_1(clob)                 \
        mov clob = ar.unat;             \
        movl r30 = XSI_B1NAT;           \
        ;;                              \
        ld8 r30 = [r30];                \
        mov r31 = 1;                    \
        ;;                              \
        mov ar.unat = r30;              \
        movl r30 = XSI_BANKNUM;         \
        ;;                              \
        st4 [r30] = r31;                \
        movl r30 = XSI_BANK1_R16;       \
        movl r31 = XSI_BANK1_R16+8;     \
        ;;                              \
        ld8.fill r16 = [r30], 16;       \
        ld8.fill r17 = [r31], 16;       \
        ;;                              \
        ld8.fill r18 = [r30], 16;       \
        ld8.fill r19 = [r31], 16;       \
        ;;                              \
        ld8.fill r20 = [r30], 16;       \
        ld8.fill r21 = [r31], 16;       \
        ;;                              \
        ld8.fill r22 = [r30], 16;       \
        ld8.fill r23 = [r31], 16;       \
        ;;                              \
        ld8.fill r24 = [r30], 16;       \
        ld8.fill r25 = [r31], 16;       \
        ;;                              \
        ld8.fill r26 = [r30], 16;       \
        ld8.fill r27 = [r31], 16;       \
        ;;                              \
        ld8.fill r28 = [r30], 16;       \
        ld8.fill r29 = [r31], 16;       \
        ;;                              \
        ld8.fill r30 = [r30];           \
        ld8.fill r31 = [r31];           \
        ;;                              \
        mov ar.unat = clob

#define BSW_1(clob0, clob1)     XEN_BSW_1(clob1)


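/*
 * cover and rfi are forwarded to Xen as direct hyperprivops
 * (XEN_HYPER_COVER / XEN_HYPER_RFI).
 */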
#define COVER   \
        XEN_HYPER_COVER

#define RFI                     \
        XEN_HYPER_RFI;          \
        dv_serialize_data