ia64/pv_ops: implement binary patching optimization for native.
[linux-2.6-block.git] / arch / ia64 / include / asm / paravirt_privop.h
1 /******************************************************************************
2  * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
3  *                    VA Linux Systems Japan K.K.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  *
19  */
20
21 #ifndef _ASM_IA64_PARAVIRT_PRIVOP_H
22 #define _ASM_IA64_PARAVIRT_PRIVOP_H
23
24 #ifdef CONFIG_PARAVIRT
25
26 #ifndef __ASSEMBLY__
27
28 #include <linux/types.h>
29 #include <asm/kregs.h> /* for IA64_PSR_I */
30
31 /******************************************************************************
32  * replacement of intrinsics operations.
33  */
34
/*
 * Table of hooks replacing the privilege-sensitive / performance-
 * sensitive ia64 intrinsics listed above.  The native implementations
 * (ia64_native_*) execute the plain instructions; a hypervisor may
 * install its own versions.
 */
struct pv_cpu_ops {
        void (*fc)(unsigned long addr);
        unsigned long (*thash)(unsigned long addr);
        unsigned long (*get_cpuid)(int index);
        unsigned long (*get_pmd)(int index);
        /* getreg/setreg take an _IA64_REG_* register number */
        unsigned long (*getreg)(int reg);
        void (*setreg)(int reg, unsigned long val);
        void (*ptcga)(unsigned long addr, unsigned long size);
        unsigned long (*get_rr)(unsigned long index);
        void (*set_rr)(unsigned long index, unsigned long val);
        /* set region registers rr0..rr4 in a single call */
        void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1,
                               unsigned long val2, unsigned long val3,
                               unsigned long val4);
        /* ssm/rsm specialized for the psr.i bit; see paravirt_ssm/rsm */
        void (*ssm_i)(void);
        void (*rsm_i)(void);
        unsigned long (*get_psr_i)(void);
        void (*intrin_local_irq_restore)(unsigned long flags);
};
53
54 extern struct pv_cpu_ops pv_cpu_ops;
55
56 extern void ia64_native_setreg_func(int regnum, unsigned long val);
57 extern unsigned long ia64_native_getreg_func(int regnum);
58
59 /************************************************/
60 /* Instructions paravirtualized for performance */
61 /************************************************/
62
#ifndef ASM_SUPPORTED
/*
 * Fallback for compilers without the inline-asm support used below:
 * route the performance-paravirtualized operations through plain
 * indirect calls via pv_cpu_ops.
 */
#define paravirt_ssm_i()        pv_cpu_ops.ssm_i()
#define paravirt_rsm_i()        pv_cpu_ops.rsm_i()
/*
 * Must take the register-number argument: paravirt_getreg() expands to
 * __paravirt_getreg(reg), so a zero-parameter macro here would fail to
 * expand at that call site ("macro passed 1 argument but takes 0").
 */
#define __paravirt_getreg(reg)  pv_cpu_ops.getreg(reg)
#endif
68
/* The mask for ia64_native_ssm()/rsm() must be a compile-time constant
 * ("i" constraint), which a static inline function cannot guarantee,
 * so these stay macros.  Only the psr.i bit is paravirtualized; any
 * other mask falls through to the native instruction. */
#define paravirt_ssm(mask)                      \
        do {                                    \
                if ((mask) == IA64_PSR_I)       \
                        paravirt_ssm_i();       \
                else                            \
                        ia64_native_ssm(mask);  \
        } while (0)

#define paravirt_rsm(mask)                      \
        do {                                    \
                if ((mask) == IA64_PSR_I)       \
                        paravirt_rsm_i();       \
                else                            \
                        ia64_native_rsm(mask);  \
        } while (0)
86
/* The ip value returned must be the one at the caller's site, not the
 * ip inside __paravirt_getreg(), so _IA64_REG_IP is read natively
 * in-line instead of going through the call stub. */
#define paravirt_getreg(reg)                                    \
        ({                                                      \
                unsigned long res;                              \
                if ((reg) == _IA64_REG_IP)                      \
                        res = ia64_native_getreg(_IA64_REG_IP); \
                else                                            \
                        res = __paravirt_getreg(reg);           \
                res;                                            \
        })
98
99 /******************************************************************************
100  * replacement of hand written assembly codes.
101  */
/*
 * Entry-point addresses of the paravirtualized hand-written assembly
 * routines (see the ia64_switch_to / ia64_leave_* aliases below).
 * Registered via paravirt_cpu_asm_init().
 */
struct pv_cpu_asm_switch {
        unsigned long switch_to;
        unsigned long leave_syscall;
        unsigned long work_processed_syscall;
        unsigned long leave_kernel;
};
void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
109
110 #endif /* __ASSEMBLY__ */
111
/* Select the paravirt_* flavor of the asm entry points ... */
#define IA64_PARAVIRT_ASM_FUNC(name)    paravirt_ ## name

#else

/* ... or fall back to the ia64_native_* flavor for the native case. */
#define IA64_PARAVIRT_ASM_FUNC(name)    ia64_native_ ## name

#endif /* CONFIG_PARAVIRT */

/* these routines utilize privilege-sensitive or performance-sensitive
 * privileged instructions so the code must be replaced with
 * paravirtualized versions */
#define ia64_switch_to                  IA64_PARAVIRT_ASM_FUNC(switch_to)
#define ia64_leave_syscall              IA64_PARAVIRT_ASM_FUNC(leave_syscall)
#define ia64_work_processed_syscall     \
        IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
#define ia64_leave_kernel               IA64_PARAVIRT_ASM_FUNC(leave_kernel)
129
130
131 #if defined(CONFIG_PARAVIRT)
132 /******************************************************************************
133  * binary patching infrastructure
134  */
/* Patch-type tags, one per paravirtualized operation: the binary
 * patching machinery identifies each annotated bundle by one of these
 * values. */
#define PARAVIRT_PATCH_TYPE_FC                          1
#define PARAVIRT_PATCH_TYPE_THASH                       2
#define PARAVIRT_PATCH_TYPE_GET_CPUID                   3
#define PARAVIRT_PATCH_TYPE_GET_PMD                     4
#define PARAVIRT_PATCH_TYPE_PTCGA                       5
#define PARAVIRT_PATCH_TYPE_GET_RR                      6
#define PARAVIRT_PATCH_TYPE_SET_RR                      7
#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4              8
#define PARAVIRT_PATCH_TYPE_SSM_I                       9
#define PARAVIRT_PATCH_TYPE_RSM_I                       10
#define PARAVIRT_PATCH_TYPE_GET_PSR_I                   11
#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE    12

/* register accessors: PARAVIRT_PATCH_TYPE_[GS]ETREG + _IA64_REG_xxx
 * (the register number is encoded into the low bits of the type) */
#define PARAVIRT_PATCH_TYPE_GETREG                      0x10000000
#define PARAVIRT_PATCH_TYPE_SETREG                      0x20000000
151
/*
 * Branch-target patch types for the hand-written assembly entry points:
 * struct task_struct* (*ia64_switch_to)(void* next_task);
 * void *ia64_leave_syscall;
 * void *ia64_work_processed_syscall;
 * void *ia64_leave_kernel;
 */

#define PARAVIRT_PATCH_TYPE_BR_START                    0x30000000
#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO                \
        (PARAVIRT_PATCH_TYPE_BR_START + 0)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL            \
        (PARAVIRT_PATCH_TYPE_BR_START + 1)
#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL   \
        (PARAVIRT_PATCH_TYPE_BR_START + 2)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL             \
        (PARAVIRT_PATCH_TYPE_BR_START + 3)
168
169 #ifdef ASM_SUPPORTED
170 #include <asm/paravirt_patch.h>
171
/*
 * pv_cpu_ops calling stub.
 * A normal function-call sequence cannot be expressed in gcc inline
 * assembly, so the call is open-coded below.
 *
 * from the caller's point of view,
 * the following registers will be clobbered.
 * r2, r3
 * r8-r15
 * r16, r17
 * b6, b7
 * p6-p15
 * ar.ccv
 *
 * from the callee's point of view,
 * the following registers can be used.
 * r2, r3: scratch
 * r8: scratch, input argument0 and return value
 * r9-r15: scratch, input argument1-5
 *         (NOTE(review): original said "r0-r15", but r0 is the
 *          hard-wired zero register and the stubs pass arguments in
 *          r8-r14 -- confirm against the callee implementations)
 * b6: return pointer
 * b7: scratch
 * p6-p15: scratch
 * ar.ccv: scratch
 *
 * other registers must not be changed. especially
 * b0: rp: preserved. gcc ignores b0 in clobbered register.
 * r16: saved gp
 */
/* 5 bundles */
#define __PARAVIRT_BR                                                   \
        ";;\n"                                                          \
        "{ .mlx\n"                                                      \
        "nop 0\n"                                                       \
        "movl r2 = %[op_addr]\n"/* get function pointer address */      \
        ";;\n"                                                          \
        "}\n"                                                           \
        "1:\n"                                                          \
        "{ .mii\n"                                                      \
        "ld8 r2 = [r2]\n"       /* load function descriptor address */  \
        "mov r17 = ip\n"        /* get ip to calc return address */     \
        "mov r16 = gp\n"        /* save gp */                           \
        ";;\n"                                                          \
        "}\n"                                                           \
        "{ .mii\n"                                                      \
        "ld8 r3 = [r2], 8\n"    /* load entry address */                \
        "adds r17 =  1f - 1b, r17\n"    /* calculate return address */  \
        ";;\n"                                                          \
        "mov b7 = r3\n"         /* set entry address */                 \
        "}\n"                                                           \
        "{ .mib\n"                                                      \
        "ld8 gp = [r2]\n"       /* load gp value */                     \
        "mov b6 = r17\n"        /* set return address */                \
        "br.cond.sptk.few b7\n" /* intrinsics are very short insns */   \
        "}\n"                                                           \
        "1:\n"                                                          \
        "{ .mii\n"                                                      \
        "mov gp = r16\n"        /* restore gp value */                  \
        "nop 0\n"                                                       \
        "nop 0\n"                                                       \
        ";;\n"                                                          \
        "}\n"
233
/* asm input operand carrying the address of pv_cpu_ops.<op> */
#define PARAVIRT_OP(op)                         \
        [op_addr] "i"(&pv_cpu_ops.op)

/* expand a short tag (FC, THASH, ...) to PARAVIRT_PATCH_TYPE_<tag> */
#define PARAVIRT_TYPE(type)                     \
        PARAVIRT_PATCH_TYPE_ ## type

/* General-register clobber lists for stubs taking 0/1/2/5 arguments.
 * Registers that carry arguments/results are tied as in/out asm
 * operands instead, hence the commented-out entries. */
#define PARAVIRT_REG_CLOBBERS0                                  \
        "r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",        \
                "r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS1                                  \
        "r2","r3", /*"r8",*/ "r9", "r10", "r11", "r14", \
                "r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS2                                  \
        "r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14",        \
                "r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS5                                  \
        "r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/        \
                "r15", "r16", "r17"

/* branch registers scratched by the stub */
#define PARAVIRT_BR_CLOBBERS                    \
        "b6", "b7"

/* predicate registers the callee may use */
#define PARAVIRT_PR_CLOBBERS                                            \
        "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"

/* application registers the callee may use */
#define PARAVIRT_AR_CLOBBERS                    \
        "ar.ccv"

/* full clobber lists, by argument count */
#define PARAVIRT_CLOBBERS0                      \
                PARAVIRT_REG_CLOBBERS0,         \
                PARAVIRT_BR_CLOBBERS,           \
                PARAVIRT_PR_CLOBBERS,           \
                PARAVIRT_AR_CLOBBERS,           \
                "memory"

#define PARAVIRT_CLOBBERS1                      \
                PARAVIRT_REG_CLOBBERS1,         \
                PARAVIRT_BR_CLOBBERS,           \
                PARAVIRT_PR_CLOBBERS,           \
                PARAVIRT_AR_CLOBBERS,           \
                "memory"

#define PARAVIRT_CLOBBERS2                      \
                PARAVIRT_REG_CLOBBERS2,         \
                PARAVIRT_BR_CLOBBERS,           \
                PARAVIRT_PR_CLOBBERS,           \
                PARAVIRT_AR_CLOBBERS,           \
                "memory"

#define PARAVIRT_CLOBBERS5                      \
                PARAVIRT_REG_CLOBBERS5,         \
                PARAVIRT_BR_CLOBBERS,           \
                PARAVIRT_PR_CLOBBERS,           \
                PARAVIRT_AR_CLOBBERS,           \
                "memory"
292
/* Call stub, no arguments, no return value.  r8 is declared as a dummy
 * output so gcc treats it as clobbered (it cannot go in the clobber
 * list because BR0_RET ties a result to it). */
#define PARAVIRT_BR0(op, type)                                  \
        register unsigned long ia64_clobber asm ("r8");         \
        asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,        \
                                          PARAVIRT_TYPE(type))  \
                      : "=r"(ia64_clobber)                      \
                      : PARAVIRT_OP(op)                         \
                      : PARAVIRT_CLOBBERS0)

/* Call stub, no arguments, result returned in r8 (ia64_intri_res). */
#define PARAVIRT_BR0_RET(op, type)                              \
        register unsigned long ia64_intri_res asm ("r8");       \
        asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,        \
                                          PARAVIRT_TYPE(type))  \
                      : "=r"(ia64_intri_res)                    \
                      : PARAVIRT_OP(op)                         \
                      : PARAVIRT_CLOBBERS0)

/* Call stub, one argument in r8 (tied via "0"), no return value. */
#define PARAVIRT_BR1(op, type, arg1)                            \
        register unsigned long __##arg1 asm ("r8") = arg1;      \
        register unsigned long ia64_clobber asm ("r8");         \
        asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,        \
                                          PARAVIRT_TYPE(type))  \
                      : "=r"(ia64_clobber)                      \
                      : PARAVIRT_OP(op), "0"(__##arg1)          \
                      : PARAVIRT_CLOBBERS1)

/* Call stub, one argument in r8, result returned in r8. */
#define PARAVIRT_BR1_RET(op, type, arg1)                        \
        register unsigned long ia64_intri_res asm ("r8");       \
        register unsigned long __##arg1 asm ("r8") = arg1;      \
        asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,        \
                                          PARAVIRT_TYPE(type))  \
                      : "=r"(ia64_intri_res)                    \
                      : PARAVIRT_OP(op), "0"(__##arg1)          \
                      : PARAVIRT_CLOBBERS1)

/* Call stub, two arguments in r8/r9 (tied in/out), no return value. */
#define PARAVIRT_BR2(op, type, arg1, arg2)                              \
        register unsigned long __##arg1 asm ("r8") = arg1;              \
        register unsigned long __##arg2 asm ("r9") = arg2;              \
        register unsigned long ia64_clobber1 asm ("r8");                \
        register unsigned long ia64_clobber2 asm ("r9");                \
        asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,                \
                                          PARAVIRT_TYPE(type))          \
                      : "=r"(ia64_clobber1), "=r"(ia64_clobber2)        \
                      : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2)   \
                      : PARAVIRT_CLOBBERS2)
337
338
/* Define static inline void paravirt_<op>(void) via the 0-arg stub. */
#define PARAVIRT_DEFINE_CPU_OP0(op, type)               \
        static inline void                              \
        paravirt_ ## op (void)                          \
        {                                               \
                PARAVIRT_BR0(op, type);                 \
        }

/* Define static inline unsigned long paravirt_<op>(void). */
#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type)           \
        static inline unsigned long                     \
        paravirt_ ## op (void)                          \
        {                                               \
                PARAVIRT_BR0_RET(op, type);             \
                return ia64_intri_res;                  \
        }

/* Define static inline void paravirt_<op>(unsigned long). */
#define PARAVIRT_DEFINE_CPU_OP1(op, type)               \
        static inline void                              \
        paravirt_ ## op (unsigned long arg1)            \
        {                                               \
                PARAVIRT_BR1(op, type, arg1);           \
        }

/* Define static inline unsigned long paravirt_<op>(unsigned long). */
#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type)           \
        static inline unsigned long                     \
        paravirt_ ## op (unsigned long arg1)            \
        {                                               \
                PARAVIRT_BR1_RET(op, type, arg1);       \
                return ia64_intri_res;                  \
        }

/* Define static inline void paravirt_<op>(unsigned long, unsigned long). */
#define PARAVIRT_DEFINE_CPU_OP2(op, type)               \
        static inline void                              \
        paravirt_ ## op (unsigned long arg1,            \
                         unsigned long arg2)            \
        {                                               \
                PARAVIRT_BR2(op, type, arg1, arg2);     \
        }
376
377
378 PARAVIRT_DEFINE_CPU_OP1(fc, FC);
379 PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH)
380 PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID)
381 PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD)
382 PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA)
383 PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR)
384 PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR)
385 PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I)
386 PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I)
387 PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I)
388 PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE)
389
/*
 * Set region registers rr0..rr4 through the pv_cpu_ops stub in one
 * call.  The five values are pinned to r8-r11 and r14 to match the
 * stub's calling convention; the same registers double as dummy
 * outputs since the callee may scratch them.
 */
static inline void
paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
                        unsigned long val2, unsigned long val3,
                        unsigned long val4)
{
        register unsigned long __val0 asm ("r8") = val0;
        register unsigned long __val1 asm ("r9") = val1;
        register unsigned long __val2 asm ("r10") = val2;
        register unsigned long __val3 asm ("r11") = val3;
        register unsigned long __val4 asm ("r14") = val4;

        register unsigned long ia64_clobber0 asm ("r8");
        register unsigned long ia64_clobber1 asm ("r9");
        register unsigned long ia64_clobber2 asm ("r10");
        register unsigned long ia64_clobber3 asm ("r11");
        register unsigned long ia64_clobber4 asm ("r14");

        asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
                                          PARAVIRT_TYPE(SET_RR0_TO_RR4))
                      : "=r"(ia64_clobber0),
                        "=r"(ia64_clobber1),
                        "=r"(ia64_clobber2),
                        "=r"(ia64_clobber3),
                        "=r"(ia64_clobber4)
                      : PARAVIRT_OP(set_rr0_to_rr4),
                        "0"(__val0), "1"(__val1), "2"(__val2),
                        "3"(__val3), "4"(__val4)
                      : PARAVIRT_CLOBBERS5);
}
419
/* unsigned long paravirt_getreg(int reg)
 * Read register "reg" (an _IA64_REG_* constant) via the getreg hook.
 * "reg" must be a compile-time constant because it is encoded into the
 * patch type (PARAVIRT_PATCH_TYPE_GETREG + reg).  Input __reg and the
 * result deliberately share r8. */
#define __paravirt_getreg(reg)                                          \
        ({                                                              \
                register unsigned long ia64_intri_res asm ("r8");       \
                register unsigned long __reg asm ("r8") = (reg);        \
                                                                        \
                BUILD_BUG_ON(!__builtin_constant_p(reg));               \
                asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,        \
                                                  PARAVIRT_TYPE(GETREG) \
                                                  + (reg))              \
                              : "=r"(ia64_intri_res)                    \
                              : PARAVIRT_OP(getreg), "0"(__reg)         \
                              : PARAVIRT_CLOBBERS1);                    \
                                                                        \
                ia64_intri_res;                                         \
        })
436
/* void paravirt_setreg(int reg, unsigned long val)
 * Write "val" to register "reg" via the setreg hook: val in r8, reg in
 * r9, both tied in/out.  As with getreg, "reg" must be a compile-time
 * constant since it is folded into the patch type. */
#define paravirt_setreg(reg, val)                                       \
        do {                                                            \
                register unsigned long __val asm ("r8") = val;          \
                register unsigned long __reg asm ("r9") = reg;          \
                register unsigned long ia64_clobber1 asm ("r8");        \
                register unsigned long ia64_clobber2 asm ("r9");        \
                                                                        \
                BUILD_BUG_ON(!__builtin_constant_p(reg));               \
                asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,        \
                                                  PARAVIRT_TYPE(SETREG) \
                                                  + (reg))              \
                              : "=r"(ia64_clobber1),                    \
                                "=r"(ia64_clobber2)                     \
                              : PARAVIRT_OP(setreg),                    \
                                "1"(__reg), "0"(__val)                  \
                              : PARAVIRT_CLOBBERS2);                    \
        } while (0)
455
456 #endif /* ASM_SUPPORTED */
#endif /* CONFIG_PARAVIRT */
458
459 #endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */