ia64/pv_ops/binary patch: define paravirt_dv_serialize_data() and suppress false...
[linux-2.6-block.git] / arch / ia64 / include / asm / paravirt_privop.h
CommitLineData
/******************************************************************************
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
20
21#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H
22#define _ASM_IA64_PARAVIRT_PRIVOP_H
23
24#ifdef CONFIG_PARAVIRT
25
26#ifndef __ASSEMBLY__
27
28#include <linux/types.h>
29#include <asm/kregs.h> /* for IA64_PSR_I */
30
31/******************************************************************************
32 * replacement of intrinsics operations.
33 */
34
/*
 * Dispatch table for privilege-sensitive / performance-sensitive CPU
 * intrinsics.  Each entry mirrors one ia64 intrinsic (fc, thash, ptc.ga,
 * region-register access, psr.i manipulation, ...) and is filled in by
 * the active pv_ops implementation.
 */
struct pv_cpu_ops {
	void (*fc)(unsigned long addr);
	unsigned long (*thash)(unsigned long addr);
	unsigned long (*get_cpuid)(int index);
	unsigned long (*get_pmd)(int index);
	unsigned long (*getreg)(int reg);
	void (*setreg)(int reg, unsigned long val);
	void (*ptcga)(unsigned long addr, unsigned long size);
	unsigned long (*get_rr)(unsigned long index);
	void (*set_rr)(unsigned long index, unsigned long val);
	void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1,
			       unsigned long val2, unsigned long val3,
			       unsigned long val4);
	void (*ssm_i)(void);
	void (*rsm_i)(void);
	unsigned long (*get_psr_i)(void);
	void (*intrin_local_irq_restore)(unsigned long flags);
};
53
extern struct pv_cpu_ops pv_cpu_ops;

extern void ia64_native_setreg_func(int regnum, unsigned long val);
extern unsigned long ia64_native_getreg_func(int regnum);

/************************************************/
/* Instructions paravirtualized for performance */
/************************************************/

/* Without inline-assembler support the hot entry points fall back to
 * indirect calls through the pv_cpu_ops table. */
#ifndef ASM_SUPPORTED
#define paravirt_ssm_i()	pv_cpu_ops.ssm_i()
#define paravirt_rsm_i()	pv_cpu_ops.rsm_i()
#define __paravirt_getreg()	pv_cpu_ops.getreg()
#endif
68
/* mask for ia64_native_ssm/rsm() must be constant ("i" constraint).
 * static inline function doesn't satisfy it. */
#define paravirt_ssm(mask)			\
	do {					\
		if ((mask) == IA64_PSR_I)	\
			paravirt_ssm_i();	\
		else				\
			ia64_native_ssm(mask);	\
	} while (0)

#define paravirt_rsm(mask)			\
	do {					\
		if ((mask) == IA64_PSR_I)	\
			paravirt_rsm_i();	\
		else				\
			ia64_native_rsm(mask);	\
	} while (0)
86
/* returned ip value should be the one in the caller,
 * not in __paravirt_getreg() */
#define paravirt_getreg(reg)					\
	({							\
		unsigned long res;				\
		if ((reg) == _IA64_REG_IP)			\
			res = ia64_native_getreg(_IA64_REG_IP);	\
		else						\
			res = __paravirt_getreg(reg);		\
		res;						\
	})
98
/******************************************************************************
 * replacement of hand written assembly codes.
 */
/* Addresses of the paravirtualized replacements for the hand-written
 * assembly entry points; registered via paravirt_cpu_asm_init(). */
struct pv_cpu_asm_switch {
	unsigned long switch_to;
	unsigned long leave_syscall;
	unsigned long work_processed_syscall;
	unsigned long leave_kernel;
};
void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
109
1ff730b5
IY
110#endif /* __ASSEMBLY__ */
111
4df8d22b
IY
112#define IA64_PARAVIRT_ASM_FUNC(name) paravirt_ ## name
113
1ff730b5
IY
114#else
115
116/* fallback for native case */
4df8d22b 117#define IA64_PARAVIRT_ASM_FUNC(name) ia64_native_ ## name
1ff730b5
IY
118
119#endif /* CONFIG_PARAVIRT */
120
/* Data serialization is only required around binary-patched code, i.e.
 * when both CONFIG_PARAVIRT and ASM_SUPPORTED are in effect. */
#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED)
#define paravirt_dv_serialize_data()	ia64_dv_serialize_data()
#else
#define paravirt_dv_serialize_data()	/* nothing */
#endif

/* these routines utilize privilege-sensitive or performance-sensitive
 * privileged instructions so the code must be replaced with
 * paravirtualized versions */
#define ia64_switch_to			IA64_PARAVIRT_ASM_FUNC(switch_to)
#define ia64_leave_syscall		IA64_PARAVIRT_ASM_FUNC(leave_syscall)
#define ia64_work_processed_syscall	\
	IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
#define ia64_leave_kernel		IA64_PARAVIRT_ASM_FUNC(leave_kernel)
135
03f511dd
IY
136
137#if defined(CONFIG_PARAVIRT)
/******************************************************************************
 * binary patching infrastructure
 */
/* Patch-site type tags; the patcher replaces each tagged call stub with
 * the matching native or hypervisor instruction sequence. */
#define PARAVIRT_PATCH_TYPE_FC				1
#define PARAVIRT_PATCH_TYPE_THASH			2
#define PARAVIRT_PATCH_TYPE_GET_CPUID			3
#define PARAVIRT_PATCH_TYPE_GET_PMD			4
#define PARAVIRT_PATCH_TYPE_PTCGA			5
#define PARAVIRT_PATCH_TYPE_GET_RR			6
#define PARAVIRT_PATCH_TYPE_SET_RR			7
#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4		8
#define PARAVIRT_PATCH_TYPE_SSM_I			9
#define PARAVIRT_PATCH_TYPE_RSM_I			10
#define PARAVIRT_PATCH_TYPE_GET_PSR_I			11
#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE	12

/* PARAVIRT_PATCH_TYPE_[GS]ETREG + _IA64_REG_xxx */
#define PARAVIRT_PATCH_TYPE_GETREG			0x10000000
#define PARAVIRT_PATCH_TYPE_SETREG			0x20000000

/*
 * struct task_struct* (*ia64_switch_to)(void* next_task);
 * void *ia64_leave_syscall;
 * void *ia64_work_processed_syscall
 * void *ia64_leave_kernel;
 */

#define PARAVIRT_PATCH_TYPE_BR_START			0x30000000
#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO		\
	(PARAVIRT_PATCH_TYPE_BR_START + 0)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL		\
	(PARAVIRT_PATCH_TYPE_BR_START + 1)
#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL	\
	(PARAVIRT_PATCH_TYPE_BR_START + 2)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL		\
	(PARAVIRT_PATCH_TYPE_BR_START + 3)
174
175#ifdef ASM_SUPPORTED
176#include <asm/paravirt_patch.h>
177
178/*
179 * pv_cpu_ops calling stub.
180 * normal function call convension can't be written by gcc
181 * inline assembly.
182 *
183 * from the caller's point of view,
184 * the following registers will be clobbered.
185 * r2, r3
186 * r8-r15
187 * r16, r17
188 * b6, b7
189 * p6-p15
190 * ar.ccv
191 *
192 * from the callee's point of view ,
193 * the following registers can be used.
194 * r2, r3: scratch
195 * r8: scratch, input argument0 and return value
196 * r0-r15: scratch, input argument1-5
197 * b6: return pointer
198 * b7: scratch
199 * p6-p15: scratch
200 * ar.ccv: scratch
201 *
202 * other registers must not be changed. especially
203 * b0: rp: preserved. gcc ignores b0 in clobbered register.
204 * r16: saved gp
205 */
/* 5 bundles: indirect-call stub used at every patch site.  It loads the
 * function descriptor from pv_cpu_ops (%[op_addr]), saves gp in r16,
 * computes the return address into b6 and branches through b7. */
#define __PARAVIRT_BR							\
	";;\n"								\
	"{ .mlx\n"							\
	"nop 0\n"							\
	"movl r2 = %[op_addr]\n"/* get function pointer address */	\
	";;\n"								\
	"}\n"								\
	"1:\n"								\
	"{ .mii\n"							\
	"ld8 r2 = [r2]\n"	/* load function descriptor address */	\
	"mov r17 = ip\n"	/* get ip to calc return address */	\
	"mov r16 = gp\n"	/* save gp */				\
	";;\n"								\
	"}\n"								\
	"{ .mii\n"							\
	"ld8 r3 = [r2], 8\n"	/* load entry address */		\
	"adds r17 = 1f - 1b, r17\n"	/* calculate return address */	\
	";;\n"								\
	"mov b7 = r3\n"		/* set entry address */			\
	"}\n"								\
	"{ .mib\n"							\
	"ld8 gp = [r2]\n"	/* load gp value */			\
	"mov b6 = r17\n"	/* set return address */		\
	"br.cond.sptk.few b7\n"	/* intrinsics are very short isns */	\
	"}\n"								\
	"1:\n"								\
	"{ .mii\n"							\
	"mov gp = r16\n"	/* restore gp value */			\
	"nop 0\n"							\
	"nop 0\n"							\
	";;\n"								\
	"}\n"
239
/* asm operand naming the pv_cpu_ops slot for a given op */
#define PARAVIRT_OP(op)				\
	[op_addr] "i"(&pv_cpu_ops.op)

#define PARAVIRT_TYPE(type)			\
	PARAVIRT_PATCH_TYPE_ ## type

/* Register clobber lists for the stub; the commented-out registers are
 * the ones carrying arguments/return values and are tied via "0".."4"
 * constraints instead of being listed as clobbers. */
#define PARAVIRT_REG_CLOBBERS0					\
	"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",	\
	"r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS1					\
	"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",	\
	"r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS2					\
	"r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14",	\
	"r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS5					\
	"r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/	\
	"r15", "r16", "r17"

#define PARAVIRT_BR_CLOBBERS			\
	"b6", "b7"

#define PARAVIRT_PR_CLOBBERS						\
	"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"

#define PARAVIRT_AR_CLOBBERS			\
	"ar.ccv"

#define PARAVIRT_CLOBBERS0			\
	PARAVIRT_REG_CLOBBERS0,			\
	PARAVIRT_BR_CLOBBERS,			\
	PARAVIRT_PR_CLOBBERS,			\
	PARAVIRT_AR_CLOBBERS,			\
	"memory"

#define PARAVIRT_CLOBBERS1			\
	PARAVIRT_REG_CLOBBERS1,			\
	PARAVIRT_BR_CLOBBERS,			\
	PARAVIRT_PR_CLOBBERS,			\
	PARAVIRT_AR_CLOBBERS,			\
	"memory"

#define PARAVIRT_CLOBBERS2			\
	PARAVIRT_REG_CLOBBERS2,			\
	PARAVIRT_BR_CLOBBERS,			\
	PARAVIRT_PR_CLOBBERS,			\
	PARAVIRT_AR_CLOBBERS,			\
	"memory"

#define PARAVIRT_CLOBBERS5			\
	PARAVIRT_REG_CLOBBERS5,			\
	PARAVIRT_BR_CLOBBERS,			\
	PARAVIRT_PR_CLOBBERS,			\
	PARAVIRT_AR_CLOBBERS,			\
	"memory"
298
/* Emit one patchable call stub.  The BRn variants differ only in how
 * many argument registers (r8, r9, ...) are tied to asm operands; the
 * _RET variants additionally name r8 as ia64_intri_res for the caller. */
#define PARAVIRT_BR0(op, type)					\
	register unsigned long ia64_clobber asm ("r8");		\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
					  PARAVIRT_TYPE(type))	\
		      : "=r"(ia64_clobber)			\
		      : PARAVIRT_OP(op)				\
		      : PARAVIRT_CLOBBERS0)

#define PARAVIRT_BR0_RET(op, type)				\
	register unsigned long ia64_intri_res asm ("r8");	\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
					  PARAVIRT_TYPE(type))	\
		      : "=r"(ia64_intri_res)			\
		      : PARAVIRT_OP(op)				\
		      : PARAVIRT_CLOBBERS0)

#define PARAVIRT_BR1(op, type, arg1)				\
	register unsigned long __##arg1 asm ("r8") = arg1;	\
	register unsigned long ia64_clobber asm ("r8");		\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
					  PARAVIRT_TYPE(type))	\
		      : "=r"(ia64_clobber)			\
		      : PARAVIRT_OP(op), "0"(__##arg1)		\
		      : PARAVIRT_CLOBBERS1)

#define PARAVIRT_BR1_RET(op, type, arg1)			\
	register unsigned long ia64_intri_res asm ("r8");	\
	register unsigned long __##arg1 asm ("r8") = arg1;	\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
					  PARAVIRT_TYPE(type))	\
		      : "=r"(ia64_intri_res)			\
		      : PARAVIRT_OP(op), "0"(__##arg1)		\
		      : PARAVIRT_CLOBBERS1)

#define PARAVIRT_BR2(op, type, arg1, arg2)			\
	register unsigned long __##arg1 asm ("r8") = arg1;	\
	register unsigned long __##arg2 asm ("r9") = arg2;	\
	register unsigned long ia64_clobber1 asm ("r8");	\
	register unsigned long ia64_clobber2 asm ("r9");	\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
					  PARAVIRT_TYPE(type))	\
		      : "=r"(ia64_clobber1), "=r"(ia64_clobber2) \
		      : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2) \
		      : PARAVIRT_CLOBBERS2)


345#define PARAVIRT_DEFINE_CPU_OP0(op, type) \
346 static inline void \
347 paravirt_ ## op (void) \
348 { \
349 PARAVIRT_BR0(op, type); \
350 }
351
352#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type) \
353 static inline unsigned long \
354 paravirt_ ## op (void) \
355 { \
356 PARAVIRT_BR0_RET(op, type); \
357 return ia64_intri_res; \
358 }
359
360#define PARAVIRT_DEFINE_CPU_OP1(op, type) \
361 static inline void \
362 paravirt_ ## op (unsigned long arg1) \
363 { \
364 PARAVIRT_BR1(op, type, arg1); \
365 }
366
367#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type) \
368 static inline unsigned long \
369 paravirt_ ## op (unsigned long arg1) \
370 { \
371 PARAVIRT_BR1_RET(op, type, arg1); \
372 return ia64_intri_res; \
373 }
374
375#define PARAVIRT_DEFINE_CPU_OP2(op, type) \
376 static inline void \
377 paravirt_ ## op (unsigned long arg1, \
378 unsigned long arg2) \
379 { \
380 PARAVIRT_BR2(op, type, arg1, arg2); \
381 }
382
383
384PARAVIRT_DEFINE_CPU_OP1(fc, FC);
385PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH)
386PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID)
387PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD)
388PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA)
389PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR)
390PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR)
391PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I)
392PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I)
393PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I)
394PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE)
395
396static inline void
397paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
398 unsigned long val2, unsigned long val3,
399 unsigned long val4)
400{
401 register unsigned long __val0 asm ("r8") = val0;
402 register unsigned long __val1 asm ("r9") = val1;
403 register unsigned long __val2 asm ("r10") = val2;
404 register unsigned long __val3 asm ("r11") = val3;
405 register unsigned long __val4 asm ("r14") = val4;
406
407 register unsigned long ia64_clobber0 asm ("r8");
408 register unsigned long ia64_clobber1 asm ("r9");
409 register unsigned long ia64_clobber2 asm ("r10");
410 register unsigned long ia64_clobber3 asm ("r11");
411 register unsigned long ia64_clobber4 asm ("r14");
412
413 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
414 PARAVIRT_TYPE(SET_RR0_TO_RR4))
415 : "=r"(ia64_clobber0),
416 "=r"(ia64_clobber1),
417 "=r"(ia64_clobber2),
418 "=r"(ia64_clobber3),
419 "=r"(ia64_clobber4)
420 : PARAVIRT_OP(set_rr0_to_rr4),
421 "0"(__val0), "1"(__val1), "2"(__val2),
422 "3"(__val3), "4"(__val4)
423 : PARAVIRT_CLOBBERS5);
424}
425
/* unsigned long paravirt_getreg(int reg) */
#define __paravirt_getreg(reg)						\
	({								\
		register unsigned long ia64_intri_res asm ("r8");	\
		register unsigned long __reg asm ("r8") = (reg);	\
									\
		/* reg is folded into the patch type, so it must be a	\
		 * compile-time constant */				\
		BUILD_BUG_ON(!__builtin_constant_p(reg));		\
		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
						  PARAVIRT_TYPE(GETREG)	\
						  + (reg))		\
			      : "=r"(ia64_intri_res)			\
			      : PARAVIRT_OP(getreg), "0"(__reg)		\
			      : PARAVIRT_CLOBBERS1);			\
									\
		ia64_intri_res;						\
	})

/* void paravirt_setreg(int reg, unsigned long val) */
#define paravirt_setreg(reg, val)					\
	do {								\
		register unsigned long __val asm ("r8") = val;		\
		register unsigned long __reg asm ("r9") = reg;		\
		register unsigned long ia64_clobber1 asm ("r8");	\
		register unsigned long ia64_clobber2 asm ("r9");	\
									\
		BUILD_BUG_ON(!__builtin_constant_p(reg));		\
		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
						  PARAVIRT_TYPE(SETREG)	\
						  + (reg))		\
			      : "=r"(ia64_clobber1),			\
				"=r"(ia64_clobber2)			\
			      : PARAVIRT_OP(setreg),			\
				"1"(__reg), "0"(__val)			\
			      : PARAVIRT_CLOBBERS2);			\
	} while (0)
461
462#endif /* ASM_SUPPORTED */
463#endif /* CONFIG_PARAVIRT && ASM_SUPPOTED */
464
1ff730b5 465#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */