// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pagemap.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>

#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
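
/*
 * A note on the mechanism (as described by the KVM PPC paravirt
 * interface): the host maps a "magic page" holding struct
 * kvm_vcpu_arch_shared at the top of the guest effective address space,
 * i.e. at -4096. A load or store with RA=0 reaches it through a 16-bit
 * signed displacement that sign-extends to the top of the address space,
 * so magic_var(x) yields an address whose low bits fit the D field of
 * lwz/ld/stw/std. That is what lets the patchers below turn privileged
 * SPR accesses into plain memory accesses.
 */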

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)
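
/*
 * Worked example: the SPR number is split into two 5-bit halves that
 * appear swapped in the encoding. SPRN_SRR0 is 26 (0x1a), so
 * KVM_INST_MFSPR(SPRN_SRR0) = 0x7c0002a6 | (26 << 16) = 0x7c1a02a6,
 * i.e. "mfspr rD, SRR0" with the rD field left zero so it can be
 * compared against an instruction whose RT bits were masked off.
 * SPR_TO flips the extended opcode from mfspr (339) to mtspr (467).
 */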

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4

static bool kvm_patching_worked = true;
char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}
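
/*
 * The helpers below rewrite *inst into a load or store against the magic
 * page. On 32-bit kernels a 64-bit shared-page field can't be accessed
 * with one instruction, so _ld/_std fall back to lwz/stw of the least
 * significant word, which sits at addr + 4 (assuming the big-endian
 * layout the 32-bit targets of this code use).
 */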

static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupt handlers and our code
	   can be in different regions, so we don't patch them */

	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}
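
/*
 * An unconditional "b" carries a 26-bit signed byte displacement (the
 * 24-bit LI field shifted left by 2), hence KVM_INST_B_MASK of 0x03ffffff
 * and a maximum forward reach of KVM_INST_B_MAX = 0x01ffffff bytes; the
 * patchers below refuse to emit a branch into kvm_tmp beyond that bound.
 */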

static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
		       kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}
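
/*
 * mtmsr and friends have side effects that a single load/store can't
 * mimic, so the patchers below emit a trampoline instead: they copy a
 * pre-assembled stub (kvm_emulate_*[] from kvm_emul.S) into kvm_tmp,
 * patch its register fields and return branch via the *_offs word
 * indices exported by the assembly, and replace the original instruction
 * with a branch to the stub. distance_end is measured from next_inst
 * because the stub resumes at the instruction after the patched one.
 */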

extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
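
/*
 * Why the r30/r31 special cases: the stubs save r31 to the magic page's
 * scratch1 slot and r30 to scratch2 before doing their work (see
 * kvm_emul.S), so when the guest instruction's own register is r30 or
 * r31 the stub can no longer read it directly and must instead reload
 * the saved value from the matching scratch slot into r30.
 */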

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
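
/*
 * wrteei 1 reuses the wrtee stub: kvm_check_ins() calls
 * kvm_patch_ins_wrtee(inst, 0, 1), and the imm_one path above turns the
 * stub's register move into "li r30, MSR_EE". Only wrteei 0 needs the
 * separate, simpler template below.
 */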

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}
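
/*
 * The hypercall asks the host to map the shared page at effective address
 * -4096 for the calling CPU and returns a bitmap of supported magic-page
 * features in out[0]; kvm_check_ins() gates the optional patches on those
 * bits (KVM_MAGIC_FEAT_SR, KVM_MAGIC_FEAT_MAS0_TO_SPRG7). It is invoked
 * via on_each_cpu() below so every CPU gets the mapping before patching.
 */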

/*
 * Check one kernel instruction and patch it if it is a privileged
 * operation we can handle: the first switch matches with the RT field
 * masked off, the second additionally masks RB (for mtsrin), and the
 * last compares the full opcode for forms without register operands.
 */
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}
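
/*
 * kvm_template_start/end bracket the emulation stub templates (defined in
 * kvm_emul.S) so the scan below can skip over them: the patcher must not
 * rewrite its own template code.
 */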

extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/*
	 * Quick self-test to see if the mapping works; note that
	 * fault_in_pages_readable() returns zero on success, so a non-zero
	 * result means the magic page is not readable.
	 */
	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
	       kvm_patching_worked ? "worked" : "failed");
}
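
/*
 * Return the unused tail of the 1 MB kvm_tmp buffer to the page
 * allocator. free_reserved_area() only frees pages fully contained in
 * the range, so a partial page at kvm_tmp_index stays reserved.
 */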
static __init void kvm_free_tmp(void)
{
	/*
	 * Inform kmemleak about the hole in the .bss section since the
	 * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
	 */
	kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
			   ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}

static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);