arch/mips/kvm/dyntrans.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>

#include "commpage.h"

/**
 * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
 * @vcpu:	Virtual CPU.
 * @opc:	PC of instruction to replace.
 * @replace:	Instruction to write.
 */
static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
                                  union mips_instruction replace)
{
        unsigned long vaddr = (unsigned long)opc;
        int err;

retry:
        /* The GVA page table is still active, so use the Linux TLB handlers. */
        kvm_trap_emul_gva_lockless_begin(vcpu);
        err = put_user(replace.word, opc);
        kvm_trap_emul_gva_lockless_end(vcpu);

        if (unlikely(err)) {
                /*
                 * We write protect clean pages in the GVA page table so that
                 * the normal Linux TLB mod handler doesn't silently dirty the
                 * page. It's also possible we raced with a GVA invalidation.
                 * Try to force the page to become dirty.
                 */
                err = kvm_trap_emul_gva_fault(vcpu, vaddr, true);
                if (unlikely(err)) {
                        kvm_info("%s: Address unwriteable: %p\n",
                                 __func__, opc);
                        return -EFAULT;
                }

                /*
                 * Try again. This will likely trigger a TLB refill, which will
                 * fetch the new dirty entry from the GVA page table, which
                 * should then succeed.
                 */
                goto retry;
        }
        __local_flush_icache_user_range(vaddr, vaddr + 4);

        return 0;
}

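/*
 * Once an index-type CACHE op has trapped and been handled, it is patched
 * out entirely: the guest's CACHE instruction is overwritten with a NOP so
 * later executions no longer trap. The assumption is that the host manages
 * the real caches, so dropping the guest's index-based op is harmless.
 */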
int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
                               struct kvm_vcpu *vcpu)
{
        union mips_instruction nop_inst = { 0 };

        /* Replace the CACHE instruction with a NOP */
        return kvm_mips_trans_replace(vcpu, opc, nop_inst);
}

/*
 * Address-based CACHE instructions are transformed into SYNCIs. A little
 * heavy for just D-cache invalidates, but it avoids an expensive trap.
 */
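/*
 * Encoding note: SYNCI shares the REGIMM (bcond) major opcode with the
 * conditional branches and uses the rt field as a minor opcode, which is why
 * the replacement below is built from bcond_op and synci_op. On MIPS r6 the
 * guest's CACHE instruction is encoded under SPECIAL3 with a narrower offset
 * field, hence the separate spec3_format immediate extraction.
 */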
int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
                            struct kvm_vcpu *vcpu)
{
        union mips_instruction synci_inst = { 0 };

        synci_inst.i_format.opcode = bcond_op;
        synci_inst.i_format.rs = inst.i_format.rs;
        synci_inst.i_format.rt = synci_op;
        if (cpu_has_mips_r6)
                synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
        else
                synci_inst.i_format.simmediate = inst.i_format.simmediate;

        return kvm_mips_trans_replace(vcpu, opc, synci_inst);
}

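/*
 * MFC0 is rewritten so the guest reads the register value without trapping:
 * most registers become an LW from that register's slot in the commpage,
 * addressed off $zero (this assumes KVM_GUEST_COMMPAGE_ADDR is small enough
 * for the 16-bit immediate). A read of ErrCtl is instead turned into an ADD
 * of $zero + $zero into the destination register, so the guest simply sees
 * 0. On big-endian kernels with 64-bit register storage, the "| 4" points
 * the 32-bit load at the low half of the saved register.
 */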
int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
                        struct kvm_vcpu *vcpu)
{
        union mips_instruction mfc0_inst = { 0 };
        u32 rd, sel;

        rd = inst.c0r_format.rd;
        sel = inst.c0r_format.sel;

        if (rd == MIPS_CP0_ERRCTL && sel == 0) {
                mfc0_inst.r_format.opcode = spec_op;
                mfc0_inst.r_format.rd = inst.c0r_format.rt;
                mfc0_inst.r_format.func = add_op;
        } else {
                mfc0_inst.i_format.opcode = lw_op;
                mfc0_inst.i_format.rt = inst.c0r_format.rt;
                mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
                        offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
#ifdef CONFIG_CPU_BIG_ENDIAN
                if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
                        mfc0_inst.i_format.simmediate |= 4;
#endif
        }

        return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
}

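/*
 * MTC0 is rewritten into an SW that stores the guest register straight into
 * that register's slot in the commpage, again addressed off $zero. Since
 * vcpu->arch.cop0 is expected to point into the commpage, the stored value
 * is what KVM's emulation later sees. This is presumably only done for
 * registers whose writes need no immediate side-effect emulation; that
 * choice is left to the caller.
 */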
int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
                        struct kvm_vcpu *vcpu)
{
        union mips_instruction mtc0_inst = { 0 };
        u32 rd, sel;

        rd = inst.c0r_format.rd;
        sel = inst.c0r_format.sel;

        mtc0_inst.i_format.opcode = sw_op;
        mtc0_inst.i_format.rt = inst.c0r_format.rt;
        mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
                offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
#ifdef CONFIG_CPU_BIG_ENDIAN
        if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
                mtc0_inst.i_format.simmediate |= 4;
#endif

        return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
}