KVM: s390: hardware support for guest debugging
arch/s390/kvm/guestdbg.c
/*
 * kvm guest debug support
 *
 * Copyright IBM Corp. 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include "kvm-s390.h"
#include "gaccess.h"

/*
 * Extends the address range given by *start and *stop to include the address
 * range starting at estart with length len. Takes care of overflowing
 * intervals and tries to minimize the overall interval size.
 */
static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
{
	u64 estop;

	if (len > 0)
		len--;
	else
		len = 0;

	estop = estart + len;

	/* 0-0 range represents "not set" */
	if ((*start == 0) && (*stop == 0)) {
		*start = estart;
		*stop = estop;
	} else if (*start <= *stop) {
		/* increase the existing range */
		if (estart < *start)
			*start = estart;
		if (estop > *stop)
			*stop = estop;
	} else {
		/* "overflowing" interval, whereby *start > *stop */
		if (estart <= *stop) {
			if (estop > *stop)
				*stop = estop;
		} else if (estop > *start) {
			if (estart < *start)
				*start = estart;
		}
		/* minimize the range */
		else if ((estop - *stop) < (*start - estart))
			*stop = estop;
		else
			*start = estart;
	}
}
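
/*
 * Worked example (added for illustration, not part of the original source):
 * an "overflowing" interval such as *start = 0xfffffffffffffff0,
 * *stop = 0x0000000000000010 wraps around the end of the 64-bit address
 * space. Extending it with estart = 0x18 and len = 8 gives estop = 0x1f;
 * neither end of the new range falls inside the old one, so the nearer
 * boundary is grown: *stop becomes 0x1f and the wrapping shape is kept.
 */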

#define MAX_INST_SIZE 6

static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
	    vcpu->arch.guestdbg.hw_bp_info == NULL)
		return;

	/*
	 * If the guest is not interested in branching events, we can safely
	 * limit them to the PER address range.
	 */
	if (!(*cr9 & PER_EVENT_BRANCH))
		*cr9 |= PER_CONTROL_BRANCH_ADDRESS;
	*cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
		len = vcpu->arch.guestdbg.hw_bp_info[i].len;

		/*
		 * The instruction in front of the desired bp has to
		 * report instruction-fetching events
		 */
		if (start < MAX_INST_SIZE) {
			len += start;
			start = 0;
		} else {
			start -= MAX_INST_SIZE;
			len += MAX_INST_SIZE;
		}

		extend_address_range(cr10, cr11, start, len);
	}
}
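
/*
 * Example of the widening above (added for illustration): a breakpoint at
 * addr = 0x1000 with len = 2 becomes the PER range start = 0xffa, len = 8,
 * so that fetching the (up to 6 byte long) instruction immediately in
 * front of the breakpoint still raises an instruction-fetching event.
 */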

static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
	    vcpu->arch.guestdbg.hw_wp_info == NULL)
		return;

	/*
	 * If the host uses storage alteration for special address spaces,
	 * enable all events and give them all to the guest.
	 */
	if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr10 = 0;
		*cr11 = PSW_ADDR_INSN;
	} else {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr9 |= PER_EVENT_STORE;

		for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
			start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
			len = vcpu->arch.guestdbg.hw_wp_info[i].len;

			extend_address_range(cr10, cr11, start, len);
		}
	}
}

void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
	vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
	vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
	vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
}

void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
	vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
	vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
	vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
}

void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: only set these controls if the guest PSW has PER enabled,
	 * and program 0s otherwise. That would reduce the amount of
	 * reported events. It requires intercepting all PSW changes!
	 */

	if (guestdbg_sstep_enabled(vcpu)) {
		vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
		vcpu->arch.sie_block->gcr[10] = 0;
		vcpu->arch.sie_block->gcr[11] = PSW_ADDR_INSN;
	}

	if (guestdbg_hw_bp_enabled(vcpu)) {
		enable_all_hw_bp(vcpu);
		enable_all_hw_wp(vcpu);
	}

	/* TODO: Instruction-fetching-nullification not allowed for now */
	if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION)
		vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION;
}
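
/*
 * Note (added for clarity): single-stepping is realized above by enabling
 * instruction-fetching events over the whole address range (0 through
 * PSW_ADDR_INSN), so every instruction the guest fetches produces a PER
 * event and an intercept back to the host.
 */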

#define MAX_WP_SIZE 100

static int __import_wp_info(struct kvm_vcpu *vcpu,
			    struct kvm_hw_breakpoint *bp_data,
			    struct kvm_hw_wp_info_arch *wp_info)
{
	int ret = 0;

	wp_info->len = bp_data->len;
	wp_info->addr = bp_data->addr;
	wp_info->phys_addr = bp_data->phys_addr;
	wp_info->old_data = NULL;

	if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
		return -EINVAL;

	wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL);
	if (!wp_info->old_data)
		return -ENOMEM;
	/* try to backup the original value */
	ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data,
			 wp_info->len);
	if (ret) {
		kfree(wp_info->old_data);
		wp_info->old_data = NULL;
	}

	return ret;
}
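
/*
 * Note (added for clarity): the snapshot taken into old_data above is what
 * any_wp_changed() later compares against to decide whether a write
 * watchpoint actually fired.
 */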

#define MAX_BP_COUNT 50

int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg)
{
	int ret = 0, nr_wp = 0, nr_bp = 0, i, size;
	struct kvm_hw_breakpoint *bp_data = NULL;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;

	if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp)
		return 0;
	else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
		return -EINVAL;

	size = dbg->arch.nr_hw_bp * sizeof(struct kvm_hw_breakpoint);
	bp_data = kmalloc(size, GFP_KERNEL);
	if (!bp_data) {
		ret = -ENOMEM;
		goto error;
	}

	/* copy_from_user() returns the number of bytes left, not an errno */
	if (copy_from_user(bp_data, dbg->arch.hw_bp, size)) {
		ret = -EFAULT;
		goto error;
	}

	for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			nr_wp++;
			break;
		case KVM_HW_BP:
			nr_bp++;
			break;
		default:
			break;
		}
	}

	size = nr_wp * sizeof(struct kvm_hw_wp_info_arch);
	if (size > 0) {
		wp_info = kmalloc(size, GFP_KERNEL);
		if (!wp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}
	size = nr_bp * sizeof(struct kvm_hw_bp_info_arch);
	if (size > 0) {
		bp_info = kmalloc(size, GFP_KERNEL);
		if (!bp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}

	for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			ret = __import_wp_info(vcpu, &bp_data[i],
					       &wp_info[nr_wp]);
			if (ret)
				goto error;
			nr_wp++;
			break;
		case KVM_HW_BP:
			bp_info[nr_bp].len = bp_data[i].len;
			bp_info[nr_bp].addr = bp_data[i].addr;
			nr_bp++;
			break;
		}
	}

	vcpu->arch.guestdbg.nr_hw_bp = nr_bp;
	vcpu->arch.guestdbg.hw_bp_info = bp_info;
	vcpu->arch.guestdbg.nr_hw_wp = nr_wp;
	vcpu->arch.guestdbg.hw_wp_info = wp_info;
	/* bp_data was only a staging copy; don't leak it on success */
	kfree(bp_data);
	return 0;
error:
	kfree(bp_data);
	kfree(wp_info);
	kfree(bp_info);
	return ret;
}
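
/*
 * Rough userspace sketch (added for illustration, not part of the kernel
 * source): this import path is reached via the KVM_SET_GUEST_DEBUG vcpu
 * ioctl. A VMM would fill in something like the following; the vcpu fd
 * plumbing and error handling are assumed/omitted here.
 *
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *
 *	void set_hw_bp(int vcpu_fd, __u64 guest_addr)
 *	{
 *		struct kvm_hw_breakpoint bp = {
 *			.addr = guest_addr,
 *			.len  = 1,
 *			.type = KVM_HW_BP,
 *		};
 *		struct kvm_guest_debug dbg = {
 *			.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
 *		};
 *
 *		dbg.arch.nr_hw_bp = 1;
 *		dbg.arch.hw_bp = &bp;
 *		ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *	}
 */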

void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *hw_wp_info = NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		kfree(hw_wp_info->old_data);
		hw_wp_info->old_data = NULL;
	}
	kfree(vcpu->arch.guestdbg.hw_wp_info);
	vcpu->arch.guestdbg.hw_wp_info = NULL;

	kfree(vcpu->arch.guestdbg.hw_bp_info);
	vcpu->arch.guestdbg.hw_bp_info = NULL;

	vcpu->arch.guestdbg.nr_hw_wp = 0;
	vcpu->arch.guestdbg.nr_hw_bp = 0;
}

static inline int in_addr_range(u64 addr, u64 a, u64 b)
{
	if (a <= b)
		return (addr >= a) && (addr <= b);
	else
		/* "overflowing" interval: [a, max] plus [0, b] */
		return (addr >= a) || (addr <= b);
}
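
/*
 * Illustration (added, not in the original): for the wrapping range
 * a = 0xfffffffffffffff0, b = 0x10, the addresses 0xfffffffffffffff8 and
 * 0x8 are both inside the range, while 0x20 is outside.
 */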

#define end_of_range(bp_info) ((bp_info)->addr + (bp_info)->len - 1)

static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu,
					      unsigned long addr)
{
	struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info;
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		/* addr is directly the start or in the range of a bp */
		if (addr == bp_info->addr)
			goto found;
		if (bp_info->len > 0 &&
		    in_addr_range(addr, bp_info->addr, end_of_range(bp_info)))
			goto found;

		bp_info++;
	}

	return NULL;
found:
	return bp_info;
}

static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	void *temp = NULL;

	if (vcpu->arch.guestdbg.nr_hw_wp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
			continue;

		temp = kmalloc(wp_info->len, GFP_KERNEL);
		if (!temp)
			continue;

		/* refetch the wp data and compare it to the old value */
		if (!read_guest(vcpu, wp_info->phys_addr, temp,
				wp_info->len)) {
			if (memcmp(temp, wp_info->old_data, wp_info->len)) {
				kfree(temp);
				return wp_info;
			}
		}
		kfree(temp);
		temp = NULL;
	}

	return NULL;
}

void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_DEBUG;
	vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}

#define per_bp_event(code) \
	(code & (PER_EVENT_IFETCH | PER_EVENT_BRANCH))
#define per_write_wp_event(code) \
	(code & (PER_EVENT_STORE | PER_EVENT_STORE_REAL))

static int debug_exit_required(struct kvm_vcpu *vcpu)
{
	u32 perc = (vcpu->arch.sie_block->perc << 24);
	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;
	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
	unsigned long peraddr = vcpu->arch.sie_block->peraddr;

	if (guestdbg_hw_bp_enabled(vcpu)) {
		if (per_write_wp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_wp > 0) {
			wp_info = any_wp_changed(vcpu);
			if (wp_info) {
				debug_exit->addr = wp_info->addr;
				debug_exit->type = KVM_HW_WP_WRITE;
				goto exit_required;
			}
		}
		if (per_bp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_bp > 0) {
			bp_info = find_hw_bp(vcpu, addr);
			/* remove duplicate events if PC==PER address */
			if (bp_info && (addr != peraddr)) {
				debug_exit->addr = addr;
				debug_exit->type = KVM_HW_BP;
				vcpu->arch.guestdbg.last_bp = addr;
				goto exit_required;
			}
			/* breakpoint missed */
			bp_info = find_hw_bp(vcpu, peraddr);
			if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) {
				debug_exit->addr = peraddr;
				debug_exit->type = KVM_HW_BP;
				goto exit_required;
			}
		}
	}
	if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) {
		debug_exit->addr = addr;
		debug_exit->type = KVM_SINGLESTEP;
		goto exit_required;
	}

	return 0;
exit_required:
	return 1;
}
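
/*
 * Ordering note (added for clarity): write watchpoints are checked first,
 * then hardware breakpoints (preferring the PSW address over the PER
 * address so the same breakpoint is not reported twice), and single-step
 * exits are reported last.
 */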

#define guest_per_enabled(vcpu) \
	(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)

static void filter_guest_per_event(struct kvm_vcpu *vcpu)
{
	u32 perc = vcpu->arch.sie_block->perc << 24;
	u64 peraddr = vcpu->arch.sie_block->peraddr;
	u64 addr = vcpu->arch.sie_block->gpsw.addr;
	u64 cr9 = vcpu->arch.sie_block->gcr[9];
	u64 cr10 = vcpu->arch.sie_block->gcr[10];
	u64 cr11 = vcpu->arch.sie_block->gcr[11];
	/* keep only the PER events demanded by the guest */
	u32 guest_perc = perc & cr9 & PER_EVENT_MASK;

	if (!guest_per_enabled(vcpu))
		guest_perc = 0;

	/* filter "successful-branching" events */
	if (guest_perc & PER_EVENT_BRANCH &&
	    cr9 & PER_CONTROL_BRANCH_ADDRESS &&
	    !in_addr_range(addr, cr10, cr11))
		guest_perc &= ~PER_EVENT_BRANCH;

	/* filter "instruction-fetching" events */
	if (guest_perc & PER_EVENT_IFETCH &&
	    !in_addr_range(peraddr, cr10, cr11))
		guest_perc &= ~PER_EVENT_IFETCH;

	/* All other PER events will be given to the guest */
	/* TODO: Check altered address/address space */

	vcpu->arch.sie_block->perc = guest_perc >> 24;

	if (!guest_perc)
		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
}
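
/*
 * Example of the filtering above (added for illustration): if the host is
 * single-stepping (instruction-fetching events over the whole address
 * space) while the guest only asked for storage-alteration events,
 * guest_perc ends up 0, the PER condition is cleared from iprcc and the
 * guest never sees the host-only event.
 */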

void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
{
	if (debug_exit_required(vcpu))
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;

	filter_guest_per_event(vcpu);
}