1 // SPDX-License-Identifier: GPL-2.0
5 #include <linux/bitfield.h>
7 #define MDSCR_KDE (1 << 13)
8 #define MDSCR_MDE (1 << 15)
9 #define MDSCR_SS (1 << 0)
11 #define DBGBCR_LEN8 (0xff << 5)
12 #define DBGBCR_EXEC (0x0 << 3)
13 #define DBGBCR_EL1 (0x1 << 1)
14 #define DBGBCR_E (0x1 << 0)
15 #define DBGBCR_LBN_SHIFT 16
16 #define DBGBCR_BT_SHIFT 20
17 #define DBGBCR_BT_ADDR_LINK_CTX (0x1 << DBGBCR_BT_SHIFT)
18 #define DBGBCR_BT_CTX_LINK (0x3 << DBGBCR_BT_SHIFT)
20 #define DBGWCR_LEN8 (0xff << 5)
21 #define DBGWCR_RD (0x1 << 3)
22 #define DBGWCR_WR (0x2 << 3)
23 #define DBGWCR_EL1 (0x1 << 1)
24 #define DBGWCR_E (0x1 << 0)
25 #define DBGWCR_LBN_SHIFT 16
26 #define DBGWCR_WT_SHIFT 20
27 #define DBGWCR_WT_LINK (0x1 << DBGWCR_WT_SHIFT)
29 #define SPSR_D (1 << 9)
30 #define SPSR_SS (1 << 21)
32 extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start, hw_bp_ctx;
33 extern unsigned char iter_ss_begin, iter_ss_end;
34 static volatile uint64_t sw_bp_addr, hw_bp_addr;
35 static volatile uint64_t wp_addr, wp_data_addr;
36 static volatile uint64_t svc_addr;
37 static volatile uint64_t ss_addr[4], ss_idx;
38 #define PC(v) ((uint64_t)&(v))
/*
 * Generate write_<reg_name>(num, val): write @val into the numbered debug
 * register <reg_name><num>_el1, for num in 0..15.  System-register names
 * must be compile-time immediates, so one write_sysreg() is emitted per
 * index rather than computing the register name at runtime.
 * NOTE(review): the per-index dispatch (presumably a switch on @num) and
 * the function braces fall on lines elided from this excerpt — confirm
 * against the full file before modifying.
 */
40 #define GEN_DEBUG_WRITE_REG(reg_name) \
41 static void write_##reg_name(int num, uint64_t val) \
45 write_sysreg(val, reg_name##0_el1); \
48 write_sysreg(val, reg_name##1_el1); \
51 write_sysreg(val, reg_name##2_el1); \
54 write_sysreg(val, reg_name##3_el1); \
57 write_sysreg(val, reg_name##4_el1); \
60 write_sysreg(val, reg_name##5_el1); \
63 write_sysreg(val, reg_name##6_el1); \
66 write_sysreg(val, reg_name##7_el1); \
69 write_sysreg(val, reg_name##8_el1); \
72 write_sysreg(val, reg_name##9_el1); \
75 write_sysreg(val, reg_name##10_el1); \
78 write_sysreg(val, reg_name##11_el1); \
81 write_sysreg(val, reg_name##12_el1); \
84 write_sysreg(val, reg_name##13_el1); \
87 write_sysreg(val, reg_name##14_el1); \
90 write_sysreg(val, reg_name##15_el1); \
/*
 * Instantiate indexed writers for the four debug register banks:
 * breakpoint control/value (dbgbcr/dbgbvr) and watchpoint control/value
 * (dbgwcr/dbgwvr).
 */
97 /* Define write_dbgbcr()/write_dbgbvr()/write_dbgwcr()/write_dbgwvr() */
98 GEN_DEBUG_WRITE_REG(dbgbcr)
99 GEN_DEBUG_WRITE_REG(dbgbvr)
100 GEN_DEBUG_WRITE_REG(dbgwcr)
101 GEN_DEBUG_WRITE_REG(dbgwvr)
/*
 * Put the guest's EL1 debug state into a known-clean baseline: mask debug
 * exceptions, clear the OS (double) lock, zero MDSCR_EL1/CONTEXTIDR_EL1,
 * and reset every implemented breakpoint/watchpoint register pair.
 */
103 static void reset_debug_state(void)
105 uint8_t brps, wrps, i;
108 asm volatile("msr daifset, #8"); /* mask debug exceptions (PSTATE.D) */
110 write_sysreg(0, osdlr_el1); /* clear OS Double Lock */
111 write_sysreg(0, oslar_el1); /* clear OS Lock */
114 write_sysreg(0, mdscr_el1);
115 write_sysreg(0, contextidr_el1);
117 /* Reset all bcr/bvr/wcr/wvr registers */
118 dfr0 = read_sysreg(id_aa64dfr0_el1); /* NOTE(review): dfr0 declared on a line elided from this excerpt */
119 brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), dfr0);
120 for (i = 0; i <= brps; i++) { /* BRPs field holds (count - 1), hence inclusive */
124 wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), dfr0);
125 for (i = 0; i <= wrps; i++) { /* WRPs likewise holds (count - 1) */
/*
 * Engage the OS Lock via OSLAR_EL1 and assert that OSLSR_EL1.OSLK
 * (bit 1) reads back as set.  With the lock held, hardware
 * breakpoint/watchpoint and single-step debug exceptions are expected
 * to be suppressed (exercised by guest_code() below).
 */
133 static void enable_os_lock(void)
135 write_sysreg(1, oslar_el1);
138 GUEST_ASSERT(read_sysreg(oslsr_el1) & 2);
/*
 * Unmask debug exceptions (clear PSTATE.D) and enable monitor debug
 * events by setting MDSCR_EL1.KDE and MDSCR_EL1.MDE, so installed
 * breakpoints/watchpoints actually fire at EL1.
 */
141 static void enable_monitor_debug_exceptions(void)
145 asm volatile("msr daifclr, #8"); /* unmask debug exceptions */
147 mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE;
148 write_sysreg(mdscr, mdscr_el1);
/*
 * Program watchpoint @wpn to watch the 8 bytes at @addr for both loads
 * and stores at EL1, then enable monitor debug exceptions.
 */
152 static void install_wp(uint8_t wpn, uint64_t addr)
156 wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E;
157 write_dbgwcr(wpn, wcr);
158 write_dbgwvr(wpn, addr);
162 enable_monitor_debug_exceptions();
/*
 * Program hardware breakpoint @bpn for instruction execution at @addr
 * (EL1, unlinked address match), then enable monitor debug exceptions.
 */
165 static void install_hw_bp(uint8_t bpn, uint64_t addr)
169 bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E;
170 write_dbgbcr(bpn, bcr);
171 write_dbgbvr(bpn, addr);
174 enable_monitor_debug_exceptions();
/*
 * Install a linked watchpoint: breakpoint @ctx_bp is set up as a
 * context-aware comparator matching CONTEXTIDR value @ctx, and
 * watchpoint @addr_wp watches @addr but is linked (DBGWCR.WT + LBN) to
 * @ctx_bp, so it only fires while the context ID matches.
 * NOTE(review): the DBGBCR_BT_CTX_LINK term of ctx_bcr sits on a line
 * elided from this excerpt (trailing '|' on line 184).
 */
177 static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr,
183 /* Setup a context-aware breakpoint for Linked Context ID Match */
184 ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
186 write_dbgbcr(ctx_bp, ctx_bcr);
187 write_dbgbvr(ctx_bp, ctx);
189 /* Setup a linked watchpoint (linked to the context-aware breakpoint) */
190 wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E |
191 DBGWCR_WT_LINK | ((uint32_t)ctx_bp << DBGWCR_LBN_SHIFT);
192 write_dbgwcr(addr_wp, wcr);
193 write_dbgwvr(addr_wp, addr);
196 enable_monitor_debug_exceptions();
/*
 * Install a linked hardware breakpoint pair: @ctx_bp is a context-aware
 * comparator matching CONTEXTIDR value @ctx; @addr_bp matches execution
 * at @addr and is linked (DBGBCR.BT = addr-link-ctx, LBN = @ctx_bp) so
 * it only fires while the context ID matches.
 * NOTE(review): the DBGBCR_BT_CTX_LINK term of ctx_bcr sits on a line
 * elided from this excerpt (trailing '|' on line 205).
 */
199 void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr,
202 uint32_t addr_bcr, ctx_bcr;
204 /* Setup a context-aware breakpoint for Linked Context ID Match */
205 ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
207 write_dbgbcr(ctx_bp, ctx_bcr);
208 write_dbgbvr(ctx_bp, ctx);
211 * Setup a normal breakpoint for Linked Address Match, and link it
212 * to the context-aware breakpoint.
214 addr_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
215 DBGBCR_BT_ADDR_LINK_CTX |
216 ((uint32_t)ctx_bp << DBGBCR_LBN_SHIFT);
217 write_dbgbcr(addr_bp, addr_bcr);
218 write_dbgbvr(addr_bp, addr);
221 enable_monitor_debug_exceptions();
/*
 * Enable software-step from within the guest: unmask debug exceptions
 * and set MDSCR_EL1.KDE | MDSCR_EL1.SS.  (PSTATE/SPSR.SS handling is
 * done by the step handler — see guest_ss_handler().)
 */
224 static void install_ss(void)
228 asm volatile("msr daifclr, #8"); /* unmask debug exceptions */
230 mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_SS;
231 write_sysreg(mdscr, mdscr_el1);
235 static volatile char write_data;
/*
 * Guest payload for test_guest_debug_exceptions(): walks through every
 * guest-side debug-exception scenario in sequence — software breakpoint,
 * hardware breakpoint (alone, with svc, with brk), watchpoint,
 * single-step, the OS Lock suppressing hw-bp/wp/step (but not brk), and
 * finally linked (context-aware) breakpoint and watchpoint.  Each stage
 * records state via the exception handlers and asserts the recorded
 * address matches the labelled instruction.
 * NOTE(review): reset_debug_state()/install_ss()/enable_os_lock() calls
 * and several asm operands sit on lines elided from this excerpt.
 */
237 static void guest_code(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
239 uint64_t ctx = 0xabcdef; /* a random context number */
241 /* Software-breakpoint */
243 asm volatile("sw_bp: brk #0");
244 GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp));
246 /* Hardware-breakpoint */
248 install_hw_bp(bpn, PC(hw_bp));
249 asm volatile("hw_bp: nop");
250 GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp));
252 /* Hardware-breakpoint + svc */
254 install_hw_bp(bpn, PC(bp_svc));
255 asm volatile("bp_svc: svc #0");
256 GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_svc));
257 GUEST_ASSERT_EQ(svc_addr, PC(bp_svc) + 4); /* svc handler sees the next insn */
259 /* Hardware-breakpoint + software-breakpoint */
261 install_hw_bp(bpn, PC(bp_brk));
262 asm volatile("bp_brk: brk #0");
263 GUEST_ASSERT_EQ(sw_bp_addr, PC(bp_brk));
264 GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_brk));
268 install_wp(wpn, PC(write_data));
270 GUEST_ASSERT_EQ(write_data, 'x');
271 GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));
277 asm volatile("ss_start:\n"
282 GUEST_ASSERT_EQ(ss_addr[0], PC(ss_start)); /* one step per 4-byte insn */
283 GUEST_ASSERT_EQ(ss_addr[1], PC(ss_start) + 4);
284 GUEST_ASSERT_EQ(ss_addr[2], PC(ss_start) + 8);
286 /* OS Lock does not block software-breakpoint */
290 asm volatile("sw_bp2: brk #0");
291 GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp2));
293 /* OS Lock blocking hardware-breakpoint */
296 install_hw_bp(bpn, PC(hw_bp2));
298 asm volatile("hw_bp2: nop");
299 GUEST_ASSERT_EQ(hw_bp_addr, 0); /* must NOT have fired under OS Lock */
301 /* OS Lock blocking watchpoint */
306 install_wp(wpn, PC(write_data));
308 GUEST_ASSERT_EQ(write_data, 'x');
309 GUEST_ASSERT_EQ(wp_data_addr, 0); /* must NOT have fired under OS Lock */
311 /* OS Lock blocking single-step */
317 asm volatile("mrs x0, esr_el1\n\t"
319 "msr daifset, #8\n\t"
321 GUEST_ASSERT_EQ(ss_addr[0], 0); /* no step exception recorded */
323 /* Linked hardware-breakpoint */
326 install_hw_bp_ctx(bpn, ctx_bpn, PC(hw_bp_ctx), ctx);
328 write_sysreg(ctx, contextidr_el1); /* make the linked context match */
330 asm volatile("hw_bp_ctx: nop");
331 write_sysreg(0, contextidr_el1);
332 GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp_ctx));
334 /* Linked watchpoint */
336 install_wp_ctx(wpn, ctx_bpn, PC(write_data), ctx);
338 write_sysreg(ctx, contextidr_el1); /* make the linked context match */
341 GUEST_ASSERT_EQ(write_data, 'x');
342 GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));
/*
 * BRK (software breakpoint) handler: record the faulting pc.
 * NOTE(review): the pc advance past the brk instruction is presumably on
 * a line elided from this excerpt — confirm against the full file.
 */
347 static void guest_sw_bp_handler(struct ex_regs *regs)
349 sw_bp_addr = regs->pc;
/*
 * Hardware breakpoint handler: record the pc, then set SPSR.D so debug
 * exceptions stay masked on return and the breakpoint does not re-fire
 * on the same instruction.
 */
353 static void guest_hw_bp_handler(struct ex_regs *regs)
355 hw_bp_addr = regs->pc;
356 regs->pstate |= SPSR_D;
/*
 * Watchpoint handler: record the watched data address from FAR_EL1 and
 * set SPSR.D to mask debug exceptions on return (prevents re-fire when
 * the access is replayed).
 */
359 static void guest_wp_handler(struct ex_regs *regs)
361 wp_data_addr = read_sysreg(far_el1);
363 regs->pstate |= SPSR_D;
/*
 * Software-step handler: log each stepped pc into ss_addr[] (bounded by
 * the 4-entry array) and re-arm the step by setting SPSR.SS so exactly
 * one more instruction executes after eret.
 */
366 static void guest_ss_handler(struct ex_regs *regs)
368 __GUEST_ASSERT(ss_idx < 4, "Expected index < 4, got '%lu'", ss_idx);
369 ss_addr[ss_idx++] = regs->pc;
370 regs->pstate |= SPSR_SS;
/*
 * SVC handler — body elided from this excerpt; presumably records
 * regs->pc into svc_addr (guest_code asserts svc_addr == bp_svc + 4).
 */
373 static void guest_svc_handler(struct ex_regs *regs)
/*
 * Guest payload for test_single_step_from_userspace(): loops @test_cnt
 * times writing/reading dbgbvr0_el1/dbgwvr0_el1 between the
 * iter_ss_begin/iter_ss_end labels while userspace single-steps it, then
 * asserts the read-back values match what was written (i.e. the stepping
 * did not corrupt the debug registers).
 * NOTE(review): the w_bvr/w_wvr value setup (line ~385) and the bare
 * GUEST_SYNC/ucall are on lines elided from this excerpt.
 */
378 static void guest_code_ss(int test_cnt)
381 uint64_t bvr, wvr, w_bvr, w_wvr;
383 for (i = 0; i < test_cnt; i++) {
384 /* Bits [1:0] of dbg{b,w}vr are RES0 */
389 * Enable Single Step execution. Note! This _must_ be a bare
390 * ucall as the ucall() path uses atomic operations to manage
391 * the ucall structures, and the built-in "atomics" are usually
392 * implemented via exclusive access instructions. The exlusive
393 * monitor is cleared on ERET, and so taking debug exceptions
394 * during a LDREX=>STREX sequence will prevent forward progress
395 * and hang the guest/test.
400 * The userspace will verify that the pc is as expected during
401 * single step execution between iter_ss_begin and iter_ss_end.
403 asm volatile("iter_ss_begin:nop\n");
405 write_sysreg(w_bvr, dbgbvr0_el1);
406 write_sysreg(w_wvr, dbgwvr0_el1);
407 bvr = read_sysreg(dbgbvr0_el1);
408 wvr = read_sysreg(dbgwvr0_el1);
410 /* Userspace disables Single Step when the end is nigh. */
411 asm volatile("iter_ss_end:\n");
413 GUEST_ASSERT_EQ(bvr, w_bvr);
414 GUEST_ASSERT_EQ(wvr, w_wvr);
/*
 * Extract the DebugVer field from an ID_AA64DFR0_EL1 value; used to
 * gate the whole test on the supported debug architecture version.
 */
419 static int debug_version(uint64_t id_aa64dfr0)
421 return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0);
/*
 * Host side of the guest debug-exception test: create a VM running
 * guest_code(), install the guest's sync exception handlers (brk, hw-bp,
 * watchpoint, sstep, svc), pass the breakpoint/watchpoint/context-
 * breakpoint numbers to exercise, then run until the guest reports
 * done/abort via ucall.
 */
424 static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
426 struct kvm_vcpu *vcpu;
430 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
432 vm_init_descriptor_tables(vm);
433 vcpu_init_descriptor_tables(vcpu);
435 vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
436 ESR_EC_BRK_INS, guest_sw_bp_handler);
437 vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
438 ESR_EC_HW_BP_CURRENT, guest_hw_bp_handler);
439 vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
440 ESR_EC_WP_CURRENT, guest_wp_handler);
441 vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
442 ESR_EC_SSTEP_CURRENT, guest_ss_handler);
443 vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
444 ESR_EC_SVC64, guest_svc_handler);
446 /* Specify bpn/wpn/ctx_bpn to be tested */
447 vcpu_args_set(vcpu, 3, bpn, wpn, ctx_bpn);
448 pr_debug("Use bpn#%d, wpn#%d and ctx_bpn#%d\n", bpn, wpn, ctx_bpn);
451 switch (get_ucall(vcpu, &uc)) {
453 REPORT_GUEST_ASSERT(uc);
458 TEST_FAIL("Unknown ucall %lu", uc.cmd);
/*
 * Drive guest_code_ss() with KVM_GUESTDBG_SINGLESTEP from userspace:
 * toggle single-step on at each guest request (bare ucall), verify each
 * KVM_EXIT_DEBUG lands on the expected pc (previous pc + 4 while inside
 * iter_ss_begin..iter_ss_end), and switch stepping off just before the
 * guest reaches iter_ss_end.
 * NOTE(review): the run-loop head, ss_enable updates, and test_pc
 * bookkeeping sit on lines elided from this excerpt.
 */
465 void test_single_step_from_userspace(int test_cnt)
467 struct kvm_vcpu *vcpu;
472 uint64_t test_pc = 0;
473 bool ss_enable = false;
474 struct kvm_guest_debug debug = {};
476 vm = vm_create_with_one_vcpu(&vcpu, guest_code_ss);
478 vcpu_args_set(vcpu, 1, test_cnt);
482 if (run->exit_reason != KVM_EXIT_DEBUG) {
483 cmd = get_ucall(vcpu, &uc);
484 if (cmd == UCALL_ABORT) {
485 REPORT_GUEST_ASSERT(uc);
487 } else if (cmd == UCALL_DONE) {
491 TEST_ASSERT(cmd == UCALL_NONE,
492 "Unexpected ucall cmd 0x%lx", cmd);
494 debug.control = KVM_GUESTDBG_ENABLE |
495 KVM_GUESTDBG_SINGLESTEP;
497 vcpu_guest_debug_set(vcpu, &debug);
501 TEST_ASSERT(ss_enable, "Unexpected KVM_EXIT_DEBUG");
503 /* Check if the current pc is expected. */
504 vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
505 TEST_ASSERT(!test_pc || pc == test_pc,
506 "Unexpected pc 0x%lx (expected 0x%lx)",
509 if ((pc + 4) == (uint64_t)&iter_ss_end) {
511 debug.control = KVM_GUESTDBG_ENABLE; /* keep debug on, drop SINGLESTEP */
513 vcpu_guest_debug_set(vcpu, &debug);
518 * If the current pc is between iter_ss_bgin and
519 * iter_ss_end, the pc for the next KVM_EXIT_DEBUG should
520 * be the current pc + 4.
522 if ((pc >= (uint64_t)&iter_ss_begin) &&
523 (pc < (uint64_t)&iter_ss_end))
533 * Run debug testing using the various breakpoint#, watchpoint# and
534 * context-aware breakpoint# with the given ID_AA64DFR0_EL1 configuration.
536 void test_guest_debug_exceptions_all(uint64_t aa64dfr0)
538 uint8_t brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base;
541 /* Number of breakpoints */
542 brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), aa64dfr0) + 1;
543 __TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required");
545 /* Number of watchpoints */
546 wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), aa64dfr0) + 1;
548 /* Number of context aware breakpoints */
549 ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_CTX_CMPs), aa64dfr0) + 1;
551 pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__,
552 brp_num, wrp_num, ctx_brp_num);
554 /* Number of normal (non-context aware) breakpoints */
555 normal_brp_num = brp_num - ctx_brp_num;
557 /* Lowest context aware breakpoint number (context-aware bps occupy the highest-numbered slots) */
558 ctx_brp_base = normal_brp_num;
560 /* Run tests with all supported breakpoints/watchpoints */
561 for (c = ctx_brp_base; c < ctx_brp_base + ctx_brp_num; c++) {
562 for (b = 0; b < normal_brp_num; b++) {
563 for (w = 0; w < wrp_num; w++)
564 test_guest_debug_exceptions(b, w, c);
/* Print command-line usage for this selftest. */
569 static void help(char *name)
572 printf("Usage: %s [-h] [-i iterations of the single step test]\n", name);
/*
 * Entry point: probe ID_AA64DFR0_EL1 from a throwaway vCPU, require
 * DebugVer >= 6 (Armv8 debug architecture), parse -i (single-step
 * iteration count), then run the full debug-exception matrix and the
 * userspace single-step test.
 * NOTE(review): throwaway-VM teardown and the getopt default/'h' cases
 * are on lines elided from this excerpt.
 */
577 int main(int argc, char *argv[])
579 struct kvm_vcpu *vcpu;
582 int ss_iteration = 10000;
585 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
586 vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &aa64dfr0);
587 __TEST_REQUIRE(debug_version(aa64dfr0) >= 6,
588 "Armv8 debug architecture not supported.");
591 while ((opt = getopt(argc, argv, "i:")) != -1) {
594 ss_iteration = atoi_positive("Number of iterations", optarg);
603 test_guest_debug_exceptions_all(aa64dfr0);
604 test_single_step_from_userspace(ss_iteration);