/*	$NetBSD: db_machdep.c,v 1.45 2022/10/26 23:38:05 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: db_machdep.c,v 1.45 2022/10/26 23:38:05 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_compat_netbsd32.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/lwp.h>
#include <sys/intr.h>

#include <uvm/uvm.h>

#include <arm/cpufunc.h>

#include <aarch64/db_machdep.h>
#include <aarch64/armreg.h>
#include <aarch64/locore.h>
#include <aarch64/pmap.h>

#include <arm/cpufunc.h>

#include <ddb/db_access.h>
#include <ddb/db_active.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_proc.h>
#include <ddb/db_variables.h>
#include <ddb/db_run.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#include <ddb/db_interface.h>
#include <ddb/db_user.h>

#include <dev/cons.h>

void db_md_cpuinfo_cmd(db_expr_t, bool, db_expr_t, const char *);
void db_md_frame_cmd(db_expr_t, bool, db_expr_t, const char *);
void db_md_lwp_cmd(db_expr_t, bool, db_expr_t, const char *);
void db_md_pte_cmd(db_expr_t, bool, db_expr_t, const char *);
void db_md_reset_cmd(db_expr_t, bool, db_expr_t, const char *);
void db_md_tlbi_cmd(db_expr_t, bool, db_expr_t, const char *);
void db_md_ttbr_cmd(db_expr_t, bool, db_expr_t, const char *);
void db_md_sysreg_cmd(db_expr_t, bool, db_expr_t, const char *);
void db_md_break_cmd(db_expr_t, bool, db_expr_t, const char *);
void db_md_watch_cmd(db_expr_t, bool, db_expr_t, const char *);
#if defined(_KERNEL) && defined(MULTIPROCESSOR)
void db_md_switch_cpu_cmd(db_expr_t, bool, db_expr_t, const char *);
#endif
#if defined(_KERNEL)
static void db_md_meminfo_cmd(db_expr_t, bool, db_expr_t, const char *);
#endif

#ifdef _KERNEL
#define MAX_BREAKPOINT	15
#define MAX_WATCHPOINT	15
/* The number varies depending on the CPU core (e.g. big.LITTLE) */
static int max_breakpoint = MAX_BREAKPOINT;
static int max_watchpoint = MAX_WATCHPOINT;

struct breakpoint_info {
	db_addr_t addr;
};
static struct breakpoint_info breakpoint_buf[MAX_BREAKPOINT + 1];

struct watchpoint_info {
	db_addr_t addr;
	int size;
	int accesstype;
};
static struct watchpoint_info watchpoint_buf[MAX_WATCHPOINT + 1];
#endif

const struct db_command db_machine_command_table[] = {
#if defined(_KERNEL) && defined(MULTIPROCESSOR)
	{
		DDB_ADD_CMD(
		    "cpu", db_md_switch_cpu_cmd, 0,
		    "switch to a different cpu",
		    NULL, NULL)
	},
#endif
#if defined(_KERNEL)
	{
		DDB_ADD_CMD(
		    "break", db_md_break_cmd, 0,
		    "set or clear breakpoint",
		    "[address|#]",
		    "\taddress: breakpoint address to set\n"
		    "\t#: breakpoint number to remove\n")
	},
	{
		DDB_ADD_CMD(
		    "cpuinfo", db_md_cpuinfo_cmd, 0,
		    "Displays the current cpuinfo",
		    NULL, NULL)
	},
	{
		DDB_ADD_CMD(
		    "frame", db_md_frame_cmd, 0,
		    "Displays the contents of a trapframe",
		    "address",
		    "\taddress:\taddress of trapframe to display")
	},
	{
		DDB_ADD_CMD(
		    "lwp", db_md_lwp_cmd, 0,
		    "Displays the lwp",
		    "address",
		    "\taddress:\taddress of lwp to display")
	},
	{
		DDB_ADD_CMD(
		    "pte", db_md_pte_cmd, 0,
		    "Display information about a PTE",
		    "address",
		    "\taddress:\tvirtual address of page")
	},
	{
		DDB_ADD_CMD(
		    "reset", db_md_reset_cmd, 0,
		    "Reset the system",
		    NULL, NULL)
	},
	{
		DDB_ADD_CMD(
		    "sysreg", db_md_sysreg_cmd, 0,
		    "Displays system registers",
		    NULL, NULL)
	},
	{
		DDB_ADD_CMD(
		    "tlbi", db_md_tlbi_cmd, 0,
		    "Flush the TLB",
		    NULL, NULL)
	},
	{
		DDB_ADD_CMD(
		    "ttbr", db_md_ttbr_cmd, 0,
		    "Dump or count TTBR table",
		    "[/apc] address | pid",
		    "\taddress:\taddress of pmap to display\n"
		    "\tpid:\t\tpid of pmap to display")
	},
	{
		DDB_ADD_CMD(
		    "watch", db_md_watch_cmd, 0,
		    "set or clear watchpoint",
		    "[/rwbhlq] [address|#]",
		    "\taddress: watchpoint address to set\n"
		    "\t#: watchpoint number to remove\n"
		    "\t/rw: read or write access\n"
		    "\t/bhlq: size of access\n")
	},
	{
		DDB_ADD_CMD(
		    "meminfo", db_md_meminfo_cmd, 0,
		    "Dump info about memory ranges",
		    NULL, NULL)
	},
#endif
	{
		DDB_END_CMD
	},
};

const struct db_variable db_regs[] = {
	{ "x0", (long *) &ddb_regs.tf_reg[0], FCN_NULL, NULL },
	{ "x1", (long *) &ddb_regs.tf_reg[1], FCN_NULL, NULL },
	{ "x2", (long *) &ddb_regs.tf_reg[2], FCN_NULL, NULL },
	{ "x3", (long *) &ddb_regs.tf_reg[3], FCN_NULL, NULL },
	{ "x4", (long *) &ddb_regs.tf_reg[4], FCN_NULL, NULL },
	{ "x5", (long *) &ddb_regs.tf_reg[5], FCN_NULL, NULL },
	{ "x6", (long *) &ddb_regs.tf_reg[6], FCN_NULL, NULL },
	{ "x7", (long *) &ddb_regs.tf_reg[7], FCN_NULL, NULL },
	{ "x8", (long *) &ddb_regs.tf_reg[8], FCN_NULL, NULL },
	{ "x9", (long *) &ddb_regs.tf_reg[9], FCN_NULL, NULL },
	{ "x10", (long *) &ddb_regs.tf_reg[10], FCN_NULL, NULL },
	{ "x11", (long *) &ddb_regs.tf_reg[11], FCN_NULL, NULL },
	{ "x12", (long *) &ddb_regs.tf_reg[12], FCN_NULL, NULL },
	{ "x13", (long *) &ddb_regs.tf_reg[13], FCN_NULL, NULL },
	{ "x14", (long *) &ddb_regs.tf_reg[14], FCN_NULL, NULL },
	{ "x15", (long *) &ddb_regs.tf_reg[15], FCN_NULL, NULL },
	{ "x16", (long *) &ddb_regs.tf_reg[16], FCN_NULL, NULL },
{ "x17", (long *) &ddb_regs.tf_reg[17], FCN_NULL, NULL }, 220 { "x18", (long *) &ddb_regs.tf_reg[18], FCN_NULL, NULL }, 221 { "x19", (long *) &ddb_regs.tf_reg[19], FCN_NULL, NULL }, 222 { "x20", (long *) &ddb_regs.tf_reg[20], FCN_NULL, NULL }, 223 { "x21", (long *) &ddb_regs.tf_reg[21], FCN_NULL, NULL }, 224 { "x22", (long *) &ddb_regs.tf_reg[22], FCN_NULL, NULL }, 225 { "x23", (long *) &ddb_regs.tf_reg[23], FCN_NULL, NULL }, 226 { "x24", (long *) &ddb_regs.tf_reg[24], FCN_NULL, NULL }, 227 { "x25", (long *) &ddb_regs.tf_reg[25], FCN_NULL, NULL }, 228 { "x26", (long *) &ddb_regs.tf_reg[26], FCN_NULL, NULL }, 229 { "x27", (long *) &ddb_regs.tf_reg[27], FCN_NULL, NULL }, 230 { "x28", (long *) &ddb_regs.tf_reg[28], FCN_NULL, NULL }, 231 { "x29", (long *) &ddb_regs.tf_reg[29], FCN_NULL, NULL }, 232 { "x30", (long *) &ddb_regs.tf_reg[30], FCN_NULL, NULL }, 233 { "sp", (long *) &ddb_regs.tf_sp, FCN_NULL, NULL }, 234 { "pc", (long *) &ddb_regs.tf_pc, FCN_NULL, NULL }, 235 { "spsr", (long *) &ddb_regs.tf_spsr, FCN_NULL, NULL } 236 }; 237 238 const struct db_variable * const db_eregs = db_regs + __arraycount(db_regs); 239 int db_active; 240 241 242 void 243 dump_trapframe(struct trapframe *tf, void (*pr)(const char *, ...)) 244 { 245 struct trapframe tf_buf; 246 247 db_read_bytes((db_addr_t)tf, sizeof(tf_buf), (char *)&tf_buf); 248 tf = &tf_buf; 249 250 #ifdef COMPAT_NETBSD32 251 if (tf->tf_spsr & SPSR_A32) { 252 (*pr)(" pc=%016"PRIxREGISTER", spsr=%016"PRIxREGISTER 253 " (AArch32)\n", tf->tf_pc, tf->tf_spsr); 254 (*pr)(" esr=%016"PRIxREGISTER", far=%016"PRIxREGISTER"\n", 255 tf->tf_esr, tf->tf_far); 256 (*pr)(" r0=%016"PRIxREGISTER", r1=%016"PRIxREGISTER"\n", 257 tf->tf_reg[0], tf->tf_reg[1]); 258 (*pr)(" r2=%016"PRIxREGISTER", r3=%016"PRIxREGISTER"\n", 259 tf->tf_reg[2], tf->tf_reg[3]); 260 (*pr)(" r4=%016"PRIxREGISTER", r5=%016"PRIxREGISTER"\n", 261 tf->tf_reg[4], tf->tf_reg[5]); 262 (*pr)(" r6=%016"PRIxREGISTER", r7=%016"PRIxREGISTER"\n", 263 tf->tf_reg[6], tf->tf_reg[7]); 264 (*pr)(" r8=%016"PRIxREGISTER", r9=%016"PRIxREGISTER"\n", 265 tf->tf_reg[8], tf->tf_reg[9]); 266 (*pr)(" r10=%016"PRIxREGISTER", r11=%016"PRIxREGISTER"\n", 267 tf->tf_reg[10], tf->tf_reg[11]); 268 (*pr)(" r12=%016"PRIxREGISTER", sp=r13=%016"PRIxREGISTER"\n", 269 tf->tf_reg[12], tf->tf_reg[13]); 270 (*pr)("lr=r14=%016"PRIxREGISTER", pc=r15=%016"PRIxREGISTER"\n", 271 tf->tf_reg[14], tf->tf_pc); 272 return; 273 } 274 #endif 275 (*pr)(" pc=%016"PRIxREGISTER", spsr=%016"PRIxREGISTER"\n", 276 tf->tf_pc, tf->tf_spsr); 277 (*pr)(" esr=%016"PRIxREGISTER", far=%016"PRIxREGISTER"\n", 278 tf->tf_esr, tf->tf_far); 279 (*pr)(" x0=%016"PRIxREGISTER", x1=%016"PRIxREGISTER"\n", 280 tf->tf_reg[0], tf->tf_reg[1]); 281 (*pr)(" x2=%016"PRIxREGISTER", x3=%016"PRIxREGISTER"\n", 282 tf->tf_reg[2], tf->tf_reg[3]); 283 (*pr)(" x4=%016"PRIxREGISTER", x5=%016"PRIxREGISTER"\n", 284 tf->tf_reg[4], tf->tf_reg[5]); 285 (*pr)(" x6=%016"PRIxREGISTER", x7=%016"PRIxREGISTER"\n", 286 tf->tf_reg[6], tf->tf_reg[7]); 287 (*pr)(" x8=%016"PRIxREGISTER", x9=%016"PRIxREGISTER"\n", 288 tf->tf_reg[8], tf->tf_reg[9]); 289 (*pr)(" x10=%016"PRIxREGISTER", x11=%016"PRIxREGISTER"\n", 290 tf->tf_reg[10], tf->tf_reg[11]); 291 (*pr)(" x12=%016"PRIxREGISTER", x13=%016"PRIxREGISTER"\n", 292 tf->tf_reg[12], tf->tf_reg[13]); 293 (*pr)(" x14=%016"PRIxREGISTER", x15=%016"PRIxREGISTER"\n", 294 tf->tf_reg[14], tf->tf_reg[15]); 295 (*pr)(" x16=%016"PRIxREGISTER", x17=%016"PRIxREGISTER"\n", 296 tf->tf_reg[16], tf->tf_reg[17]); 297 (*pr)(" x18=%016"PRIxREGISTER", 
x19=%016"PRIxREGISTER"\n", 298 tf->tf_reg[18], tf->tf_reg[19]); 299 (*pr)(" x20=%016"PRIxREGISTER", x21=%016"PRIxREGISTER"\n", 300 tf->tf_reg[20], tf->tf_reg[21]); 301 (*pr)(" x22=%016"PRIxREGISTER", x23=%016"PRIxREGISTER"\n", 302 tf->tf_reg[22], tf->tf_reg[23]); 303 (*pr)(" x24=%016"PRIxREGISTER", x25=%016"PRIxREGISTER"\n", 304 tf->tf_reg[24], tf->tf_reg[25]); 305 (*pr)(" x26=%016"PRIxREGISTER", x27=%016"PRIxREGISTER"\n", 306 tf->tf_reg[26], tf->tf_reg[27]); 307 (*pr)(" x28=%016"PRIxREGISTER", fp=x29=%016"PRIxREGISTER"\n", 308 tf->tf_reg[28], tf->tf_reg[29]); 309 (*pr)("lr=x30=%016"PRIxREGISTER", sp=%016"PRIxREGISTER"\n", 310 tf->tf_reg[30], tf->tf_sp); 311 } 312 313 void 314 dump_switchframe(struct trapframe *tf, void (*pr)(const char *, ...)) 315 { 316 struct trapframe tf_buf; 317 318 db_read_bytes((db_addr_t)tf, sizeof(tf_buf), (char *)&tf_buf); 319 tf = &tf_buf; 320 321 (*pr)(" x19=%016"PRIxREGISTER", x20=%016"PRIxREGISTER"\n", 322 tf->tf_reg[19], tf->tf_reg[20]); 323 (*pr)(" x21=%016"PRIxREGISTER", x22=%016"PRIxREGISTER"\n", 324 tf->tf_reg[21], tf->tf_reg[22]); 325 (*pr)(" x23=%016"PRIxREGISTER", x24=%016"PRIxREGISTER"\n", 326 tf->tf_reg[23], tf->tf_reg[24]); 327 (*pr)(" x25=%016"PRIxREGISTER", x26=%016"PRIxREGISTER"\n", 328 tf->tf_reg[25], tf->tf_reg[26]); 329 (*pr)(" x27=%016"PRIxREGISTER", x28=%016"PRIxREGISTER"\n", 330 tf->tf_reg[27], tf->tf_reg[28]); 331 (*pr)("fp=x29=%016"PRIxREGISTER", lr=x30=%016"PRIxREGISTER"\n", 332 tf->tf_reg[29], tf->tf_reg[30]); 333 } 334 335 336 #if defined(_KERNEL) 337 static void 338 show_cpuinfo(struct cpu_info *ci) 339 { 340 struct cpu_info cpuinfobuf; 341 u_int cpuidx; 342 int i; 343 344 db_read_bytes((db_addr_t)ci, sizeof(cpuinfobuf), (char *)&cpuinfobuf); 345 346 cpuidx = cpu_index(&cpuinfobuf); 347 db_printf("cpu_info=%p, cpu_name=%s\n", ci, cpuinfobuf.ci_cpuname); 348 db_printf("%p cpu[%u].ci_cpuid = 0x%lx\n", 349 &ci->ci_cpuid, cpuidx, cpuinfobuf.ci_cpuid); 350 db_printf("%p cpu[%u].ci_curlwp = %p\n", 351 &ci->ci_curlwp, cpuidx, cpuinfobuf.ci_curlwp); 352 db_printf("%p cpu[%u].ci_onproc = %p\n", 353 &ci->ci_onproc, cpuidx, cpuinfobuf.ci_onproc); 354 for (i = 0; i < SOFTINT_COUNT; i++) { 355 db_printf("%p cpu[%u].ci_softlwps[%d] = %p\n", 356 &ci->ci_softlwps[i], cpuidx, i, cpuinfobuf.ci_softlwps[i]); 357 } 358 db_printf("%p cpu[%u].ci_lastintr = %" PRIu64 "\n", 359 &ci->ci_lastintr, cpuidx, cpuinfobuf.ci_lastintr); 360 db_printf("%p cpu[%u].ci_want_resched = %d\n", 361 &ci->ci_want_resched, cpuidx, cpuinfobuf.ci_want_resched); 362 db_printf("%p cpu[%u].ci_cpl = %d\n", 363 &ci->ci_cpl, cpuidx, cpuinfobuf.ci_cpl); 364 db_printf("%p cpu[%u].ci_softints = 0x%08x\n", 365 &ci->ci_softints, cpuidx, cpuinfobuf.ci_softints); 366 db_printf("%p cpu[%u].ci_intr_depth = %u\n", 367 &ci->ci_intr_depth, cpuidx, cpuinfobuf.ci_intr_depth); 368 db_printf("%p cpu[%u].ci_biglock_count = %u\n", 369 &ci->ci_biglock_count, cpuidx, cpuinfobuf.ci_biglock_count); 370 } 371 372 void 373 db_md_cpuinfo_cmd(db_expr_t addr, bool have_addr, db_expr_t count, 374 const char *modif) 375 { 376 #ifdef MULTIPROCESSOR 377 CPU_INFO_ITERATOR cii; 378 struct cpu_info *ci; 379 bool showall = false; 380 381 if (modif != NULL) { 382 for (; *modif != '\0'; modif++) { 383 switch (*modif) { 384 case 'a': 385 showall = true; 386 break; 387 } 388 } 389 } 390 391 if (showall) { 392 for (CPU_INFO_FOREACH(cii, ci)) { 393 show_cpuinfo(ci); 394 } 395 } else 396 #endif /* MULTIPROCESSOR */ 397 show_cpuinfo(curcpu()); 398 } 399 400 void 401 db_md_frame_cmd(db_expr_t addr, bool have_addr, db_expr_t 
void
db_md_frame_cmd(db_expr_t addr, bool have_addr, db_expr_t count,
    const char *modif)
{
	struct trapframe *tf;

	if (!have_addr) {
		db_printf("frame address must be specified\n");
		return;
	}

	tf = (struct trapframe *)addr;
	dump_trapframe(tf, db_printf);
}

void
db_md_lwp_cmd(db_expr_t addr, bool have_addr, db_expr_t count,
    const char *modif)
{
	lwp_t *l, lwp_buf;
	struct pcb *pcb, pcb_buf;

	if (!have_addr) {
		db_printf("lwp address must be specified\n");
		return;
	}

	db_read_bytes(addr, sizeof(lwp_buf), (char *)&lwp_buf);
	l = &lwp_buf;

#define SAFESTRPTR(p)	(((p) == NULL) ? "NULL" : (p))

	db_printf("lwp=%p\n", (void *)addr);

	db_printf("\tlwp_getpcb(l)     =%p\n", lwp_getpcb(l));

	db_printf("\tl->l_md.md_onfault=%p\n", l->l_md.md_onfault);
	db_printf("\tl->l_md.md_utf    =%p\n", l->l_md.md_utf);
	dump_trapframe(l->l_md.md_utf, db_printf);

	db_read_bytes((db_addr_t)l->l_addr, sizeof(pcb_buf), (char *)&pcb_buf);
	pcb = &pcb_buf;

	db_printf("\tl->l_addr.pcb_tf  =%p\n", pcb->pcb_tf);
	if (pcb->pcb_tf != l->l_md.md_utf)
		dump_switchframe(pcb->pcb_tf, db_printf);
	db_printf("\tl->l_md.md_cpacr  =%016" PRIx64 "\n", l->l_md.md_cpacr);
	db_printf("\tl->l_md.md_flags  =%08x\n", l->l_md.md_flags);

	db_printf("\tl->l_cpu          =%p\n", l->l_cpu);
	db_printf("\tl->l_proc         =%p\n", l->l_proc);
	db_printf("\tl->l_private      =%p\n", l->l_private);
	db_printf("\tl->l_name         =%s\n", SAFESTRPTR(l->l_name));
	db_printf("\tl->l_wmesg        =%s\n", SAFESTRPTR(l->l_wmesg));
}

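/*
 * Explanatory note on the "machine pte" helpers below: db_md_pte_cmd()
 * performs a stage 1 translation with AT S1E0R/S1E1R and db_par_print()
 * decodes the result left in PAR_EL1.  When the F bit is clear the
 * translation succeeded and the PA field holds the physical address bits
 * above the page offset, which are recombined with the low bits of the VA;
 * when F is set the S/PTW/FST fields describe why the table walk faulted.
 */
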
static void
db_par_print(uint64_t par, vaddr_t va)
{
	paddr_t pa = (__SHIFTOUT(par, PAR_PA) << PAR_PA_SHIFT) +
	    (va & __BITS(PAR_PA_SHIFT - 1, 0));

	if (__SHIFTOUT(par, PAR_F) == 0) {
		db_printf("%016" PRIx64
		    ": ATTR=0x%02" __PRIxBITS
		    ", NS=%" __PRIuBITS
		    ", SH=%" __PRIuBITS
		    ", PA=%016" PRIxPADDR
		    " (no fault)\n",
		    par,
		    __SHIFTOUT(par, PAR_ATTR),
		    __SHIFTOUT(par, PAR_NS),
		    __SHIFTOUT(par, PAR_SH),
		    pa);
	} else {
		db_printf("%016" PRIx64
		    ", S=%" __PRIuBITS
		    ", PTW=%" __PRIuBITS
		    ", FST=%" __PRIuBITS
		    " (fault)\n",
		    par,
		    __SHIFTOUT(par, PAR_S),
		    __SHIFTOUT(par, PAR_PTW),
		    __SHIFTOUT(par, PAR_FST));
	}
}

void
db_md_pte_cmd(db_expr_t addr, bool have_addr, db_expr_t count,
    const char *modif)
{
	uint64_t par;

	if (!have_addr) {
		db_printf("pte address must be specified\n");
		return;
	}

	reg_s1e0r_write(addr);
	isb();
	par = reg_par_el1_read();
	db_printf("Stage1 EL0 translation %016llx -> PAR_EL1 = ", addr);
	db_par_print(par, addr);

	reg_s1e1r_write(addr);
	isb();
	par = reg_par_el1_read();
	db_printf("Stage1 EL1 translation %016llx -> PAR_EL1 = ", addr);
	db_par_print(par, addr);

	db_pteinfo(addr, db_printf);
}

void
db_md_reset_cmd(db_expr_t addr, bool have_addr, db_expr_t count,
    const char *modif)
{
	if (cpu_reset_address == NULL) {
		db_printf("cpu_reset_address is not set\n");
		return;
	}

	cpu_reset_address();
}

void
db_md_tlbi_cmd(db_expr_t addr, bool have_addr, db_expr_t count,
    const char *modif)
{
	aarch64_tlbi_all();
}

void
db_md_ttbr_cmd(db_expr_t addr, bool have_addr, db_expr_t count,
    const char *modif)
{
	bool countmode = false, by_pid = true;

	if (!have_addr) {
		db_printf("usage: machine ttbr [/a] [/p] [/c] address|pid\n");
		db_printf("\t/a == argument is the address of a pmap_t\n");
		db_printf("\t/p == argument is a pid [default]\n");
		db_printf("\t/c == count TLB entries\n");
		return;
	}

	if (modif != NULL) {
		for (; *modif != '\0'; modif++) {
			switch (*modif) {
			case 'c':
				countmode = true;
				break;
			case 'a':
				by_pid = false;
				break;
			case 'p':
				by_pid = true;
				break;
			}
		}
	}

	if (by_pid) {
		proc_t *p = db_proc_find((pid_t)addr);
		if (p == NULL) {
			db_printf("bad address\n");
			return;
		}
		addr = (db_addr_t)p->p_vmspace->vm_map.pmap;
	}

	db_ttbrdump(countmode, addr, db_printf);
}

void
db_md_sysreg_cmd(db_expr_t addr, bool have_addr, db_expr_t count,
    const char *modif)
{
#define SHOW_ARMREG(x)							\
	db_printf("%-16s = %016" PRIx64 "\n", #x, reg_ ## x ## _read())

//	SHOW_ARMREG(cbar_el1);		/* Cortex */
	SHOW_ARMREG(ccsidr_el1);
	SHOW_ARMREG(clidr_el1);
	SHOW_ARMREG(cntfrq_el0);
	SHOW_ARMREG(cntkctl_el1);
	SHOW_ARMREG(cntp_ctl_el0);
	SHOW_ARMREG(cntp_cval_el0);
	SHOW_ARMREG(cntp_tval_el0);
	SHOW_ARMREG(cntpct_el0);
//	SHOW_ARMREG(cntps_ctl_el1);	/* need secure state */
//	SHOW_ARMREG(cntps_cval_el1);	/* need secure state */
//	SHOW_ARMREG(cntps_tval_el1);	/* need secure state */
	SHOW_ARMREG(cntv_ctl_el0);
	SHOW_ARMREG(cntv_cval_el0);
	SHOW_ARMREG(cntv_tval_el0);
	SHOW_ARMREG(cntvct_el0);
	SHOW_ARMREG(cpacr_el1);
	SHOW_ARMREG(csselr_el1);
	SHOW_ARMREG(ctr_el0);
	SHOW_ARMREG(currentel);
	SHOW_ARMREG(daif);
	SHOW_ARMREG(dczid_el0);
	SHOW_ARMREG(elr_el1);
	SHOW_ARMREG(esr_el1);
	SHOW_ARMREG(far_el1);
//	SHOW_ARMREG(fpcr);		/* FP trap */
//	SHOW_ARMREG(fpsr);		/* FP trap */
	SHOW_ARMREG(id_aa64afr0_el1);
	SHOW_ARMREG(id_aa64afr1_el1);
	SHOW_ARMREG(id_aa64dfr0_el1);
	SHOW_ARMREG(id_aa64dfr1_el1);
	SHOW_ARMREG(id_aa64isar0_el1);
	SHOW_ARMREG(id_aa64isar1_el1);
	SHOW_ARMREG(id_aa64mmfr0_el1);
	SHOW_ARMREG(id_aa64mmfr1_el1);
	SHOW_ARMREG(id_aa64pfr0_el1);
	SHOW_ARMREG(id_aa64pfr1_el1);
	SHOW_ARMREG(isr_el1);
//	SHOW_ARMREG(l2ctlr_el1);	/* Cortex */
	SHOW_ARMREG(mair_el1);
	SHOW_ARMREG(mdscr_el1);
	SHOW_ARMREG(midr_el1);
	SHOW_ARMREG(mpidr_el1);
	SHOW_ARMREG(mvfr0_el1);
	SHOW_ARMREG(mvfr1_el1);
	SHOW_ARMREG(mvfr2_el1);
	SHOW_ARMREG(nzcv);
	SHOW_ARMREG(par_el1);
	SHOW_ARMREG(pmccfiltr_el0);
	SHOW_ARMREG(pmccntr_el0);
	SHOW_ARMREG(revidr_el1);
//	SHOW_ARMREG(rmr_el1);		/* unknown reason trap */
//	SHOW_ARMREG(rvbar_el1);
	SHOW_ARMREG(sctlr_el1);
	SHOW_ARMREG(spsel);
	SHOW_ARMREG(spsr_el1);
	SHOW_ARMREG(tcr_el1);
	SHOW_ARMREG(tpidr_el0);
	SHOW_ARMREG(tpidrro_el0);
	SHOW_ARMREG(tpidr_el1);
	SHOW_ARMREG(ttbr0_el1);
	SHOW_ARMREG(ttbr1_el1);
	SHOW_ARMREG(vbar_el1);
}

/*
 * hardware breakpoint/watchpoint command
 */

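/*
 * Explanatory note: the hardware debug registers DBGBVR<n>_EL1/DBGBCR<n>_EL1
 * (and the watchpoint equivalents DBGWVR<n>_EL1/DBGWCR<n>_EL1) are
 * individually named system registers, so the register number must be
 * encoded in the MSR instruction itself and cannot be computed at run time.
 * The helpers below therefore expand one write sequence per register number
 * and dispatch with a switch.
 */
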
static void
aarch64_set_bcr_bvr(int n, uint64_t bcr, uint64_t bvr)
{
#define DBG_BCR_BVR_SET(regno, bcr, bvr)				\
	do {								\
		reg_dbgbcr ## regno ## _el1_write(bcr);			\
		reg_dbgbvr ## regno ## _el1_write(bvr);			\
	} while (0 /* CONSTCOND */)

	switch (n) {
	case 0:		DBG_BCR_BVR_SET(0, bcr, bvr);	break;
	case 1:		DBG_BCR_BVR_SET(1, bcr, bvr);	break;
	case 2:		DBG_BCR_BVR_SET(2, bcr, bvr);	break;
	case 3:		DBG_BCR_BVR_SET(3, bcr, bvr);	break;
	case 4:		DBG_BCR_BVR_SET(4, bcr, bvr);	break;
	case 5:		DBG_BCR_BVR_SET(5, bcr, bvr);	break;
	case 6:		DBG_BCR_BVR_SET(6, bcr, bvr);	break;
	case 7:		DBG_BCR_BVR_SET(7, bcr, bvr);	break;
	case 8:		DBG_BCR_BVR_SET(8, bcr, bvr);	break;
	case 9:		DBG_BCR_BVR_SET(9, bcr, bvr);	break;
	case 10:	DBG_BCR_BVR_SET(10, bcr, bvr);	break;
	case 11:	DBG_BCR_BVR_SET(11, bcr, bvr);	break;
	case 12:	DBG_BCR_BVR_SET(12, bcr, bvr);	break;
	case 13:	DBG_BCR_BVR_SET(13, bcr, bvr);	break;
	case 14:	DBG_BCR_BVR_SET(14, bcr, bvr);	break;
	case 15:	DBG_BCR_BVR_SET(15, bcr, bvr);	break;
	}
}

static void
aarch64_set_wcr_wvr(int n, uint64_t wcr, uint64_t wvr)
{
#define DBG_WCR_WVR_SET(regno, wcr, wvr)				\
	do {								\
		reg_dbgwcr ## regno ## _el1_write(wcr);			\
		reg_dbgwvr ## regno ## _el1_write(wvr);			\
	} while (0 /* CONSTCOND */)

	switch (n) {
	case 0:		DBG_WCR_WVR_SET(0, wcr, wvr);	break;
	case 1:		DBG_WCR_WVR_SET(1, wcr, wvr);	break;
	case 2:		DBG_WCR_WVR_SET(2, wcr, wvr);	break;
	case 3:		DBG_WCR_WVR_SET(3, wcr, wvr);	break;
	case 4:		DBG_WCR_WVR_SET(4, wcr, wvr);	break;
	case 5:		DBG_WCR_WVR_SET(5, wcr, wvr);	break;
	case 6:		DBG_WCR_WVR_SET(6, wcr, wvr);	break;
	case 7:		DBG_WCR_WVR_SET(7, wcr, wvr);	break;
	case 8:		DBG_WCR_WVR_SET(8, wcr, wvr);	break;
	case 9:		DBG_WCR_WVR_SET(9, wcr, wvr);	break;
	case 10:	DBG_WCR_WVR_SET(10, wcr, wvr);	break;
	case 11:	DBG_WCR_WVR_SET(11, wcr, wvr);	break;
	case 12:	DBG_WCR_WVR_SET(12, wcr, wvr);	break;
	case 13:	DBG_WCR_WVR_SET(13, wcr, wvr);	break;
	case 14:	DBG_WCR_WVR_SET(14, wcr, wvr);	break;
	case 15:	DBG_WCR_WVR_SET(15, wcr, wvr);	break;
	}
}

void
aarch64_breakpoint_set(int n, vaddr_t addr)
{
	uint64_t bcr, bvr;

	if (addr == 0) {
		bvr = 0;
		bcr = 0;
	} else {
		bvr = addr & DBGBVR_MASK;
		bcr =
		    __SHIFTIN(0, DBGBCR_BT) |
		    __SHIFTIN(0, DBGBCR_LBN) |
		    __SHIFTIN(0, DBGBCR_SSC) |
		    __SHIFTIN(0, DBGBCR_HMC) |
		    __SHIFTIN(15, DBGBCR_BAS) |
		    __SHIFTIN(3, DBGBCR_PMC) |
		    __SHIFTIN(1, DBGBCR_E);
	}

	aarch64_set_bcr_bvr(n, bcr, bvr);
}

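/*
 * Worked example of the BAS/byte-lane computation done in
 * aarch64_watchpoint_set() below (illustrative values only): watching
 * 2 bytes at an address ending in ...5 gives
 *	matchbytebit = 0xff >> (8 - 2)	= 0x03
 *	matchbytebit <<= (addr & 7)	= 0x03 << 5 = 0x60
 * and the address programmed into DBGWVR is rounded down to the containing
 * 8-byte unit, so the watchpoint matches byte lanes 5 and 6 of that unit.
 */
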
void
aarch64_watchpoint_set(int n, vaddr_t addr, u_int size, u_int accesstype)
{
	uint64_t wvr, wcr;
	uint32_t matchbytebit;

	KASSERT(size <= 8);
	if (size > 8)
		size = 8;

	/*
	 * Watchpoints always operate on 8-byte units; BAS is a bit field
	 * selecting the byte offsets to match within that 8-byte unit.
	 */
	matchbytebit = 0xff >> (8 - size);
	matchbytebit <<= (addr & 7);
	addr &= ~7UL;

	/* load, store, or both */
	accesstype &= WATCHPOINT_ACCESS_MASK;
	if (accesstype == 0)
		accesstype = WATCHPOINT_ACCESS_LOADSTORE;

	if (addr == 0) {
		wvr = 0;
		wcr = 0;
	} else {
		wvr = addr;
		wcr =
		    __SHIFTIN(0, DBGWCR_MASK) |		/* MASK: no mask */
		    __SHIFTIN(0, DBGWCR_WT) |		/* WT: 0 */
		    __SHIFTIN(0, DBGWCR_LBN) |		/* LBN: 0 */
		    __SHIFTIN(0, DBGWCR_SSC) |		/* SSC: 00 */
		    __SHIFTIN(0, DBGWCR_HMC) |		/* HMC: 0 */
		    __SHIFTIN(matchbytebit, DBGWCR_BAS) | /* BAS: 0-8byte */
		    __SHIFTIN(accesstype, DBGWCR_LSC) |	/* LSC: Load/Store */
		    __SHIFTIN(3, DBGWCR_PAC) |		/* PAC: 11 */
		    __SHIFTIN(1, DBGWCR_E);		/* Enable */
	}

	aarch64_set_wcr_wvr(n, wcr, wvr);
}

static int
db_md_breakpoint_set(int n, vaddr_t addr)
{
	if (n >= __arraycount(breakpoint_buf))
		return -1;

	if ((addr & 3) != 0) {
		db_printf("address must be 4 bytes aligned\n");
		return -1;
	}

	breakpoint_buf[n].addr = addr;
	return 0;
}

static int
db_md_watchpoint_set(int n, vaddr_t addr, u_int size, u_int accesstype)
{
	if (n >= __arraycount(watchpoint_buf))
		return -1;

	if (size != 0 && ((addr) & ~7UL) != ((addr + size - 1) & ~7UL)) {
		db_printf(
		    "address and size must fit within a block of 8 bytes\n");
		return -1;
	}

	watchpoint_buf[n].addr = addr;
	watchpoint_buf[n].size = size;
	watchpoint_buf[n].accesstype = accesstype;
	return 0;
}

static void
db_md_breakwatchpoints_clear(void)
{
	int i;

	for (i = 0; i <= max_breakpoint; i++)
		aarch64_breakpoint_set(i, 0);
	for (i = 0; i <= max_watchpoint; i++)
		aarch64_watchpoint_set(i, 0, 0, 0);
}

static void
db_md_breakwatchpoints_reload(void)
{
	int i;

	for (i = 0; i <= max_breakpoint; i++) {
		aarch64_breakpoint_set(i,
		    breakpoint_buf[i].addr);
	}
	for (i = 0; i <= max_watchpoint; i++) {
		aarch64_watchpoint_set(i,
		    watchpoint_buf[i].addr,
		    watchpoint_buf[i].size,
		    watchpoint_buf[i].accesstype);
	}
}

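/*
 * Explanatory note: ID_AA64DFR0_EL1.BRPs/WRPs encode the number of
 * implemented hardware breakpoints/watchpoints minus one, so the values
 * extracted below can be used directly as the highest valid register index
 * (hence the "<=" loops and the "+ 1" when totals are printed).
 */
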
void
db_machdep_cpu_init(void)
{
	uint64_t dfr, mdscr;
	int i, cpu_max_breakpoint, cpu_max_watchpoint;

	dfr = reg_id_aa64dfr0_el1_read();
	cpu_max_breakpoint = __SHIFTOUT(dfr, ID_AA64DFR0_EL1_BRPS);
	cpu_max_watchpoint = __SHIFTOUT(dfr, ID_AA64DFR0_EL1_WRPS);

	for (i = 0; i <= cpu_max_breakpoint; i++) {
		/* clear all breakpoints */
		aarch64_breakpoint_set(i, 0);
	}
	for (i = 0; i <= cpu_max_watchpoint; i++) {
		/* clear all watchpoints */
		aarch64_watchpoint_set(i, 0, 0, 0);
	}

	/* enable watchpoint and breakpoint */
	mdscr = reg_mdscr_el1_read();
	mdscr |= MDSCR_MDE | MDSCR_KDE;
	reg_mdscr_el1_write(mdscr);
	reg_oslar_el1_write(0);
}

void
db_machdep_init(struct cpu_info * const ci)
{
	struct aarch64_sysctl_cpu_id * const id = &ci->ci_id;
	const uint64_t dfr = id->ac_aa64dfr0;
	const u_int cpu_max_breakpoint = __SHIFTOUT(dfr, ID_AA64DFR0_EL1_BRPS);
	const u_int cpu_max_watchpoint = __SHIFTOUT(dfr, ID_AA64DFR0_EL1_WRPS);

	/*
	 * The number of {watch,break}points may differ depending on the
	 * core.
	 */
	if (max_breakpoint > cpu_max_breakpoint)
		max_breakpoint = cpu_max_breakpoint;
	if (max_watchpoint > cpu_max_watchpoint)
		max_watchpoint = cpu_max_watchpoint;
}


static void
show_breakpoints(void)
{
	uint64_t addr;
	unsigned int i, nused;

	for (nused = 0, i = 0; i <= max_breakpoint; i++) {
		addr = breakpoint_buf[i].addr;
		if (addr == 0) {
			db_printf("%d: disabled\n", i);
		} else {
			db_printf("%d: breakpoint %016" PRIx64 " (", i,
			    addr);
			db_printsym(addr, DB_STGY_ANY, db_printf);
			db_printf(")\n");
			nused++;
		}
	}
	db_printf("breakpoint used %d/%d\n", nused, max_breakpoint + 1);
}

static void
show_watchpoints(void)
{
	uint64_t addr;
	unsigned int i, nused;

	for (nused = 0, i = 0; i <= max_watchpoint; i++) {
		addr = watchpoint_buf[i].addr;
		if (addr == 0) {
			db_printf("%d: disabled\n", i);
		} else {
			db_printf("%d: watching %016" PRIx64 " (", i,
			    addr);
			db_printsym(addr, DB_STGY_ANY, db_printf);
			db_printf("), %d bytes", watchpoint_buf[i].size);

			switch (watchpoint_buf[i].accesstype) {
			case WATCHPOINT_ACCESS_LOAD:
				db_printf(", load");
				break;
			case WATCHPOINT_ACCESS_STORE:
				db_printf(", store");
				break;
			case WATCHPOINT_ACCESS_LOADSTORE:
				db_printf(", load/store");
				break;
			}
			db_printf("\n");
			nused++;
		}
	}
	db_printf("watchpoint used %d/%d\n", nused, max_watchpoint + 1);
}

void
db_md_break_cmd(db_expr_t addr, bool have_addr, db_expr_t count,
    const char *modif)
{
	int i, rc;
	int added, cleared;

	if (!have_addr) {
		show_breakpoints();
		return;
	}

	added = -1;
	cleared = -1;
	if (0 <= addr && addr <= max_breakpoint) {
		i = addr;
		if (breakpoint_buf[i].addr != 0) {
			db_md_breakpoint_set(i, 0);
			cleared = i;
		}
	} else {
		for (i = 0; i <= max_breakpoint; i++) {
			if (breakpoint_buf[i].addr == addr) {
				db_md_breakpoint_set(i, 0);
				cleared = i;
			}
		}
		if (cleared == -1) {
			for (i = 0; i <= max_breakpoint; i++) {
				if (breakpoint_buf[i].addr == 0) {
					rc = db_md_breakpoint_set(i, addr);
					if (rc != 0)
						return;
					added = i;
					break;
				}
			}
			if (i > max_breakpoint) {
				db_printf("no more breakpoints available\n");
			}
		}
	}

	if (added >= 0)
		db_printf("add breakpoint %d as %016"DDB_EXPR_FMT"x\n",
		    added, addr);
	if (cleared >= 0)
		db_printf("clear breakpoint %d\n", cleared);

	show_breakpoints();
}

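/*
 * Example usage of "machine watch" from the ddb prompt (addresses are
 * illustrative only):
 *	machine watch /lw 0xffffc00012345678	4-byte store watchpoint
 *	machine watch /q  0xffffc00012345678	8-byte load/store watchpoint
 *	machine watch 3				clear watchpoint slot 3
 * A small integer argument (0..max_watchpoint) names a slot to clear;
 * anything else is treated as an address.  The defaults are 4 bytes and
 * load+store when no modifiers are given.
 */
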
void
db_md_watch_cmd(db_expr_t addr, bool have_addr, db_expr_t count,
    const char *modif)
{
	int i, rc;
	int added, cleared;
	u_int accesstype, watchsize;

	if (!have_addr) {
		show_watchpoints();
		return;
	}

	accesstype = watchsize = 0;
	if ((modif != NULL) && (*modif != '\0')) {
		int ch;
		for (; *modif != '\0'; modif++) {
			ch = *modif;

			switch (ch) {
			case 'b':
				watchsize = 1;
				break;
			case 'h':
				watchsize = 2;
				break;
			case 'l':
				watchsize = 4;
				break;
			case 'q':
				watchsize = 8;
				break;
			case 'r':
				accesstype |= WATCHPOINT_ACCESS_LOAD;
				break;
			case 'w':
				accesstype |= WATCHPOINT_ACCESS_STORE;
				break;
			}
		}
	}
	if (watchsize == 0)
		watchsize = 4;	/* default: 4 bytes */
	if (accesstype == 0)
		accesstype = WATCHPOINT_ACCESS_LOADSTORE;	/* default */

	added = -1;
	cleared = -1;
	if (0 <= addr && addr <= max_watchpoint) {
		i = addr;
		if (watchpoint_buf[i].addr != 0) {
			db_md_watchpoint_set(i, 0, 0, 0);
			cleared = i;
		}
	} else {
		for (i = 0; i <= max_watchpoint; i++) {
			if (watchpoint_buf[i].addr == addr) {
				db_md_watchpoint_set(i, 0, 0, 0);
				cleared = i;
			}
		}
		if (cleared == -1) {
			for (i = 0; i <= max_watchpoint; i++) {
				if (watchpoint_buf[i].addr == 0) {
					rc = db_md_watchpoint_set(i, addr,
					    watchsize, accesstype);
					if (rc != 0)
						return;
					added = i;
					break;
				}
			}
			if (i > max_watchpoint) {
				db_printf("no more watchpoints available\n");
			}
		}
	}

	if (added >= 0)
		db_printf("add watchpoint %d as %016"DDB_EXPR_FMT"x\n",
		    added, addr);
	if (cleared >= 0)
		db_printf("clear watchpoint %d\n", cleared);

	show_watchpoints();
}
#endif

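/*
 * Roles of the MULTIPROCESSOR rendezvous variables below, as they are used
 * by db_md_switch_cpu_cmd() and kdb_trap() (summary note):
 *	db_trigger		the CPU that originally entered ddb
 *	db_onproc		the CPU currently running the ddb command loop
 *	db_newcpu		the CPU that "machine cpu" wants to switch to
 *	db_readytoswitch[]	per-CPU trapframe pointers, non-NULL while
 *				that CPU is parked inside kdb_trap()
 */
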
#ifdef MULTIPROCESSOR
volatile struct cpu_info *db_trigger;
volatile struct cpu_info *db_onproc;
volatile struct cpu_info *db_newcpu;
volatile struct trapframe *db_readytoswitch[MAXCPUS];

#ifdef _KERNEL
void
db_md_switch_cpu_cmd(db_expr_t addr, bool have_addr, db_expr_t count,
    const char *modif)
{
	struct cpu_info *new_ci = NULL;
	u_int cpuno = (u_int)addr;
	int i;

	membar_consumer();
	if (!have_addr) {
		for (i = 0; i < ncpu; i++) {
			if (db_readytoswitch[i] != NULL) {
				db_printf("cpu%d: ready. tf=%p, pc=%016lx ", i,
				    db_readytoswitch[i],
				    db_readytoswitch[i]->tf_pc);
				db_printsym(db_readytoswitch[i]->tf_pc,
				    DB_STGY_ANY, db_printf);
				db_printf("\n");
			} else {
				db_printf("cpu%d: not responding\n", i);
			}
		}
		return;
	}

	if (cpuno < ncpu)
		new_ci = cpu_lookup(cpuno);
	if (new_ci == NULL) {
		db_printf("cpu %u does not exist\n", cpuno);
		return;
	}
	if (db_readytoswitch[new_ci->ci_index] == 0) {
		db_printf("cpu %u is not responding\n", cpuno);
		return;
	}

	if (new_ci == curcpu())
		return;

	db_newcpu = new_ci;
	db_continue_cmd(0, false, 0, "");
}

#endif /* _KERNEL */
#endif /* MULTIPROCESSOR */

#ifdef DDB
int
kdb_trap(int type, struct trapframe *tf)
{
#ifdef MULTIPROCESSOR
	struct cpu_info * const ci = curcpu();
	bool static_brk = false;
#endif
	int s;
	bool restore_hw_watchpoints = true;

	switch (type) {
	case DB_TRAP_WATCHPOINT:
	case DB_TRAP_BREAKPOINT:
		/*
		 * In the case of a hardware watchpoint or breakpoint,
		 * returning from ddb as-is would immediately trap again,
		 * so clear them all for now.
		 *
		 * Breakpoints and watchpoints are restored at the end of
		 * the next DB_TRAP_BKPT_INSN (ddb's STEP_INVISIBLE mode).
		 */
		db_md_breakwatchpoints_clear();
		restore_hw_watchpoints = false;
		break;
	case DB_TRAP_BKPT_INSN:
#ifdef MULTIPROCESSOR
		/* brk #0xffff in cpu_Debugger() ? */
		if (__SHIFTOUT(tf->tf_esr, ESR_ISS) == 0xffff)
			static_brk = true;
		/* FALLTHRU */
#endif
	case DB_TRAP_SW_STEP:
	case DB_TRAP_UNKNOWN:
	case -1:	/* from pic_ipi_ddb() */
		break;
	default:
		if (db_recover != 0) {
			db_error("Faulted in DDB: continuing...\n");
			/* NOTREACHED */
		}
		break;
	}

#ifdef MULTIPROCESSOR
	if (ncpu > 1) {
		/*
		 * Try to take ownership of DDB.
		 * If we do, tell all other CPUs to enter DDB too.
		 */
		if (atomic_cas_ptr(&db_onproc, NULL, ci) == NULL) {
			intr_ipi_send(NULL, IPI_DDB);
			db_trigger = ci;
		} else {
			/*
			 * If multiple CPUs take a kdb_trap() that is not
			 * IPI_DDB derived at the same time, only the CPU
			 * that managed to claim db_onproc first executes
			 * db_trap().  A CPU that could not claim db_onproc
			 * is given type = -1 for now; kdb_trap() will be
			 * called again with the correct type after this
			 * invocation returns.
			 */
			type = -1;
			restore_hw_watchpoints = true;
		}
	}
	db_readytoswitch[ci->ci_index] = tf;
#endif

	for (;;) {
#ifdef MULTIPROCESSOR
		if (ncpu > 1) {
			/* wait for our turn, or exit */
			dsb(ishld);
			while (db_onproc != ci) {
				__asm __volatile ("wfe");

				dsb(ishld);
				if (db_onproc == NULL)
					goto kdb_trap_done;
			}
			/* It's my turn! */
		}
#endif /* MULTIPROCESSOR */

		/* Should switch to kdb's own stack here. */
		ddb_regs = *tf;

		s = splhigh();
		db_active++;
		cnpollc(true);
		db_trap(type, 0/*code*/);
		cnpollc(false);
		db_active--;
		splx(s);

		*tf = ddb_regs;

#ifdef MULTIPROCESSOR
		if (ncpu < 2)
			break;

		if (db_newcpu == NULL && db_onproc != db_trigger) {
			/*
			 * If "machine cpu" switched CPUs after entering ddb
			 * from a breakpoint or watchpoint, return control to
			 * the CPU that triggered ddb in the first place so
			 * that the breakpoint or watchpoint is reset
			 * correctly.  When db_trap() returns from there, the
			 * watchpoints and breakpoints are re-armed
			 * (db_run_mode = STEP_INVISIBLE).
			 */
			db_newcpu = db_trigger;
		}

		if (db_newcpu != NULL) {
			/* XXX: override sys/ddb/db_run.c:db_run_mode */
			db_continue_cmd(0, false, 0, "");

			/*
			 * For a static BRK instruction (cpu_Debugger()),
			 * db_trap() advances the PC to the instruction after
			 * the BRK.  If db_trap() were called twice because
			 * of the "machine cpu" command, the PC would be
			 * advanced past the following instruction as well.
			 * To avoid this, change 'type' so that the second
			 * call to db_trap() does not change the PC.
			 */
1257 */ 1258 if (static_brk) 1259 type = -1; 1260 1261 db_onproc = db_newcpu; 1262 db_newcpu = NULL; 1263 dsb(ishst); 1264 /* waking up the CPU waiting for its turn to db_trap */ 1265 sev(); 1266 1267 continue; /* go to waiting my turn */ 1268 } 1269 #endif /* MULTIPROCESSOR */ 1270 1271 break; 1272 } 1273 1274 #ifdef MULTIPROCESSOR 1275 if (ncpu > 1 && db_onproc == ci) { 1276 db_onproc = NULL; 1277 dsb(ishst); 1278 /* waking up the CPU waiting for its turn to exit */ 1279 sev(); 1280 1281 db_readytoswitch[cpu_index(ci)] = NULL; 1282 /* wait for all other CPUs are ready to exit */ 1283 for (;;) { 1284 int i; 1285 dsb(ishld); 1286 for (i = 0; i < ncpu; i++) { 1287 if (db_readytoswitch[i] != NULL) 1288 break; 1289 } 1290 if (i == ncpu) 1291 break; 1292 } 1293 db_trigger = NULL; 1294 sev(); 1295 } else { 1296 kdb_trap_done: 1297 db_readytoswitch[cpu_index(ci)] = NULL; 1298 dsb(ishst); 1299 __asm __volatile ("wfe"); 1300 } 1301 #endif 1302 if (restore_hw_watchpoints) 1303 db_md_breakwatchpoints_reload(); 1304 1305 return 1; 1306 } 1307 #endif 1308 1309 #if defined(_KERNEL) 1310 static void 1311 db_md_meminfo_cmd(db_expr_t addr, bool have_addr, db_expr_t count, 1312 const char *modif) 1313 { 1314 unsigned blk; 1315 1316 for (blk = 0; blk < bootconfig.dramblocks; blk++) { 1317 db_printf("blk[%u]: start %lx end %lx (pages %x)\n", 1318 blk, bootconfig.dram[blk].address, 1319 bootconfig.dram[blk].address + 1320 (uint64_t)bootconfig.dram[blk].pages * PAGE_SIZE, 1321 bootconfig.dram[blk].pages); 1322 } 1323 } 1324 #endif 1325