/*	$NetBSD: db_interface.c,v 1.53 2014/10/25 10:58:12 skrll Exp $	*/

/*
 * Copyright (c) 1996 Scott K. Stevens
 *
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 *	From: db_interface.c,v 2.4 1991/02/05 17:11:13 mrt (CMU)
 */

/*
 * Interface to new debugger.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.53 2014/10/25 10:58:12 skrll Exp $");

#include "opt_ddb.h"
#include "opt_kgdb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/systm.h>	/* just for boothowto */
#include <sys/exec.h>
#include <sys/atomic.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

#include <arm/arm32/db_machdep.h>
#include <arm/undefined.h>
#include <ddb/db_access.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_variables.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#include <ddb/db_interface.h>
#include <dev/cons.h>

#if defined(KGDB) || !defined(DDB)
#define db_printf	printf
#endif

u_int db_fetch_reg(int, db_regs_t *);

int db_trapper(u_int, u_int, trapframe_t *, int);

int db_active = 0;
db_regs_t ddb_regs;	/* register state */
db_regs_t *ddb_regp;

#ifdef MULTIPROCESSOR
volatile struct cpu_info *db_onproc;
volatile struct cpu_info *db_newcpu;
#endif




#ifdef DDB
/*
 * kdb_trap - field a TRACE or BPT trap
 */
int
kdb_trap(int type, db_regs_t *regs)
{
        struct cpu_info * const ci = curcpu();
        db_regs_t dbreg;
        int s;

        switch (type) {
        case T_BREAKPOINT:	/* breakpoint */
        case -1:		/* keyboard interrupt */
                break;
#ifdef MULTIPROCESSOR
        case -2:
                /*
                 * We were asked to enter ddb from another processor, but
                 * by the time we got here no one was in ddb.  So ignore
                 * the request.
                 */
                if (db_onproc == NULL)
                        return 1;
                break;
#endif
        default:
                if (db_recover != 0) {
                        /* This will longjmp back into db_command_loop() */
                        db_error("Faulted in DDB; continuing...\n");
                        /*NOTREACHED*/
                }
        }

        /* Should switch to kdb's own stack here. */

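        /*
         * On a multiprocessor, only the CPU that owns db_onproc runs the
         * DDB command loop; any other CPU that takes the trap waits below
         * until it is handed ownership or until DDB is left.
         */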
#ifdef MULTIPROCESSOR
        const bool is_mp_p = ncpu > 1;
        if (is_mp_p) {
                /*
                 * Try to take ownership of DDB.  If we do, tell all other
                 * CPUs to enter DDB too.
                 */
                if (atomic_cas_ptr(&db_onproc, NULL, ci) == NULL) {
                        intr_ipi_send(NULL, IPI_DDB);
                }
        }
        for (;;) {
                if (is_mp_p) {
                        /*
                         * While we aren't the master, wait until the master
                         * gives control to us or exits.  If it exited, we
                         * just exit too.  Otherwise this CPU will enter DDB.
                         */
                        membar_consumer();
                        while (db_onproc != ci) {
                                if (db_onproc == NULL)
                                        return 1;
#ifdef _ARM_ARCH_6
                                __asm __volatile("wfe");
                                membar_consumer();
#endif
                                if (db_onproc == ci) {
                                        printf("%s: switching to %s\n",
                                            __func__, ci->ci_cpuname);
                                }
                        }
                }
#endif

                s = splhigh();
                ci->ci_ddb_regs = &dbreg;
                ddb_regp = &dbreg;
                ddb_regs = *regs;

                atomic_inc_32(&db_active);
                cnpollc(true);
                db_trap(type, 0/*code*/);
                cnpollc(false);
                atomic_dec_32(&db_active);

                ci->ci_ddb_regs = NULL;
                ddb_regp = &dbreg;
                *regs = ddb_regs;
                splx(s);

#ifdef MULTIPROCESSOR
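                /*
                 * If a switch to another CPU was requested (db_newcpu is
                 * set elsewhere), hand DDB ownership to that CPU, wake it,
                 * and go back to waiting in the slave loop above.
                 */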
                if (is_mp_p && db_newcpu != NULL) {
                        db_onproc = db_newcpu;
                        db_newcpu = NULL;
#ifdef _ARM_ARCH_6
                        membar_producer();
                        __asm __volatile("sev; sev");
#endif
                        continue;
                }
                break;
        }

        if (is_mp_p) {
                /*
                 * We are exiting DDB so there is no one onproc.  Tell
                 * the other CPUs to exit.
                 */
                db_onproc = NULL;
#ifdef _ARM_ARCH_6
                __asm __volatile("sev; sev");
#endif
        }
#endif

        return (1);
}
#endif

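/*
 * Return true if the given address has no valid mapping in the pmap the
 * debugger would use to access it, i.e. it is not safe to dereference.
 */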
int
db_validate_address(vaddr_t addr)
{
        struct proc *p = curproc;
        struct pmap *pmap;

        if (!p || !p->p_vmspace || !p->p_vmspace->vm_map.pmap ||
#ifndef ARM32_NEW_VM_LAYOUT
            addr >= VM_MAXUSER_ADDRESS
#else
            addr >= VM_MIN_KERNEL_ADDRESS
#endif
            )
                pmap = pmap_kernel();
        else
                pmap = p->p_vmspace->vm_map.pmap;

        return (pmap_extract(pmap, addr, NULL) == false);
}

/*
 * Read bytes from kernel address space for debugger.
 */
void
db_read_bytes(vaddr_t addr, size_t size, char *data)
{
        char *src = (char *)addr;

        if (db_validate_address((u_int)src)) {
                db_printf("address %p is invalid\n", src);
                return;
        }

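        /*
         * For naturally aligned word and halfword sized reads, use a
         * single access of that size rather than a byte-by-byte copy.
         */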
        if (size == 4 && (addr & 3) == 0 && ((uintptr_t)data & 3) == 0) {
                *((int*)data) = *((int*)src);
                return;
        }

        if (size == 2 && (addr & 1) == 0 && ((uintptr_t)data & 1) == 0) {
                *((short*)data) = *((short*)src);
                return;
        }

        while (size-- > 0) {
                if (db_validate_address((u_int)src)) {
                        db_printf("address %p is invalid\n", src);
                        return;
                }
                *data++ = *src++;
        }
}

static void
db_write_text(vaddr_t addr, size_t size, const char *data)
{
        struct pmap *pmap = pmap_kernel();
        pd_entry_t *pde, oldpde, tmppde;
        pt_entry_t *pte, oldpte, tmppte;
        vaddr_t pgva;
        size_t limit, savesize;
        char *dst;

        /* XXX: gcc */
        oldpte = 0;

        if ((savesize = size) == 0)
                return;

        dst = (char *) addr;

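        /*
         * For each section or page covered by the write: temporarily make
         * its mapping writable, copy as much as fits in it, then restore
         * the original protection and flush the stale TLB entry.
         */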
        do {
                /* Get the PDE of the current VA. */
                if (pmap_get_pde_pte(pmap, (vaddr_t) dst, &pde, &pte) == false)
                        goto no_mapping;
                switch ((oldpde = *pde) & L1_TYPE_MASK) {
                case L1_TYPE_S:
                        pgva = (vaddr_t)dst & L1_S_FRAME;
                        limit = L1_S_SIZE - ((vaddr_t)dst & L1_S_OFFSET);

                        tmppde = l1pte_set_writable(oldpde);
                        *pde = tmppde;
                        PTE_SYNC(pde);
                        break;

                case L1_TYPE_C:
                        pgva = (vaddr_t)dst & L2_S_FRAME;
                        limit = L2_S_SIZE - ((vaddr_t)dst & L2_S_OFFSET);

                        if (pte == NULL)
                                goto no_mapping;
                        oldpte = *pte;
                        tmppte = l2pte_set_writable(oldpte);
                        *pte = tmppte;
                        PTE_SYNC(pte);
                        break;

                default:
                no_mapping:
                        printf(" address 0x%08lx not a valid page\n",
                            (vaddr_t) dst);
                        return;
                }
                cpu_tlb_flushD_SE(pgva);
                cpu_cpwait();

                if (limit > size)
                        limit = size;
                size -= limit;

                /*
                 * The page is now writable.  Do as much of the write as
                 * fits in this page.
                 */
                for (; limit > 0; limit--)
                        *dst++ = *data++;

                /*
                 * Restore old mapping permissions.
                 */
                switch (oldpde & L1_TYPE_MASK) {
                case L1_TYPE_S:
                        *pde = oldpde;
                        PTE_SYNC(pde);
                        break;

                case L1_TYPE_C:
                        *pte = oldpte;
                        PTE_SYNC(pte);
                        break;
                }
                cpu_tlb_flushD_SE(pgva);
                cpu_cpwait();
        } while (size != 0);

        /* Sync the I-cache. */
        cpu_icache_sync_range(addr, savesize);
}

/*
 * Write bytes to kernel address space for debugger.
 */
void
db_write_bytes(vaddr_t addr, size_t size, const char *data)
{
        extern char kernel_text[];
        extern char etext[];
        char *dst;
        size_t loop;

        /* If any part is in kernel text, use db_write_text() */
        if (addr >= (vaddr_t) kernel_text && addr < (vaddr_t) etext) {
                db_write_text(addr, size, data);
                return;
        }

        dst = (char *)addr;
        if (db_validate_address((u_int)dst)) {
                db_printf("address %p is invalid\n", dst);
                return;
        }

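        /*
         * As in db_read_bytes(), use a single access for naturally aligned
         * word and halfword sized writes.
         */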
        if (size == 4 && (addr & 3) == 0 && ((uintptr_t)data & 3) == 0)
                *((int*)dst) = *((const int *)data);
        else
        if (size == 2 && (addr & 1) == 0 && ((uintptr_t)data & 1) == 0)
                *((short*)dst) = *((const short *)data);
        else {
                loop = size;
                while (loop-- > 0) {
                        if (db_validate_address((u_int)dst)) {
                                db_printf("address %p is invalid\n", dst);
                                return;
                        }
                        *dst++ = *data++;
                }
        }

        /* make sure the caches and memory are in sync */
        cpu_icache_sync_range(addr, size);

        /* In case the current page tables have been modified ... */
        cpu_tlb_flushID();
        cpu_cpwait();
}

#ifdef DDB
void
cpu_Debugger(void)
{
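        /*
         * Execute a word from the undefined instruction space.  The
         * undefined instruction handler installed by db_machine_init()
         * hands the resulting trap to db_trapper(), which enters DDB.
         */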
        __asm(".word	0xe7ffffff");
}

int
db_trapper(u_int addr, u_int inst, trapframe_t *frame, int fault_code)
{

        if (fault_code == 0) {
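                /*
                 * Compare with the condition field masked off so that
                 * conditional encodings of the breakpoint instruction
                 * are recognised as well.
                 */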
                if ((inst & ~INSN_COND_MASK) == (BKPT_INST & ~INSN_COND_MASK))
                        kdb_trap(T_BREAKPOINT, frame);
                else
                        kdb_trap(-1, frame);
        } else
                return (1);
        return (0);
}

extern u_int esym;
extern u_int end;

static struct undefined_handler db_uh;

void
db_machine_init(void)
{

        /*
         * We get called before malloc() is available, so supply a static
         * struct undefined_handler.
         */
        db_uh.uh_handler = db_trapper;
        install_coproc_handler_static(CORE_UNKNOWN_HANDLER, &db_uh);
}
#endif

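/*
 * Return the saved value of general register 0-15 from the trap frame.
 */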
u_int
db_fetch_reg(int reg, db_regs_t *regs)
{

        switch (reg) {
        case 0:
                return (regs->tf_r0);
        case 1:
                return (regs->tf_r1);
        case 2:
                return (regs->tf_r2);
        case 3:
                return (regs->tf_r3);
        case 4:
                return (regs->tf_r4);
        case 5:
                return (regs->tf_r5);
        case 6:
                return (regs->tf_r6);
        case 7:
                return (regs->tf_r7);
        case 8:
                return (regs->tf_r8);
        case 9:
                return (regs->tf_r9);
        case 10:
                return (regs->tf_r10);
        case 11:
                return (regs->tf_r11);
        case 12:
                return (regs->tf_r12);
        case 13:
                return (regs->tf_svc_sp);
        case 14:
                return (regs->tf_svc_lr);
        case 15:
                return (regs->tf_pc);
        default:
                panic("db_fetch_reg: botch");
        }
}

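/*
 * Given a branch-type instruction and the pc it sits at, compute the
 * address that will be executed next if the branch is taken.
 */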
u_int
branch_taken(u_int insn, u_int pc, db_regs_t *regs)
{
        u_int addr, nregs;

        switch ((insn >> 24) & 0xf) {
        case 0xa:	/* b ... */
        case 0xb:	/* bl ... */
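                /*
                 * Extract the 24-bit offset (already shifted left two
                 * bits), sign-extend it and add it to pc + 8, the
                 * prefetch-adjusted pc.
                 */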
                addr = ((insn << 2) & 0x03ffffff);
                if (addr & 0x02000000)
                        addr |= 0xfc000000;
                return (pc + 8 + addr);
        case 0x7:	/* ldr pc, [pc, reg, lsl #2] */
                addr = db_fetch_reg(insn & 0xf, regs);
                addr = pc + 8 + (addr << 2);
                db_read_bytes(addr, 4, (char *)&addr);
                return (addr);
        case 0x5:	/* ldr pc, [reg] */
                addr = db_fetch_reg((insn >> 16) & 0xf, regs);
                db_read_bytes(addr, 4, (char *)&addr);
                return (addr);
        case 0x1:	/* mov pc, reg */
                addr = db_fetch_reg(insn & 0xf, regs);
                return (addr);
        case 0x8:	/* ldmxx reg, {..., pc} */
        case 0x9:
                addr = db_fetch_reg((insn >> 16) & 0xf, regs);
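                /*
                 * Count the registers in the load list: a parallel
                 * population count of the low 16 bits of the instruction.
                 */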
                nregs = (insn & 0x5555) + ((insn >> 1) & 0x5555);
                nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333);
                nregs = (nregs + (nregs >> 4)) & 0x0f0f;
                nregs = (nregs + (nregs >> 8)) & 0x001f;
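                /*
                 * The pc is the highest-numbered register in the list, so
                 * it is loaded from the last word transferred; compute the
                 * address of that word for each addressing mode.
                 */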
                switch ((insn >> 23) & 0x3) {
                case 0x0:	/* ldmda */
                        addr = addr - 0;
                        break;
                case 0x1:	/* ldmia */
                        addr = addr + 0 + ((nregs - 1) << 2);
                        break;
                case 0x2:	/* ldmdb */
                        addr = addr - 4;
                        break;
                case 0x3:	/* ldmib */
                        addr = addr + 4 + ((nregs - 1) << 2);
                        break;
                }
                db_read_bytes(addr, 4, (char *)&addr);
                return (addr);
        default:
                panic("branch_taken: botch");
        }
}