/*	$NetBSD: db_interface.c,v 1.52.4.1 2014/11/09 16:05:25 martin Exp $	*/

/*
 * Copyright (c) 1996 Scott K. Stevens
 *
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 *	From: db_interface.c,v 2.4 1991/02/05 17:11:13 mrt (CMU)
 */

/*
 * Interface to new debugger.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.52.4.1 2014/11/09 16:05:25 martin Exp $");

#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/systm.h>	/* just for boothowto */
#include <sys/exec.h>
#include <sys/atomic.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

#include <arm/arm32/db_machdep.h>
#include <arm/arm32/katelib.h>
#include <arm/undefined.h>
#include <ddb/db_access.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_variables.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#include <ddb/db_interface.h>
#include <dev/cons.h>

#if defined(KGDB) || !defined(DDB)
#define db_printf	printf
#endif

u_int db_fetch_reg(int, db_regs_t *);

int db_trapper(u_int, u_int, trapframe_t *, int);

int db_active = 0;
db_regs_t ddb_regs;	/* register state */
db_regs_t *ddb_regp;

#ifdef MULTIPROCESSOR
volatile struct cpu_info *db_onproc;
volatile struct cpu_info *db_newcpu;
#endif

#ifdef DDB
/*
 *  kdb_trap - field a TRACE or BPT trap
 */
int
kdb_trap(int type, db_regs_t *regs)
{
	struct cpu_info * const ci = curcpu();
	db_regs_t dbreg;
	int s;

	switch (type) {
	case T_BREAKPOINT:	/* breakpoint */
	case -1:		/* keyboard interrupt */
		break;
#ifdef MULTIPROCESSOR
	case -2:
		/*
		 * We were asked to enter ddb from another processor, but by
		 * the time we got here, no one was in ddb.  So ignore the
		 * request.
		 */
		if (db_onproc == NULL)
			return 1;
		break;
#endif
	default:
		if (db_recover != 0) {
			/* This will longjmp back into db_command_loop() */
			db_error("Faulted in DDB; continuing...\n");
			/*NOTREACHED*/
		}
	}

	/* Should switch to kdb's own stack here. */

#ifdef MULTIPROCESSOR
	const bool is_mp_p = ncpu > 1;
	if (is_mp_p) {
		/*
		 * Try to take ownership of DDB.  If we do, tell all other
		 * CPUs to enter DDB too.
		 */
		if (atomic_cas_ptr(&db_onproc, NULL, ci) == NULL) {
			intr_ipi_send(NULL, IPI_DDB);
		}
	}
	for (;;) {
		if (is_mp_p) {
			/*
			 * While we aren't the master, wait until the master
			 * gives control to us or exits.  If it exited, we
			 * just exit too.  Otherwise this cpu will enter DDB.
			 */
			membar_consumer();
			while (db_onproc != ci) {
				if (db_onproc == NULL)
					return 1;
#ifdef _ARM_ARCH_6
				/* Sleep until the master signals us (sev). */
				__asm __volatile("wfe");
				membar_consumer();
#endif
				if (db_onproc == ci) {
					printf("%s: switching to %s\n",
					    __func__, ci->ci_cpuname);
				}
			}
		}
#endif

		s = splhigh();
		ci->ci_ddb_regs = &dbreg;
		ddb_regp = &dbreg;
		ddb_regs = *regs;

		atomic_inc_32(&db_active);
		cnpollc(true);
		db_trap(type, 0/*code*/);
		cnpollc(false);
		atomic_dec_32(&db_active);

		ci->ci_ddb_regs = NULL;
		ddb_regp = &dbreg;
		*regs = ddb_regs;
		splx(s);

#ifdef MULTIPROCESSOR
		if (is_mp_p && db_newcpu != NULL) {
			/* Another CPU was selected from within DDB;
			 * hand control over to it and wake it up. */
			db_onproc = db_newcpu;
			db_newcpu = NULL;
#ifdef _ARM_ARCH_6
			membar_producer();
			__asm __volatile("sev; sev");
#endif
			continue;
		}
		break;
	}

	if (is_mp_p) {
		/*
		 * We are exiting DDB so there is no one onproc.  Tell
		 * the other CPUs to exit.
		 */
		db_onproc = NULL;
#ifdef _ARM_ARCH_6
		__asm __volatile("sev; sev");
#endif
	}
#endif

	return (1);
}
#endif

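/*
 * Check that an address is backed by a valid mapping before DDB touches it.
 * User addresses are looked up in the current process's pmap, everything
 * else in the kernel pmap.  Returns nonzero if pmap_extract() finds no
 * mapping, i.e. nonzero means "invalid".
 */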
int
db_validate_address(vaddr_t addr)
{
	struct proc *p = curproc;
	struct pmap *pmap;

	if (!p || !p->p_vmspace || !p->p_vmspace->vm_map.pmap ||
#ifndef ARM32_NEW_VM_LAYOUT
	    addr >= VM_MAXUSER_ADDRESS
#else
	    addr >= VM_MIN_KERNEL_ADDRESS
#endif
	    )
		pmap = pmap_kernel();
	else
		pmap = p->p_vmspace->vm_map.pmap;

	return (pmap_extract(pmap, addr, NULL) == false);
}

/*
 * Read bytes from kernel address space for debugger.
 */
void
db_read_bytes(vaddr_t addr, size_t size, char *data)
{
	char *src = (char *)addr;

	if (db_validate_address((u_int)src)) {
		db_printf("address %p is invalid\n", src);
		return;
	}

	if (size == 4 && (addr & 3) == 0 && ((uintptr_t)data & 3) == 0) {
		*((int*)data) = *((int*)src);
		return;
	}

	if (size == 2 && (addr & 1) == 0 && ((uintptr_t)data & 1) == 0) {
		*((short*)data) = *((short*)src);
		return;
	}

	while (size-- > 0) {
		if (db_validate_address((u_int)src)) {
			db_printf("address %p is invalid\n", src);
			return;
		}
		*data++ = *src++;
	}
}

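/*
 * Write to kernel text, which is normally mapped read-only.  For each page
 * touched, temporarily rewrite the section (L1) or page (L2) descriptor to
 * make it writable, copy the bytes, restore the original descriptor, and
 * flush the TLB entry.  The I-cache is synced afterwards so that planted
 * breakpoints are actually fetched.
 */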
static void
db_write_text(vaddr_t addr, size_t size, const char *data)
{
	struct pmap *pmap = pmap_kernel();
	pd_entry_t *pde, oldpde, tmppde;
	pt_entry_t *pte, oldpte, tmppte;
	vaddr_t pgva;
	size_t limit, savesize;
	char *dst;

	/* XXX: gcc */
	oldpte = 0;

	if ((savesize = size) == 0)
		return;

	dst = (char *) addr;

	do {
		/* Get the PDE of the current VA. */
		if (pmap_get_pde_pte(pmap, (vaddr_t) dst, &pde, &pte) == false)
			goto no_mapping;
		switch ((oldpde = *pde) & L1_TYPE_MASK) {
		case L1_TYPE_S:
			pgva = (vaddr_t)dst & L1_S_FRAME;
			limit = L1_S_SIZE - ((vaddr_t)dst & L1_S_OFFSET);

			tmppde = l1pte_set_writable(oldpde);
			*pde = tmppde;
			PTE_SYNC(pde);
			break;

		case L1_TYPE_C:
			pgva = (vaddr_t)dst & L2_S_FRAME;
			limit = L2_S_SIZE - ((vaddr_t)dst & L2_S_OFFSET);

			if (pte == NULL)
				goto no_mapping;
			oldpte = *pte;
			tmppte = l2pte_set_writable(oldpte);
			*pte = tmppte;
			PTE_SYNC(pte);
			break;

		default:
		no_mapping:
			printf(" address 0x%08lx not a valid page\n",
			    (vaddr_t) dst);
			return;
		}
		cpu_tlb_flushD_SE(pgva);
		cpu_cpwait();

		if (limit > size)
			limit = size;
		size -= limit;

		/*
		 * Page is now writable.  Do as much access as we
		 * can in this page.
		 */
		for (; limit > 0; limit--)
			*dst++ = *data++;

		/*
		 * Restore old mapping permissions.
		 */
		switch (oldpde & L1_TYPE_MASK) {
		case L1_TYPE_S:
			*pde = oldpde;
			PTE_SYNC(pde);
			break;

		case L1_TYPE_C:
			*pte = oldpte;
			PTE_SYNC(pte);
			break;
		}
		cpu_tlb_flushD_SE(pgva);
		cpu_cpwait();
	} while (size != 0);

	/* Sync the I-cache. */
	cpu_icache_sync_range(addr, savesize);
}

/*
 * Write bytes to kernel address space for debugger.
 */
void
db_write_bytes(vaddr_t addr, size_t size, const char *data)
{
	extern char kernel_text[];
	extern char etext[];
	char *dst;
	size_t loop;

	/* If any part is in kernel text, use db_write_text() */
	if (addr >= (vaddr_t) kernel_text && addr < (vaddr_t) etext) {
		db_write_text(addr, size, data);
		return;
	}

	dst = (char *)addr;
	if (db_validate_address((u_int)dst)) {
		db_printf("address %p is invalid\n", dst);
		return;
	}

	if (size == 4 && (addr & 3) == 0 && ((uintptr_t)data & 3) == 0)
		*((int*)dst) = *((const int *)data);
	else
	if (size == 2 && (addr & 1) == 0 && ((uintptr_t)data & 1) == 0)
		*((short*)dst) = *((const short *)data);
	else {
		loop = size;
		while (loop-- > 0) {
			if (db_validate_address((u_int)dst)) {
				db_printf("address %p is invalid\n", dst);
				return;
			}
			*dst++ = *data++;
		}
	}

	/* make sure the caches and memory are in sync */
	cpu_icache_sync_range(addr, size);

	/* In case the current page tables have been modified ... */
	cpu_tlb_flushID();
	cpu_cpwait();
}

#ifdef DDB
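/*
 * Explicit entry into DDB from kernel code.  0xe7ffffff is an undefined
 * instruction encoding, so executing it raises an undefined-instruction
 * exception which db_trapper() below fields and turns into a DDB entry.
 */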
void
cpu_Debugger(void)
{
	__asm(".word 0xe7ffffff");
}

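/*
 * Undefined-instruction hook installed by db_machine_init().  When
 * fault_code is 0 (a kernel-mode fault) it enters DDB, as a breakpoint if
 * the instruction matches BKPT_INST modulo the condition field, otherwise
 * as type -1.  Anything else is declined by returning 1 so the normal
 * undefined-instruction handling can deal with it.
 */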
int
db_trapper(u_int addr, u_int inst, trapframe_t *frame, int fault_code)
{

	if (fault_code == 0) {
		if ((inst & ~INSN_COND_MASK) == (BKPT_INST & ~INSN_COND_MASK))
			kdb_trap(T_BREAKPOINT, frame);
		else
			kdb_trap(-1, frame);
	} else
		return (1);
	return (0);
}

extern u_int esym;
extern u_int end;

static struct undefined_handler db_uh;

void
db_machine_init(void)
{

	/*
	 * We get called before malloc() is available, so supply a static
	 * struct undefined_handler.
	 */
	db_uh.uh_handler = db_trapper;
	install_coproc_handler_static(CORE_UNKNOWN_HANDLER, &db_uh);
}
#endif

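/*
 * Fetch general purpose register 0-15 from a saved trap frame; r13/r14 are
 * taken from the SVC-mode banked copies.  branch_taken() below uses this to
 * evaluate the register operands of branch instructions.
 */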
u_int
db_fetch_reg(int reg, db_regs_t *regs)
{

	switch (reg) {
	case 0:
		return (regs->tf_r0);
	case 1:
		return (regs->tf_r1);
	case 2:
		return (regs->tf_r2);
	case 3:
		return (regs->tf_r3);
	case 4:
		return (regs->tf_r4);
	case 5:
		return (regs->tf_r5);
	case 6:
		return (regs->tf_r6);
	case 7:
		return (regs->tf_r7);
	case 8:
		return (regs->tf_r8);
	case 9:
		return (regs->tf_r9);
	case 10:
		return (regs->tf_r10);
	case 11:
		return (regs->tf_r11);
	case 12:
		return (regs->tf_r12);
	case 13:
		return (regs->tf_svc_sp);
	case 14:
		return (regs->tf_svc_lr);
	case 15:
		return (regs->tf_pc);
	default:
		panic("db_fetch_reg: botch");
	}
}

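/*
 * Compute the target of a branch-type instruction at pc, reading registers
 * and, for load forms, memory as needed.  The single-step code uses this to
 * find out where execution continues.  PC-relative forms add 8 because the
 * ARM PC reads two instructions ahead of the one being executed.
 */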
u_int
branch_taken(u_int insn, u_int pc, db_regs_t *regs)
{
	u_int addr, nregs;

	switch ((insn >> 24) & 0xf) {
	case 0xa:	/* b ... */
	case 0xb:	/* bl ... */
		addr = ((insn << 2) & 0x03ffffff);
		if (addr & 0x02000000)
			addr |= 0xfc000000;
		return (pc + 8 + addr);
	case 0x7:	/* ldr pc, [pc, reg, lsl #2] */
		addr = db_fetch_reg(insn & 0xf, regs);
		addr = pc + 8 + (addr << 2);
		db_read_bytes(addr, 4, (char *)&addr);
		return (addr);
	case 0x5:	/* ldr pc, [reg] */
		addr = db_fetch_reg((insn >> 16) & 0xf, regs);
		db_read_bytes(addr, 4, (char *)&addr);
		return (addr);
	case 0x1:	/* mov pc, reg */
		addr = db_fetch_reg(insn & 0xf, regs);
		return (addr);
	case 0x8:	/* ldmxx reg, {..., pc} */
	case 0x9:
		addr = db_fetch_reg((insn >> 16) & 0xf, regs);
		/* Count the registers in the load list (popcount of bits 0-15). */
		nregs = (insn & 0x5555) + ((insn >> 1) & 0x5555);
		nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333);
		nregs = (nregs + (nregs >> 4)) & 0x0f0f;
		nregs = (nregs + (nregs >> 8)) & 0x001f;
		/* PC is the highest-numbered register, so it is loaded last. */
		switch ((insn >> 23) & 0x3) {
		case 0x0:	/* ldmda */
			addr = addr - 0;
			break;
		case 0x1:	/* ldmia */
			addr = addr + 0 + ((nregs - 1) << 2);
			break;
		case 0x2:	/* ldmdb */
			addr = addr - 4;
			break;
		case 0x3:	/* ldmib */
			addr = addr + 4 + ((nregs - 1) << 2);
			break;
		}
		db_read_bytes(addr, 4, (char *)&addr);
		return (addr);
	default:
		panic("branch_taken: botch");
	}
}