fbt.c revision 1.18 1 /* $NetBSD: fbt.c,v 1.18 2015/02/26 10:31:52 ozaki-r Exp $ */
2
3 /*
4 * CDDL HEADER START
5 *
6 * The contents of this file are subject to the terms of the
7 * Common Development and Distribution License (the "License").
8 * You may not use this file except in compliance with the License.
9 *
10 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
11 * or http://www.opensolaris.org/os/licensing.
12 * See the License for the specific language governing permissions
13 * and limitations under the License.
14 *
15 * When distributing Covered Code, include this CDDL HEADER in each
16 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
17 * If applicable, add the following below this CDDL HEADER, with the
18 * fields enclosed by brackets "[]" replaced with your own identifying
19 * information: Portions Copyright [yyyy] [name of copyright owner]
20 *
21 * CDDL HEADER END
22 *
23 * Portions Copyright 2006-2008 John Birrell jb (at) freebsd.org
24 * Portions Copyright 2010 Darran Hunt darran (at) NetBSD.org
25 *
26 * $FreeBSD: src/sys/cddl/dev/fbt/fbt.c,v 1.1.4.1 2009/08/03 08:13:06 kensmith Exp $
27 *
28 */
29
30 /*
31 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
32 * Use is subject to license terms.
33 */
34
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/conf.h>
39 #include <sys/cpuvar.h>
40 #include <sys/fcntl.h>
41 #include <sys/filio.h>
42 #include <sys/kernel.h>
43 #include <sys/kmem.h>
44 #include <sys/ksyms.h>
45 #include <sys/cpu.h>
46 #include <sys/kthread.h>
47 #include <sys/limits.h>
48 #include <sys/linker.h>
49 #include <sys/lock.h>
50 #include <sys/malloc.h>
51 #include <sys/module.h>
52 #include <sys/mutex.h>
53 #include <sys/poll.h>
54 #include <sys/proc.h>
55 #include <sys/selinfo.h>
56 #include <sys/syscall.h>
57 #include <sys/uio.h>
58 #include <sys/unistd.h>
59
60 #include <machine/cpu.h>
61 #if defined(__i386__) || defined(__amd64__)
62 #include <machine/cpufunc.h>
63 #include <machine/specialreg.h>
64 #if 0
65 #include <x86/cpuvar.h>
66 #endif
67 #include <x86/cputypes.h>
68 #elif __arm__
69 #include <machine/trap.h>
70 #include <arm/cpufunc.h>
71 #include <arm/armreg.h>
72 #include <arm/frame.h>
73 #endif
74
75 #define ELFSIZE ARCH_ELFSIZE
76 #include <sys/exec_elf.h>
77
78 #include <sys/dtrace.h>
79 #include <sys/dtrace_bsd.h>
80 #include <sys/kern_ctf.h>
81 #include <sys/dtrace_impl.h>
82
/* CTF data of the module being processed — presumably set by kern_ctf; not referenced in this file (TODO confirm user). */
mod_ctf_t *modptr;

/* Memory type tag for all fbt allocations (probe bookkeeping structures). */
MALLOC_DEFINE(M_FBT, "fbt", "Function Boundary Tracing");
86
#if defined(__i386__) || defined(__amd64__)
/*
 * Instruction bytes recognised at function entry/exit on x86: the
 * frame-pointer prologue ("push %ebp; mov %esp,%ebp" in both common
 * encodings) and the epilogue/return forms.
 */
#define FBT_PUSHL_EBP		0x55
#define FBT_MOVL_ESP_EBP0_V0	0x8b
#define FBT_MOVL_ESP_EBP1_V0	0xec
#define FBT_MOVL_ESP_EBP0_V1	0x89
#define FBT_MOVL_ESP_EBP1_V1	0xe5
#define FBT_REX_RSP_RBP		0x48	/* REX.W prefix (amd64 prologue) */

#define FBT_POPL_EBP		0x5d
#define FBT_RET			0xc3
#define FBT_RET_IMM16		0xc2
#define FBT_LEAVE		0xc9
#endif

#ifdef __amd64__
#define FBT_PATCHVAL		0xcc	/* int3 breakpoint */
#elif defined(__i386__)
#define FBT_PATCHVAL		0xf0	/* 0xf0 is the LOCK prefix — presumably faults as an invalid opcode here; TODO confirm trap path */

#elif defined(__arm__)
#define FBT_PATCHVAL		DTRACE_BREAKPOINT

/* Instruction-pattern predicates for the ARM instructions fbt can emulate. */
/* entry and return */
#define FBT_BX_LR_P(insn)	(((insn) & ~INSN_COND_MASK) == 0x012fff1e)
#define FBT_B_LABEL_P(insn)	(((insn) & 0xff000000) == 0xea000000)
/* entry */
#define FBT_MOV_IP_SP_P(insn)	((insn) == 0xe1a0c00d)
/* index=1, add=1, wback=0 */
#define FBT_LDR_IMM_P(insn)	(((insn) & 0xfff00000) == 0xe5900000)
#define FBT_MOVW_P(insn)	(((insn) & 0xfff00000) == 0xe3000000)
#define FBT_MOV_IMM_P(insn)	(((insn) & 0xffff0000) == 0xe3a00000)
#define FBT_CMP_IMM_P(insn)	(((insn) & 0xfff00000) == 0xe3500000)
#define FBT_PUSH_P(insn)	(((insn) & 0xffff0000) == 0xe92d0000)
/* return */
/* cond=always, writeback=no, rn=sp and register_list includes pc */
#define FBT_LDM_P(insn)		(((insn) & 0x0fff8000) == 0x089d8000)
#define FBT_LDMIB_P(insn)	(((insn) & 0x0fff8000) == 0x099d8000)
#define FBT_MOV_PC_LR_P(insn)	(((insn) & ~INSN_COND_MASK) == 0x01a0f00e)
/* cond=always, writeback=no, rn=sp and register_list includes lr, but not pc */
#define FBT_LDM_LR_P(insn)	(((insn) & 0xffffc000) == 0xe89d4000)
#define FBT_LDMIB_LR_P(insn)	(((insn) & 0xffffc000) == 0xe99d4000)

/* rval = insn | invop_id (overwriting cond with invop ID) */
#define BUILD_RVAL(insn, id)	(((insn) & ~INSN_COND_MASK) | __SHIFTIN((id), INSN_COND_MASK))
/* encode cond in the first byte */
#define PATCHVAL_ENCODE_COND(insn)	(FBT_PATCHVAL | __SHIFTOUT((insn), INSN_COND_MASK))

#else
#error "architecture not supported"
#endif
137
/* Forward declarations for the cdevsw entry and DTrace provider ops below. */
static dev_type_open(fbt_open);
static int	fbt_unload(void);
static void	fbt_getargdesc(void *, dtrace_id_t, void *, dtrace_argdesc_t *);
static void	fbt_provide_module(void *, dtrace_modctl_t *);
static void	fbt_destroy(void *, dtrace_id_t, void *);
static int	fbt_enable(void *, dtrace_id_t, void *);
static void	fbt_disable(void *, dtrace_id_t, void *);
static void	fbt_load(void);
static void	fbt_suspend(void *, dtrace_id_t, void *);
static void	fbt_resume(void *, dtrace_id_t, void *);

#define FBT_ENTRY	"entry"		/* probe-name suffix for function entry */
#define FBT_RETURN	"return"	/* probe-name suffix for function return */
/* Hash a patchpoint address into an fbt_probetab bucket index. */
#define FBT_ADDR2NDX(addr)	((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask)
#define FBT_PROBETAB_SIZE	0x8000 /* 32k entries -- 128K total */
153
/*
 * Character device switch for the fbt device node: only open is
 * implemented; every other operation is the kernel's no-op stub.
 */
static const struct cdevsw fbt_cdevsw = {
	fbt_open, noclose, noread, nowrite, noioctl,
	nostop, notty, nopoll, nommap, nokqfilter, nodiscard,
	D_OTHER
};
159
/*
 * Stability attributes reported for fbt probes.  Rows follow the
 * dtrace_pattr_t order — presumably provider, module, function, name,
 * args (TODO confirm against sys/dtrace.h).
 */
static dtrace_pattr_t fbt_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA },
};
167
/*
 * DTrace provider operations vector (positional dtrace_pops_t
 * initializer).  Slot names below follow the usual dtrace_pops_t
 * layout — TODO confirm against sys/dtrace.h.
 */
static dtrace_pops_t fbt_pops = {
	NULL,			/* dtps_provide: unused, probes created per module */
	fbt_provide_module,	/* dtps_provide_module */
	fbt_enable,		/* dtps_enable */
	fbt_disable,		/* dtps_disable */
	fbt_suspend,		/* dtps_suspend */
	fbt_resume,		/* dtps_resume */
	fbt_getargdesc,		/* dtps_getargdesc */
	NULL,			/* dtps_getargval */
	NULL,			/* dtps_usermode */
	fbt_destroy		/* dtps_destroy */
};
180
/*
 * One FBT probe instance.  Records where a breakpoint is patched in,
 * the value written there and the original value to restore.  Probes
 * whose patchpoints hash to the same bucket chain via fbtp_hashnext;
 * the probes belonging to one dtrace id chain via fbtp_next.
 */
typedef struct fbt_probe {
	struct fbt_probe *fbtp_hashnext;	/* next in fbt_probetab bucket */
#if defined(__i386__) || defined(__amd64__)
	uint8_t		*fbtp_patchpoint;	/* instruction byte to patch */
	int8_t		fbtp_rval;		/* DTRACE_INVOP_* emulation id */
	uint8_t		fbtp_patchval;		/* breakpoint byte (FBT_PATCHVAL) */
	uint8_t		fbtp_savedval;		/* original byte, for restore */
#elif __arm__
	uint32_t	*fbtp_patchpoint;	/* instruction word to patch */
	int32_t		fbtp_rval;		/* BUILD_RVAL(): insn with invop id in cond */
	uint32_t	fbtp_patchval;		/* breakpoint word (cond preserved) */
	uint32_t	fbtp_savedval;		/* original word, for restore */
#endif
	uintptr_t	fbtp_roffset;		/* return probes: offset into function (0 = entry) */
	dtrace_id_t	fbtp_id;		/* dtrace probe id */
	const char	*fbtp_name;		/* function (symbol) name */
	dtrace_modctl_t	*fbtp_ctl;		/* owning module */
	int		fbtp_loadcnt;		/* module load count (not yet wired up) */
	int		fbtp_primary;		/* NOTE(review): never set in this file */
	int		fbtp_invop_cnt;		/* times this patchpoint has trapped */
	int		fbtp_symindx;		/* ksyms symbol index */
	struct fbt_probe *fbtp_next;		/* next probe for the same id */
} fbt_probe_t;
204
#ifdef notyet
static struct cdev	*fbt_cdev;	/* device node (unused so far) */
static int		fbt_verbose = 0; /* chatty failure reporting (unused so far) */
#endif
static dtrace_provider_id_t fbt_id;	/* our id from dtrace_register() */
static fbt_probe_t	**fbt_probetab;	/* hash table over patchpoint addresses */
static int		fbt_probetab_size;	/* bucket count */
static int		fbt_probetab_mask;	/* presumably size - 1, used by FBT_ADDR2NDX() — set in fbt_load (not shown) */
213
214 #ifdef __arm__
215 extern void (* dtrace_emulation_jump_addr)(int, struct trapframe *);
216
/*
 * Decode an ARM "modified immediate" operand: the low 8 bits hold a
 * constant that is rotated right by twice the value of bits 11:8.
 */
static uint32_t
expand_imm(uint32_t imm12)
{
	uint32_t base = imm12 & 0xff;
	int rot = (imm12 >> 8) * 2;

	if (rot == 0)
		return base;

	return (base >> rot) | (base << (32 - rot));
}
228
/*
 * ARM AddWithCarry() pseudo-op: return x + y + carry_in truncated to
 * 32 bits, reporting the unsigned carry-out and signed overflow.
 *
 * Both sums must be formed in 64-bit arithmetic: the old code added in
 * 32 bits and then widened, which wraps before the comparison (so the
 * carry was always lost) and is signed-overflow UB for the signed sum.
 * Per the ARM ARM, carry_out is 1 exactly when the true unsigned sum
 * does not fit in 32 bits, i.e. when result != unsigned_sum.
 */
static uint32_t
add_with_carry(uint32_t x, uint32_t y, int carry_in,
	int *carry_out, int *overflow)
{
	uint32_t result;
	uint64_t unsigned_sum = (uint64_t)x + (uint64_t)y + (uint64_t)(uint32_t)carry_in;
	int64_t signed_sum = (int64_t)(int32_t)x + (int64_t)(int32_t)y +
	    (int64_t)carry_in;
	/* Only the CMP emulation calls this, always with carry_in == 1. */
	KASSERT(carry_in == 1);

	result = (uint32_t)(unsigned_sum & 0xffffffff);
	/* Carry out iff the true sum overflowed 32 bits. */
	*carry_out = ((uint64_t)result == unsigned_sum) ? 0 : 1;
	/* Overflow iff the sign-interpreted result differs from the true signed sum. */
	*overflow = ((int64_t)(int32_t)result == signed_sum) ? 0 : 1;

	return result;
}
244
245 static void
246 fbt_emulate(int _op, struct trapframe *frame)
247 {
248 uint32_t op = _op;
249
250 switch (op >> 28) {
251 case DTRACE_INVOP_MOV_IP_SP:
252 /* mov ip, sp */
253 frame->tf_ip = frame->tf_svc_sp;
254 frame->tf_pc += 4;
255 break;
256 case DTRACE_INVOP_BX_LR:
257 /* bx lr */
258 frame->tf_pc = frame->tf_svc_lr;
259 break;
260 case DTRACE_INVOP_MOV_PC_LR:
261 /* mov pc, lr */
262 frame->tf_pc = frame->tf_svc_lr;
263 break;
264 case DTRACE_INVOP_LDM:
265 /* ldm sp, {..., pc} */
266 /* FALLTHRU */
267 case DTRACE_INVOP_LDMIB: {
268 /* ldmib sp, {..., pc} */
269 uint32_t register_list = (op & 0xffff);
270 uint32_t *sp = (uint32_t *)(intptr_t)frame->tf_svc_sp;
271 uint32_t *regs = &frame->tf_r0;
272 int i;
273
274 /* IDMIB */
275 if ((op >> 28) == 5)
276 sp++;
277
278 for (i=0; i <= 12; i++) {
279 if (register_list & (1 << i))
280 regs[i] = *sp++;
281 }
282 if (register_list & (1 << 13))
283 frame->tf_svc_sp = *sp++;
284 if (register_list & (1 << 14))
285 frame->tf_svc_lr = *sp++;
286 frame->tf_pc = *sp;
287 break;
288 }
289 case DTRACE_INVOP_LDR_IMM: {
290 /* ldr r?, [{pc,r?}, #?] */
291 uint32_t rt = (op >> 12) & 0xf;
292 uint32_t rn = (op >> 16) & 0xf;
293 uint32_t imm = op & 0xfff;
294 uint32_t *regs = &frame->tf_r0;
295 KDASSERT(rt <= 12);
296 KDASSERT(rn == 15 || rn =< 12);
297 if (rn == 15)
298 regs[rt] = *((uint32_t *)(intptr_t)(frame->tf_pc + 8 + imm));
299 else
300 regs[rt] = *((uint32_t *)(intptr_t)(regs[rn] + imm));
301 frame->tf_pc += 4;
302 break;
303 }
304 case DTRACE_INVOP_MOVW: {
305 /* movw r?, #? */
306 uint32_t rd = (op >> 12) & 0xf;
307 uint32_t imm = (op & 0xfff) | ((op & 0xf0000) >> 4);
308 uint32_t *regs = &frame->tf_r0;
309 KDASSERT(rd <= 12);
310 regs[rd] = imm;
311 frame->tf_pc += 4;
312 break;
313 }
314 case DTRACE_INVOP_MOV_IMM: {
315 /* mov r?, #? */
316 uint32_t rd = (op >> 12) & 0xf;
317 uint32_t imm = expand_imm(op & 0xfff);
318 uint32_t *regs = &frame->tf_r0;
319 KDASSERT(rd <= 12);
320 regs[rd] = imm;
321 frame->tf_pc += 4;
322 break;
323 }
324 case DTRACE_INVOP_CMP_IMM: {
325 /* cmp r?, #? */
326 uint32_t rn = (op >> 16) & 0xf;
327 uint32_t *regs = &frame->tf_r0;
328 uint32_t imm = expand_imm(op & 0xfff);
329 uint32_t spsr = frame->tf_spsr;
330 uint32_t result;
331 int carry;
332 int overflow;
333 /*
334 * (result, carry, overflow) = AddWithCarry(R[n], NOT(imm32), 1);
335 * APSR.N = result<31>;
336 * APSR.Z = IsZeroBit(result);
337 * APSR.C = carry;
338 * APSR.V = overflow;
339 */
340 KDASSERT(rn <= 12);
341 result = add_with_carry(regs[rn], ~imm, 1, &carry, &overflow);
342 if (result & 0x80000000)
343 spsr |= PSR_N_bit;
344 else
345 spsr &= ~PSR_N_bit;
346 if (result == 0)
347 spsr |= PSR_Z_bit;
348 else
349 spsr &= ~PSR_Z_bit;
350 if (carry)
351 spsr |= PSR_C_bit;
352 else
353 spsr &= ~PSR_C_bit;
354 if (overflow)
355 spsr |= PSR_V_bit;
356 else
357 spsr &= ~PSR_V_bit;
358
359 #if 0
360 aprint_normal("pc=%x Rn=%x imm=%x %c%c%c%c\n", frame->tf_pc, regs[rn], imm,
361 (spsr & PSR_N_bit) ? 'N' : 'n',
362 (spsr & PSR_Z_bit) ? 'Z' : 'z',
363 (spsr & PSR_C_bit) ? 'C' : 'c',
364 (spsr & PSR_V_bit) ? 'V' : 'v');
365 #endif
366 frame->tf_spsr = spsr;
367 frame->tf_pc += 4;
368 break;
369 }
370 case DTRACE_INVOP_B_LABEL: {
371 /* b ??? */
372 uint32_t imm = (op & 0x00ffffff) << 2;
373 int32_t diff;
374 /* SignExtend(imm26, 32) */
375 if (imm & 0x02000000)
376 imm |= 0xfc000000;
377 diff = (int32_t)imm;
378 frame->tf_pc += 8 + diff;
379 break;
380 }
381 /* FIXME: push will overwrite trapframe... */
382 case DTRACE_INVOP_PUSH: {
383 /* push {...} */
384 uint32_t register_list = (op & 0xffff);
385 uint32_t *sp = (uint32_t *)(intptr_t)frame->tf_svc_sp;
386 uint32_t *regs = &frame->tf_r0;
387 int i;
388 int count = 0;
389
390 #if 0
391 if ((op & 0x0fff0fff) == 0x052d0004) {
392 /* A2: str r4, [sp, #-4]! */
393 *(sp - 1) = regs[4];
394 frame->tf_pc += 4;
395 break;
396 }
397 #endif
398
399 for (i=0; i < 16; i++) {
400 if (register_list & (1 << i))
401 count++;
402 }
403 sp -= count;
404
405 for (i=0; i <= 12; i++) {
406 if (register_list & (1 << i))
407 *sp++ = regs[i];
408 }
409 if (register_list & (1 << 13))
410 *sp++ = frame->tf_svc_sp;
411 if (register_list & (1 << 14))
412 *sp++ = frame->tf_svc_lr;
413 if (register_list & (1 << 15))
414 *sp = frame->tf_pc + 8;
415
416 /* make sure the caches and memory are in sync */
417 cpu_dcache_wbinv_range(frame->tf_svc_sp, count * 4);
418
419 /* In case the current page tables have been modified ... */
420 cpu_tlb_flushID();
421 cpu_cpwait();
422
423 frame->tf_svc_sp -= count * 4;
424 frame->tf_pc += 4;
425
426 break;
427 }
428 default:
429 KDASSERTMSG(0, "op=%u\n", op >> 28);
430 }
431 }
432 #endif
433
434 static void
435 fbt_doubletrap(void)
436 {
437 fbt_probe_t *fbt;
438 int i;
439
440 for (i = 0; i < fbt_probetab_size; i++) {
441 fbt = fbt_probetab[i];
442
443 for (; fbt != NULL; fbt = fbt->fbtp_next)
444 *fbt->fbtp_patchpoint = fbt->fbtp_savedval;
445 }
446 }
447
448
449 static int
450 fbt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval)
451 {
452 solaris_cpu_t *cpu = &solaris_cpu[cpu_number()];
453 uintptr_t stack0, stack1, stack2, stack3, stack4;
454 fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];
455
456 for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
457 if ((uintptr_t)fbt->fbtp_patchpoint == addr) {
458 fbt->fbtp_invop_cnt++;
459 if (fbt->fbtp_roffset == 0) {
460 int i = 0;
461 /*
462 * When accessing the arguments on the stack,
463 * we must protect against accessing beyond
464 * the stack. We can safely set NOFAULT here
465 * -- we know that interrupts are already
466 * disabled.
467 */
468 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
469 cpu->cpu_dtrace_caller = stack[i++];
470 stack0 = stack[i++];
471 stack1 = stack[i++];
472 stack2 = stack[i++];
473 stack3 = stack[i++];
474 stack4 = stack[i++];
475 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
476 CPU_DTRACE_BADADDR);
477
478 dtrace_probe(fbt->fbtp_id, stack0, stack1,
479 stack2, stack3, stack4);
480
481 cpu->cpu_dtrace_caller = 0;
482 } else {
483 #ifdef __amd64__
484 /*
485 * On amd64, we instrument the ret, not the
486 * leave. We therefore need to set the caller
487 * to assure that the top frame of a stack()
488 * action is correct.
489 */
490 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
491 cpu->cpu_dtrace_caller = stack[0];
492 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
493 CPU_DTRACE_BADADDR);
494 #endif
495
496 dtrace_probe(fbt->fbtp_id, fbt->fbtp_roffset,
497 rval, 0, 0, 0);
498 cpu->cpu_dtrace_caller = 0;
499 }
500
501 return (fbt->fbtp_rval);
502 }
503 }
504
505 return (0);
506 }
507
508 #if defined(__i386__) || defined(__amd64__)
509 static int
510 fbt_provide_module_cb(const char *name, int symindx, void *value,
511 uint32_t symsize, int type, void *opaque)
512 {
513 fbt_probe_t *fbt, *retfbt;
514 u_int8_t *instr, *limit;
515 dtrace_modctl_t *mod = opaque;
516 const char *modname = mod->mod_info->mi_name;
517 int j;
518 int size;
519
520 /* got a function? */
521 if (ELF_ST_TYPE(type) != STT_FUNC) {
522 return 0;
523 }
524
525 if (strncmp(name, "dtrace_", 7) == 0 &&
526 strncmp(name, "dtrace_safe_", 12) != 0) {
527 /*
528 * Anything beginning with "dtrace_" may be called
529 * from probe context unless it explicitly indicates
530 * that it won't be called from probe context by
531 * using the prefix "dtrace_safe_".
532 */
533 return (0);
534 }
535
536 if (name[0] == '_' && name[1] == '_')
537 return (0);
538
539 /*
540 * Exclude some more symbols which can be called from probe context.
541 */
542 if (strcmp(name, "x86_curcpu") == 0 /* CPU */
543 || strcmp(name, "x86_curlwp") == 0 /* curproc, curlwp, curthread */
544 || strcmp(name, "cpu_index") == 0 /* cpu_number, curcpu_id */
545 || strncmp(name, "db_", 3) == 0 /* debugger */
546 || strncmp(name, "ddb_", 4) == 0 /* debugger */
547 || strncmp(name, "kdb_", 4) == 0 /* debugger */
548 || strncmp(name, "lockdebug_", 10) == 0 /* lockdebug XXX for now */
549 || strncmp(name, "kauth_", 5) == 0 /* CRED XXX for now */
550 ) {
551 return 0;
552 }
553
554 instr = (u_int8_t *) value;
555 limit = (u_int8_t *) value + symsize;
556
557 #ifdef __amd64__
558 while (instr < limit) {
559 if (*instr == FBT_PUSHL_EBP)
560 break;
561
562 if ((size = dtrace_instr_size(instr)) <= 0)
563 break;
564
565 instr += size;
566 }
567
568 if (instr >= limit || *instr != FBT_PUSHL_EBP) {
569 /*
570 * We either don't save the frame pointer in this
571 * function, or we ran into some disassembly
572 * screw-up. Either way, we bail.
573 */
574 return (0);
575 }
576 #else
577 if (instr[0] != FBT_PUSHL_EBP) {
578 return (0);
579 }
580
581 if (!(instr[1] == FBT_MOVL_ESP_EBP0_V0 &&
582 instr[2] == FBT_MOVL_ESP_EBP1_V0) &&
583 !(instr[1] == FBT_MOVL_ESP_EBP0_V1 &&
584 instr[2] == FBT_MOVL_ESP_EBP1_V1)) {
585 return (0);
586 }
587 #endif
588 fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
589 fbt->fbtp_name = name;
590 fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
591 name, FBT_ENTRY, 3, fbt);
592 fbt->fbtp_patchpoint = instr;
593 fbt->fbtp_ctl = mod;
594 /* fbt->fbtp_loadcnt = lf->loadcnt; */
595 fbt->fbtp_rval = DTRACE_INVOP_PUSHL_EBP;
596 fbt->fbtp_savedval = *instr;
597 fbt->fbtp_patchval = FBT_PATCHVAL;
598 fbt->fbtp_symindx = symindx;
599
600 fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
601 fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;
602 mod->mod_fbtentries++;
603
604 retfbt = NULL;
605
606 while (instr < limit) {
607 if (instr >= limit)
608 return (0);
609
610 /*
611 * If this disassembly fails, then we've likely walked off into
612 * a jump table or some other unsuitable area. Bail out of the
613 * disassembly now.
614 */
615 if ((size = dtrace_instr_size(instr)) <= 0)
616 return (0);
617
618 #ifdef __amd64__
619 /*
620 * We only instrument "ret" on amd64 -- we don't yet instrument
621 * ret imm16, largely because the compiler doesn't seem to
622 * (yet) emit them in the kernel...
623 */
624 if (*instr != FBT_RET) {
625 instr += size;
626 continue;
627 }
628 #else
629 if (!(size == 1 &&
630 (*instr == FBT_POPL_EBP || *instr == FBT_LEAVE) &&
631 (*(instr + 1) == FBT_RET ||
632 *(instr + 1) == FBT_RET_IMM16))) {
633 instr += size;
634 continue;
635 }
636 #endif
637
638 /*
639 * We (desperately) want to avoid erroneously instrumenting a
640 * jump table, especially given that our markers are pretty
641 * short: two bytes on x86, and just one byte on amd64. To
642 * determine if we're looking at a true instruction sequence
643 * or an inline jump table that happens to contain the same
644 * byte sequences, we resort to some heuristic sleeze: we
645 * treat this instruction as being contained within a pointer,
646 * and see if that pointer points to within the body of the
647 * function. If it does, we refuse to instrument it.
648 */
649 for (j = 0; j < sizeof (uintptr_t); j++) {
650 caddr_t check = (caddr_t) instr - j;
651 uint8_t *ptr;
652
653 if (check < (caddr_t)value)
654 break;
655
656 if (check + sizeof (caddr_t) > (caddr_t)limit)
657 continue;
658
659 ptr = *(uint8_t **)check;
660
661 if (ptr >= (uint8_t *) value && ptr < limit) {
662 instr += size;
663 continue;
664 }
665 }
666
667 /*
668 * We have a winner!
669 */
670 fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
671 fbt->fbtp_name = name;
672
673 if (retfbt == NULL) {
674 fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
675 name, FBT_RETURN, 3, fbt);
676 } else {
677 retfbt->fbtp_next = fbt;
678 fbt->fbtp_id = retfbt->fbtp_id;
679 }
680
681 retfbt = fbt;
682 fbt->fbtp_patchpoint = instr;
683 fbt->fbtp_ctl = mod;
684 /* fbt->fbtp_loadcnt = lf->loadcnt; */
685 fbt->fbtp_symindx = symindx;
686
687 #ifndef __amd64__
688 if (*instr == FBT_POPL_EBP) {
689 fbt->fbtp_rval = DTRACE_INVOP_POPL_EBP;
690 } else {
691 ASSERT(*instr == FBT_LEAVE);
692 fbt->fbtp_rval = DTRACE_INVOP_LEAVE;
693 }
694 fbt->fbtp_roffset =
695 (uintptr_t)(instr - (uint8_t *) value) + 1;
696
697 #else
698 ASSERT(*instr == FBT_RET);
699 fbt->fbtp_rval = DTRACE_INVOP_RET;
700 fbt->fbtp_roffset =
701 (uintptr_t)(instr - (uint8_t *) value);
702 #endif
703
704 fbt->fbtp_savedval = *instr;
705 fbt->fbtp_patchval = FBT_PATCHVAL;
706 fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
707 fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;
708
709 mod->mod_fbtentries++;
710
711 instr += size;
712 }
713
714 return 0;
715 }
716
717 #elif defined(__arm__)
718
/*
 * ksyms_mod_foreach() callback (ARM): create the FBT entry probe and
 * any return probes for one kernel symbol of module `opaque`.  The
 * entry probe goes on the first instruction if it is one of the
 * emulatable forms; return probes go on "bx lr", "mov pc, lr",
 * "ldm/ldmib sp, {..., pc}", or a branch immediately following an
 * "ldm sp, {..., lr}".  Always returns 0 so the walk continues.
 */
static int
fbt_provide_module_cb(const char *name, int symindx, void *value,
    uint32_t symsize, int type, void *opaque)
{
	fbt_probe_t *fbt, *retfbt;
	uint32_t *instr, *limit;
	bool was_ldm_lr = false;	/* previous insn restored lr from sp */
	dtrace_modctl_t *mod = opaque;
	const char *modname = mod->mod_info->mi_name;
	int size;

	/* got a function? */
	if (ELF_ST_TYPE(type) != STT_FUNC) {
		return 0;
	}

	if (strncmp(name, "dtrace_", 7) == 0 &&
	    strncmp(name, "dtrace_safe_", 12) != 0) {
		/*
		 * Anything beginning with "dtrace_" may be called
		 * from probe context unless it explicitly indicates
		 * that it won't be called from probe context by
		 * using the prefix "dtrace_safe_".
		 */
		return (0);
	}

	/* Skip compiler/linker internal symbols. */
	if (name[0] == '_' && name[1] == '_')
		return (0);

	/*
	 * Exclude some more symbols which can be called from probe context.
	 */
	if (strncmp(name, "db_", 3) == 0 /* debugger */
	    || strncmp(name, "ddb_", 4) == 0 /* debugger */
	    || strncmp(name, "kdb_", 4) == 0 /* debugger */
	    || strncmp(name, "lockdebug_", 10) == 0 /* lockdebug XXX for now */
	    || strncmp(name, "kauth_", 5) == 0 /* CRED XXX for now */
	    /* Sensitive functions on ARM */
	    || strncmp(name, "_spl", 4) == 0
	    || strcmp(name, "binuptime") == 0
	    || strcmp(name, "dosoftints") == 0
	    || strcmp(name, "fbt_emulate") == 0
	    || strcmp(name, "nanouptime") == 0
	    || strcmp(name, "undefinedinstruction") == 0
	    || strncmp(name, "dmt_", 4) == 0 /* omap */
	    || strncmp(name, "mvsoctmr_", 9) == 0 /* marvell */
	    ) {
		return 0;
	}

	instr = (uint32_t *) value;
	limit = (uint32_t *)((uintptr_t)value + symsize);

	/*
	 * The first instruction must be one of the forms fbt_emulate()
	 * can handle; otherwise the function cannot be instrumented.
	 */
	if (!FBT_MOV_IP_SP_P(*instr)
	    && !FBT_BX_LR_P(*instr)
	    && !FBT_MOVW_P(*instr)
	    && !FBT_MOV_IMM_P(*instr)
	    && !FBT_B_LABEL_P(*instr)
	    && !FBT_LDR_IMM_P(*instr)
	    && !FBT_CMP_IMM_P(*instr)
	    /* && !FBT_PUSH_P(*instr) */
	    ) {
		return 0;
	}

	/* Entry probe on the first instruction. */
	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;
	fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
	    name, FBT_ENTRY, 3, fbt);
	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = mod;
	/* fbt->fbtp_loadcnt = lf->loadcnt; */
	/* Encode the matching emulation id over the condition field. */
	if (FBT_MOV_IP_SP_P(*instr))
		fbt->fbtp_rval = BUILD_RVAL(*instr, DTRACE_INVOP_MOV_IP_SP);
	else if (FBT_LDR_IMM_P(*instr))
		fbt->fbtp_rval = BUILD_RVAL(*instr, DTRACE_INVOP_LDR_IMM);
	else if (FBT_MOVW_P(*instr))
		fbt->fbtp_rval = BUILD_RVAL(*instr, DTRACE_INVOP_MOVW);
	else if (FBT_MOV_IMM_P(*instr))
		fbt->fbtp_rval = BUILD_RVAL(*instr, DTRACE_INVOP_MOV_IMM);
	else if (FBT_CMP_IMM_P(*instr))
		fbt->fbtp_rval = BUILD_RVAL(*instr, DTRACE_INVOP_CMP_IMM);
	else if (FBT_BX_LR_P(*instr))
		fbt->fbtp_rval = BUILD_RVAL(*instr, DTRACE_INVOP_BX_LR);
	else if (FBT_PUSH_P(*instr))
		fbt->fbtp_rval = BUILD_RVAL(*instr, DTRACE_INVOP_PUSH);
	else if (FBT_B_LABEL_P(*instr))
		fbt->fbtp_rval = BUILD_RVAL(*instr, DTRACE_INVOP_B_LABEL);

	/* Breakpoint word preserves the original condition bits. */
	fbt->fbtp_patchval = PATCHVAL_ENCODE_COND(*instr);
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_symindx = symindx;

	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;
	mod->mod_fbtentries++;

	retfbt = NULL;

	/* Scan the body for return sequences; instr/size are in words. */
	while (instr < limit) {
		if (instr >= limit)
			return (0);

		size = 1;

		if (!FBT_BX_LR_P(*instr)
		    && !FBT_MOV_PC_LR_P(*instr)
		    && !FBT_LDM_P(*instr)
		    && !FBT_LDMIB_P(*instr)
		    && !(was_ldm_lr && FBT_B_LABEL_P(*instr))
		    ) {
			/*
			 * Remember an "ldm sp, {..., lr}" so a branch on
			 * the next instruction is recognised as a return.
			 */
			if (FBT_LDM_LR_P(*instr) || FBT_LDMIB_LR_P(*instr))
				was_ldm_lr = true;
			else
				was_ldm_lr = false;
			instr += size;
			continue;
		}

		/*
		 * We have a winner!
		 */
		fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
		fbt->fbtp_name = name;

		if (retfbt == NULL) {
			fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
			    name, FBT_RETURN, 3, fbt);
		} else {
			/* Later return sites share the first probe's id. */
			retfbt->fbtp_next = fbt;
			fbt->fbtp_id = retfbt->fbtp_id;
		}

		retfbt = fbt;
		fbt->fbtp_patchpoint = instr;
		fbt->fbtp_ctl = mod;
		/* fbt->fbtp_loadcnt = lf->loadcnt; */
		fbt->fbtp_symindx = symindx;

		if (FBT_BX_LR_P(*instr))
			fbt->fbtp_rval = BUILD_RVAL(*instr, DTRACE_INVOP_BX_LR);
		else if (FBT_MOV_PC_LR_P(*instr))
			fbt->fbtp_rval = BUILD_RVAL(*instr, DTRACE_INVOP_MOV_PC_LR);
		else if (FBT_LDM_P(*instr))
			fbt->fbtp_rval = BUILD_RVAL(*instr, DTRACE_INVOP_LDM);
		else if (FBT_LDMIB_P(*instr))
			fbt->fbtp_rval = BUILD_RVAL(*instr, DTRACE_INVOP_LDMIB);
		else if (FBT_B_LABEL_P(*instr))
			fbt->fbtp_rval = BUILD_RVAL(*instr, DTRACE_INVOP_B_LABEL);

		/* Offset is in words here (instr is uint32_t *). */
		fbt->fbtp_roffset = (uintptr_t)(instr - (uint32_t *) value);
		fbt->fbtp_patchval = PATCHVAL_ENCODE_COND(*instr);

		fbt->fbtp_savedval = *instr;
		fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
		fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

		mod->mod_fbtentries++;

		instr += size;
		was_ldm_lr = false;
	}

	return 0;
}
885 #else
886 #error "architecture not supported"
887 #endif
888
889
890 static void
891 fbt_provide_module(void *arg, dtrace_modctl_t *mod)
892 {
893 char modname[MAXPATHLEN];
894 int i;
895 size_t len;
896
897 strlcpy(modname, mod->mod_info->mi_name, sizeof(modname));
898 len = strlen(modname);
899 if (len > 5 && strcmp(modname + len - 3, ".kmod") == 0)
900 modname[len - 4] = '\0';
901
902 /*
903 * Employees of dtrace and their families are ineligible. Void
904 * where prohibited.
905 */
906 if (strcmp(modname, "dtrace") == 0)
907 return;
908
909 /*
910 * The cyclic timer subsystem can be built as a module and DTrace
911 * depends on that, so it is ineligible too.
912 */
913 if (strcmp(modname, "cyclic") == 0)
914 return;
915
916 /*
917 * To register with DTrace, a module must list 'dtrace' as a
918 * dependency in order for the kernel linker to resolve
919 * symbols like dtrace_register(). All modules with such a
920 * dependency are ineligible for FBT tracing.
921 */
922 for (i = 0; i < mod->mod_nrequired; i++) {
923 if (strncmp(mod->mod_required[i]->mod_info->mi_name,
924 "dtrace", 6) == 0)
925 return;
926 }
927
928 if (mod->mod_fbtentries) {
929 /*
930 * This module has some FBT entries allocated; we're afraid
931 * to screw with it.
932 */
933 return;
934 }
935
936 /*
937 * List the functions in the module and the symbol values.
938 */
939 ksyms_mod_foreach(modname, fbt_provide_module_cb, mod);
940 }
941
942 static void
943 fbt_destroy(void *arg, dtrace_id_t id, void *parg)
944 {
945 fbt_probe_t *fbt = parg, *next, *hash, *last;
946 dtrace_modctl_t *ctl;
947 int ndx;
948
949 do {
950 ctl = fbt->fbtp_ctl;
951
952 ctl->mod_fbtentries--;
953
954 /*
955 * Now we need to remove this probe from the fbt_probetab.
956 */
957 ndx = FBT_ADDR2NDX(fbt->fbtp_patchpoint);
958 last = NULL;
959 hash = fbt_probetab[ndx];
960
961 while (hash != fbt) {
962 ASSERT(hash != NULL);
963 last = hash;
964 hash = hash->fbtp_hashnext;
965 }
966
967 if (last != NULL) {
968 last->fbtp_hashnext = fbt->fbtp_hashnext;
969 } else {
970 fbt_probetab[ndx] = fbt->fbtp_hashnext;
971 }
972
973 next = fbt->fbtp_next;
974 free(fbt, M_FBT);
975
976 fbt = next;
977 } while (fbt != NULL);
978 }
979
980 #if defined(__i386__) || defined(__amd64__)
981
982 static int
983 fbt_enable(void *arg, dtrace_id_t id, void *parg)
984 {
985 fbt_probe_t *fbt = parg;
986 #if 0
987 dtrace_modctl_t *ctl = fbt->fbtp_ctl;
988 #endif
989 u_long psl;
990 u_long cr0;
991
992
993 #if 0 /* XXX TBD */
994 ctl->nenabled++;
995
996 /*
997 * Now check that our modctl has the expected load count. If it
998 * doesn't, this module must have been unloaded and reloaded -- and
999 * we're not going to touch it.
1000 */
1001 if (ctl->loadcnt != fbt->fbtp_loadcnt) {
1002 if (fbt_verbose) {
1003 printf("fbt is failing for probe %s "
1004 "(module %s reloaded)",
1005 fbt->fbtp_name, ctl->filename);
1006 }
1007
1008 return;
1009 }
1010 #endif
1011
1012 /* Disable interrupts. */
1013 psl = x86_read_psl();
1014 x86_disable_intr();
1015
1016 /* Disable write protection in supervisor mode. */
1017 cr0 = rcr0();
1018 lcr0(cr0 & ~CR0_WP);
1019
1020 for (; fbt != NULL; fbt = fbt->fbtp_next) {
1021 *fbt->fbtp_patchpoint = fbt->fbtp_patchval;
1022 }
1023
1024 /* Write back and invalidate cache, flush pipelines. */
1025 wbinvd();
1026 x86_flush();
1027 x86_write_psl(psl);
1028
1029 /* Re-enable write protection. */
1030 lcr0(cr0);
1031
1032 return 0;
1033 }
1034
1035 static void
1036 fbt_disable(void *arg, dtrace_id_t id, void *parg)
1037 {
1038 fbt_probe_t *fbt = parg;
1039 #if 0
1040 dtrace_modctl_t *ctl = fbt->fbtp_ctl;
1041 #endif
1042 u_long psl;
1043 u_long cr0;
1044
1045 #if 0 /* XXX TBD */
1046 ASSERT(ctl->nenabled > 0);
1047 ctl->nenabled--;
1048
1049 if ((ctl->loadcnt != fbt->fbtp_loadcnt))
1050 return;
1051 #endif
1052 /* Disable interrupts. */
1053 psl = x86_read_psl();
1054 x86_disable_intr();
1055
1056 /* Disable write protection in supervisor mode. */
1057 cr0 = rcr0();
1058 lcr0(cr0 & ~CR0_WP);
1059
1060 for (; fbt != NULL; fbt = fbt->fbtp_next)
1061 *fbt->fbtp_patchpoint = fbt->fbtp_savedval;
1062
1063 /* Write back and invalidate cache, flush pipelines. */
1064 wbinvd();
1065 x86_flush();
1066 x86_write_psl(psl);
1067
1068 /* Re-enable write protection. */
1069 lcr0(cr0);
1070 }
1071
1072 static void
1073 fbt_suspend(void *arg, dtrace_id_t id, void *parg)
1074 {
1075 fbt_probe_t *fbt = parg;
1076 #if 0
1077 dtrace_modctl_t *ctl = fbt->fbtp_ctl;
1078 #endif
1079 u_long psl;
1080 u_long cr0;
1081
1082 #if 0 /* XXX TBD */
1083 ASSERT(ctl->nenabled > 0);
1084
1085 if ((ctl->loadcnt != fbt->fbtp_loadcnt))
1086 return;
1087 #endif
1088
1089 /* Disable interrupts. */
1090 psl = x86_read_psl();
1091 x86_disable_intr();
1092
1093 /* Disable write protection in supervisor mode. */
1094 cr0 = rcr0();
1095 lcr0(cr0 & ~CR0_WP);
1096
1097 for (; fbt != NULL; fbt = fbt->fbtp_next)
1098 *fbt->fbtp_patchpoint = fbt->fbtp_savedval;
1099
1100 /* Write back and invalidate cache, flush pipelines. */
1101 wbinvd();
1102 x86_flush();
1103 x86_write_psl(psl);
1104
1105 /* Re-enable write protection. */
1106 lcr0(cr0);
1107 }
1108
1109 static void
1110 fbt_resume(void *arg, dtrace_id_t id, void *parg)
1111 {
1112 fbt_probe_t *fbt = parg;
1113 #if 0
1114 dtrace_modctl_t *ctl = fbt->fbtp_ctl;
1115 #endif
1116 u_long psl;
1117 u_long cr0;
1118
1119 #if 0 /* XXX TBD */
1120 ASSERT(ctl->nenabled > 0);
1121
1122 if ((ctl->loadcnt != fbt->fbtp_loadcnt))
1123 return;
1124 #endif
1125 /* Disable interrupts. */
1126 psl = x86_read_psl();
1127 x86_disable_intr();
1128
1129 /* Disable write protection in supervisor mode. */
1130 cr0 = rcr0();
1131 lcr0(cr0 & ~CR0_WP);
1132
1133 for (; fbt != NULL; fbt = fbt->fbtp_next)
1134 *fbt->fbtp_patchpoint = fbt->fbtp_patchval;
1135
1136 /* Write back and invalidate cache, flush pipelines. */
1137 wbinvd();
1138 x86_flush();
1139 x86_write_psl(psl);
1140
1141 /* Re-enable write protection. */
1142 lcr0(cr0);
1143 }
1144
1145 #elif defined(__arm__)
1146
/*
 * DTrace probe-ops enable hook (ARM): install the breakpoint/patch
 * instruction at every patchpoint in the probe chain headed by parg.
 *
 * arg/id are unused.  The write plus I/D-cache writeback-invalidate is
 * done with interrupts disabled so no CPU executes a half-patched
 * instruction.  Always returns 0.
 */
static int
fbt_enable(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
#if 0
	dtrace_modctl_t *ctl = fbt->fbtp_ctl;
#endif
	dtrace_icookie_t c;


#if 0	/* XXX TBD */
	ctl->nenabled++;

	/*
	 * Now check that our modctl has the expected load count.  If it
	 * doesn't, this module must have been unloaded and reloaded -- and
	 * we're not going to touch it.
	 */
	if (ctl->loadcnt != fbt->fbtp_loadcnt) {
		if (fbt_verbose) {
			printf("fbt is failing for probe %s "
			    "(module %s reloaded)",
			    fbt->fbtp_name, ctl->filename);
		}

		return;
	}
#endif

	c = dtrace_interrupt_disable();

	/* Patch each point, then sync the caches for that instruction. */
	for (fbt = parg; fbt != NULL; fbt = fbt->fbtp_next) {
		*fbt->fbtp_patchpoint = fbt->fbtp_patchval;
		cpu_idcache_wbinv_range((vaddr_t)fbt->fbtp_patchpoint, 4);
	}

	dtrace_interrupt_enable(c);

	return 0;
}
1187
/*
 * DTrace probe-ops disable hook (ARM): restore the original saved
 * instruction at every patchpoint in the probe chain headed by parg.
 * arg/id are unused.  Mirrors fbt_enable() with the saved value.
 */
static void
fbt_disable(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
#if 0
	dtrace_modctl_t *ctl = fbt->fbtp_ctl;
#endif
	dtrace_icookie_t c;

#if 0	/* XXX TBD */
	ASSERT(ctl->nenabled > 0);
	ctl->nenabled--;

	if ((ctl->loadcnt != fbt->fbtp_loadcnt))
		return;
#endif

	c = dtrace_interrupt_disable();

	/* Restore the original instruction and sync caches. */
	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		*fbt->fbtp_patchpoint = fbt->fbtp_savedval;
		cpu_idcache_wbinv_range((vaddr_t)fbt->fbtp_patchpoint, 4);
	}

	dtrace_interrupt_enable(c);
}
1214
/*
 * DTrace probe-ops suspend hook (ARM): temporarily restore the original
 * instruction at every patchpoint, without tearing down probe state.
 * arg/id are unused; fbt_resume() re-installs the patch values.
 */
static void
fbt_suspend(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
#if 0
	dtrace_modctl_t *ctl = fbt->fbtp_ctl;
#endif
	dtrace_icookie_t c;

#if 0	/* XXX TBD */
	ASSERT(ctl->nenabled > 0);

	if ((ctl->loadcnt != fbt->fbtp_loadcnt))
		return;
#endif

	c = dtrace_interrupt_disable();

	/* Put back the saved (original) instruction at each point. */
	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		*fbt->fbtp_patchpoint = fbt->fbtp_savedval;
		cpu_idcache_wbinv_range((vaddr_t)fbt->fbtp_patchpoint, 4);
	}

	dtrace_interrupt_enable(c);
}
1240
/*
 * DTrace probe-ops resume hook (ARM): re-install the patch instruction
 * at every patchpoint, undoing a prior fbt_suspend().  arg/id unused.
 */
static void
fbt_resume(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
#if 0
	dtrace_modctl_t *ctl = fbt->fbtp_ctl;
#endif
	dtrace_icookie_t c;

#if 0	/* XXX TBD */
	ASSERT(ctl->nenabled > 0);

	if ((ctl->loadcnt != fbt->fbtp_loadcnt))
		return;
#endif

	c = dtrace_interrupt_disable();

	/* Re-install the patch value and sync caches for each point. */
	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		*fbt->fbtp_patchpoint = fbt->fbtp_patchval;
		cpu_idcache_wbinv_range((vaddr_t)fbt->fbtp_patchpoint, 4);
	}

	dtrace_interrupt_enable(c);
}
1266
1267 #else
1268 #error "architecture not supported"
1269 #endif
1270
/*
 * Build the per-module symbol-index -> CTF-data-offset array
 * (mc->ctfoffp) used by fbt_getargdesc() to find the CTF function/
 * object record for a given ELF symbol.
 *
 * mod supplies the module name for diagnostics; mc holds the CTF blob,
 * the symbol table, and the optional nmap[] table that maps the
 * original (CTF-time) symbol order to the current sorted/trimmed one.
 * Returns 0 on success or EINVAL/ENOMEM on failure.  The allocated
 * array is owned by mc (freed elsewhere, not here).
 */
static int
fbt_ctfoff_init(dtrace_modctl_t *mod, mod_ctf_t *mc)
{
	const Elf_Sym *symp = mc->symtab;
	const ctf_header_t *hp = (const ctf_header_t *) mc->ctftab;
	const uint8_t *ctfdata = mc->ctftab + sizeof(ctf_header_t);
	int i;
	uint32_t *ctfoff;
	uint32_t objtoff = hp->cth_objtoff;
	uint32_t funcoff = hp->cth_funcoff;
	ushort_t info;
	ushort_t vlen;
	/* With a remap table the array is sized for the original order. */
	int nsyms = (mc->nmap != NULL) ? mc->nmapsize : mc->nsym;

	/* Sanity check. */
	if (hp->cth_magic != CTF_MAGIC) {
		printf("Bad magic value in CTF data of '%s'\n",
		    mod->mod_info->mi_name);
		return (EINVAL);
	}

	if (mc->symtab == NULL) {
		printf("No symbol table in '%s'\n",
		    mod->mod_info->mi_name);
		return (EINVAL);
	}

	/* NOTE(review): M_WAITOK malloc never returns NULL; the check
	 * is redundant but harmless. */
	if ((ctfoff = malloc(sizeof(uint32_t) * nsyms, M_FBT, M_WAITOK)) == NULL)
		return (ENOMEM);

	mc->ctfoffp = ctfoff;

	for (i = 0; i < nsyms; i++, ctfoff++, symp++) {
		if (mc->nmap != NULL) {
			if (mc->nmap[i] == 0) {
				printf("%s.%d: Error! Got zero nmap!\n",
				    __func__, __LINE__);
				continue;
			}

			/*
			 * CTF expects the pre-sorted symbol ordering,
			 * so map it from that to the current sorted
			 * and trimmed symbol table.
			 * ctfoff[new-ind] = oldind symbol info.
			 */

			/* map old index to new symbol table */
			symp = &mc->symtab[mc->nmap[i] - 1];

			/* map old index to new ctfoff index */
			ctfoff = &mc->ctfoffp[mc->nmap[i]-1];
		}

		/* Unnamed or undefined symbols have no CTF record. */
		if (symp->st_name == 0 || symp->st_shndx == SHN_UNDEF) {
			*ctfoff = 0xffffffff;
			continue;
		}

		switch (ELF_ST_TYPE(symp->st_info)) {
		case STT_OBJECT:
			/* Data objects consume one ushort_t type id each
			 * in the object section. */
			if (objtoff >= hp->cth_funcoff ||
			    (symp->st_shndx == SHN_ABS && symp->st_value == 0)) {
				*ctfoff = 0xffffffff;
				break;
			}

			*ctfoff = objtoff;
			objtoff += sizeof (ushort_t);
			break;

		case STT_FUNC:
			if (funcoff >= hp->cth_typeoff) {
				*ctfoff = 0xffffffff;
				break;
			}

			*ctfoff = funcoff;

			info = *((const ushort_t *)(ctfdata + funcoff));
			vlen = CTF_INFO_VLEN(info);

			/*
			 * If we encounter a zero pad at the end, just skip it.
			 * Otherwise skip over the function and its return type
			 * (+2) and the argument list (vlen).
			 */
			if (CTF_INFO_KIND(info) == CTF_K_UNKNOWN && vlen == 0)
				funcoff += sizeof (ushort_t); /* skip pad */
			else
				funcoff += sizeof (ushort_t) * (vlen + 2);
			break;

		default:
			/* Sections, files, TLS, etc.: no CTF record. */
			*ctfoff = 0xffffffff;
			break;
		}
	}

	return (0);
}
1371
1372 static ssize_t
1373 fbt_get_ctt_size(uint8_t xversion, const ctf_type_t *tp, ssize_t *sizep,
1374 ssize_t *incrementp)
1375 {
1376 ssize_t size, increment;
1377
1378 if (xversion > CTF_VERSION_1 &&
1379 tp->ctt_size == CTF_LSIZE_SENT) {
1380 size = CTF_TYPE_LSIZE(tp);
1381 increment = sizeof (ctf_type_t);
1382 } else {
1383 size = tp->ctt_size;
1384 increment = sizeof (ctf_stype_t);
1385 }
1386
1387 if (sizep)
1388 *sizep = size;
1389 if (incrementp)
1390 *incrementp = increment;
1391
1392 return (size);
1393 }
1394
1395 static int
1396 fbt_typoff_init(mod_ctf_t *mc)
1397 {
1398 const ctf_header_t *hp = (const ctf_header_t *) mc->ctftab;
1399 const ctf_type_t *tbuf;
1400 const ctf_type_t *tend;
1401 const ctf_type_t *tp;
1402 const uint8_t *ctfdata = mc->ctftab + sizeof(ctf_header_t);
1403 int ctf_typemax = 0;
1404 uint32_t *xp;
1405 ulong_t pop[CTF_K_MAX + 1] = { 0 };
1406
1407 /* Sanity check. */
1408 if (hp->cth_magic != CTF_MAGIC)
1409 return (EINVAL);
1410
1411 tbuf = (const ctf_type_t *) (ctfdata + hp->cth_typeoff);
1412 tend = (const ctf_type_t *) (ctfdata + hp->cth_stroff);
1413
1414 int child = hp->cth_parname != 0;
1415
1416 /*
1417 * We make two passes through the entire type section. In this first
1418 * pass, we count the number of each type and the total number of types.
1419 */
1420 for (tp = tbuf; tp < tend; ctf_typemax++) {
1421 ushort_t kind = CTF_INFO_KIND(tp->ctt_info);
1422 ulong_t vlen = CTF_INFO_VLEN(tp->ctt_info);
1423 ssize_t size, increment;
1424
1425 size_t vbytes;
1426 uint_t n;
1427
1428 (void) fbt_get_ctt_size(hp->cth_version, tp, &size, &increment);
1429
1430 switch (kind) {
1431 case CTF_K_INTEGER:
1432 case CTF_K_FLOAT:
1433 vbytes = sizeof (uint_t);
1434 break;
1435 case CTF_K_ARRAY:
1436 vbytes = sizeof (ctf_array_t);
1437 break;
1438 case CTF_K_FUNCTION:
1439 vbytes = sizeof (ushort_t) * (vlen + (vlen & 1));
1440 break;
1441 case CTF_K_STRUCT:
1442 case CTF_K_UNION:
1443 if (size < CTF_LSTRUCT_THRESH) {
1444 ctf_member_t *mp = (ctf_member_t *)
1445 ((uintptr_t)tp + increment);
1446
1447 vbytes = sizeof (ctf_member_t) * vlen;
1448 for (n = vlen; n != 0; n--, mp++)
1449 child |= CTF_TYPE_ISCHILD(mp->ctm_type);
1450 } else {
1451 ctf_lmember_t *lmp = (ctf_lmember_t *)
1452 ((uintptr_t)tp + increment);
1453
1454 vbytes = sizeof (ctf_lmember_t) * vlen;
1455 for (n = vlen; n != 0; n--, lmp++)
1456 child |=
1457 CTF_TYPE_ISCHILD(lmp->ctlm_type);
1458 }
1459 break;
1460 case CTF_K_ENUM:
1461 vbytes = sizeof (ctf_enum_t) * vlen;
1462 break;
1463 case CTF_K_FORWARD:
1464 /*
1465 * For forward declarations, ctt_type is the CTF_K_*
1466 * kind for the tag, so bump that population count too.
1467 * If ctt_type is unknown, treat the tag as a struct.
1468 */
1469 if (tp->ctt_type == CTF_K_UNKNOWN ||
1470 tp->ctt_type >= CTF_K_MAX)
1471 pop[CTF_K_STRUCT]++;
1472 else
1473 pop[tp->ctt_type]++;
1474 /*FALLTHRU*/
1475 case CTF_K_UNKNOWN:
1476 vbytes = 0;
1477 break;
1478 case CTF_K_POINTER:
1479 case CTF_K_TYPEDEF:
1480 case CTF_K_VOLATILE:
1481 case CTF_K_CONST:
1482 case CTF_K_RESTRICT:
1483 child |= CTF_TYPE_ISCHILD(tp->ctt_type);
1484 vbytes = 0;
1485 break;
1486 default:
1487 printf("%s(%d): detected invalid CTF kind -- %u\n", __func__, __LINE__, kind);
1488 return (EIO);
1489 }
1490 tp = (ctf_type_t *)((uintptr_t)tp + increment + vbytes);
1491 pop[kind]++;
1492 }
1493
1494 mc->typlen = ctf_typemax;
1495
1496 if ((xp = malloc(sizeof(uint32_t) * ctf_typemax, M_FBT, M_ZERO | M_WAITOK)) == NULL)
1497 return (ENOMEM);
1498
1499 mc->typoffp = xp;
1500
1501 /* type id 0 is used as a sentinel value */
1502 *xp++ = 0;
1503
1504 /*
1505 * In the second pass, fill in the type offset.
1506 */
1507 for (tp = tbuf; tp < tend; xp++) {
1508 ushort_t kind = CTF_INFO_KIND(tp->ctt_info);
1509 ulong_t vlen = CTF_INFO_VLEN(tp->ctt_info);
1510 ssize_t size, increment;
1511
1512 size_t vbytes;
1513 uint_t n;
1514
1515 (void) fbt_get_ctt_size(hp->cth_version, tp, &size, &increment);
1516
1517 switch (kind) {
1518 case CTF_K_INTEGER:
1519 case CTF_K_FLOAT:
1520 vbytes = sizeof (uint_t);
1521 break;
1522 case CTF_K_ARRAY:
1523 vbytes = sizeof (ctf_array_t);
1524 break;
1525 case CTF_K_FUNCTION:
1526 vbytes = sizeof (ushort_t) * (vlen + (vlen & 1));
1527 break;
1528 case CTF_K_STRUCT:
1529 case CTF_K_UNION:
1530 if (size < CTF_LSTRUCT_THRESH) {
1531 ctf_member_t *mp = (ctf_member_t *)
1532 ((uintptr_t)tp + increment);
1533
1534 vbytes = sizeof (ctf_member_t) * vlen;
1535 for (n = vlen; n != 0; n--, mp++)
1536 child |= CTF_TYPE_ISCHILD(mp->ctm_type);
1537 } else {
1538 ctf_lmember_t *lmp = (ctf_lmember_t *)
1539 ((uintptr_t)tp + increment);
1540
1541 vbytes = sizeof (ctf_lmember_t) * vlen;
1542 for (n = vlen; n != 0; n--, lmp++)
1543 child |=
1544 CTF_TYPE_ISCHILD(lmp->ctlm_type);
1545 }
1546 break;
1547 case CTF_K_ENUM:
1548 vbytes = sizeof (ctf_enum_t) * vlen;
1549 break;
1550 case CTF_K_FORWARD:
1551 case CTF_K_UNKNOWN:
1552 vbytes = 0;
1553 break;
1554 case CTF_K_POINTER:
1555 case CTF_K_TYPEDEF:
1556 case CTF_K_VOLATILE:
1557 case CTF_K_CONST:
1558 case CTF_K_RESTRICT:
1559 vbytes = 0;
1560 break;
1561 default:
1562 printf("%s(%d): detected invalid CTF kind -- %u\n", __func__, __LINE__, kind);
1563 return (EIO);
1564 }
1565 *xp = (uint32_t)((uintptr_t) tp - (uintptr_t) ctfdata);
1566 tp = (ctf_type_t *)((uintptr_t)tp + increment + vbytes);
1567 }
1568
1569 return (0);
1570 }
1571
1572 /*
1573 * CTF Declaration Stack
1574 *
1575 * In order to implement ctf_type_name(), we must convert a type graph back
1576 * into a C type declaration. Unfortunately, a type graph represents a storage
1577 * class ordering of the type whereas a type declaration must obey the C rules
1578 * for operator precedence, and the two orderings are frequently in conflict.
1579 * For example, consider these CTF type graphs and their C declarations:
1580 *
1581 * CTF_K_POINTER -> CTF_K_FUNCTION -> CTF_K_INTEGER : int (*)()
1582 * CTF_K_POINTER -> CTF_K_ARRAY -> CTF_K_INTEGER : int (*)[]
1583 *
1584 * In each case, parentheses are used to raise operator * to higher lexical
1585 * precedence, so the string form of the C declaration cannot be constructed by
1586 * walking the type graph links and forming the string from left to right.
1587 *
1588 * The functions in this file build a set of stacks from the type graph nodes
1589 * corresponding to the C operator precedence levels in the appropriate order.
1590 * The code in ctf_type_name() can then iterate over the levels and nodes in
1591 * lexical precedence order and construct the final C declaration string.
1592 */
/* Embedded doubly-linked list link; also used standalone as the
 * head/tail holder (l_next = head, l_prev = tail). */
typedef struct ctf_list {
	struct ctf_list *l_prev; /* previous pointer or tail pointer */
	struct ctf_list *l_next; /* next pointer or head pointer */
} ctf_list_t;

/* Accessors; rely on the ctf_list_t being the FIRST member of the
 * containing structure, so the casts are valid. */
#define	ctf_list_prev(elem)	((void *)(((ctf_list_t *)(elem))->l_prev))
#define	ctf_list_next(elem)	((void *)(((ctf_list_t *)(elem))->l_next))

/* C operator precedence levels, lowest to highest. */
typedef enum {
	CTF_PREC_BASE,
	CTF_PREC_POINTER,
	CTF_PREC_ARRAY,
	CTF_PREC_FUNCTION,
	CTF_PREC_MAX
} ctf_decl_prec_t;

/* One node of the declaration stack built by ctf_decl_push(). */
typedef struct ctf_decl_node {
	ctf_list_t cd_list;			/* linked list pointers */
	ctf_id_t cd_type;			/* type identifier */
	uint_t cd_kind;				/* type kind */
	uint_t cd_n;				/* type dimension if array */
} ctf_decl_node_t;

/* Working state for rendering a CTF type graph as a C declaration
 * string (see the block comment above and fbt_type_name()). */
typedef struct ctf_decl {
	ctf_list_t cd_nodes[CTF_PREC_MAX];	/* declaration node stacks */
	int cd_order[CTF_PREC_MAX];		/* storage order of decls */
	ctf_decl_prec_t cd_qualp;		/* qualifier precision */
	ctf_decl_prec_t cd_ordp;		/* ordered precision */
	char *cd_buf;				/* buffer for output */
	char *cd_ptr;				/* buffer location */
	char *cd_end;				/* buffer limit */
	size_t cd_len;				/* buffer space required */
	int cd_err;				/* saved error value */
} ctf_decl_t;
1627
1628 /*
1629 * Simple doubly-linked list append routine. This implementation assumes that
1630 * each list element contains an embedded ctf_list_t as the first member.
1631 * An additional ctf_list_t is used to store the head (l_next) and tail
1632 * (l_prev) pointers. The current head and tail list elements have their
1633 * previous and next pointers set to NULL, respectively.
1634 */
1635 static void
1636 ctf_list_append(ctf_list_t *lp, void *new)
1637 {
1638 ctf_list_t *p = lp->l_prev; /* p = tail list element */
1639 ctf_list_t *q = new; /* q = new list element */
1640
1641 lp->l_prev = q;
1642 q->l_prev = p;
1643 q->l_next = NULL;
1644
1645 if (p != NULL)
1646 p->l_next = q;
1647 else
1648 lp->l_next = q;
1649 }
1650
1651 /*
1652 * Prepend the specified existing element to the given ctf_list_t. The
1653 * existing pointer should be pointing at a struct with embedded ctf_list_t.
1654 */
1655 static void
1656 ctf_list_prepend(ctf_list_t *lp, void *new)
1657 {
1658 ctf_list_t *p = new; /* p = new list element */
1659 ctf_list_t *q = lp->l_next; /* q = head list element */
1660
1661 lp->l_next = p;
1662 p->l_prev = NULL;
1663 p->l_next = q;
1664
1665 if (q != NULL)
1666 q->l_prev = p;
1667 else
1668 lp->l_prev = p;
1669 }
1670
1671 static void
1672 ctf_decl_init(ctf_decl_t *cd, char *buf, size_t len)
1673 {
1674 int i;
1675
1676 bzero(cd, sizeof (ctf_decl_t));
1677
1678 for (i = CTF_PREC_BASE; i < CTF_PREC_MAX; i++)
1679 cd->cd_order[i] = CTF_PREC_BASE - 1;
1680
1681 cd->cd_qualp = CTF_PREC_BASE;
1682 cd->cd_ordp = CTF_PREC_BASE;
1683
1684 cd->cd_buf = buf;
1685 cd->cd_ptr = buf;
1686 cd->cd_end = buf + len;
1687 }
1688
1689 static void
1690 ctf_decl_fini(ctf_decl_t *cd)
1691 {
1692 ctf_decl_node_t *cdp, *ndp;
1693 int i;
1694
1695 for (i = CTF_PREC_BASE; i < CTF_PREC_MAX; i++) {
1696 for (cdp = ctf_list_next(&cd->cd_nodes[i]);
1697 cdp != NULL; cdp = ndp) {
1698 ndp = ctf_list_next(cdp);
1699 free(cdp, M_FBT);
1700 }
1701 }
1702 }
1703
1704 static const ctf_type_t *
1705 ctf_lookup_by_id(mod_ctf_t *mc, ctf_id_t type)
1706 {
1707 const ctf_type_t *tp;
1708 uint32_t offset;
1709 uint32_t *typoff = mc->typoffp;
1710
1711 if (type >= mc->typlen) {
1712 printf("%s(%d): type %d exceeds max %ld\n",__func__,__LINE__,(int) type,mc->typlen);
1713 return(NULL);
1714 }
1715
1716 /* Check if the type isn't cross-referenced. */
1717 if ((offset = typoff[type]) == 0) {
1718 printf("%s(%d): type %d isn't cross referenced\n",__func__,__LINE__, (int) type);
1719 return(NULL);
1720 }
1721
1722 tp = (const ctf_type_t *)(mc->ctftab + offset + sizeof(ctf_header_t));
1723
1724 return (tp);
1725 }
1726
1727 static void
1728 fbt_array_info(mod_ctf_t *mc, ctf_id_t type, ctf_arinfo_t *arp)
1729 {
1730 const ctf_header_t *hp = (const ctf_header_t *) mc->ctftab;
1731 const ctf_type_t *tp;
1732 const ctf_array_t *ap;
1733 ssize_t increment;
1734
1735 bzero(arp, sizeof(*arp));
1736
1737 if ((tp = ctf_lookup_by_id(mc, type)) == NULL)
1738 return;
1739
1740 if (CTF_INFO_KIND(tp->ctt_info) != CTF_K_ARRAY)
1741 return;
1742
1743 (void) fbt_get_ctt_size(hp->cth_version, tp, NULL, &increment);
1744
1745 ap = (const ctf_array_t *)((uintptr_t)tp + increment);
1746 arp->ctr_contents = ap->cta_contents;
1747 arp->ctr_index = ap->cta_index;
1748 arp->ctr_nelems = ap->cta_nelems;
1749 }
1750
1751 static const char *
1752 ctf_strptr(mod_ctf_t *mc, int name)
1753 {
1754 const ctf_header_t *hp = (const ctf_header_t *) mc->ctftab;;
1755 const char *strp = "";
1756
1757 if (name < 0 || name >= hp->cth_strlen)
1758 return(strp);
1759
1760 strp = (const char *)(mc->ctftab + hp->cth_stroff + name + sizeof(ctf_header_t));
1761
1762 return (strp);
1763 }
1764
/*
 * Recursively decompose CTF type "type" onto the declaration stacks in
 * cd, one node per type-graph link, bucketed by C operator precedence.
 * Errors are recorded in cd->cd_err (ENOENT for a bad type id, EAGAIN
 * for allocation failure) rather than returned.
 */
static void
ctf_decl_push(ctf_decl_t *cd, mod_ctf_t *mc, ctf_id_t type)
{
	ctf_decl_node_t *cdp;
	ctf_decl_prec_t prec;
	uint_t kind, n = 1;
	int is_qual = 0;

	const ctf_type_t *tp;
	ctf_arinfo_t ar;

	if ((tp = ctf_lookup_by_id(mc, type)) == NULL) {
		cd->cd_err = ENOENT;
		return;
	}

	switch (kind = CTF_INFO_KIND(tp->ctt_info)) {
	case CTF_K_ARRAY:
		/* Recurse on the element type first, then record the
		 * dimension for this level. */
		fbt_array_info(mc, type, &ar);
		ctf_decl_push(cd, mc, ar.ctr_contents);
		n = ar.ctr_nelems;
		prec = CTF_PREC_ARRAY;
		break;

	case CTF_K_TYPEDEF:
		/* Anonymous typedefs are transparent: recurse through
		 * to the underlying type without pushing a node. */
		if (ctf_strptr(mc, tp->ctt_name)[0] == '\0') {
			ctf_decl_push(cd, mc, tp->ctt_type);
			return;
		}
		prec = CTF_PREC_BASE;
		break;

	case CTF_K_FUNCTION:
		ctf_decl_push(cd, mc, tp->ctt_type);
		prec = CTF_PREC_FUNCTION;
		break;

	case CTF_K_POINTER:
		ctf_decl_push(cd, mc, tp->ctt_type);
		prec = CTF_PREC_POINTER;
		break;

	case CTF_K_VOLATILE:
	case CTF_K_CONST:
	case CTF_K_RESTRICT:
		/* Qualifiers attach at the current qualifiable level. */
		ctf_decl_push(cd, mc, tp->ctt_type);
		prec = cd->cd_qualp;
		is_qual++;
		break;

	default:
		prec = CTF_PREC_BASE;
	}

	/* NOTE(review): M_WAITOK malloc never returns NULL; this check
	 * is redundant but harmless. */
	if ((cdp = malloc(sizeof (ctf_decl_node_t), M_FBT, M_WAITOK)) == NULL) {
		cd->cd_err = EAGAIN;
		return;
	}

	cdp->cd_type = type;
	cdp->cd_kind = kind;
	cdp->cd_n = n;

	/* First node at this precedence: record its storage order. */
	if (ctf_list_next(&cd->cd_nodes[prec]) == NULL)
		cd->cd_order[prec] = cd->cd_ordp++;

	/*
	 * Reset cd_qualp to the highest precedence level that we've seen so
	 * far that can be qualified (CTF_PREC_BASE or CTF_PREC_POINTER).
	 */
	if (prec > cd->cd_qualp && prec < CTF_PREC_ARRAY)
		cd->cd_qualp = prec;

	/*
	 * C array declarators are ordered inside out so prepend them.  Also by
	 * convention qualifiers of base types precede the type specifier (e.g.
	 * const int vs. int const) even though the two forms are equivalent.
	 */
	if (kind == CTF_K_ARRAY || (is_qual && prec == CTF_PREC_BASE))
		ctf_list_prepend(&cd->cd_nodes[prec], cdp);
	else
		ctf_list_append(&cd->cd_nodes[prec], cdp);
}
1848
/*
 * printf-style append into the declaration buffer.  cd_ptr advances by
 * at most the space remaining (output is silently truncated), while
 * cd_len accumulates the full untruncated length the caller would need.
 */
static void
ctf_decl_sprintf(ctf_decl_t *cd, const char *format, ...)
{
	size_t len = (size_t)(cd->cd_end - cd->cd_ptr);
	va_list ap;
	size_t n;

	va_start(ap, format);
	/* NOTE(review): a negative vsnprintf return would wrap to a huge
	 * size_t here — assumes the kernel vsnprintf cannot fail; confirm. */
	n = vsnprintf(cd->cd_ptr, len, format, ap);
	va_end(ap);

	cd->cd_ptr += MIN(n, len);
	cd->cd_len += n;
}
1863
/*
 * Render CTF type "type" as a C declaration string into buf (capacity
 * len).  Returns the length the full declaration requires (which may
 * exceed len, indicating truncation), or -1 on error.
 */
static ssize_t
fbt_type_name(mod_ctf_t *mc, ctf_id_t type, char *buf, size_t len)
{
	ctf_decl_t cd;
	ctf_decl_node_t *cdp;
	ctf_decl_prec_t prec, lp, rp;
	int ptr, arr;
	uint_t k;

	if (mc == NULL && type == CTF_ERR)
		return (-1); /* simplify caller code by permitting CTF_ERR */

	ctf_decl_init(&cd, buf, len);
	ctf_decl_push(&cd, mc, type);

	if (cd.cd_err != 0) {
		ctf_decl_fini(&cd);
		return (-1);
	}

	/*
	 * If the type graph's order conflicts with lexical precedence order
	 * for pointers or arrays, then we need to surround the declarations at
	 * the corresponding lexical precedence with parentheses.  This can
	 * result in either a parenthesized pointer (*) as in int (*)() or
	 * int (*)[], or in a parenthesized pointer and array as in int (*[])().
	 */
	ptr = cd.cd_order[CTF_PREC_POINTER] > CTF_PREC_POINTER;
	arr = cd.cd_order[CTF_PREC_ARRAY] > CTF_PREC_ARRAY;

	/* lp/rp: precedence levels getting the "(" and ")" (or -1). */
	rp = arr ? CTF_PREC_ARRAY : ptr ? CTF_PREC_POINTER : -1;
	lp = ptr ? CTF_PREC_POINTER : arr ? CTF_PREC_ARRAY : -1;

	k = CTF_K_POINTER; /* avoid leading whitespace (see below) */

	for (prec = CTF_PREC_BASE; prec < CTF_PREC_MAX; prec++) {
		for (cdp = ctf_list_next(&cd.cd_nodes[prec]);
		    cdp != NULL; cdp = ctf_list_next(cdp)) {

			/* NOTE(review): tp is not NULL-checked; relies on
			 * ctf_decl_push() only stacking ids that already
			 * resolved via ctf_lookup_by_id() — confirm. */
			const ctf_type_t *tp =
			    ctf_lookup_by_id(mc, cdp->cd_type);
			const char *name = ctf_strptr(mc, tp->ctt_name);

			if (k != CTF_K_POINTER && k != CTF_K_ARRAY)
				ctf_decl_sprintf(&cd, " ");

			if (lp == prec) {
				ctf_decl_sprintf(&cd, "(");
				lp = -1;
			}

			switch (cdp->cd_kind) {
			case CTF_K_INTEGER:
			case CTF_K_FLOAT:
			case CTF_K_TYPEDEF:
				ctf_decl_sprintf(&cd, "%s", name);
				break;
			case CTF_K_POINTER:
				ctf_decl_sprintf(&cd, "*");
				break;
			case CTF_K_ARRAY:
				ctf_decl_sprintf(&cd, "[%u]", cdp->cd_n);
				break;
			case CTF_K_FUNCTION:
				ctf_decl_sprintf(&cd, "()");
				break;
			case CTF_K_STRUCT:
			case CTF_K_FORWARD:
				ctf_decl_sprintf(&cd, "struct %s", name);
				break;
			case CTF_K_UNION:
				ctf_decl_sprintf(&cd, "union %s", name);
				break;
			case CTF_K_ENUM:
				ctf_decl_sprintf(&cd, "enum %s", name);
				break;
			case CTF_K_VOLATILE:
				ctf_decl_sprintf(&cd, "volatile");
				break;
			case CTF_K_CONST:
				ctf_decl_sprintf(&cd, "const");
				break;
			case CTF_K_RESTRICT:
				ctf_decl_sprintf(&cd, "restrict");
				break;
			}

			k = cdp->cd_kind;
		}

		if (rp == prec)
			ctf_decl_sprintf(&cd, ")");
	}

	ctf_decl_fini(&cd);
	return (cd.cd_len);
}
1961
/*
 * DTrace getargdesc hook: look up the C type name of argument
 * desc->dtargd_ndx of the probed function via the module's CTF data and
 * write it into desc->dtargd_native.  On any failure the descriptor is
 * left marked DTRACE_ARGNONE.
 */
static void
fbt_getargdesc(void *arg __unused, dtrace_id_t id __unused, void *parg, dtrace_argdesc_t *desc)
{
	const ushort_t *dp;
	fbt_probe_t *fbt = parg;
	mod_ctf_t mc;
	dtrace_modctl_t *ctl = fbt->fbtp_ctl;
	int ndx = desc->dtargd_ndx;
	int symindx = fbt->fbtp_symindx;
	uint32_t *ctfoff;
	uint32_t offset;
	ushort_t info, kind, n;
	int nsyms;

	/* Pessimistic default: report "no such argument" on any bail-out. */
	desc->dtargd_ndx = DTRACE_ARGNONE;

	/* Get a pointer to the CTF data and it's length. */
	if (mod_ctf_get(ctl, &mc) != 0) {
		/* Warn only once, not on every argdesc request. */
		static int report=0;
		if (report < 1) {
			report++;
			printf("FBT: Error no CTF section found in module \"%s\"\n",
			    ctl->mod_info->mi_name);
		}
		/* No CTF data?  Something wrong? *shrug* */
		return;
	}

	nsyms = (mc.nmap != NULL) ? mc.nmapsize : mc.nsym;

	/* Check if this module hasn't been initialised yet. */
	if (mc.ctfoffp == NULL) {
		/*
		 * Initialise the CTF object and function symindx to
		 * byte offset array.
		 */
		if (fbt_ctfoff_init(ctl, &mc) != 0) {
			return;
		}

		/* Initialise the CTF type to byte offset array. */
		if (fbt_typoff_init(&mc) != 0) {
			return;
		}
	}

	ctfoff = mc.ctfoffp;

	if (ctfoff == NULL || mc.typoffp == NULL) {
		return;
	}

	/* Check if the symbol index is out of range. */
	if (symindx >= nsyms)
		return;

	/* Check if the symbol isn't cross-referenced. */
	if ((offset = ctfoff[symindx]) == 0xffffffff)
		return;

	/* dp walks the function's ushort_t record: info word, then the
	 * return type id, then one type id per argument. */
	dp = (const ushort_t *)(mc.ctftab + offset + sizeof(ctf_header_t));

	info = *dp++;
	kind = CTF_INFO_KIND(info);
	n = CTF_INFO_VLEN(info);

	if (kind == CTF_K_UNKNOWN && n == 0) {
		printf("%s(%d): Unknown function %s!\n",__func__,__LINE__,
		    fbt->fbtp_name);
		return;
	}

	if (kind != CTF_K_FUNCTION) {
		printf("%s(%d): Expected a function %s!\n",__func__,__LINE__,
		    fbt->fbtp_name);
		return;
	}

	/* Check if the requested argument doesn't exist. */
	if (ndx >= n)
		return;

	/* Skip the return type and arguments up to the one requested. */
	dp += ndx + 1;

	if (fbt_type_name(&mc, *dp, desc->dtargd_native, sizeof(desc->dtargd_native)) > 0) {
		desc->dtargd_ndx = ndx;
	}

	return;
}
2053
/*
 * Module load: allocate the probe hash table, install the trap/invop
 * hooks, and register the "fbt" provider with the DTrace framework.
 */
static void
fbt_load(void)
{
	/* Default the probe table size if not specified. */
	if (fbt_probetab_size == 0)
		fbt_probetab_size = FBT_PROBETAB_SIZE;

	/* Choose the hash mask for the probe table. */
	/* NOTE(review): assumes fbt_probetab_size is a power of two —
	 * a non-power-of-two value yields a broken mask; confirm callers
	 * never set an arbitrary size. */
	fbt_probetab_mask = fbt_probetab_size - 1;

	/* Allocate memory for the probe table. */
	fbt_probetab =
	    malloc(fbt_probetab_size * sizeof (fbt_probe_t *), M_FBT, M_WAITOK | M_ZERO);

	dtrace_doubletrap_func = fbt_doubletrap;
	dtrace_invop_add(fbt_invop);
#ifdef __arm__
	dtrace_emulation_jump_addr = fbt_emulate;
#endif

	/* NOTE(review): on registration failure the probe table and the
	 * hooks installed above are left in place — verify fbt_unload()
	 * copes with a partially-loaded state. */
	if (dtrace_register("fbt", &fbt_attr, DTRACE_PRIV_USER,
	    NULL, &fbt_pops, NULL, &fbt_id) != 0)
		return;
}
2078
2079
2080 static int
2081 fbt_unload(void)
2082 {
2083 int error = 0;
2084
2085 #ifdef __arm__
2086 dtrace_emulation_jump_addr = NULL;
2087 #endif
2088 /* De-register the invalid opcode handler. */
2089 dtrace_invop_remove(fbt_invop);
2090
2091 dtrace_doubletrap_func = NULL;
2092
2093 /* De-register this DTrace provider. */
2094 if ((error = dtrace_unregister(fbt_id)) != 0)
2095 return (error);
2096
2097 /* Free the probe table. */
2098 free(fbt_probetab, M_FBT);
2099 fbt_probetab = NULL;
2100 fbt_probetab_mask = 0;
2101
2102 return (error);
2103 }
2104
2105
2106 static int
2107 dtrace_fbt_modcmd(modcmd_t cmd, void *data)
2108 {
2109 int bmajor = -1, cmajor = -1;
2110 int error;
2111
2112 switch (cmd) {
2113 case MODULE_CMD_INIT:
2114 fbt_load();
2115 return devsw_attach("fbt", NULL, &bmajor,
2116 &fbt_cdevsw, &cmajor);
2117 case MODULE_CMD_FINI:
2118 error = fbt_unload();
2119 if (error != 0)
2120 return error;
2121 return devsw_detach(NULL, &fbt_cdevsw);
2122 case MODULE_CMD_AUTOUNLOAD:
2123 return EBUSY;
2124 default:
2125 return ENOTTY;
2126 }
2127 }
2128
/*
 * /dev/fbt open routine: no per-open state to set up, so every open
 * succeeds.  All parameters are unused.
 */
static int
fbt_open(dev_t dev, int flags, int mode, struct lwp *l)
{
	return (0);
}
2134
2135 MODULE(MODULE_CLASS_MISC, dtrace_fbt, "dtrace");
2136