/*	$NetBSD: mcount.c,v 1.18 2024/02/23 13:32:28 christos Exp $	*/

/*
 * Copyright (c) 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Nathan J. Williams for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

/* If building a standalone libkern, don't include mcount. */
#if (!defined(_KERNEL) || defined(GPROF)) && !defined(_STANDALONE)

#include <sys/cdefs.h>
#if !defined(lint) && !defined(_KERNEL) && defined(LIBC_SCCS)
#if 0
static char sccsid[] = "@(#)mcount.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: mcount.c,v 1.18 2024/02/23 13:32:28 christos Exp $");
#endif
#endif

#include <sys/param.h>
#include <sys/gmon.h>
#include <sys/lock.h>
#include <sys/proc.h>

#ifndef _KERNEL
#include "reentrant.h"
#endif

#if defined(_REENTRANT) && !defined(_RUMPKERNEL)
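/*
 * Per-thread profiling state for threaded (non-kernel) programs: each
 * thread's struct gmonparam is looked up through the thread-specific
 * key _gmonkey and allocated on first use by _m_gmon_alloc();
 * _gmondummy is installed while that allocation is in progress so that
 * a recursive mcount() call cannot re-enter the allocator.
 */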
extern thread_key_t _gmonkey;
extern struct gmonparam _gmondummy;
struct gmonparam *_m_gmon_alloc(void);
#endif

_MCOUNT_DECL(u_long, u_long)
#ifdef _KERNEL
    __attribute__((__no_instrument_function__))
#endif
    __used;

/* XXX: make these interfaces */
#ifdef _RUMPKERNEL
#undef MCOUNT_ENTER
#define MCOUNT_ENTER
#undef MCOUNT_EXIT
#define MCOUNT_EXIT
#undef MCOUNT
#define MCOUNT
#endif

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represent the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
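/*
 * Illustrative sketch (not part of this file): with the profiling
 * switch (e.g. cc -pg) the compiler emits a call to the profiling
 * entry point at the start of every function, roughly as if
 *
 *	int
 *	foo(int x)
 *	{
 *		mcount();	(name and linkage are machine-dependent)
 *		...
 *	}
 *
 * had been written.  The MCOUNT stub recovers frompc (the return
 * address into foo's caller) and selfpc (an address identifying foo
 * itself) and passes both to _mcount() below, which records that
 * caller-to-callee arc.
 */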
/* _mcount; may be static, inline, etc */
/*LINTED unused*/
_MCOUNT_DECL(u_long frompc, u_long selfpc)
{
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	int s;
#endif

#if defined(_REENTRANT) && !defined(_KERNEL) && !defined(_RUMPKERNEL)
	if (__isthreaded) {
		/* prevent re-entry via thr_getspecific */
		if (_gmonparam.state != GMON_PROF_ON)
			return;
		_gmonparam.state = GMON_PROF_BUSY;
		p = thr_getspecific(_gmonkey);
		if (p == NULL) {
			/* Prevent recursive calls while allocating */
			thr_setspecific(_gmonkey, &_gmondummy);
			p = _m_gmon_alloc();
		}
		_gmonparam.state = GMON_PROF_ON;
	} else
#endif
		p = &_gmonparam;
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
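	/*
	 * In a MULTIPROCESSOR kernel each CPU gathers statistics into
	 * its own buffer (curcpu()->ci_gmon); the per-CPU buffers are
	 * combined later by _gmonparam_merge() below.
	 */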
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	MCOUNT_ENTER;
#ifdef MULTIPROCESSOR
	p = curcpu()->ci_gmon;
	if (p == NULL || p->state != GMON_PROF_ON) {
		MCOUNT_EXIT;
		return;
	}
#endif
#endif
	p->state = GMON_PROF_BUSY;
	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	frompc -= p->lowpc;
	if (frompc > p->textsize)
		goto done;

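	/*
	 * Hash frompc (now an offset into the profiled text) into the
	 * froms[] table; each slot covers hashfraction * sizeof(*froms)
	 * bytes of text.  When the runtime hashfraction matches the
	 * compile-time HASHFRACTION and that constant is a power of two,
	 * use the constant so the compiler can turn the division into a
	 * shift.
	 */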
#if (HASHFRACTION & (HASHFRACTION - 1)) == 0
	if (p->hashfraction == HASHFRACTION)
		frompcindex =
		    &p->froms[
		    (size_t)(frompc / (HASHFRACTION * sizeof(*p->froms)))];
	else
#endif
		frompcindex =
		    &p->froms[
		    (size_t)(frompc / (p->hashfraction * sizeof(*p->froms)))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = (u_short)toindex;
		top = &p->tos[(size_t)toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[(size_t)toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[(size_t)toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = (u_short)toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = (u_short)toindex;
			goto done;
		}
	}
done:
	p->state = GMON_PROF_ON;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	MCOUNT_EXIT;
#endif
	return;

overflow:
	p->state = GMON_PROF_ERROR;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	MCOUNT_EXIT;
#endif
	return;
}

#ifdef MCOUNT
/*
 * Actual definition of the mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT
#endif
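
/*
 * Illustrative sketch only, not compiled: a hypothetical C-level
 * equivalent of a typical MCOUNT stub, assuming GCC-style
 * __builtin_return_address() can walk one frame up and that
 * _MCOUNT_DECL names the worker _mcount on this port.  Real ports
 * define MCOUNT in <machine/profile.h>, usually in assembly, because
 * the return addresses must be recovered without disturbing the
 * profiled function's argument registers.
 */
#if 0	/* example only */
void __attribute__((__no_instrument_function__))
mcount_example(void)
{
	u_long frompc, selfpc;

	/* selfpc: where the profiled function called this stub from. */
	selfpc = (u_long)__builtin_return_address(0);
	/* frompc: the profiled function's own return address. */
	frompc = (u_long)__builtin_return_address(1);
	_mcount(frompc, selfpc);
}
#endif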

#if defined(_KERNEL) && !defined(_RUMPKERNEL) && defined(MULTIPROCESSOR)
void _gmonparam_merge(struct gmonparam *, struct gmonparam *);

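/*
 * Fold the statistics collected in q (typically one CPU's buffer) into
 * p: the kcount[] histogram buckets are summed, and every arc recorded
 * in q's froms[]/tos[] tables is replayed into p using the same
 * hash-chain logic as _mcount() above.
 */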
void
_gmonparam_merge(struct gmonparam *p, struct gmonparam *q)
{
	u_long fromindex;
	u_short *frompcindex, qtoindex, toindex;
	u_long selfpc;
	u_long endfrom;
	long count;
	struct tostruct *top;
	int i;

	count = q->kcountsize / sizeof(*q->kcount);
	for (i = 0; i < count; i++)
		p->kcount[i] += q->kcount[i];

	endfrom = (q->fromssize / sizeof(*q->froms));
	for (fromindex = 0; fromindex < endfrom; fromindex++) {
		if (q->froms[fromindex] == 0)
			continue;
		for (qtoindex = q->froms[fromindex]; qtoindex != 0;
		     qtoindex = q->tos[qtoindex].link) {
			selfpc = q->tos[qtoindex].selfpc;
			count = q->tos[qtoindex].count;
			/* cribbed from mcount */
			frompcindex = &p->froms[fromindex];
			toindex = *frompcindex;
			if (toindex == 0) {
				/*
				 * first time traversing this arc
				 */
				toindex = ++p->tos[0].link;
				if (toindex >= p->tolimit)
					/* halt further profiling */
					goto overflow;

				*frompcindex = (u_short)toindex;
				top = &p->tos[(size_t)toindex];
				top->selfpc = selfpc;
				top->count = count;
				top->link = 0;
				goto done;
			}
			top = &p->tos[(size_t)toindex];
			if (top->selfpc == selfpc) {
				/*
				 * arc at front of chain; usual case.
				 */
				top->count += count;
				goto done;
			}
			/*
			 * have to go looking down chain for it.
			 * top points to what we are looking at,
			 * we know it is not at the head of the chain.
			 */
			for (; /* goto done */; ) {
				if (top->link == 0) {
					/*
					 * top is end of the chain and
					 * none of the chain had
					 * top->selfpc == selfpc.  so
					 * we allocate a new tostruct
					 * and link it to the head of
					 * the chain.
					 */
					toindex = ++p->tos[0].link;
					if (toindex >= p->tolimit)
						goto overflow;

					top = &p->tos[(size_t)toindex];
					top->selfpc = selfpc;
					top->count = count;
					top->link = *frompcindex;
					*frompcindex = (u_short)toindex;
					goto done;
				}
				/*
				 * otherwise, check the next arc on the chain.
				 */
				top = &p->tos[top->link];
				if (top->selfpc == selfpc) {
					/*
					 * there it is.
					 * add to its count.
					 */
					top->count += count;
					goto done;
				}
			}

		done: ;
		}

	}
 overflow: ;

}
#endif

#endif /* (!_KERNEL || GPROF) && !_STANDALONE */