/*	$NetBSD: mcount.c,v 1.16 2021/08/14 17:51:18 ryo Exp $	*/

/*
 * Copyright (c) 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Nathan J. Williams for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

/* If building a standalone libkern, don't include mcount. */
#if (!defined(_KERNEL) || defined(GPROF)) && !defined(_STANDALONE)

#include <sys/cdefs.h>
#if !defined(lint) && !defined(_KERNEL) && defined(LIBC_SCCS)
#if 0
static char sccsid[] = "@(#)mcount.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: mcount.c,v 1.16 2021/08/14 17:51:18 ryo Exp $");
#endif
#endif

#include <sys/param.h>
#include <sys/gmon.h>
#include <sys/lock.h>
#include <sys/proc.h>

#ifndef _KERNEL
#include "reentrant.h"
#endif

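/*
 * Per-thread profiling state for threaded user-level programs:
 * _gmonkey holds each thread's private gmonparam, _gmondummy is the
 * placeholder installed while a thread's buffer is being allocated
 * (to stop recursion), and _m_gmon_alloc() creates that buffer on
 * first use.
 */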
#if defined(_REENTRANT) && !defined(_RUMPKERNEL)
extern thread_key_t _gmonkey;
extern struct gmonparam _gmondummy;
struct gmonparam *_m_gmon_alloc(void);
#endif

#ifndef __LINT__
_MCOUNT_DECL(u_long, u_long)
#ifdef _KERNEL
    __attribute__((__no_instrument_function__))
#endif
    __used;
#endif

/* XXX: make these interfaces */
#ifdef _RUMPKERNEL
#undef MCOUNT_ENTER
#define MCOUNT_ENTER
#undef MCOUNT_EXIT
#define MCOUNT_EXIT
#undef MCOUNT
#define MCOUNT
#endif

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represent the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
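/*
 * For illustration only: on a hypothetical port, <machine/profile.h>
 * might expand the two macros roughly like this (the real definitions
 * are machine-dependent and usually involve an assembly stub):
 *
 *	#define _MCOUNT_DECL	static __inline void _mcount
 *	#define MCOUNT							\
 *	void mcount(void) __attribute__((__no_instrument_function__));	\
 *	void								\
 *	mcount(void)							\
 *	{								\
 *		u_long selfpc, frompc;					\
 *		selfpc = (u_long)__builtin_return_address(0);		\
 *		frompc = (u_long)__builtin_return_address(1);		\
 *		_mcount(frompc, selfpc);				\
 *	}
 *
 * With a definition of that shape, the _MCOUNT_DECL(u_long, u_long)
 * prototype above declares _mcount(), and the bare MCOUNT at the
 * bottom of this file emits the public mcount() entry point.
 */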
#ifndef __LINT__
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(u_long frompc, u_long selfpc)
{
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	int s;
#endif

#if defined(_REENTRANT) && !defined(_KERNEL) && !defined(_RUMPKERNEL)
	if (__isthreaded) {
		/* prevent re-entry via thr_getspecific */
		if (_gmonparam.state != GMON_PROF_ON)
			return;
		_gmonparam.state = GMON_PROF_BUSY;
		p = thr_getspecific(_gmonkey);
		if (p == NULL) {
			/* Prevent recursive calls while allocating */
			thr_setspecific(_gmonkey, &_gmondummy);
			p = _m_gmon_alloc();
		}
		_gmonparam.state = GMON_PROF_ON;
	} else
#endif
		p = &_gmonparam;
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	MCOUNT_ENTER;
#ifdef MULTIPROCESSOR
	p = curcpu()->ci_gmon;
	if (p == NULL || p->state != GMON_PROF_ON) {
		MCOUNT_EXIT;
		return;
	}
#endif
#endif
	p->state = GMON_PROF_BUSY;
	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	frompc -= p->lowpc;
	if (frompc > p->textsize)
		goto done;

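	/*
	 * Hash the (now text-relative) frompc into froms[]: each froms
	 * entry covers hashfraction * sizeof(*p->froms) bytes of text,
	 * so dividing by that stride selects the slot whose value is
	 * the head of this call site's arc chain in tos[].  The
	 * compile-time power-of-two case below only exists so the
	 * division by the constant HASHFRACTION can be reduced to a
	 * shift.
	 */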
#if (HASHFRACTION & (HASHFRACTION - 1)) == 0
	if (p->hashfraction == HASHFRACTION)
		frompcindex =
		    &p->froms[
		    (size_t)(frompc / (HASHFRACTION * sizeof(*p->froms)))];
	else
#endif
		frompcindex =
		    &p->froms[
		    (size_t)(frompc / (p->hashfraction * sizeof(*p->froms)))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = (u_short)toindex;
		top = &p->tos[(size_t)toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[(size_t)toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[(size_t)toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = (u_short)toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = (u_short)toindex;
			goto done;
		}
	}
done:
	p->state = GMON_PROF_ON;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	MCOUNT_EXIT;
#endif
	return;

overflow:
	p->state = GMON_PROF_ERROR;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	MCOUNT_EXIT;
#endif
	return;
}
#endif

#ifdef MCOUNT
/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT
#endif

#if defined(_KERNEL) && !defined(_RUMPKERNEL) && defined(MULTIPROCESSOR)
void _gmonparam_merge(struct gmonparam *, struct gmonparam *);

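/*
 * Merge the profiling state q into p (under MULTIPROCESSOR, q is
 * typically a CPU's private ci_gmon): sum the PC-sample buckets and
 * replay every call-graph arc recorded in q->froms/q->tos into p's
 * tables, using the same hash-and-chain scheme as _mcount() above.
 * If p->tos fills up, the remaining arcs are silently dropped.
 */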
void
_gmonparam_merge(struct gmonparam *p, struct gmonparam *q)
{
	u_long fromindex;
	u_short *frompcindex, qtoindex, toindex;
	u_long selfpc;
	u_long endfrom;
	long count;
	struct tostruct *top;
	int i;

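	/*
	 * Sum the PC-sample histograms bucket by bucket; the code
	 * assumes p and q describe the same text range, so the buckets
	 * line up one to one.
	 */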
	count = q->kcountsize / sizeof(*q->kcount);
	for (i = 0; i < count; i++)
		p->kcount[i] += q->kcount[i];

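	/*
	 * Replay each arc recorded in q into p: walk every hash chain
	 * in q->froms/q->tos and re-insert the (from, selfpc) pair into
	 * p's tables with its accumulated count.
	 */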
	endfrom = (q->fromssize / sizeof(*q->froms));
	for (fromindex = 0; fromindex < endfrom; fromindex++) {
		if (q->froms[fromindex] == 0)
			continue;
		for (qtoindex = q->froms[fromindex]; qtoindex != 0;
		     qtoindex = q->tos[qtoindex].link) {
			selfpc = q->tos[qtoindex].selfpc;
			count = q->tos[qtoindex].count;
			/* cribbed from mcount */
			frompcindex = &p->froms[fromindex];
			toindex = *frompcindex;
			if (toindex == 0) {
				/*
				 * first time traversing this arc
				 */
				toindex = ++p->tos[0].link;
				if (toindex >= p->tolimit)
					/* halt further profiling */
					goto overflow;

				*frompcindex = (u_short)toindex;
				top = &p->tos[(size_t)toindex];
				top->selfpc = selfpc;
				top->count = count;
				top->link = 0;
				goto done;
			}
			top = &p->tos[(size_t)toindex];
			if (top->selfpc == selfpc) {
				/*
				 * arc at front of chain; usual case.
				 */
				top->count += count;
				goto done;
			}
			/*
			 * have to go looking down chain for it.
			 * top points to what we are looking at,
			 * we know it is not at the head of the chain.
			 */
			for (; /* goto done */; ) {
				if (top->link == 0) {
					/*
					 * top is end of the chain and
					 * none of the chain had
					 * top->selfpc == selfpc.  so
					 * we allocate a new tostruct
					 * and link it to the head of
					 * the chain.
					 */
					toindex = ++p->tos[0].link;
					if (toindex >= p->tolimit)
						goto overflow;

					top = &p->tos[(size_t)toindex];
					top->selfpc = selfpc;
					top->count = count;
					top->link = *frompcindex;
					*frompcindex = (u_short)toindex;
					goto done;
				}
				/*
				 * otherwise, check the next arc on the chain.
				 */
				top = &p->tos[top->link];
				if (top->selfpc == selfpc) {
					/*
					 * there it is.
					 * add to its count.
					 */
					top->count += count;
					goto done;
				}
			}

		done: ;
		}

	}
 overflow: ;

}
#endif

#endif /* (!_KERNEL || GPROF) && !_STANDALONE */