/*	$NetBSD: subr_log.c,v 1.59.12.1 2020/12/14 14:38:14 thorpej Exp $	*/

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)subr_log.c	8.3 (Berkeley) 2/14/95
 */

/*
 * Error log buffer for kernel printf's.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_log.c,v 1.59.12.1 2020/12/14 14:38:14 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/msgbuf.h>
#include <sys/file.h>
#include <sys/syslog.h>
#include <sys/conf.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/intr.h>
#include <sys/sysctl.h>
#include <sys/ktrace.h>

static int sysctl_msgbuf(SYSCTLFN_PROTO);

static void	logsoftintr(void *);

static bool	log_async;
static struct selinfo log_selp;		/* process waiting on select call */
static pid_t	log_pgid;		/* process/group for async I/O */
static kcondvar_t log_cv;
static void	*log_sih;

kmutex_t log_lock;
int	log_open;			/* also used in log() */
int	msgbufmapped;			/* is the message buffer mapped */
int	msgbufenabled;			/* is logging to the buffer enabled */
struct	kern_msgbuf *msgbufp;		/* the mapped buffer, itself. */

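/*
 * Initialize the kernel message buffer at "bf" (of "bufsize" bytes).
 * If the buffer header already looks valid, the existing contents are
 * kept; otherwise the buffer is cleared and reinitialized.
 */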
void
initmsgbuf(void *bf, size_t bufsize)
{
	struct kern_msgbuf *mbp;
	long new_bufs;

	/* Sanity-check the given size. */
	if (bufsize < sizeof(struct kern_msgbuf))
		return;

	mbp = msgbufp = (struct kern_msgbuf *)bf;

	new_bufs = bufsize - offsetof(struct kern_msgbuf, msg_bufc);
	if ((mbp->msg_magic != MSG_MAGIC) || (mbp->msg_bufs != new_bufs) ||
	    (mbp->msg_bufr < 0) || (mbp->msg_bufr >= mbp->msg_bufs) ||
	    (mbp->msg_bufx < 0) || (mbp->msg_bufx >= mbp->msg_bufs)) {
		/*
		 * If the buffer magic number is wrong, has changed
		 * size (which shouldn't happen often), or is
		 * internally inconsistent, initialize it.
		 */
		memset(bf, 0, bufsize);
		mbp->msg_magic = MSG_MAGIC;
		mbp->msg_bufs = new_bufs;
	}

	/* Mark it as ready for use. */
	msgbufmapped = msgbufenabled = 1;
}

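/*
 * Set up the run-time state for the log device: the log lock, the
 * select/poll state, the condition variable, the SIGIO soft interrupt,
 * and the kern.msgbufsize and kern.msgbuf sysctl nodes.
 */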
void
loginit(void)
{

	mutex_init(&log_lock, MUTEX_DEFAULT, IPL_VM);
	selinit(&log_selp);
	cv_init(&log_cv, "klog");
	log_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
	    logsoftintr, NULL);

	sysctl_createv(NULL, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "msgbufsize",
		       SYSCTL_DESCR("Size of the kernel message buffer"),
		       sysctl_msgbuf, 0, NULL, 0,
		       CTL_KERN, KERN_MSGBUFSIZE, CTL_EOL);
	sysctl_createv(NULL, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "msgbuf",
		       SYSCTL_DESCR("Kernel message buffer"),
		       sysctl_msgbuf, 0, NULL, 0,
		       CTL_KERN, KERN_MSGBUF, CTL_EOL);
}

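/*
 * Open the log device.  Only a single open is allowed at a time; the
 * opening process becomes the target for SIGIO when async mode is set.
 */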
/*ARGSUSED*/
static int
logopen(dev_t dev, int flags, int mode, struct lwp *l)
{
	struct kern_msgbuf *mbp = msgbufp;
	int error = 0;

	mutex_spin_enter(&log_lock);
	if (log_open) {
		error = EBUSY;
	} else {
		log_open = 1;
		log_pgid = l->l_proc->p_pid;	/* signal process only */
		/*
		 * The message buffer is initialized during system
		 * configuration.  If it's been clobbered, note that
		 * and return an error.  (This allows a user to read
		 * the buffer via /dev/kmem, and try to figure out
		 * what clobbered it.)
		 */
		if (mbp->msg_magic != MSG_MAGIC) {
			msgbufenabled = 0;
			error = ENXIO;
		}
	}
	mutex_spin_exit(&log_lock);

	return error;
}

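/*
 * Close the log device and reset the open/async state so that it can
 * be opened again.
 */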
/*ARGSUSED*/
static int
logclose(dev_t dev, int flag, int mode, struct lwp *l)
{

	mutex_spin_enter(&log_lock);
	log_pgid = 0;
	log_open = 0;
	log_async = 0;
	mutex_spin_exit(&log_lock);

	return 0;
}

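/*
 * Read from the message buffer.  Blocks (unless IO_NDELAY is set) until
 * data is available, then copies out data between the read and write
 * pointers, wrapping around the end of the ring buffer as needed.
 */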
/*ARGSUSED*/
static int
logread(dev_t dev, struct uio *uio, int flag)
{
	struct kern_msgbuf *mbp = msgbufp;
	long l;
	int error = 0;

	mutex_spin_enter(&log_lock);
	while (mbp->msg_bufr == mbp->msg_bufx) {
		if (flag & IO_NDELAY) {
			mutex_spin_exit(&log_lock);
			return EWOULDBLOCK;
		}
		error = cv_wait_sig(&log_cv, &log_lock);
		if (error) {
			mutex_spin_exit(&log_lock);
			return error;
		}
	}
	while (uio->uio_resid > 0) {
		l = mbp->msg_bufx - mbp->msg_bufr;
		if (l < 0)
			l = mbp->msg_bufs - mbp->msg_bufr;
		l = uimin(l, uio->uio_resid);
		if (l == 0)
			break;
		mutex_spin_exit(&log_lock);
		error = uiomove(&mbp->msg_bufc[mbp->msg_bufr], (int)l, uio);
		mutex_spin_enter(&log_lock);
		if (error)
			break;
		mbp->msg_bufr += l;
		if (mbp->msg_bufr < 0 || mbp->msg_bufr >= mbp->msg_bufs)
			mbp->msg_bufr = 0;
	}
	mutex_spin_exit(&log_lock);

	return error;
}

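/*
 * Poll for readability: report POLLIN/POLLRDNORM when the read and
 * write pointers differ, otherwise record the caller for selnotify().
 */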
/*ARGSUSED*/
static int
logpoll(dev_t dev, int events, struct lwp *l)
{
	int revents = 0;

	if (events & (POLLIN | POLLRDNORM)) {
		mutex_spin_enter(&log_lock);
		if (msgbufp->msg_bufr != msgbufp->msg_bufx)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &log_selp);
		mutex_spin_exit(&log_lock);
	}

	return revents;
}

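/*
 * kqueue(2) support: EVFILT_READ reports the number of unread bytes in
 * the ring buffer, accounting for wrap-around.
 */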
static void
filt_logrdetach(struct knote *kn)
{

	mutex_spin_enter(&log_lock);
	selremove_knote(&log_selp, kn);
	mutex_spin_exit(&log_lock);
}

static int
filt_logread(struct knote *kn, long hint)
{
	int rv;

	if ((hint & NOTE_SUBMIT) == 0)
		mutex_spin_enter(&log_lock);
	if (msgbufp->msg_bufr == msgbufp->msg_bufx) {
		rv = 0;
	} else if (msgbufp->msg_bufr < msgbufp->msg_bufx) {
		kn->kn_data = msgbufp->msg_bufx - msgbufp->msg_bufr;
		rv = 1;
	} else {
		kn->kn_data = (msgbufp->msg_bufs - msgbufp->msg_bufr) +
		    msgbufp->msg_bufx;
		rv = 1;
	}
	if ((hint & NOTE_SUBMIT) == 0)
		mutex_spin_exit(&log_lock);

	return rv;
}

static const struct filterops logread_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_logrdetach,
	.f_event = filt_logread,
};

static int
logkqfilter(dev_t dev, struct knote *kn)
{

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &logread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = NULL;

	mutex_spin_enter(&log_lock);
	selrecord_knote(&log_selp, kn);
	mutex_spin_exit(&log_lock);

	return (0);
}

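/*
 * Wake up anyone waiting for message buffer data: notify select/poll
 * and kqueue waiters, wake blocked readers, and schedule SIGIO delivery
 * if async mode is enabled.
 */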
void
logwakeup(void)
{

	if (!cold && log_open) {
		mutex_spin_enter(&log_lock);
		selnotify(&log_selp, 0, NOTE_SUBMIT);
		if (log_async)
			softint_schedule(log_sih);
		cv_broadcast(&log_cv);
		mutex_spin_exit(&log_lock);
	}
}

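/*
 * Soft interrupt handler: deliver SIGIO to the process or process
 * group registered for async I/O.
 */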
static void
logsoftintr(void *cookie)
{
	pid_t pid;

	if ((pid = log_pgid) != 0)
		fownsignal(pid, SIGIO, 0, 0, NULL);
}

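/*
 * ioctl(2) handler for the log device: FIONREAD, FIONBIO, FIOASYNC,
 * and the usual owner/process-group get and set requests.
 */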
/*ARGSUSED*/
static int
logioctl(dev_t dev, u_long com, void *data, int flag, struct lwp *lwp)
{
	long l;

	switch (com) {

	/* return number of characters immediately available */
	case FIONREAD:
		mutex_spin_enter(&log_lock);
		l = msgbufp->msg_bufx - msgbufp->msg_bufr;
		if (l < 0)
			l += msgbufp->msg_bufs;
		mutex_spin_exit(&log_lock);
		*(int *)data = l;
		break;

	case FIONBIO:
		break;

	case FIOASYNC:
		/* No locking needed, 'thread private'. */
		log_async = (*((int *)data) != 0);
		break;

	case TIOCSPGRP:
	case FIOSETOWN:
		return fsetown(&log_pgid, com, data);

	case TIOCGPGRP:
	case FIOGETOWN:
		return fgetown(log_pgid, com, data);

	default:
		return (EPASSTHROUGH);
	}
	return (0);
}

static void
logskip(struct kern_msgbuf *mbp)
{
	/*
	 * Move the read pointer forward to the next line in the
	 * buffer.  Note that this is a ring buffer, so msg_bufr
	 * is reset to 0 once it reaches msg_bufs.
	 *
	 * To avoid looping forever, give up if no newline is found
	 * within mbp->msg_bufs characters (the maximum size of the
	 * buffer).
	 */
	for (int i = 0; i < mbp->msg_bufs; i++) {
		char c0 = mbp->msg_bufc[mbp->msg_bufr];
		if (++mbp->msg_bufr >= mbp->msg_bufs)
			mbp->msg_bufr = 0;
		if (c0 == '\n')
			break;
	}
}

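/*
 * Append one character at the write pointer, wrapping around the end
 * of the ring buffer.
 */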
static void
logaddchar(struct kern_msgbuf *mbp, int c)
{
	mbp->msg_bufc[mbp->msg_bufx++] = c;
	if (mbp->msg_bufx < 0 || mbp->msg_bufx >= mbp->msg_bufs)
		mbp->msg_bufx = 0;

	/* If the buffer is full, keep the most recent data. */
	if (mbp->msg_bufr == mbp->msg_bufx)
		logskip(mbp);
}

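/*
 * Put a character into the error log buffer used by kernel printf's;
 * may be called before the log lock is initialized (while cold).
 */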
void
logputchar(int c)
{
	struct kern_msgbuf *mbp;

	if (!cold)
		mutex_spin_enter(&log_lock);

	if (!msgbufenabled)
		goto out;

	mbp = msgbufp;
	if (mbp->msg_magic != MSG_MAGIC) {
		/*
		 * Arguably should panic or somehow notify the
		 * user...  but how?  Panic may be too drastic,
		 * and would obliterate the message being kicked
		 * out (maybe a panic itself), and printf
		 * would invoke us recursively.  Silently punt
		 * for now.  If syslog is running, it should
		 * notice.
		 */
		msgbufenabled = 0;
		goto out;
	}

	logaddchar(mbp, c);

out:
	if (!cold)
		mutex_spin_exit(&log_lock);
}

/*
 * sysctl helper routine for kern.msgbufsize and kern.msgbuf.  For the
 * former it merely checks that the message buffer is set up.  For the
 * latter, it also copies out the data if necessary.
 */
static int
sysctl_msgbuf(SYSCTLFN_ARGS)
{
	char *where = oldp;
	size_t len, maxlen;
	long beg, end;
	extern kmutex_t log_lock;
	int error;

	if (!logenabled(msgbufp)) {
		msgbufenabled = 0;
		return (ENXIO);
	}

	switch (rnode->sysctl_num) {
	case KERN_MSGBUFSIZE: {
		struct sysctlnode node = *rnode;
		int msg_bufs = (int)msgbufp->msg_bufs;
		node.sysctl_data = &msg_bufs;
		return (sysctl_lookup(SYSCTLFN_CALL(&node)));
	}
	case KERN_MSGBUF:
		break;
	default:
		return (EOPNOTSUPP);
	}

	if (newp != NULL)
		return (EPERM);

	if (oldp == NULL) {
		/* always return full buffer size */
		*oldlenp = msgbufp->msg_bufs;
		return (0);
	}

	sysctl_unlock();

	/*
	 * First, copy from the write pointer to the end of the
	 * message buffer.
	 */
	error = 0;
	mutex_spin_enter(&log_lock);
	maxlen = MIN(msgbufp->msg_bufs, *oldlenp);
	beg = msgbufp->msg_bufx;
	end = msgbufp->msg_bufs;
	mutex_spin_exit(&log_lock);

	while (maxlen > 0) {
		len = MIN(end - beg, maxlen);
		if (len == 0)
			break;
		/* XXX unlocked, but hardly matters. */
		error = copyout(&msgbufp->msg_bufc[beg], where, len);
		ktrmibio(-1, UIO_READ, where, len, error);
		if (error)
			break;
		where += len;
		maxlen -= len;

		/*
		 * ... then, copy from the beginning of the message
		 * buffer to the write pointer.
		 */
		beg = 0;
		end = msgbufp->msg_bufx;
	}

	sysctl_relock();
	return (error);
}

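/*
 * Character device switch for the log device.
 */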
const struct cdevsw log_cdevsw = {
	.d_open = logopen,
	.d_close = logclose,
	.d_read = logread,
	.d_write = nowrite,
	.d_ioctl = logioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = logpoll,
	.d_mmap = nommap,
	.d_kqfilter = logkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};
    544