1 /*	$NetBSD: subr_log.c,v 1.63 2022/10/26 23:28:30 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Copyright (c) 1982, 1986, 1993
34  *	The Regents of the University of California.  All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)subr_log.c	8.3 (Berkeley) 2/14/95
61  */
62 
63 /*
64  * Error log buffer for kernel printf's.
65  */
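
/*
 * Userland drains this buffer through the log pseudo-device declared at
 * the bottom of this file (log_cdevsw, conventionally /dev/klog), or
 * copies it out wholesale via the kern.msgbuf sysctl; see sysctl_msgbuf()
 * below.
 */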
66 
67 #include <sys/cdefs.h>
68 __KERNEL_RCSID(0, "$NetBSD: subr_log.c,v 1.63 2022/10/26 23:28:30 riastradh Exp $");
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/kernel.h>
73 #include <sys/proc.h>
74 #include <sys/vnode.h>
75 #include <sys/ioctl.h>
76 #include <sys/msgbuf.h>
77 #include <sys/file.h>
78 #include <sys/syslog.h>
79 #include <sys/conf.h>
80 #include <sys/select.h>
81 #include <sys/poll.h>
82 #include <sys/intr.h>
83 #include <sys/sysctl.h>
84 #include <sys/ktrace.h>
85 
86 static int sysctl_msgbuf(SYSCTLFN_PROTO);
87 
88 static void	logsoftintr(void *);
89 
90 static bool	log_async;
91 static struct selinfo log_selp;		/* process waiting on select call */
92 static pid_t	log_pgid;		/* process/group for async I/O */
93 static kcondvar_t log_cv;
94 static void	*log_sih;
95 
96 static kmutex_t log_lock;
97 int	log_open;			/* also used in log() */
98 int	msgbufmapped;			/* is the message buffer mapped */
99 int	msgbufenabled;			/* is logging to the buffer enabled */
100 struct	kern_msgbuf *msgbufp;		/* the mapped buffer, itself. */
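
/*
 * Within the kern_msgbuf ring, msg_bufr is the read index and msg_bufx
 * the write index; the buffer is empty when the two are equal.  The
 * indices are updated under log_lock, except very early in boot while
 * the system is still cold.
 */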
101 
102 void
103 initmsgbuf(void *bf, size_t bufsize)
104 {
105 	struct kern_msgbuf *mbp;
106 	long new_bufs;
107 
108 	/* Sanity-check the given size. */
109 	if (bufsize < sizeof(struct kern_msgbuf))
110 		return;
111 
112 	mbp = msgbufp = (struct kern_msgbuf *)bf;
113 
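	/* The usable ring space is what remains after the msgbuf header. */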
114 	new_bufs = bufsize - offsetof(struct kern_msgbuf, msg_bufc);
115 	if ((mbp->msg_magic != MSG_MAGIC) || (mbp->msg_bufs != new_bufs) ||
116 	    (mbp->msg_bufr < 0) || (mbp->msg_bufr >= mbp->msg_bufs) ||
117 	    (mbp->msg_bufx < 0) || (mbp->msg_bufx >= mbp->msg_bufs)) {
118 		/*
119 		 * If the buffer magic number is wrong, has changed
120 		 * size (which shouldn't happen often), or is
121 		 * internally inconsistent, initialize it.
122 		 */
123 
124 		memset(bf, 0, bufsize);
125 		mbp->msg_magic = MSG_MAGIC;
126 		mbp->msg_bufs = new_bufs;
127 	}
128 
129 	/* mark it as ready for use. */
130 	msgbufmapped = msgbufenabled = 1;
131 }
132 
133 void
134 loginit(void)
135 {
136 
137 	mutex_init(&log_lock, MUTEX_DEFAULT, IPL_VM);
138 	selinit(&log_selp);
139 	cv_init(&log_cv, "klog");
140 	log_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
141 	    logsoftintr, NULL);
142 
143 	sysctl_createv(NULL, 0, NULL, NULL,
144 		       CTLFLAG_PERMANENT,
145 		       CTLTYPE_INT, "msgbufsize",
146 		       SYSCTL_DESCR("Size of the kernel message buffer"),
147 		       sysctl_msgbuf, 0, NULL, 0,
148 		       CTL_KERN, KERN_MSGBUFSIZE, CTL_EOL);
149 	sysctl_createv(NULL, 0, NULL, NULL,
150 		       CTLFLAG_PERMANENT,
151 		       CTLTYPE_INT, "msgbuf",
152 		       SYSCTL_DESCR("Kernel message buffer"),
153 		       sysctl_msgbuf, 0, NULL, 0,
154 		       CTL_KERN, KERN_MSGBUF, CTL_EOL);
155 }
156 
157 /*ARGSUSED*/
158 static int
159 logopen(dev_t dev, int flags, int mode, struct lwp *l)
160 {
161 	struct kern_msgbuf *mbp = msgbufp;
162 	int error = 0;
163 
164 	mutex_spin_enter(&log_lock);
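	/* The log device is exclusive-open: only one reader at a time. */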
165 	if (log_open) {
166 		error = EBUSY;
167 	} else {
168 		log_open = 1;
169 		log_pgid = l->l_proc->p_pid;	/* signal process only */
170 		/*
171 		 * The message buffer is initialized during system
172 		 * configuration.  If it's been clobbered, note that
173 		 * and return an error.  (This allows a user to read
174 		 * the buffer via /dev/kmem, and try to figure out
175 		 * what clobbered it.)
176 		 */
177 		if (mbp->msg_magic != MSG_MAGIC) {
178 			msgbufenabled = 0;
179 			error = ENXIO;
180 		}
181 	}
182 	mutex_spin_exit(&log_lock);
183 
184 	return error;
185 }
186 
187 /*ARGSUSED*/
188 static int
189 logclose(dev_t dev, int flag, int mode, struct lwp *l)
190 {
191 
192 	mutex_spin_enter(&log_lock);
193 	log_pgid = 0;
194 	log_open = 0;
195 	log_async = 0;
196 	mutex_spin_exit(&log_lock);
197 
198 	return 0;
199 }
200 
201 /*ARGSUSED*/
202 static int
203 logread(dev_t dev, struct uio *uio, int flag)
204 {
205 	struct kern_msgbuf *mbp = msgbufp;
206 	long l;
207 	int error = 0;
208 
209 	mutex_spin_enter(&log_lock);
210 	while (mbp->msg_bufr == mbp->msg_bufx) {
211 		if (flag & IO_NDELAY) {
212 			mutex_spin_exit(&log_lock);
213 			return EWOULDBLOCK;
214 		}
215 		error = cv_wait_sig(&log_cv, &log_lock);
216 		if (error) {
217 			mutex_spin_exit(&log_lock);
218 			return error;
219 		}
220 	}
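	/*
	 * Copy out in at most two chunks per pass: from the read index
	 * up to either the write index or the end of the ring, wrapping
	 * msg_bufr back to 0 after it reaches msg_bufs.
	 */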
221 	while (uio->uio_resid > 0) {
222 		l = mbp->msg_bufx - mbp->msg_bufr;
223 		if (l < 0)
224 			l = mbp->msg_bufs - mbp->msg_bufr;
225 		l = uimin(l, uio->uio_resid);
226 		if (l == 0)
227 			break;
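		/* uiomove() may sleep or fault, so drop the spin lock. */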
228 		mutex_spin_exit(&log_lock);
229 		error = uiomove(&mbp->msg_bufc[mbp->msg_bufr], (int)l, uio);
230 		mutex_spin_enter(&log_lock);
231 		if (error)
232 			break;
233 		mbp->msg_bufr += l;
234 		if (mbp->msg_bufr < 0 || mbp->msg_bufr >= mbp->msg_bufs)
235 			mbp->msg_bufr = 0;
236 	}
237 	mutex_spin_exit(&log_lock);
238 
239 	return error;
240 }
241 
242 /*ARGSUSED*/
243 static int
244 logpoll(dev_t dev, int events, struct lwp *l)
245 {
246 	int revents = 0;
247 
248 	if (events & (POLLIN | POLLRDNORM)) {
249 		mutex_spin_enter(&log_lock);
250 		if (msgbufp->msg_bufr != msgbufp->msg_bufx)
251 			revents |= events & (POLLIN | POLLRDNORM);
252 		else
253 			selrecord(l, &log_selp);
254 		mutex_spin_exit(&log_lock);
255 	}
256 
257 	return revents;
258 }
259 
260 static void
261 filt_logrdetach(struct knote *kn)
262 {
263 
264 	mutex_spin_enter(&log_lock);
265 	selremove_knote(&log_selp, kn);
266 	mutex_spin_exit(&log_lock);
267 }
268 
269 static int
270 filt_logread(struct knote *kn, long hint)
271 {
272 	int rv;
273 
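	/*
	 * NOTE_SUBMIT means we were called from logwakeup() via
	 * selnotify() with log_lock already held; otherwise take it here.
	 */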
274 	if ((hint & NOTE_SUBMIT) == 0)
275 		mutex_spin_enter(&log_lock);
276 	if (msgbufp->msg_bufr == msgbufp->msg_bufx) {
277 		rv = 0;
278 	} else if (msgbufp->msg_bufr < msgbufp->msg_bufx) {
279 		kn->kn_data = msgbufp->msg_bufx - msgbufp->msg_bufr;
280 		rv = 1;
281 	} else {
282 		kn->kn_data = (msgbufp->msg_bufs - msgbufp->msg_bufr) +
283 		    msgbufp->msg_bufx;
284 		rv = 1;
285 	}
286 	if ((hint & NOTE_SUBMIT) == 0)
287 		mutex_spin_exit(&log_lock);
288 
289 	return rv;
290 }
291 
292 static const struct filterops logread_filtops = {
293 	.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
294 	.f_attach = NULL,
295 	.f_detach = filt_logrdetach,
296 	.f_event = filt_logread,
297 };
298 
299 static int
300 logkqfilter(dev_t dev, struct knote *kn)
301 {
302 
303 	switch (kn->kn_filter) {
304 	case EVFILT_READ:
305 		kn->kn_fop = &logread_filtops;
306 		mutex_spin_enter(&log_lock);
307 		selrecord_knote(&log_selp, kn);
308 		mutex_spin_exit(&log_lock);
309 		break;
310 
311 	default:
312 		return (EINVAL);
313 	}
314 
315 	return (0);
316 }
317 
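/*
 * Wake up everyone waiting for log output: select/poll and kqueue
 * waiters, blocked readers, and, if FIOASYNC is set, the softint
 * that delivers SIGIO.
 */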
318 void
319 logwakeup(void)
320 {
321 
322 	if (!cold && log_open) {
323 		mutex_spin_enter(&log_lock);
324 		selnotify(&log_selp, 0, NOTE_SUBMIT);
325 		if (log_async)
326 			softint_schedule(log_sih);
327 		cv_broadcast(&log_cv);
328 		mutex_spin_exit(&log_lock);
329 	}
330 }
331 
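/* Deliver SIGIO to the process or group registered for async I/O. */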
332 static void
333 logsoftintr(void *cookie)
334 {
335 	pid_t pid;
336 
337 	if ((pid = log_pgid) != 0)
338 		fownsignal(pid, SIGIO, 0, 0, NULL);
339 }
340 
341 /*ARGSUSED*/
342 static int
343 logioctl(dev_t dev, u_long com, void *data, int flag, struct lwp *lwp)
344 {
345 	long l;
346 
347 	switch (com) {
348 
349 	/* return number of characters immediately available */
350 	case FIONREAD:
351 		mutex_spin_enter(&log_lock);
352 		l = msgbufp->msg_bufx - msgbufp->msg_bufr;
353 		if (l < 0)
354 			l += msgbufp->msg_bufs;
355 		mutex_spin_exit(&log_lock);
356 		*(int *)data = l;
357 		break;
358 
359 	case FIONBIO:
360 		break;
361 
362 	case FIOASYNC:
363 		/* No locking needed, 'thread private'. */
364 		log_async = (*((int *)data) != 0);
365 		break;
366 
367 	case TIOCSPGRP:
368 	case FIOSETOWN:
369 		return fsetown(&log_pgid, com, data);
370 
371 	case TIOCGPGRP:
372 	case FIOGETOWN:
373 		return fgetown(log_pgid, com, data);
374 
375 	default:
376 		return (EPASSTHROUGH);
377 	}
378 	return (0);
379 }
380 
381 static void
382 logskip(struct kern_msgbuf *mbp)
383 {
384 	/*
385 	 * Move the read pointer forward to the next
386 	 * line in the buffer.  Note that the buffer
387 	 * is a ring buffer, so msg_bufr wraps back
388 	 * to 0 once it reaches msg_bufs.
389 	 *
390 	 * To avoid looping forever, give up if no
391 	 * newline is found within mbp->msg_bufs
392 	 * characters (the size of the buffer).
393 	 */
394 	for (int i = 0; i < mbp->msg_bufs; i++) {
395 		char c0 = mbp->msg_bufc[mbp->msg_bufr];
396 		if (++mbp->msg_bufr >= mbp->msg_bufs)
397 			mbp->msg_bufr = 0;
398 		if (c0 == '\n')
399 			break;
400 	}
401 }
402 
403 static void
404 logaddchar(struct kern_msgbuf *mbp, int c)
405 {
406 	mbp->msg_bufc[mbp->msg_bufx++] = c;
407 	if (mbp->msg_bufx < 0 || mbp->msg_bufx >= mbp->msg_bufs)
408 		mbp->msg_bufx = 0;
409 
410 	/* If the buffer is full, keep the most recent data. */
411 	if (mbp->msg_bufr == mbp->msg_bufx)
412 		logskip(mbp);
413 }
414 
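/*
 * Append one character to the message buffer.  Called from the kernel
 * printf path, possibly while the system is still cold (very early in
 * boot), in which case the lock is skipped.
 */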
415 void
416 logputchar(int c)
417 {
418 	struct kern_msgbuf *mbp;
419 
420 	if (!cold)
421 		mutex_spin_enter(&log_lock);
422 
423 	if (!msgbufenabled)
424 		goto out;
425 
426 	mbp = msgbufp;
427 	if (mbp->msg_magic != MSG_MAGIC) {
428 		/*
429 		 * Arguably should panic or somehow notify the
430 		 * user...  but how?  Panic may be too drastic,
431 		 * and would obliterate the message being kicked
432 		 * out (maybe a panic itself), and printf
433 		 * would invoke us recursively.  Silently punt
434 		 * for now.  If syslog is running, it should
435 		 * notice.
436 		 */
437 		msgbufenabled = 0;
438 		goto out;
439 
440 	}
441 
442 	logaddchar(mbp, c);
443 
444 out:
445 	if (!cold)
446 		mutex_spin_exit(&log_lock);
447 }
448 
449 /*
450  * sysctl helper routine for kern.msgbufsize and kern.msgbuf. For the
451  * former it merely checks that the message buffer is set up. For the latter,
452  * it also copies out the data if necessary.
453  */
454 static int
455 sysctl_msgbuf(SYSCTLFN_ARGS)
456 {
457 	char *where = oldp;
458 	size_t len, maxlen;
459 	long beg, end;
460 	int error;
461 
462 	if (!logenabled(msgbufp)) {
463 		msgbufenabled = 0;
464 		return (ENXIO);
465 	}
466 
467 	switch (rnode->sysctl_num) {
468 	case KERN_MSGBUFSIZE: {
469 		struct sysctlnode node = *rnode;
470 		int msg_bufs = (int)msgbufp->msg_bufs;
471 		node.sysctl_data = &msg_bufs;
472 		return (sysctl_lookup(SYSCTLFN_CALL(&node)));
473 	}
474 	case KERN_MSGBUF:
475 		break;
476 	default:
477 		return (EOPNOTSUPP);
478 	}
479 
480 	if (newp != NULL)
481 		return (EPERM);
482 
483 	if (oldp == NULL) {
484 		/* always return full buffer size */
485 		*oldlenp = msgbufp->msg_bufs;
486 		return (0);
487 	}
488 
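	/*
	 * Drop the sysctl lock across the potentially large copyout;
	 * the ring indices are snapshotted under log_lock instead.
	 */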
489 	sysctl_unlock();
490 
491 	/*
492 	 * First, copy from the write pointer to the end of
493 	 * message buffer.
494 	 */
495 	error = 0;
496 	mutex_spin_enter(&log_lock);
497 	maxlen = MIN(msgbufp->msg_bufs, *oldlenp);
498 	beg = msgbufp->msg_bufx;
499 	end = msgbufp->msg_bufs;
500 	mutex_spin_exit(&log_lock);
501 
502 	while (maxlen > 0) {
503 		len = MIN(end - beg, maxlen);
504 		if (len == 0)
505 			break;
506 		/* XXX unlocked, but hardly matters. */
507 		error = copyout(&msgbufp->msg_bufc[beg], where, len);
508 		ktrmibio(-1, UIO_READ, where, len, error);
509 		if (error)
510 			break;
511 		where += len;
512 		maxlen -= len;
513 
514 		/*
515 		 * ... then, copy from the beginning of message buffer to
516 		 * the write pointer.
517 		 */
518 		beg = 0;
519 		end = msgbufp->msg_bufx;
520 	}
521 
522 	sysctl_relock();
523 	return (error);
524 }
525 
526 const struct cdevsw log_cdevsw = {
527 	.d_open = logopen,
528 	.d_close = logclose,
529 	.d_read = logread,
530 	.d_write = nowrite,
531 	.d_ioctl = logioctl,
532 	.d_stop = nostop,
533 	.d_tty = notty,
534 	.d_poll = logpoll,
535 	.d_mmap = nommap,
536 	.d_kqfilter = logkqfilter,
537 	.d_discard = nodiscard,
538 	.d_flag = D_OTHER | D_MPSAFE
539 };
540
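/*
 * Illustrative only: a minimal userland sketch (not part of the kernel)
 * of draining the message buffer through the kern.msgbuf sysctl handled
 * by sysctl_msgbuf() above.  dmesg(8) is the real consumer and differs
 * in detail; this merely shows the "query size, then copy out" protocol.
 * Reading the buffer may require appropriate privileges depending on
 * the system's security settings.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[2] = { CTL_KERN, KERN_MSGBUF };
 *		size_t len;
 *		char *buf;
 *
 *		// With oldp == NULL the handler reports the full buffer size.
 *		if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
 *			err(EXIT_FAILURE, "sysctl size");
 *		if ((buf = malloc(len)) == NULL)
 *			err(EXIT_FAILURE, "malloc");
 *		// The second call copies out the ring starting at the write
 *		// pointer; never-written regions come back as NUL bytes.
 *		if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *			err(EXIT_FAILURE, "sysctl copy");
 *		fwrite(buf, 1, len, stdout);
 *		free(buf);
 *		return EXIT_SUCCESS;
 *	}
 */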