/*	$OpenBSD: subr_log.c,v 1.74 2021/03/18 08:43:38 mvs Exp $	*/
/*	$NetBSD: subr_log.c,v 1.11 1996/03/30 22:24:44 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)subr_log.c	8.1 (Berkeley) 6/10/93
 */

/*
 * Error log buffer for kernel printf's.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ioctl.h>
#include <sys/msgbuf.h>
#include <sys/file.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/filedesc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/fcntl.h>
#include <sys/mutex.h>
#include <sys/timeout.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <dev/cons.h>

#define LOG_RDPRI	(PZERO + 1)
#define LOG_TICK	50		/* log tick interval in msec */

#define LOG_ASYNC	0x04
#define LOG_RDWAIT	0x08

/*
 * Locking:
 *	L	log_mtx
 */
struct logsoftc {
	int	sc_state;		/* [L] see above for possibilities */
	struct	selinfo sc_selp;	/* process waiting on select call */
	struct	sigio_ref sc_sigio;	/* async I/O registration */
	int	sc_need_wakeup;		/* if set, wake up waiters */
	struct timeout sc_tick;		/* wakeup poll timeout */
} logsoftc;

int	log_open;			/* also used in log() */
int	msgbufmapped;			/* is the message buffer mapped */
struct	msgbuf *msgbufp;		/* the mapped buffer, itself. */
struct	msgbuf *consbufp;		/* console message buffer. */

struct	file *syslogf;
struct	rwlock syslogf_rwlock = RWLOCK_INITIALIZER("syslogf");

/*
 * Lock that serializes access to log message buffers.
 * This should be kept as a leaf lock in order not to constrain where
 * printf(9) can be used.
 */
struct	mutex log_mtx =
    MUTEX_INITIALIZER_FLAGS(IPL_HIGH, "logmtx", MTX_NOWITNESS);

void filt_logrdetach(struct knote *kn);
int filt_logread(struct knote *kn, long hint);

const struct filterops logread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_logrdetach,
	.f_event	= filt_logread,
};

int dosendsyslog(struct proc *, const char *, size_t, int, enum uio_seg);
void logtick(void *);
size_t msgbuf_getlen(struct msgbuf *);
void msgbuf_putchar_locked(struct msgbuf *, const char);

void
initmsgbuf(caddr_t buf, size_t bufsize)
{
	struct msgbuf *mbp;
	long new_bufs;

	/* Sanity-check the given size. */
	if (bufsize < sizeof(struct msgbuf))
		return;

	mbp = msgbufp = (struct msgbuf *)buf;

	new_bufs = bufsize - offsetof(struct msgbuf, msg_bufc);
	if ((mbp->msg_magic != MSG_MAGIC) || (mbp->msg_bufs != new_bufs) ||
	    (mbp->msg_bufr < 0) || (mbp->msg_bufr >= mbp->msg_bufs) ||
	    (mbp->msg_bufx < 0) || (mbp->msg_bufx >= mbp->msg_bufs)) {
		/*
		 * If the buffer magic number is wrong, has changed
		 * size (which shouldn't happen often), or is
		 * internally inconsistent, initialize it.
		 */

		memset(buf, 0, bufsize);
		mbp->msg_magic = MSG_MAGIC;
		mbp->msg_bufs = new_bufs;
	}

	/*
	 * Always start new buffer data on a new line.
	 * Avoid using log_mtx because mutexes do not work during early boot
	 * on some architectures.
	 */
	if (mbp->msg_bufx > 0 && mbp->msg_bufc[mbp->msg_bufx - 1] != '\n')
		msgbuf_putchar_locked(mbp, '\n');

	/* mark it as ready for use. */
	msgbufmapped = 1;
}

void
initconsbuf(void)
{
	/* Set up a buffer to collect /dev/console output */
	consbufp = malloc(CONSBUFSIZE, M_TTYS, M_WAITOK | M_ZERO);
	consbufp->msg_magic = MSG_MAGIC;
	consbufp->msg_bufs = CONSBUFSIZE - offsetof(struct msgbuf, msg_bufc);
}

void
msgbuf_putchar(struct msgbuf *mbp, const char c)
{
	if (mbp->msg_magic != MSG_MAGIC)
		/* Nothing we can do */
		return;

	mtx_enter(&log_mtx);
	msgbuf_putchar_locked(mbp, c);
	mtx_leave(&log_mtx);
}

void
msgbuf_putchar_locked(struct msgbuf *mbp, const char c)
{
	mbp->msg_bufc[mbp->msg_bufx++] = c;
	if (mbp->msg_bufx < 0 || mbp->msg_bufx >= mbp->msg_bufs)
		mbp->msg_bufx = 0;
	/* If the buffer is full, keep the most recent data. */
	if (mbp->msg_bufr == mbp->msg_bufx) {
		if (++mbp->msg_bufr >= mbp->msg_bufs)
			mbp->msg_bufr = 0;
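		/* Count the unread byte that was just lost to the writer. */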
		mbp->msg_bufd++;
	}
}

size_t
msgbuf_getlen(struct msgbuf *mbp)
{
	long len;

	mtx_enter(&log_mtx);
	len = mbp->msg_bufx - mbp->msg_bufr;
	if (len < 0)
		len += mbp->msg_bufs;
	mtx_leave(&log_mtx);
	return (len);
}

int
logopen(dev_t dev, int flags, int mode, struct proc *p)
{
	if (log_open)
		return (EBUSY);
	log_open = 1;
	sigio_init(&logsoftc.sc_sigio);
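	/* Start the periodic tick that services deferred wakeup requests. */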
	timeout_set(&logsoftc.sc_tick, logtick, NULL);
	timeout_add_msec(&logsoftc.sc_tick, LOG_TICK);
	return (0);
}

int
logclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct file *fp;

	rw_enter_write(&syslogf_rwlock);
	fp = syslogf;
	syslogf = NULL;
	rw_exit(&syslogf_rwlock);

	if (fp)
		FRELE(fp, p);
	log_open = 0;
	timeout_del(&logsoftc.sc_tick);
	logsoftc.sc_state = 0;
	sigio_free(&logsoftc.sc_sigio);
	return (0);
}

int
logread(dev_t dev, struct uio *uio, int flag)
{
	struct sleep_state sls;
	struct msgbuf *mbp = msgbufp;
	size_t l, rpos;
	int error = 0;

	mtx_enter(&log_mtx);
	while (mbp->msg_bufr == mbp->msg_bufx) {
		if (flag & IO_NDELAY) {
			error = EWOULDBLOCK;
			goto out;
		}
		logsoftc.sc_state |= LOG_RDWAIT;
		mtx_leave(&log_mtx);
		/*
		 * Set up and enter sleep manually instead of using msleep()
		 * to keep log_mtx as a leaf lock.
		 */
		sleep_setup(&sls, mbp, LOG_RDPRI | PCATCH, "klog", 0);
		error = sleep_finish(&sls, logsoftc.sc_state & LOG_RDWAIT);
		mtx_enter(&log_mtx);
		if (error)
			goto out;
	}

	if (mbp->msg_bufd > 0) {
		char buf[64];
		long ndropped;

		ndropped = mbp->msg_bufd;
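		/* uiomove() may sleep, so the leaf mutex cannot be held across it. */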
		mtx_leave(&log_mtx);
		l = snprintf(buf, sizeof(buf),
		    "<%d>klog: dropped %ld byte%s, message buffer full\n",
		    LOG_KERN|LOG_WARNING, ndropped,
		    ndropped == 1 ? "" : "s");
		error = uiomove(buf, ulmin(l, sizeof(buf) - 1), uio);
		mtx_enter(&log_mtx);
		if (error)
			goto out;
		mbp->msg_bufd -= ndropped;
	}

	while (uio->uio_resid > 0) {
		if (mbp->msg_bufx >= mbp->msg_bufr)
			l = mbp->msg_bufx - mbp->msg_bufr;
		else
			l = mbp->msg_bufs - mbp->msg_bufr;
		l = ulmin(l, uio->uio_resid);
		if (l == 0)
			break;
		rpos = mbp->msg_bufr;
		mtx_leave(&log_mtx);
		/* Ignore that concurrent readers may consume the same data. */
		error = uiomove(&mbp->msg_bufc[rpos], l, uio);
		mtx_enter(&log_mtx);
		if (error)
			break;
		mbp->msg_bufr += l;
		if (mbp->msg_bufr < 0 || mbp->msg_bufr >= mbp->msg_bufs)
			mbp->msg_bufr = 0;
	}
 out:
	mtx_leave(&log_mtx);
	return (error);
}

int
logpoll(dev_t dev, int events, struct proc *p)
{
	int revents = 0;

	mtx_enter(&log_mtx);
	if (events & (POLLIN | POLLRDNORM)) {
		if (msgbufp->msg_bufr != msgbufp->msg_bufx)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &logsoftc.sc_selp);
	}
	mtx_leave(&log_mtx);
	return (revents);
}

int
logkqfilter(dev_t dev, struct knote *kn)
{
	struct klist *klist;
	int s;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &logsoftc.sc_selp.si_note;
		kn->kn_fop = &logread_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (void *)msgbufp;

	s = splhigh();
	klist_insert_locked(klist, kn);
	splx(s);

	return (0);
}

void
filt_logrdetach(struct knote *kn)
{
	int s;

	s = splhigh();
	klist_remove_locked(&logsoftc.sc_selp.si_note, kn);
	splx(s);
}

int
filt_logread(struct knote *kn, long hint)
{
	struct msgbuf *mbp = kn->kn_hook;

	kn->kn_data = msgbuf_getlen(mbp);
	return (kn->kn_data != 0);
}

void
logwakeup(void)
{
	/*
	 * The actual wakeup has to be deferred because logwakeup() can be
	 * called in very varied contexts.
	 * Keep the print routines usable in as many situations as possible
	 * by not using locking here.
	 */

	/*
	 * Ensure that preceding stores become visible to other CPUs
	 * before the flag.
	 */
	membar_producer();

	logsoftc.sc_need_wakeup = 1;
}

void
logtick(void *arg)
{
	int state;

	if (!log_open)
		return;

	if (!logsoftc.sc_need_wakeup)
		goto out;
	logsoftc.sc_need_wakeup = 0;

	/*
	 * sc_need_wakeup has to be cleared before handling the wakeup.
	 * Visiting log_mtx ensures the proper order.
	 */

	mtx_enter(&log_mtx);
	state = logsoftc.sc_state;
	if (logsoftc.sc_state & LOG_RDWAIT)
		logsoftc.sc_state &= ~LOG_RDWAIT;
	mtx_leave(&log_mtx);

	selwakeup(&logsoftc.sc_selp);
	if (state & LOG_ASYNC)
		pgsigio(&logsoftc.sc_sigio, SIGIO, 0);
	if (state & LOG_RDWAIT)
		wakeup(msgbufp);
out:
	timeout_add_msec(&logsoftc.sc_tick, LOG_TICK);
}

int
logioctl(dev_t dev, u_long com, caddr_t data, int flag, struct proc *p)
{
	struct file *fp, *newfp;
	int error;

	switch (com) {

	/* return number of characters immediately available */
	case FIONREAD:
		*(int *)data = (int)msgbuf_getlen(msgbufp);
		break;

	case FIONBIO:
		break;

	case FIOASYNC:
		mtx_enter(&log_mtx);
		if (*(int *)data)
			logsoftc.sc_state |= LOG_ASYNC;
		else
			logsoftc.sc_state &= ~LOG_ASYNC;
		mtx_leave(&log_mtx);
		break;

	case FIOSETOWN:
	case TIOCSPGRP:
		return (sigio_setown(&logsoftc.sc_sigio, com, data));

	case FIOGETOWN:
	case TIOCGPGRP:
		sigio_getown(&logsoftc.sc_sigio, com, data);
		break;

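	/* install the socket passed in by syslogd(8), replacing any old one */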
	case LIOCSFD:
		if ((error = suser(p)) != 0)
			return (error);
		if ((error = getsock(p, *(int *)data, &newfp)) != 0)
			return (error);

		rw_enter_write(&syslogf_rwlock);
		fp = syslogf;
		syslogf = newfp;
		rw_exit(&syslogf_rwlock);

		if (fp)
			FRELE(fp, p);
		break;

	default:
		return (ENOTTY);
	}
	return (0);
}

/*
 * If syslogd is not running, temporarily store a limited amount of messages
 * in kernel.  After log stash is full, drop messages and count them.  When
 * syslogd is available again, next log message will flush the stashed
 * messages and insert a message with drop count.  Calls to malloc(9) and
 * copyin(9) may sleep, protect data structures with rwlock.
 */

#define LOGSTASH_SIZE	100
struct logstash_message {
	char	*lgs_buffer;
	size_t	 lgs_size;
} logstash_messages[LOGSTASH_SIZE];

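/*
 * The stash is a ring of LOGSTASH_SIZE slots: logstash_in points at the
 * next slot to fill, logstash_out at the oldest stored message.  The ring
 * is full when both pointers meet and the out slot still holds a buffer.
 */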
struct	logstash_message *logstash_in = &logstash_messages[0];
struct	logstash_message *logstash_out = &logstash_messages[0];

struct	rwlock logstash_rwlock = RWLOCK_INITIALIZER("logstash");

int	logstash_dropped, logstash_error, logstash_pid;

int	logstash_insert(const char *, size_t, int, pid_t);
void	logstash_remove(void);
int	logstash_sendsyslog(struct proc *);

static inline int
logstash_full(void)
{
	rw_assert_anylock(&logstash_rwlock);

	return logstash_out->lgs_buffer != NULL &&
	    logstash_in == logstash_out;
}

static inline void
logstash_increment(struct logstash_message **msg)
{
	rw_assert_wrlock(&logstash_rwlock);

	KASSERT((*msg) >= &logstash_messages[0]);
	KASSERT((*msg) < &logstash_messages[LOGSTASH_SIZE]);
	if ((*msg) == &logstash_messages[LOGSTASH_SIZE - 1])
		(*msg) = &logstash_messages[0];
	else
		(*msg)++;
}

int
logstash_insert(const char *buf, size_t nbyte, int logerror, pid_t pid)
{
	int error;

	rw_enter_write(&logstash_rwlock);

	if (logstash_full()) {
		if (logstash_dropped == 0) {
			logstash_error = logerror;
			logstash_pid = pid;
		}
		logstash_dropped++;

		rw_exit(&logstash_rwlock);
		return (0);
	}

	logstash_in->lgs_buffer = malloc(nbyte, M_LOG, M_WAITOK);
	error = copyin(buf, logstash_in->lgs_buffer, nbyte);
	if (error) {
		free(logstash_in->lgs_buffer, M_LOG, nbyte);
		logstash_in->lgs_buffer = NULL;

		rw_exit(&logstash_rwlock);
		return (error);
	}
	logstash_in->lgs_size = nbyte;
	logstash_increment(&logstash_in);

	rw_exit(&logstash_rwlock);
	return (0);
}

void
logstash_remove(void)
{
	rw_assert_wrlock(&logstash_rwlock);

	KASSERT(logstash_out->lgs_buffer != NULL);
	free(logstash_out->lgs_buffer, M_LOG, logstash_out->lgs_size);
	logstash_out->lgs_buffer = NULL;
	logstash_increment(&logstash_out);

	/* Insert dropped message in sequence where messages were dropped. */
	if (logstash_dropped) {
		size_t l, nbyte;
		char buf[80];

		l = snprintf(buf, sizeof(buf),
		    "<%d>sendsyslog: dropped %d message%s, error %d, pid %d",
		    LOG_KERN|LOG_WARNING, logstash_dropped,
		    logstash_dropped == 1 ? "" : "s",
		    logstash_error, logstash_pid);
		logstash_dropped = 0;
		logstash_error = 0;
		logstash_pid = 0;

		/* Cannot fail, we have just freed a slot. */
		KASSERT(!logstash_full());
		nbyte = ulmin(l, sizeof(buf) - 1);
		logstash_in->lgs_buffer = malloc(nbyte, M_LOG, M_WAITOK);
		memcpy(logstash_in->lgs_buffer, buf, nbyte);
		logstash_in->lgs_size = nbyte;
		logstash_increment(&logstash_in);
	}
}

int
logstash_sendsyslog(struct proc *p)
{
	int error;

	rw_enter_write(&logstash_rwlock);

	while (logstash_out->lgs_buffer != NULL) {
		error = dosendsyslog(p, logstash_out->lgs_buffer,
		    logstash_out->lgs_size, 0, UIO_SYSSPACE);
		if (error) {
			rw_exit(&logstash_rwlock);
			return (error);
		}
		logstash_remove();
	}

	rw_exit(&logstash_rwlock);
	return (0);
}

/*
 * Send syslog(3) message from userland to socketpair(2) created by syslogd(8).
 * Store message in kernel log stash for later if syslogd(8) is not available
 * or sending fails.  Send to console if LOG_CONS is set and syslogd(8) socket
 * does not exist.
 */

int
sys_sendsyslog(struct proc *p, void *v, register_t *retval)
{
	struct sys_sendsyslog_args /* {
		syscallarg(const char *) buf;
		syscallarg(size_t) nbyte;
		syscallarg(int) flags;
	} */ *uap = v;
	size_t nbyte;
	int error;

	nbyte = SCARG(uap, nbyte);
	if (nbyte > LOG_MAXLINE)
		nbyte = LOG_MAXLINE;

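	/* Flush any messages stashed while syslogd(8) was unavailable first. */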
	logstash_sendsyslog(p);
	error = dosendsyslog(p, SCARG(uap, buf), nbyte, SCARG(uap, flags),
	    UIO_USERSPACE);
	if (error && error != EFAULT)
		logstash_insert(SCARG(uap, buf), nbyte, error, p->p_p->ps_pid);
	return (error);
}

int
dosendsyslog(struct proc *p, const char *buf, size_t nbyte, int flags,
    enum uio_seg sflg)
{
#ifdef KTRACE
	struct iovec ktriov;
#endif
	struct file *fp;
	char pri[6], *kbuf;
	struct iovec aiov;
	struct uio auio;
	size_t i, len;
	int error;

	/* Global variable syslogf may change during sleep, use local copy. */
	rw_enter_read(&syslogf_rwlock);
	fp = syslogf;
	if (fp)
		FREF(fp);
	rw_exit(&syslogf_rwlock);

	if (fp == NULL) {
		if (!ISSET(flags, LOG_CONS))
			return (ENOTCONN);
		/*
		 * Strip off syslog priority when logging to console.
		 * LOG_PRIMASK | LOG_FACMASK is 0x03ff, so at most 4
		 * decimal digits may appear in priority as <1023>.
		 */
		len = MIN(nbyte, sizeof(pri));
		if (sflg == UIO_USERSPACE) {
			if ((error = copyin(buf, pri, len)))
				return (error);
		} else
			memcpy(pri, buf, len);
		if (0 < len && pri[0] == '<') {
			for (i = 1; i < len; i++) {
				if (pri[i] < '0' || pri[i] > '9')
					break;
			}
			if (i < len && pri[i] == '>') {
				i++;
				/* There must be at least one digit <0>. */
				if (i >= 3) {
					buf += i;
					nbyte -= i;
				}
			}
		}
	}

	aiov.iov_base = (char *)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_segflg = sflg;
	auio.uio_rw = UIO_WRITE;
	auio.uio_procp = p;
	auio.uio_offset = 0;
	auio.uio_resid = aiov.iov_len;
#ifdef KTRACE
	if (sflg == UIO_USERSPACE && KTRPOINT(p, KTR_GENIO))
		ktriov = aiov;
	else
		ktriov.iov_len = 0;
#endif

	len = auio.uio_resid;
	if (fp) {
		int flags = (fp->f_flag & FNONBLOCK) ? MSG_DONTWAIT : 0;
		error = sosend(fp->f_data, NULL, &auio, NULL, NULL, flags);
		if (error == 0)
			len -= auio.uio_resid;
	} else {
		KERNEL_LOCK();
		if (constty || cn_devvp) {
			error = cnwrite(0, &auio, 0);
			if (error == 0)
				len -= auio.uio_resid;
			aiov.iov_base = "\r\n";
			aiov.iov_len = 2;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_WRITE;
			auio.uio_procp = p;
			auio.uio_offset = 0;
			auio.uio_resid = aiov.iov_len;
			cnwrite(0, &auio, 0);
		} else {
			/* XXX console redirection breaks down... */
			if (sflg == UIO_USERSPACE) {
				kbuf = malloc(len, M_TEMP, M_WAITOK);
				error = copyin(aiov.iov_base, kbuf, len);
			} else {
				kbuf = aiov.iov_base;
				error = 0;
			}
			if (error == 0)
				for (i = 0; i < len; i++) {
					if (kbuf[i] == '\0')
						break;
					cnputc(kbuf[i]);
					auio.uio_resid--;
				}
			if (sflg == UIO_USERSPACE)
				free(kbuf, M_TEMP, len);
			if (error == 0)
				len -= auio.uio_resid;
			cnputc('\n');
		}
		KERNEL_UNLOCK();
	}

#ifdef KTRACE
	if (error == 0 && ktriov.iov_len != 0)
		ktrgenio(p, -1, UIO_WRITE, &ktriov, len);
#endif
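	/*
	 * Without a syslogd(8) socket, report ENOTCONN even if console
	 * output succeeded, so the caller may stash the message for later.
	 */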
	if (fp)
		FRELE(fp, p);
	else if (error != EFAULT)
		error = ENOTCONN;
	return (error);
}
757