xref: /openbsd-src/sys/kern/subr_log.c (revision 5c389b79544373bccfce668b646e62e7ba9802a3)
1 /*	$OpenBSD: subr_log.c,v 1.76 2023/06/28 08:23:25 claudio Exp $	*/
2 /*	$NetBSD: subr_log.c,v 1.11 1996/03/30 22:24:44 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)subr_log.c	8.1 (Berkeley) 6/10/93
33  */
34 
35 /*
36  * Error log buffer for kernel printf's.
37  */
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/proc.h>
42 #include <sys/vnode.h>
43 #include <sys/ioctl.h>
44 #include <sys/msgbuf.h>
45 #include <sys/file.h>
46 #include <sys/tty.h>
47 #include <sys/signalvar.h>
48 #include <sys/syslog.h>
49 #include <sys/malloc.h>
50 #include <sys/filedesc.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/fcntl.h>
54 #include <sys/mutex.h>
55 #include <sys/timeout.h>
56 
57 #ifdef KTRACE
58 #include <sys/ktrace.h>
59 #endif
60 
61 #include <sys/mount.h>
62 #include <sys/syscallargs.h>
63 
64 #include <dev/cons.h>
65 
66 #define LOG_RDPRI	(PZERO + 1)
67 #define LOG_TICK	50		/* log tick interval in msec */
68 
69 #define LOG_ASYNC	0x04
70 #define LOG_RDWAIT	0x08
71 
72 /*
73  * Locking:
74  *	L	log_mtx
75  */
/* Singleton state of the /dev/klog device (single-open, see logopen()). */
76 struct logsoftc {
77 	int	sc_state;		/* [L] see above for possibilities */
78 	struct	selinfo sc_selp;	/* process waiting on select call */
79 	struct	sigio_ref sc_sigio;	/* async I/O registration */
80 	int	sc_need_wakeup;		/* if set, wake up waiters; written lock-free by logwakeup(), cleared by logtick() */
81 	struct timeout sc_tick;		/* wakeup poll timeout */
82 } logsoftc;
83 
84 int	log_open;			/* also used in log() */
85 int	msgbufmapped;			/* is the message buffer mapped */
86 struct	msgbuf *msgbufp;		/* the mapped buffer, itself. */
87 struct	msgbuf *consbufp;		/* console message buffer. */
88 
89 struct	file *syslogf;			/* socket to syslogd(8), set via LIOCSFD; protected by syslogf_rwlock */
90 struct	rwlock syslogf_rwlock = RWLOCK_INITIALIZER("syslogf");
91 
92 /*
93  * Lock that serializes access to log message buffers.
94  * This should be kept as a leaf lock in order not to constrain where
95  * printf(9) can be used.
96  */
97 struct	mutex log_mtx =
98     MUTEX_INITIALIZER_FLAGS(IPL_HIGH, "logmtx", MTX_NOWITNESS);
99 
100 void filt_logrdetach(struct knote *kn);
101 int filt_logread(struct knote *kn, long hint);
102 
103 const struct filterops logread_filtops = {
104 	.f_flags	= FILTEROP_ISFD,
105 	.f_attach	= NULL,
106 	.f_detach	= filt_logrdetach,
107 	.f_event	= filt_logread,
108 };
109 
110 int dosendsyslog(struct proc *, const char *, size_t, int, enum uio_seg);
111 void logtick(void *);
112 size_t msgbuf_getlen(struct msgbuf *);
113 void msgbuf_putchar_locked(struct msgbuf *, const char);
114 
/*
 * Attach the kernel message buffer to the given memory area and mark it
 * ready for use.  Pre-existing contents (e.g. surviving a reboot) are
 * kept when the magic number, size and ring indices are still consistent;
 * otherwise the buffer is reinitialized from scratch.
 */
115 void
116 initmsgbuf(caddr_t buf, size_t bufsize)
117 {
118 	struct msgbuf *mbp;
119 	long new_bufs;
120 
121 	/* Sanity-check the given size. */
122 	if (bufsize < sizeof(struct msgbuf))
123 		return;
124 
125 	mbp = msgbufp = (struct msgbuf *)buf;
126 
127 	new_bufs = bufsize - offsetof(struct msgbuf, msg_bufc);
128 	if ((mbp->msg_magic != MSG_MAGIC) || (mbp->msg_bufs != new_bufs) ||
129 	    (mbp->msg_bufr < 0) || (mbp->msg_bufr >= mbp->msg_bufs) ||
130 	    (mbp->msg_bufx < 0) || (mbp->msg_bufx >= mbp->msg_bufs)) {
131 		/*
132 		 * If the buffer magic number is wrong, has changed
133 		 * size (which shouldn't happen often), or is
134 		 * internally inconsistent, initialize it.
135 		 */
136 
137 		memset(buf, 0, bufsize);
138 		mbp->msg_magic = MSG_MAGIC;
139 		mbp->msg_bufs = new_bufs;
140 	}
141 
142 	/*
143 	 * Always start new buffer data on a new line.
144 	 * Avoid using log_mtx because mutexes do not work during early boot
145 	 * on some architectures.
146 	 */
147 	if (mbp->msg_bufx > 0 && mbp->msg_bufc[mbp->msg_bufx - 1] != '\n')
148 		msgbuf_putchar_locked(mbp, '\n');
149 
150 	/* mark it as ready for use. */
151 	msgbufmapped = 1;
152 }
153 
/*
 * Allocate and initialize the console message buffer (consbufp),
 * which collects a copy of /dev/console output.
 */
154 void
155 initconsbuf(void)
156 {
157 	/* Set up a buffer to collect /dev/console output */
158 	consbufp = malloc(CONSBUFSIZE, M_TTYS, M_WAITOK | M_ZERO);
159 	consbufp->msg_magic = MSG_MAGIC;
160 	consbufp->msg_bufs = CONSBUFSIZE - offsetof(struct msgbuf, msg_bufc);
161 }
162 
/*
 * Append one character to a message buffer, taking log_mtx around the
 * actual store.  Silently drops the character if the buffer has not
 * been initialized (wrong magic).
 */
163 void
164 msgbuf_putchar(struct msgbuf *mbp, const char c)
165 {
166 	if (mbp->msg_magic != MSG_MAGIC)
167 		/* Nothing we can do */
168 		return;
169 
170 	mtx_enter(&log_mtx);
171 	msgbuf_putchar_locked(mbp, c);
172 	mtx_leave(&log_mtx);
173 }
174 
/*
 * Append one character to the ring buffer.  Caller must hold log_mtx,
 * except during early boot before mutexes work (see initmsgbuf()).
 * When the write index catches up with the read index, the oldest byte
 * is overwritten and accounted for in msg_bufd (dropped-byte counter,
 * reported to readers by logread()).
 */
175 void
176 msgbuf_putchar_locked(struct msgbuf *mbp, const char c)
177 {
178 	mbp->msg_bufc[mbp->msg_bufx++] = c;
179 	if (mbp->msg_bufx < 0 || mbp->msg_bufx >= mbp->msg_bufs)
180 		mbp->msg_bufx = 0;
181 	/* If the buffer is full, keep the most recent data. */
182 	if (mbp->msg_bufr == mbp->msg_bufx) {
183 		if (++mbp->msg_bufr >= mbp->msg_bufs)
184 			mbp->msg_bufr = 0;
185 		mbp->msg_bufd++;
186 	}
187 }
188 
/*
 * Return the number of unread bytes in the message buffer
 * (distance from the read index to the write index, accounting
 * for wraparound).
 */
189 size_t
190 msgbuf_getlen(struct msgbuf *mbp)
191 {
192 	long len;
193 
194 	mtx_enter(&log_mtx);
195 	len = mbp->msg_bufx - mbp->msg_bufr;
196 	if (len < 0)
197 		len += mbp->msg_bufs;
198 	mtx_leave(&log_mtx);
199 	return (len);
200 }
201 
/*
 * Open /dev/klog.  Only a single opener is allowed at a time (EBUSY
 * otherwise).  Starts the periodic wakeup tick used by logtick().
 */
202 int
203 logopen(dev_t dev, int flags, int mode, struct proc *p)
204 {
205 	if (log_open)
206 		return (EBUSY);
207 	log_open = 1;
208 	sigio_init(&logsoftc.sc_sigio);
209 	timeout_set(&logsoftc.sc_tick, logtick, NULL);
210 	timeout_add_msec(&logsoftc.sc_tick, LOG_TICK);
211 	return (0);
212 }
213 
/*
 * Close /dev/klog: detach and release the syslogd(8) socket (if one
 * was registered via LIOCSFD), stop the wakeup tick and reset state.
 */
214 int
215 logclose(dev_t dev, int flag, int mode, struct proc *p)
216 {
217 	struct file *fp;
218 
219 	rw_enter_write(&syslogf_rwlock);
220 	fp = syslogf;
221 	syslogf = NULL;
222 	rw_exit(&syslogf_rwlock);
223 
224 	if (fp)
225 		FRELE(fp, p);
226 	log_open = 0;
227 	timeout_del(&logsoftc.sc_tick);
228 	logsoftc.sc_state = 0;
229 	sigio_free(&logsoftc.sc_sigio);
230 	return (0);
231 }
232 
/*
 * Read from the kernel message buffer.  Blocks while the buffer is
 * empty unless IO_NDELAY is set (then EWOULDBLOCK).  If bytes were
 * overwritten while the buffer was full, a synthesized drop notice is
 * delivered before the buffered data.  log_mtx is dropped around
 * uiomove() and around sleeping, since both may sleep and log_mtx must
 * remain a leaf lock.
 */
233 int
234 logread(dev_t dev, struct uio *uio, int flag)
235 {
236 	struct sleep_state sls;
237 	struct msgbuf *mbp = msgbufp;
238 	size_t l, rpos;
239 	int error = 0;
240 
241 	mtx_enter(&log_mtx);
242 	while (mbp->msg_bufr == mbp->msg_bufx) {
243 		if (flag & IO_NDELAY) {
244 			error = EWOULDBLOCK;
245 			goto out;
246 		}
247 		logsoftc.sc_state |= LOG_RDWAIT;
248 		mtx_leave(&log_mtx);
249 		/*
250 		 * Set up and enter sleep manually instead of using msleep()
251 		 * to keep log_mtx as a leaf lock.
252 		 */
253 		sleep_setup(&sls, mbp, LOG_RDPRI | PCATCH, "klog");
254 		error = sleep_finish(&sls, LOG_RDPRI | PCATCH, 0,
255 		    logsoftc.sc_state & LOG_RDWAIT);
256 		mtx_enter(&log_mtx);
257 		if (error)
258 			goto out;
259 	}
260 
	/* Report bytes overwritten while full, see msgbuf_putchar_locked(). */
261 	if (mbp->msg_bufd > 0) {
262 		char buf[64];
263 		long ndropped;
264 
265 		ndropped = mbp->msg_bufd;
266 		mtx_leave(&log_mtx);
267 		l = snprintf(buf, sizeof(buf),
268 		    "<%d>klog: dropped %ld byte%s, message buffer full\n",
269 		    LOG_KERN|LOG_WARNING, ndropped,
270 		    ndropped == 1 ? "" : "s");
271 		error = uiomove(buf, ulmin(l, sizeof(buf) - 1), uio);
272 		mtx_enter(&log_mtx);
273 		if (error)
274 			goto out;
275 		mbp->msg_bufd -= ndropped;
276 	}
277 
278 	while (uio->uio_resid > 0) {
279 		if (mbp->msg_bufx >= mbp->msg_bufr)
280 			l = mbp->msg_bufx - mbp->msg_bufr;
281 		else
282 			l = mbp->msg_bufs - mbp->msg_bufr;
283 		l = ulmin(l, uio->uio_resid);
284 		if (l == 0)
285 			break;
286 		rpos = mbp->msg_bufr;
287 		mtx_leave(&log_mtx);
288 		/* Ignore that concurrent readers may consume the same data. */
289 		error = uiomove(&mbp->msg_bufc[rpos], l, uio);
290 		mtx_enter(&log_mtx);
291 		if (error)
292 			break;
293 		mbp->msg_bufr += l;
294 		if (mbp->msg_bufr < 0 || mbp->msg_bufr >= mbp->msg_bufs)
295 			mbp->msg_bufr = 0;
296 	}
297  out:
298 	mtx_leave(&log_mtx);
299 	return (error);
300 }
301 
/*
 * kqueue attach routine for /dev/klog; only EVFILT_READ is supported.
 * The knote list is protected by splhigh() rather than log_mtx.
 */
302 int
303 logkqfilter(dev_t dev, struct knote *kn)
304 {
305 	struct klist *klist;
306 	int s;
307 
308 	switch (kn->kn_filter) {
309 	case EVFILT_READ:
310 		klist = &logsoftc.sc_selp.si_note;
311 		kn->kn_fop = &logread_filtops;
312 		break;
313 	default:
314 		return (EINVAL);
315 	}
316 
	/* Remember which message buffer to report the length of. */
317 	kn->kn_hook = (void *)msgbufp;
318 
319 	s = splhigh();
320 	klist_insert_locked(klist, kn);
321 	splx(s);
322 
323 	return (0);
324 }
325 
/* Detach a read knote from the /dev/klog knote list (at splhigh). */
326 void
327 filt_logrdetach(struct knote *kn)
328 {
329 	int s;
330 
331 	s = splhigh();
332 	klist_remove_locked(&logsoftc.sc_selp.si_note, kn);
333 	splx(s);
334 }
335 
/*
 * kqueue event routine: report the number of unread bytes in the
 * message buffer; the knote is active when there is data to read.
 */
336 int
337 filt_logread(struct knote *kn, long hint)
338 {
339 	struct msgbuf *mbp = kn->kn_hook;
340 
341 	kn->kn_data = msgbuf_getlen(mbp);
342 	return (kn->kn_data != 0);
343 }
344 
/*
 * Request a deferred wakeup of /dev/klog waiters.  Deliberately
 * lock-free so it is callable from the kernel printf path in any
 * context; logtick() performs the actual wakeup.
 */
345 void
346 logwakeup(void)
347 {
348 	/*
349 	 * The actual wakeup has to be deferred because logwakeup() can be
350 	 * called in very varied contexts.
351 	 * Keep the print routines usable in as many situations as possible
352 	 * by not using locking here.
353 	 */
354 
355 	/*
356 	 * Ensure that preceding stores become visible to other CPUs
357 	 * before the flag.
358 	 */
359 	membar_producer();
360 
361 	logsoftc.sc_need_wakeup = 1;
362 }
363 
/*
 * Timeout handler, rescheduled every LOG_TICK ms while /dev/klog is
 * open.  Performs the wakeups (select, SIGIO, sleeping readers) that
 * logwakeup() requested via sc_need_wakeup.
 */
364 void
365 logtick(void *arg)
366 {
367 	int state;
368 
	/* Device was closed; the tick is not rescheduled. */
369 	if (!log_open)
370 		return;
371 
372 	if (!logsoftc.sc_need_wakeup)
373 		goto out;
374 	logsoftc.sc_need_wakeup = 0;
375 
376 	/*
377 	 * sc_need_wakeup has to be cleared before handling the wakeup.
378 	 * Visiting log_mtx ensures the proper order.
379 	 */
380 
381 	mtx_enter(&log_mtx);
382 	state = logsoftc.sc_state;
383 	if (logsoftc.sc_state & LOG_RDWAIT)
384 		logsoftc.sc_state &= ~LOG_RDWAIT;
385 	mtx_leave(&log_mtx);
386 
387 	selwakeup(&logsoftc.sc_selp);
388 	if (state & LOG_ASYNC)
389 		pgsigio(&logsoftc.sc_sigio, SIGIO, 0);
390 	if (state & LOG_RDWAIT)
391 		wakeup(msgbufp);
392 out:
393 	timeout_add_msec(&logsoftc.sc_tick, LOG_TICK);
394 }
395 
/*
 * Ioctl handler for /dev/klog: FIONREAD (bytes available), FIONBIO
 * (no-op), FIOASYNC, FIO[SG]ETOWN/TIOC[SG]PGRP (async I/O ownership)
 * and LIOCSFD (root only: register the syslogd(8) socket used by
 * dosendsyslog()).
 */
396 int
397 logioctl(dev_t dev, u_long com, caddr_t data, int flag, struct proc *p)
398 {
399 	struct file *fp, *newfp;
400 	int error;
401 
402 	switch (com) {
403 
404 	/* return number of characters immediately available */
405 	case FIONREAD:
406 		*(int *)data = (int)msgbuf_getlen(msgbufp);
407 		break;
408 
409 	case FIONBIO:
410 		break;
411 
412 	case FIOASYNC:
413 		mtx_enter(&log_mtx);
414 		if (*(int *)data)
415 			logsoftc.sc_state |= LOG_ASYNC;
416 		else
417 			logsoftc.sc_state &= ~LOG_ASYNC;
418 		mtx_leave(&log_mtx);
419 		break;
420 
421 	case FIOSETOWN:
422 	case TIOCSPGRP:
423 		return (sigio_setown(&logsoftc.sc_sigio, com, data));
424 
425 	case FIOGETOWN:
426 	case TIOCGPGRP:
427 		sigio_getown(&logsoftc.sc_sigio, com, data);
428 		break;
429 
430 	case LIOCSFD:
431 		if ((error = suser(p)) != 0)
432 			return (error);
433 		if ((error = getsock(p, *(int *)data, &newfp)) != 0)
434 			return (error);
435 
		/* Swap in the new socket; release the old one outside the lock. */
436 		rw_enter_write(&syslogf_rwlock);
437 		fp = syslogf;
438 		syslogf = newfp;
439 		rw_exit(&syslogf_rwlock);
440 
441 		if (fp)
442 			FRELE(fp, p);
443 		break;
444 
445 	default:
446 		return (ENOTTY);
447 	}
448 	return (0);
449 }
450 
451 /*
452  * If syslogd is not running, temporarily store a limited amount of messages
453  * in kernel.  After log stash is full, drop messages and count them.  When
454  * syslogd is available again, next log message will flush the stashed
455  * messages and insert a message with drop count.  Calls to malloc(9) and
456  * copyin(9) may sleep, protect data structures with rwlock.
457  */
458 
459 #define LOGSTASH_SIZE	100
460 struct logstash_message {
461 	char	*lgs_buffer;	/* message text, malloc(M_LOG); NULL marks a free slot */
462 	size_t	 lgs_size;	/* size of lgs_buffer in bytes */
463 } logstash_messages[LOGSTASH_SIZE];
464 
/* Ring pointers into logstash_messages, protected by logstash_rwlock. */
465 struct	logstash_message *logstash_in = &logstash_messages[0];	/* next slot to fill */
466 struct	logstash_message *logstash_out = &logstash_messages[0];	/* oldest stashed message */
467 
468 struct	rwlock logstash_rwlock = RWLOCK_INITIALIZER("logstash");
469 
/* Drop accounting while the stash is full, protected by logstash_rwlock. */
470 int	logstash_dropped, logstash_error, logstash_pid;
471 
472 int	logstash_insert(const char *, size_t, int, pid_t);
473 void	logstash_remove(void);
474 int	logstash_sendsyslog(struct proc *);
475 
/*
 * Is the stash ring full?  True when in has caught up with out while
 * the out slot is still occupied (an empty ring also has in == out,
 * but then lgs_buffer is NULL).
 */
476 static inline int
477 logstash_full(void)
478 {
479 	rw_assert_anylock(&logstash_rwlock);
480 
481 	return logstash_out->lgs_buffer != NULL &&
482 	    logstash_in == logstash_out;
483 }
484 
/* Advance a stash ring pointer by one slot, wrapping at the array end. */
485 static inline void
486 logstash_increment(struct logstash_message **msg)
487 {
488 	rw_assert_wrlock(&logstash_rwlock);
489 
490 	KASSERT((*msg) >= &logstash_messages[0]);
491 	KASSERT((*msg) < &logstash_messages[LOGSTASH_SIZE]);
492 	if ((*msg) == &logstash_messages[LOGSTASH_SIZE - 1])
493 		(*msg) = &logstash_messages[0];
494 	else
495 		(*msg)++;
496 }
497 
/*
 * Copy a userland message into the stash ring for later delivery.
 * If the ring is full, only count the message as dropped; the first
 * drop also records the triggering error and pid for the notice that
 * logstash_remove() will emit.  May sleep in malloc(9)/copyin(9).
 */
498 int
499 logstash_insert(const char *buf, size_t nbyte, int logerror, pid_t pid)
500 {
501 	int error;
502 
503 	rw_enter_write(&logstash_rwlock);
504 
505 	if (logstash_full()) {
506 		if (logstash_dropped == 0) {
507 			logstash_error = logerror;
508 			logstash_pid = pid;
509 		}
510 		logstash_dropped++;
511 
512 		rw_exit(&logstash_rwlock);
513 		return (0);
514 	}
515 
516 	logstash_in->lgs_buffer = malloc(nbyte, M_LOG, M_WAITOK);
517 	error = copyin(buf, logstash_in->lgs_buffer, nbyte);
518 	if (error) {
		/* Leave the slot free again on copyin failure. */
519 		free(logstash_in->lgs_buffer, M_LOG, nbyte);
520 		logstash_in->lgs_buffer = NULL;
521 
522 		rw_exit(&logstash_rwlock);
523 		return (error);
524 	}
525 	logstash_in->lgs_size = nbyte;
526 	logstash_increment(&logstash_in);
527 
528 	rw_exit(&logstash_rwlock);
529 	return (0);
530 }
531 
/*
 * Free the oldest stashed message.  If messages were dropped while the
 * ring was full, immediately queue a drop-count notice into the slot
 * just freed, so the notice appears in sequence where the drops
 * happened.
 */
532 void
533 logstash_remove(void)
534 {
535 	rw_assert_wrlock(&logstash_rwlock);
536 
537 	KASSERT(logstash_out->lgs_buffer != NULL);
538 	free(logstash_out->lgs_buffer, M_LOG, logstash_out->lgs_size);
539 	logstash_out->lgs_buffer = NULL;
540 	logstash_increment(&logstash_out);
541 
542 	/* Insert dropped message in sequence where messages were dropped. */
543 	if (logstash_dropped) {
544 		size_t l, nbyte;
545 		char buf[80];
546 
547 		l = snprintf(buf, sizeof(buf),
548 		    "<%d>sendsyslog: dropped %d message%s, error %d, pid %d",
549 		    LOG_KERN|LOG_WARNING, logstash_dropped,
550 		    logstash_dropped == 1 ? "" : "s",
551 		    logstash_error, logstash_pid);
552 		logstash_dropped = 0;
553 		logstash_error = 0;
554 		logstash_pid = 0;
555 
556 		/* Cannot fail, we have just freed a slot. */
557 		KASSERT(!logstash_full());
558 		nbyte = ulmin(l, sizeof(buf) - 1);
559 		logstash_in->lgs_buffer = malloc(nbyte, M_LOG, M_WAITOK);
560 		memcpy(logstash_in->lgs_buffer, buf, nbyte);
561 		logstash_in->lgs_size = nbyte;
562 		logstash_increment(&logstash_in);
563 	}
564 }
565 
/*
 * Flush all stashed messages to syslogd(8) in order, stopping at the
 * first delivery error; undelivered messages remain stashed for a
 * later retry.
 */
566 int
567 logstash_sendsyslog(struct proc *p)
568 {
569 	int error;
570 
571 	rw_enter_write(&logstash_rwlock);
572 
573 	while (logstash_out->lgs_buffer != NULL) {
574 		error = dosendsyslog(p, logstash_out->lgs_buffer,
575 		    logstash_out->lgs_size, 0, UIO_SYSSPACE);
576 		if (error) {
577 			rw_exit(&logstash_rwlock);
578 			return (error);
579 		}
580 		logstash_remove();
581 	}
582 
583 	rw_exit(&logstash_rwlock);
584 	return (0);
585 }
586 
587 /*
588  * Send syslog(3) message from userland to socketpair(2) created by syslogd(8).
589  * Store message in kernel log stash for later if syslogd(8) is not available
590  * or sending fails.  Send to console if LOG_CONS is set and syslogd(8) socket
591  * does not exist.
592  */
593 
/*
 * sendsyslog(2) system call: clamp the message length to LOG_MAXLINE,
 * flush any stashed backlog first (best effort, error ignored), then
 * deliver the new message.  On failure other than EFAULT the message
 * is stashed for later delivery.
 */
594 int
595 sys_sendsyslog(struct proc *p, void *v, register_t *retval)
596 {
597 	struct sys_sendsyslog_args /* {
598 		syscallarg(const char *) buf;
599 		syscallarg(size_t) nbyte;
600 		syscallarg(int) flags;
601 	} */ *uap = v;
602 	size_t nbyte;
603 	int error;
604 
605 	nbyte = SCARG(uap, nbyte);
606 	if (nbyte > LOG_MAXLINE)
607 		nbyte = LOG_MAXLINE;
608 
609 	logstash_sendsyslog(p);
610 	error = dosendsyslog(p, SCARG(uap, buf), nbyte, SCARG(uap, flags),
611 	    UIO_USERSPACE);
612 	if (error && error != EFAULT)
613 		logstash_insert(SCARG(uap, buf), nbyte, error, p->p_p->ps_pid);
614 	return (error);
615 }
616 
/*
 * Deliver one log message: to the registered syslogd(8) socket if any,
 * otherwise to the console -- but only when the caller set LOG_CONS
 * (ENOTCONN otherwise).  'sflg' selects whether 'buf' is a user or
 * kernel address.  For console output the leading "<prio>" syslog
 * priority prefix is stripped first.
 */
617 int
618 dosendsyslog(struct proc *p, const char *buf, size_t nbyte, int flags,
619     enum uio_seg sflg)
620 {
621 #ifdef KTRACE
622 	struct iovec ktriov;
623 #endif
624 	struct file *fp;
625 	char pri[6], *kbuf;
626 	struct iovec aiov;
627 	struct uio auio;
628 	size_t i, len;
629 	int error;
630 
631 	/* Global variable syslogf may change during sleep, use local copy. */
632 	rw_enter_read(&syslogf_rwlock);
633 	fp = syslogf;
634 	if (fp)
635 		FREF(fp);
636 	rw_exit(&syslogf_rwlock);
637 
638 	if (fp == NULL) {
639 		if (!ISSET(flags, LOG_CONS))
640 			return (ENOTCONN);
641 		/*
642 		 * Strip off syslog priority when logging to console.
643 		 * LOG_PRIMASK | LOG_FACMASK is 0x03ff, so at most 4
644 		 * decimal digits may appear in priority as <1023>.
645 		 */
646 		len = MIN(nbyte, sizeof(pri));
647 		if (sflg == UIO_USERSPACE) {
648 			if ((error = copyin(buf, pri, len)))
649 				return (error);
650 		} else
651 			memcpy(pri, buf, len);
652 		if (0 < len && pri[0] == '<') {
653 			for (i = 1; i < len; i++) {
654 				if (pri[i] < '0' || pri[i] > '9')
655 					break;
656 			}
657 			if (i < len && pri[i] == '>') {
658 				i++;
659 				/* There must be at least one digit <0>. */
660 				if (i >= 3) {
661 					buf += i;
662 					nbyte -= i;
663 				}
664 			}
665 		}
666 	}
667 
668 	aiov.iov_base = (char *)buf;
669 	aiov.iov_len = nbyte;
670 	auio.uio_iov = &aiov;
671 	auio.uio_iovcnt = 1;
672 	auio.uio_segflg = sflg;
673 	auio.uio_rw = UIO_WRITE;
674 	auio.uio_procp = p;
675 	auio.uio_offset = 0;
676 	auio.uio_resid = aiov.iov_len;
677 #ifdef KTRACE
678 	if (sflg == UIO_USERSPACE && KTRPOINT(p, KTR_GENIO))
679 		ktriov = aiov;
680 	else
681 		ktriov.iov_len = 0;
682 #endif
683 
684 	len = auio.uio_resid;
685 	if (fp) {
		/* Local 'flags' intentionally shadows the LOG_* argument,
		 * which is no longer needed at this point. */
686 		int flags = (fp->f_flag & FNONBLOCK) ? MSG_DONTWAIT : 0;
687 		error = sosend(fp->f_data, NULL, &auio, NULL, NULL, flags);
688 		if (error == 0)
689 			len -= auio.uio_resid;
690 	} else {
691 		KERNEL_LOCK();
692 		if (constty || cn_devvp) {
693 			error = cnwrite(0, &auio, 0);
694 			if (error == 0)
695 				len -= auio.uio_resid;
696 			aiov.iov_base = "\r\n";
697 			aiov.iov_len = 2;
698 			auio.uio_iov = &aiov;
699 			auio.uio_iovcnt = 1;
700 			auio.uio_segflg = UIO_SYSSPACE;
701 			auio.uio_rw = UIO_WRITE;
702 			auio.uio_procp = p;
703 			auio.uio_offset = 0;
704 			auio.uio_resid = aiov.iov_len;
705 			cnwrite(0, &auio, 0);
706 		} else {
707 			/* XXX console redirection breaks down... */
708 			if (sflg == UIO_USERSPACE) {
709 				kbuf = malloc(len, M_TEMP, M_WAITOK);
710 				error = copyin(aiov.iov_base, kbuf, len);
711 			} else {
712 				kbuf = aiov.iov_base;
713 				error = 0;
714 			}
715 			if (error == 0)
716 				for (i = 0; i < len; i++) {
717 					if (kbuf[i] == '\0')
718 						break;
719 					cnputc(kbuf[i]);
720 					auio.uio_resid--;
721 				}
722 			if (sflg == UIO_USERSPACE)
723 				free(kbuf, M_TEMP, len);
724 			if (error == 0)
725 				len -= auio.uio_resid;
726 			cnputc('\n');
727 		}
728 		KERNEL_UNLOCK();
729 	}
730 
731 #ifdef KTRACE
732 	if (error == 0 && ktriov.iov_len != 0)
733 		ktrgenio(p, -1, UIO_WRITE, &ktriov, len);
734 #endif
735 	if (fp)
736 		FRELE(fp, p);
	/* Console fallback still reports ENOTCONN (except on copyin fault)
	 * so sys_sendsyslog() stashes the message for syslogd(8). */
737 	else if (error != EFAULT)
738 		error = ENOTCONN;
739 	return (error);
740 }
741