xref: /openbsd-src/sys/kern/uipc_socket.c (revision de8cc8edbc71bd3e3bc7fbffa27ba0e564c37d8b)
1 /*	$OpenBSD: uipc_socket.c,v 1.256 2021/02/24 13:19:48 bluhm Exp $	*/
2 /*	$NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1988, 1990, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
33  */
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/proc.h>
38 #include <sys/file.h>
39 #include <sys/filedesc.h>
40 #include <sys/malloc.h>
41 #include <sys/mbuf.h>
42 #include <sys/domain.h>
43 #include <sys/kernel.h>
44 #include <sys/event.h>
45 #include <sys/protosw.h>
46 #include <sys/socket.h>
47 #include <sys/unpcb.h>
48 #include <sys/socketvar.h>
49 #include <sys/signalvar.h>
50 #include <net/if.h>
51 #include <sys/pool.h>
52 #include <sys/atomic.h>
53 #include <sys/rwlock.h>
54 #include <sys/time.h>
55 
56 #ifdef DDB
57 #include <machine/db_machdep.h>
58 #endif
59 
60 void	sbsync(struct sockbuf *, struct mbuf *);
61 
62 int	sosplice(struct socket *, int, off_t, struct timeval *);
63 void	sounsplice(struct socket *, struct socket *, int);
64 void	soidle(void *);
65 void	sotask(void *);
66 void	soreaper(void *);
67 void	soput(void *);
68 int	somove(struct socket *, int);
69 void	sorflush(struct socket *);
70 
71 void	filt_sordetach(struct knote *kn);
72 int	filt_soread(struct knote *kn, long hint);
73 void	filt_sowdetach(struct knote *kn);
74 int	filt_sowrite(struct knote *kn, long hint);
75 int	filt_solisten(struct knote *kn, long hint);
76 
77 const struct filterops solisten_filtops = {
78 	.f_flags	= FILTEROP_ISFD,
79 	.f_attach	= NULL,
80 	.f_detach	= filt_sordetach,
81 	.f_event	= filt_solisten,
82 };
83 
84 const struct filterops soread_filtops = {
85 	.f_flags	= FILTEROP_ISFD,
86 	.f_attach	= NULL,
87 	.f_detach	= filt_sordetach,
88 	.f_event	= filt_soread,
89 };
90 
91 const struct filterops sowrite_filtops = {
92 	.f_flags	= FILTEROP_ISFD,
93 	.f_attach	= NULL,
94 	.f_detach	= filt_sowdetach,
95 	.f_event	= filt_sowrite,
96 };
97 
98 const struct filterops soexcept_filtops = {
99 	.f_flags	= FILTEROP_ISFD,
100 	.f_attach	= NULL,
101 	.f_detach	= filt_sordetach,
102 	.f_event	= filt_soread,
103 };
104 
105 #ifndef SOMINCONN
106 #define SOMINCONN 80
107 #endif /* SOMINCONN */
108 
109 int	somaxconn = SOMAXCONN;
110 int	sominconn = SOMINCONN;
111 
112 struct pool socket_pool;
113 #ifdef SOCKET_SPLICE
114 struct pool sosplice_pool;
115 struct taskq *sosplice_taskq;
116 struct rwlock sosplice_lock = RWLOCK_INITIALIZER("sosplicelk");
117 #endif
118 
119 void
120 soinit(void)
121 {
122 	pool_init(&socket_pool, sizeof(struct socket), 0, IPL_SOFTNET, 0,
123 	    "sockpl", NULL);
124 #ifdef SOCKET_SPLICE
125 	pool_init(&sosplice_pool, sizeof(struct sosplice), 0, IPL_SOFTNET, 0,
126 	    "sosppl", NULL);
127 #endif
128 }
129 
130 /*
131  * Socket operation routines.
132  * These routines are called by the routines in
133  * sys_socket.c or from a system process, and
134  * implement the semantics of socket operations by
135  * switching out to the protocol specific routines.
136  */
137 int
138 socreate(int dom, struct socket **aso, int type, int proto)
139 {
140 	struct proc *p = curproc;		/* XXX */
141 	const struct protosw *prp;
142 	struct socket *so;
143 	int error, s;
144 
145 	if (proto)
146 		prp = pffindproto(dom, proto, type);
147 	else
148 		prp = pffindtype(dom, type);
149 	if (prp == NULL || prp->pr_attach == NULL)
150 		return (EPROTONOSUPPORT);
151 	if (prp->pr_type != type)
152 		return (EPROTOTYPE);
153 	so = pool_get(&socket_pool, PR_WAITOK | PR_ZERO);
154 	sigio_init(&so->so_sigio);
155 	TAILQ_INIT(&so->so_q0);
156 	TAILQ_INIT(&so->so_q);
157 	so->so_type = type;
158 	if (suser(p) == 0)
159 		so->so_state = SS_PRIV;
160 	so->so_ruid = p->p_ucred->cr_ruid;
161 	so->so_euid = p->p_ucred->cr_uid;
162 	so->so_rgid = p->p_ucred->cr_rgid;
163 	so->so_egid = p->p_ucred->cr_gid;
164 	so->so_cpid = p->p_p->ps_pid;
165 	so->so_proto = prp;
166 	so->so_snd.sb_timeo_nsecs = INFSLP;
167 	so->so_rcv.sb_timeo_nsecs = INFSLP;
168 
169 	s = solock(so);
170 	error = (*prp->pr_attach)(so, proto);
171 	if (error) {
172 		so->so_state |= SS_NOFDREF;
173 		/* sofree() calls sounlock(). */
174 		sofree(so, s);
175 		return (error);
176 	}
177 	sounlock(so, s);
178 	*aso = so;
179 	return (0);
180 }
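/*
 * Illustrative sketch (assumed in-kernel caller, not part of this file):
 * how a consumer might use socreate().  On failure the socket has already
 * been freed via sofree() above, so only the error needs handling.
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, 0);
 *	if (error)
 *		return (error);
 *	... use so, then release it with soclose() ...
 */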
181 
182 int
183 sobind(struct socket *so, struct mbuf *nam, struct proc *p)
184 {
185 	int error;
186 
187 	soassertlocked(so);
188 
189 	error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p);
190 	return (error);
191 }
192 
193 int
194 solisten(struct socket *so, int backlog)
195 {
196 	int error;
197 
198 	soassertlocked(so);
199 
200 	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING))
201 		return (EINVAL);
202 #ifdef SOCKET_SPLICE
203 	if (isspliced(so) || issplicedback(so))
204 		return (EOPNOTSUPP);
205 #endif /* SOCKET_SPLICE */
206 	error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, NULL, NULL,
207 	    curproc);
208 	if (error)
209 		return (error);
210 	if (TAILQ_FIRST(&so->so_q) == NULL)
211 		so->so_options |= SO_ACCEPTCONN;
212 	if (backlog < 0 || backlog > somaxconn)
213 		backlog = somaxconn;
214 	if (backlog < sominconn)
215 		backlog = sominconn;
216 	so->so_qlimit = backlog;
217 	return (0);
218 }
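/*
 * Example of the backlog clamping above, assuming the compiled-in
 * defaults SOMAXCONN (128) and SOMINCONN (80): listen(s, -5) and
 * listen(s, 1000) both yield so_qlimit = 128, while listen(s, 10)
 * is raised to so_qlimit = 80.  Both limits are tunable.
 */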
219 
220 #define SOSP_FREEING_READ	1
221 #define SOSP_FREEING_WRITE	2
222 void
223 sofree(struct socket *so, int s)
224 {
225 	soassertlocked(so);
226 
227 	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) {
228 		sounlock(so, s);
229 		return;
230 	}
231 	if (so->so_head) {
232 		/*
233 		 * We must not decommission a socket that's on the accept(2)
234 		 * queue.  If we do, then accept(2) may hang after select(2)
235 		 * indicated that the listening socket was ready.
236 		 */
237 		if (!soqremque(so, 0)) {
238 			sounlock(so, s);
239 			return;
240 		}
241 	}
242 	sigio_free(&so->so_sigio);
243 #ifdef SOCKET_SPLICE
244 	if (so->so_sp) {
245 		if (issplicedback(so)) {
246 			int freeing = SOSP_FREEING_WRITE;
247 
248 			if (so->so_sp->ssp_soback == so)
249 				freeing |= SOSP_FREEING_READ;
250 			sounsplice(so->so_sp->ssp_soback, so, freeing);
251 		}
252 		if (isspliced(so)) {
253 			int freeing = SOSP_FREEING_READ;
254 
255 			if (so == so->so_sp->ssp_socket)
256 				freeing |= SOSP_FREEING_WRITE;
257 			sounsplice(so, so->so_sp->ssp_socket, freeing);
258 		}
259 	}
260 #endif /* SOCKET_SPLICE */
261 	sbrelease(so, &so->so_snd);
262 	sorflush(so);
263 	sounlock(so, s);
264 #ifdef SOCKET_SPLICE
265 	if (so->so_sp) {
266 		/* Reuse splice idle, sounsplice() has been called before. */
267 		timeout_set_proc(&so->so_sp->ssp_idleto, soreaper, so);
268 		timeout_add(&so->so_sp->ssp_idleto, 0);
269 	} else
270 #endif /* SOCKET_SPLICE */
271 	{
272 		pool_put(&socket_pool, so);
273 	}
274 }
275 
276 static inline uint64_t
277 solinger_nsec(struct socket *so)
278 {
279 	if (so->so_linger == 0)
280 		return INFSLP;
281 
282 	return SEC_TO_NSEC(so->so_linger);
283 }
284 
285 /*
286  * Close a socket on last file table reference removal.
287  * Initiate disconnect if connected.
288  * Free socket when disconnect complete.
289  */
290 int
291 soclose(struct socket *so, int flags)
292 {
293 	struct socket *so2;
294 	int s, error = 0;
295 
296 	s = solock(so);
297 	/* Revoke async IO early. There is a final revocation in sofree(). */
298 	sigio_free(&so->so_sigio);
299 	if (so->so_options & SO_ACCEPTCONN) {
300 		while ((so2 = TAILQ_FIRST(&so->so_q0)) != NULL) {
301 			(void) soqremque(so2, 0);
302 			(void) soabort(so2);
303 		}
304 		while ((so2 = TAILQ_FIRST(&so->so_q)) != NULL) {
305 			(void) soqremque(so2, 1);
306 			(void) soabort(so2);
307 		}
308 	}
309 	if (so->so_pcb == NULL)
310 		goto discard;
311 	if (so->so_state & SS_ISCONNECTED) {
312 		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
313 			error = sodisconnect(so);
314 			if (error)
315 				goto drop;
316 		}
317 		if (so->so_options & SO_LINGER) {
318 			if ((so->so_state & SS_ISDISCONNECTING) &&
319 			    (flags & MSG_DONTWAIT))
320 				goto drop;
321 			while (so->so_state & SS_ISCONNECTED) {
322 				error = sosleep_nsec(so, &so->so_timeo,
323 				    PSOCK | PCATCH, "netcls",
324 				    solinger_nsec(so));
325 				if (error)
326 					break;
327 			}
328 		}
329 	}
330 drop:
331 	if (so->so_pcb) {
332 		int error2;
333 		KASSERT(so->so_proto->pr_detach);
334 		error2 = (*so->so_proto->pr_detach)(so);
335 		if (error == 0)
336 			error = error2;
337 	}
338 discard:
339 	if (so->so_state & SS_NOFDREF)
340 		panic("soclose NOFDREF: so %p, so_type %d", so, so->so_type);
341 	so->so_state |= SS_NOFDREF;
342 	/* sofree() calls sounlock(). */
343 	sofree(so, s);
344 	return (error);
345 }
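/*
 * Userland sketch of the SO_LINGER path above (assumed caller, not part
 * of this file): with l_onoff set, close(2) sleeps in "netcls" for up to
 * l_linger seconds while the disconnect completes.
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 10 };
 *	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(s);		(blocks for up to 10 seconds)
 */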
346 
347 int
348 soabort(struct socket *so)
349 {
350 	soassertlocked(so);
351 
352 	return (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL, NULL, NULL,
353 	   curproc);
354 }
355 
356 int
357 soaccept(struct socket *so, struct mbuf *nam)
358 {
359 	int error = 0;
360 
361 	soassertlocked(so);
362 
363 	if ((so->so_state & SS_NOFDREF) == 0)
364 		panic("soaccept !NOFDREF: so %p, so_type %d", so, so->so_type);
365 	so->so_state &= ~SS_NOFDREF;
366 	if ((so->so_state & SS_ISDISCONNECTED) == 0 ||
367 	    (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0)
368 		error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, NULL,
369 		    nam, NULL, curproc);
370 	else
371 		error = ECONNABORTED;
372 	return (error);
373 }
374 
375 int
376 soconnect(struct socket *so, struct mbuf *nam)
377 {
378 	int error;
379 
380 	soassertlocked(so);
381 
382 	if (so->so_options & SO_ACCEPTCONN)
383 		return (EOPNOTSUPP);
384 	/*
385 	 * If protocol is connection-based, can only connect once.
386 	 * Otherwise, if connected, try to disconnect first.
387 	 * This allows user to disconnect by connecting to, e.g.,
388 	 * a null address.
389 	 */
390 	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
391 	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
392 	    (error = sodisconnect(so))))
393 		error = EISCONN;
394 	else
395 		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
396 		    NULL, nam, NULL, curproc);
397 	return (error);
398 }
399 
400 int
401 soconnect2(struct socket *so1, struct socket *so2)
402 {
403 	int s, error;
404 
405 	s = solock(so1);
406 	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, NULL,
407 	    (struct mbuf *)so2, NULL, curproc);
408 	sounlock(so1, s);
409 	return (error);
410 }
411 
412 int
413 sodisconnect(struct socket *so)
414 {
415 	int error;
416 
417 	soassertlocked(so);
418 
419 	if ((so->so_state & SS_ISCONNECTED) == 0)
420 		return (ENOTCONN);
421 	if (so->so_state & SS_ISDISCONNECTING)
422 		return (EALREADY);
423 	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, NULL, NULL,
424 	    NULL, curproc);
425 	return (error);
426 }
427 
428 int m_getuio(struct mbuf **, int, long, struct uio *);
429 
430 #define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
431 /*
432  * Send on a socket.
433  * If send must go all at once and message is larger than
434  * send buffering, then hard error.
435  * Lock against other senders.
436  * If must go all at once and not enough room now, then
437  * inform user that this would block and do nothing.
438  * Otherwise, if nonblocking, send as much as possible.
439  * The data to be sent is described by "uio" if nonzero,
440  * otherwise by the mbuf chain "top" (which must be null
441  * if uio is not).  Data provided in the mbuf chain must be small
442  * enough to send all at once.
443  *
444  * Returns nonzero on error, timeout or signal; callers
445  * must check for short counts if EINTR/ERESTART are returned.
446  * Data and control buffers are freed on return.
447  */
448 int
449 sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top,
450     struct mbuf *control, int flags)
451 {
452 	long space, clen = 0;
453 	size_t resid;
454 	int error, s;
455 	int atomic = sosendallatonce(so) || top;
456 
457 	if (uio)
458 		resid = uio->uio_resid;
459 	else
460 		resid = top->m_pkthdr.len;
461 	/* MSG_EOR on a SOCK_STREAM socket is invalid. */
462 	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
463 		m_freem(top);
464 		m_freem(control);
465 		return (EINVAL);
466 	}
467 	if (uio && uio->uio_procp)
468 		uio->uio_procp->p_ru.ru_msgsnd++;
469 	if (control) {
470 		/*
471 		 * In theory clen should be unsigned (since control->m_len is).
472 		 * However, space must be signed, as it might be less than 0
473 		 * if we over-committed, and we must use a signed comparison
474 		 * of space and clen.
475 		 */
476 		clen = control->m_len;
477 		/* reserve extra space for AF_UNIX's internalize */
478 		if (so->so_proto->pr_domain->dom_family == AF_UNIX &&
479 		    clen >= CMSG_ALIGN(sizeof(struct cmsghdr)) &&
480 		    mtod(control, struct cmsghdr *)->cmsg_type == SCM_RIGHTS)
481 			clen = CMSG_SPACE(
482 			    (clen - CMSG_ALIGN(sizeof(struct cmsghdr))) *
483 			    (sizeof(struct fdpass) / sizeof(int)));
484 	}
485 
486 #define	snderr(errno)	{ error = errno; goto release; }
487 
488 	s = solock(so);
489 restart:
490 	if ((error = sblock(so, &so->so_snd, SBLOCKWAIT(flags))) != 0)
491 		goto out;
492 	so->so_state |= SS_ISSENDING;
493 	do {
494 		if (so->so_state & SS_CANTSENDMORE)
495 			snderr(EPIPE);
496 		if (so->so_error) {
497 			error = so->so_error;
498 			so->so_error = 0;
499 			snderr(error);
500 		}
501 		if ((so->so_state & SS_ISCONNECTED) == 0) {
502 			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
503 				if (!(resid == 0 && clen != 0))
504 					snderr(ENOTCONN);
505 			} else if (addr == 0)
506 				snderr(EDESTADDRREQ);
507 		}
508 		space = sbspace(so, &so->so_snd);
509 		if (flags & MSG_OOB)
510 			space += 1024;
511 		if (so->so_proto->pr_domain->dom_family == AF_UNIX) {
512 			if (atomic && resid > so->so_snd.sb_hiwat)
513 				snderr(EMSGSIZE);
514 		} else {
515 			if (clen > so->so_snd.sb_hiwat ||
516 			    (atomic && resid > so->so_snd.sb_hiwat - clen))
517 				snderr(EMSGSIZE);
518 		}
519 		if (space < clen ||
520 		    (space - clen < resid &&
521 		    (atomic || space < so->so_snd.sb_lowat))) {
522 			if (flags & MSG_DONTWAIT)
523 				snderr(EWOULDBLOCK);
524 			sbunlock(so, &so->so_snd);
525 			error = sbwait(so, &so->so_snd);
526 			so->so_state &= ~SS_ISSENDING;
527 			if (error)
528 				goto out;
529 			goto restart;
530 		}
531 		space -= clen;
532 		do {
533 			if (uio == NULL) {
534 				/*
535 				 * Data is prepackaged in "top".
536 				 */
537 				resid = 0;
538 				if (flags & MSG_EOR)
539 					top->m_flags |= M_EOR;
540 			} else {
541 				sounlock(so, s);
542 				error = m_getuio(&top, atomic, space, uio);
543 				s = solock(so);
544 				if (error)
545 					goto release;
546 				space -= top->m_pkthdr.len;
547 				resid = uio->uio_resid;
548 				if (flags & MSG_EOR)
549 					top->m_flags |= M_EOR;
550 			}
551 			if (resid == 0)
552 				so->so_state &= ~SS_ISSENDING;
553 			if (top && so->so_options & SO_ZEROIZE)
554 				top->m_flags |= M_ZEROIZE;
555 			error = (*so->so_proto->pr_usrreq)(so,
556 			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
557 			    top, addr, control, curproc);
558 			clen = 0;
559 			control = NULL;
560 			top = NULL;
561 			if (error)
562 				goto release;
563 		} while (resid && space > 0);
564 	} while (resid);
565 
566 release:
567 	so->so_state &= ~SS_ISSENDING;
568 	sbunlock(so, &so->so_snd);
569 out:
570 	sounlock(so, s);
571 	m_freem(top);
572 	m_freem(control);
573 	return (error);
574 }
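/*
 * Illustrative sketches (assumed callers, not part of this file): a
 * stream write path hands only a uio to sosend(),
 *
 *	error = sosend(so, NULL, uio, NULL, NULL, flags);
 *
 * while a sendto(2)-style datagram caller also passes the destination
 * address and optional control data as mbufs:
 *
 *	error = sosend(so, addrmbuf, uio, NULL, controlmbuf, flags);
 */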
575 
576 int
577 m_getuio(struct mbuf **mp, int atomic, long space, struct uio *uio)
578 {
579 	struct mbuf *m, *top = NULL;
580 	struct mbuf **nextp = &top;
581 	u_long len, mlen;
582 	size_t resid = uio->uio_resid;
583 	int error;
584 
585 	do {
586 		if (top == NULL) {
587 			MGETHDR(m, M_WAIT, MT_DATA);
588 			mlen = MHLEN;
589 			m->m_pkthdr.len = 0;
590 			m->m_pkthdr.ph_ifidx = 0;
591 		} else {
592 			MGET(m, M_WAIT, MT_DATA);
593 			mlen = MLEN;
594 		}
595 		/* chain mbufs together */
596 		*nextp = m;
597 		nextp = &m->m_next;
598 
599 		resid = ulmin(resid, space);
600 		if (resid >= MINCLSIZE) {
601 			MCLGETL(m, M_NOWAIT, ulmin(resid, MAXMCLBYTES));
602 			if ((m->m_flags & M_EXT) == 0)
603 				MCLGETL(m, M_NOWAIT, MCLBYTES);
604 			if ((m->m_flags & M_EXT) == 0)
605 				goto nopages;
606 			mlen = m->m_ext.ext_size;
607 			len = ulmin(mlen, resid);
608 			/*
609 			 * For datagram protocols, leave room
610 			 * for protocol headers in first mbuf.
611 			 */
612 			if (atomic && m == top && len < mlen - max_hdr)
613 				m->m_data += max_hdr;
614 		} else {
615 nopages:
616 			len = ulmin(mlen, resid);
617 			/*
618 			 * For datagram protocols, leave room
619 			 * for protocol headers in first mbuf.
620 			 */
621 			if (atomic && m == top && len < mlen - max_hdr)
622 				m_align(m, len);
623 		}
624 
625 		error = uiomove(mtod(m, caddr_t), len, uio);
626 		if (error) {
627 			m_freem(top);
628 			return (error);
629 		}
630 
631 		/* adjust counters */
632 		resid = uio->uio_resid;
633 		space -= len;
634 		m->m_len = len;
635 		top->m_pkthdr.len += len;
636 
637 		/* Is there more space and more data? */
638 	} while (space > 0 && resid > 0);
639 
640 	*mp = top;
641 	return 0;
642 }
643 
644 /*
645  * Following replacement or removal of the first mbuf on the first
646  * mbuf chain of a socket buffer, push necessary state changes back
647  * into the socket buffer so that other consumers see the values
648  * consistently.  'nextrecord' is the callers locally stored value of
649  * the original value of sb->sb_mb->m_nextpkt which must be restored
650  * when the lead mbuf changes.  NOTE: 'nextrecord' may be NULL.
651  */
652 void
653 sbsync(struct sockbuf *sb, struct mbuf *nextrecord)
654 {
655 
656 	/*
657 	 * First, update for the new value of nextrecord.  If necessary,
658 	 * make it the first record.
659 	 */
660 	if (sb->sb_mb != NULL)
661 		sb->sb_mb->m_nextpkt = nextrecord;
662 	else
663 		sb->sb_mb = nextrecord;
664 
665 	/*
666 	 * Now update any dependent socket buffer fields to reflect
667 	 * the new state.  This is an inline of SB_EMPTY_FIXUP, with
668 	 * the addition of a second clause that takes care of the
669 	 * case where sb_mb has been updated, but remains the last
670 	 * record.
671 	 */
672 	if (sb->sb_mb == NULL) {
673 		sb->sb_mbtail = NULL;
674 		sb->sb_lastrecord = NULL;
675 	} else if (sb->sb_mb->m_nextpkt == NULL)
676 		sb->sb_lastrecord = sb->sb_mb;
677 }
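/*
 * Worked example: with two records R1 -> R2 in the buffer, a caller
 * caches nextrecord = R2 and then frees R1's lead mbuf.  If R1 still
 * has mbufs, sbsync() re-links the new lead mbuf to R2; if R1 is gone,
 * sb_mb becomes R2 and, R2 now being the last record, sb_lastrecord is
 * pointed at it.  Either way sb_mbtail and sb_lastrecord stay valid.
 */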
678 
679 /*
680  * Implement receive operations on a socket.
681  * We depend on the way that records are added to the sockbuf
682  * by sbappend*.  In particular, each record (mbufs linked through m_next)
683  * must begin with an address if the protocol so specifies,
684  * followed by an optional mbuf or mbufs containing ancillary data,
685  * and then zero or more mbufs of data.
686  * In order to avoid blocking the network stack for the entire time here,
687  * we release the solock() while doing the actual copy to user space.
688  * Although the sockbuf is locked, new data may still be appended,
689  * and thus we must maintain consistency of the sockbuf during that time.
690  *
691  * The caller may receive the data as a single mbuf chain by supplying
692  * an mbuf **mp0 for use in returning the chain.  The uio is then used
693  * only for the count in uio_resid.
694  */
695 int
696 soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
697     struct mbuf **mp0, struct mbuf **controlp, int *flagsp,
698     socklen_t controllen)
699 {
700 	struct mbuf *m, **mp;
701 	struct mbuf *cm;
702 	u_long len, offset, moff;
703 	int flags, error, s, type, uio_error = 0;
704 	const struct protosw *pr = so->so_proto;
705 	struct mbuf *nextrecord;
706 	size_t resid, orig_resid = uio->uio_resid;
707 
708 	mp = mp0;
709 	if (paddr)
710 		*paddr = NULL;
711 	if (controlp)
712 		*controlp = NULL;
713 	if (flagsp)
714 		flags = *flagsp &~ MSG_EOR;
715 	else
716 		flags = 0;
717 	if (flags & MSG_OOB) {
718 		m = m_get(M_WAIT, MT_DATA);
719 		s = solock(so);
720 		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
721 		    (struct mbuf *)(long)(flags & MSG_PEEK), NULL, curproc);
722 		sounlock(so, s);
723 		if (error)
724 			goto bad;
725 		do {
726 			error = uiomove(mtod(m, caddr_t),
727 			    ulmin(uio->uio_resid, m->m_len), uio);
728 			m = m_free(m);
729 		} while (uio->uio_resid && error == 0 && m);
730 bad:
731 		m_freem(m);
732 		return (error);
733 	}
734 	if (mp)
735 		*mp = NULL;
736 
737 	s = solock(so);
738 restart:
739 	if ((error = sblock(so, &so->so_rcv, SBLOCKWAIT(flags))) != 0) {
740 		sounlock(so, s);
741 		return (error);
742 	}
743 
744 	m = so->so_rcv.sb_mb;
745 #ifdef SOCKET_SPLICE
746 	if (isspliced(so))
747 		m = NULL;
748 #endif /* SOCKET_SPLICE */
749 	/*
750 	 * If we have less data than requested, block awaiting more
751 	 * (subject to any timeout) if:
752 	 *   1. the current count is less than the low water mark,
753 	 *   2. MSG_WAITALL is set, and it is possible to do the entire
754 	 *	receive operation at once if we block (resid <= hiwat), or
755 	 *   3. MSG_DONTWAIT is not set.
756 	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
757 	 * we have to do the receive in sections, and thus risk returning
758 	 * a short count if a timeout or signal occurs after we start.
759 	 */
760 	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
761 	    so->so_rcv.sb_cc < uio->uio_resid) &&
762 	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
763 	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
764 	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
765 #ifdef DIAGNOSTIC
766 		if (m == NULL && so->so_rcv.sb_cc)
767 #ifdef SOCKET_SPLICE
768 		    if (!isspliced(so))
769 #endif /* SOCKET_SPLICE */
770 			panic("receive 1: so %p, so_type %d, sb_cc %lu",
771 			    so, so->so_type, so->so_rcv.sb_cc);
772 #endif
773 		if (so->so_error) {
774 			if (m)
775 				goto dontblock;
776 			error = so->so_error;
777 			if ((flags & MSG_PEEK) == 0)
778 				so->so_error = 0;
779 			goto release;
780 		}
781 		if (so->so_state & SS_CANTRCVMORE) {
782 			if (m)
783 				goto dontblock;
784 			else if (so->so_rcv.sb_cc == 0)
785 				goto release;
786 		}
787 		for (; m; m = m->m_next)
788 			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
789 				m = so->so_rcv.sb_mb;
790 				goto dontblock;
791 			}
792 		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
793 		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
794 			error = ENOTCONN;
795 			goto release;
796 		}
797 		if (uio->uio_resid == 0 && controlp == NULL)
798 			goto release;
799 		if (flags & MSG_DONTWAIT) {
800 			error = EWOULDBLOCK;
801 			goto release;
802 		}
803 		SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
804 		SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
805 		sbunlock(so, &so->so_rcv);
806 		error = sbwait(so, &so->so_rcv);
807 		if (error) {
808 			sounlock(so, s);
809 			return (error);
810 		}
811 		goto restart;
812 	}
813 dontblock:
814 	/*
815 	 * On entry here, m points to the first record of the socket buffer.
816 	 * From this point onward, we maintain 'nextrecord' as a cache of the
817 	 * pointer to the next record in the socket buffer.  We must keep the
818 	 * various socket buffer pointers and local stack versions of the
819 	 * pointers in sync, pushing out modifications before operations that
820 	 * may sleep, and re-reading them afterwards.
821 	 *
822 	 * Otherwise, we will race with the network stack appending new data
823 	 * or records onto the socket buffer by using inconsistent/stale
824 	 * versions of the field, possibly resulting in socket buffer
825 	 * corruption.
826 	 */
827 	if (uio->uio_procp)
828 		uio->uio_procp->p_ru.ru_msgrcv++;
829 	KASSERT(m == so->so_rcv.sb_mb);
830 	SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
831 	SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
832 	nextrecord = m->m_nextpkt;
833 	if (pr->pr_flags & PR_ADDR) {
834 #ifdef DIAGNOSTIC
835 		if (m->m_type != MT_SONAME)
836 			panic("receive 1a: so %p, so_type %d, m %p, m_type %d",
837 			    so, so->so_type, m, m->m_type);
838 #endif
839 		orig_resid = 0;
840 		if (flags & MSG_PEEK) {
841 			if (paddr)
842 				*paddr = m_copym(m, 0, m->m_len, M_NOWAIT);
843 			m = m->m_next;
844 		} else {
845 			sbfree(&so->so_rcv, m);
846 			if (paddr) {
847 				*paddr = m;
848 				so->so_rcv.sb_mb = m->m_next;
849 				m->m_next = 0;
850 				m = so->so_rcv.sb_mb;
851 			} else {
852 				so->so_rcv.sb_mb = m_free(m);
853 				m = so->so_rcv.sb_mb;
854 			}
855 			sbsync(&so->so_rcv, nextrecord);
856 		}
857 	}
858 	while (m && m->m_type == MT_CONTROL && error == 0) {
859 		int skip = 0;
860 		if (flags & MSG_PEEK) {
861 			if (mtod(m, struct cmsghdr *)->cmsg_type ==
862 			    SCM_RIGHTS) {
863 				/* don't leak internalized SCM_RIGHTS msgs */
864 				skip = 1;
865 			} else if (controlp)
866 				*controlp = m_copym(m, 0, m->m_len, M_NOWAIT);
867 			m = m->m_next;
868 		} else {
869 			sbfree(&so->so_rcv, m);
870 			so->so_rcv.sb_mb = m->m_next;
871 			m->m_nextpkt = m->m_next = NULL;
872 			cm = m;
873 			m = so->so_rcv.sb_mb;
874 			sbsync(&so->so_rcv, nextrecord);
875 			if (controlp) {
876 				if (pr->pr_domain->dom_externalize) {
877 					error =
878 					    (*pr->pr_domain->dom_externalize)
879 					    (cm, controllen, flags);
880 				}
881 				*controlp = cm;
882 			} else {
883 				/*
884 				 * Dispose of any SCM_RIGHTS message that went
885 				 * through the read path rather than recv.
886 				 */
887 				if (pr->pr_domain->dom_dispose)
888 					pr->pr_domain->dom_dispose(cm);
889 				m_free(cm);
890 			}
891 		}
892 		if (m != NULL)
893 			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
894 		else
895 			nextrecord = so->so_rcv.sb_mb;
896 		if (controlp && !skip)
897 			controlp = &(*controlp)->m_next;
898 		orig_resid = 0;
899 	}
900 
901 	/* If m is non-NULL, we have some data to read. */
902 	if (m) {
903 		type = m->m_type;
904 		if (type == MT_OOBDATA)
905 			flags |= MSG_OOB;
906 		if (m->m_flags & M_BCAST)
907 			flags |= MSG_BCAST;
908 		if (m->m_flags & M_MCAST)
909 			flags |= MSG_MCAST;
910 	}
911 	SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
912 	SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");
913 
914 	moff = 0;
915 	offset = 0;
916 	while (m && uio->uio_resid > 0 && error == 0) {
917 		if (m->m_type == MT_OOBDATA) {
918 			if (type != MT_OOBDATA)
919 				break;
920 		} else if (type == MT_OOBDATA) {
921 			break;
922 		} else if (m->m_type == MT_CONTROL) {
923 			/*
924 			 * If there is more than one control message in the
925 			 * stream, we do a short read.  The next one can be
926 			 * received or disposed of by another system call.
927 			 */
928 			break;
929 #ifdef DIAGNOSTIC
930 		} else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) {
931 			panic("receive 3: so %p, so_type %d, m %p, m_type %d",
932 			    so, so->so_type, m, m->m_type);
933 #endif
934 		}
935 		so->so_state &= ~SS_RCVATMARK;
936 		len = uio->uio_resid;
937 		if (so->so_oobmark && len > so->so_oobmark - offset)
938 			len = so->so_oobmark - offset;
939 		if (len > m->m_len - moff)
940 			len = m->m_len - moff;
941 		/*
942 		 * If mp is set, just pass back the mbufs.
943 		 * Otherwise copy them out via the uio, then free.
944 		 * The sockbuf must be consistent here (sb_mb points to the
945 		 * current mbuf, which points to the next record) when we
946 		 * release the lock; we must note any additions to the
947 		 * sockbuf when we reacquire it.
948 		 */
949 		if (mp == NULL && uio_error == 0) {
950 			SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
951 			SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
952 			resid = uio->uio_resid;
953 			sounlock(so, s);
954 			uio_error = uiomove(mtod(m, caddr_t) + moff, len, uio);
955 			s = solock(so);
956 			if (uio_error)
957 				uio->uio_resid = resid - len;
958 		} else
959 			uio->uio_resid -= len;
960 		if (len == m->m_len - moff) {
961 			if (m->m_flags & M_EOR)
962 				flags |= MSG_EOR;
963 			if (flags & MSG_PEEK) {
964 				m = m->m_next;
965 				moff = 0;
966 				orig_resid = 0;
967 			} else {
968 				nextrecord = m->m_nextpkt;
969 				sbfree(&so->so_rcv, m);
970 				if (mp) {
971 					*mp = m;
972 					mp = &m->m_next;
973 					so->so_rcv.sb_mb = m = m->m_next;
974 					*mp = NULL;
975 				} else {
976 					so->so_rcv.sb_mb = m_free(m);
977 					m = so->so_rcv.sb_mb;
978 				}
979 				/*
980 				 * If m != NULL, we also know that
981 				 * so->so_rcv.sb_mb != NULL.
982 				 */
983 				KASSERT(so->so_rcv.sb_mb == m);
984 				if (m) {
985 					m->m_nextpkt = nextrecord;
986 					if (nextrecord == NULL)
987 						so->so_rcv.sb_lastrecord = m;
988 				} else {
989 					so->so_rcv.sb_mb = nextrecord;
990 					SB_EMPTY_FIXUP(&so->so_rcv);
991 				}
992 				SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
993 				SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
994 			}
995 		} else {
996 			if (flags & MSG_PEEK) {
997 				moff += len;
998 				orig_resid = 0;
999 			} else {
1000 				if (mp)
1001 					*mp = m_copym(m, 0, len, M_WAIT);
1002 				m->m_data += len;
1003 				m->m_len -= len;
1004 				so->so_rcv.sb_cc -= len;
1005 				so->so_rcv.sb_datacc -= len;
1006 			}
1007 		}
1008 		if (so->so_oobmark) {
1009 			if ((flags & MSG_PEEK) == 0) {
1010 				so->so_oobmark -= len;
1011 				if (so->so_oobmark == 0) {
1012 					so->so_state |= SS_RCVATMARK;
1013 					break;
1014 				}
1015 			} else {
1016 				offset += len;
1017 				if (offset == so->so_oobmark)
1018 					break;
1019 			}
1020 		}
1021 		if (flags & MSG_EOR)
1022 			break;
1023 		/*
1024 		 * If the MSG_WAITALL flag is set (for a non-atomic socket),
1025 		 * we must not quit until "uio->uio_resid == 0" or an error
1026 		 * terminates the receive.  If a signal/timeout occurs, return
1027 		 * with a short count but without error.
1028 		 * Keep sockbuf locked against other readers.
1029 		 */
1030 		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
1031 		    !sosendallatonce(so) && !nextrecord) {
1032 			if (so->so_error || so->so_state & SS_CANTRCVMORE)
1033 				break;
1034 			SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
1035 			SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
1036 			error = sbwait(so, &so->so_rcv);
1037 			if (error) {
1038 				sbunlock(so, &so->so_rcv);
1039 				sounlock(so, s);
1040 				return (0);
1041 			}
1042 			if ((m = so->so_rcv.sb_mb) != NULL)
1043 				nextrecord = m->m_nextpkt;
1044 		}
1045 	}
1046 
1047 	if (m && pr->pr_flags & PR_ATOMIC) {
1048 		flags |= MSG_TRUNC;
1049 		if ((flags & MSG_PEEK) == 0)
1050 			(void) sbdroprecord(&so->so_rcv);
1051 	}
1052 	if ((flags & MSG_PEEK) == 0) {
1053 		if (m == NULL) {
1054 			/*
1055 			 * First part is an inline SB_EMPTY_FIXUP().  Second
1056 			 * part makes sure sb_lastrecord is up-to-date if
1057 			 * there is still data in the socket buffer.
1058 			 */
1059 			so->so_rcv.sb_mb = nextrecord;
1060 			if (so->so_rcv.sb_mb == NULL) {
1061 				so->so_rcv.sb_mbtail = NULL;
1062 				so->so_rcv.sb_lastrecord = NULL;
1063 			} else if (nextrecord->m_nextpkt == NULL)
1064 				so->so_rcv.sb_lastrecord = nextrecord;
1065 		}
1066 		SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
1067 		SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
1068 		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
1069 			(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
1070 			    (struct mbuf *)(long)flags, NULL, curproc);
1071 	}
1072 	if (orig_resid == uio->uio_resid && orig_resid &&
1073 	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
1074 		sbunlock(so, &so->so_rcv);
1075 		goto restart;
1076 	}
1077 
1078 	if (uio_error)
1079 		error = uio_error;
1080 
1081 	if (flagsp)
1082 		*flagsp |= flags;
1083 release:
1084 	sbunlock(so, &so->so_rcv);
1085 	sounlock(so, s);
1086 	return (error);
1087 }
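/*
 * Illustrative sketches (assumed callers, not part of this file): the
 * plain read(2) path consumes data through the uio only,
 *
 *	error = soreceive(so, NULL, uio, NULL, NULL, NULL, 0);
 *
 * while a recvmsg(2)-style caller also collects the source address and
 * control mbufs and passes its flags in and out:
 *
 *	error = soreceive(so, &paddr, uio, NULL, &control, &flags,
 *	    controllen);
 */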
1088 
1089 int
1090 soshutdown(struct socket *so, int how)
1091 {
1092 	const struct protosw *pr = so->so_proto;
1093 	int s, error = 0;
1094 
1095 	s = solock(so);
1096 	switch (how) {
1097 	case SHUT_RD:
1098 		sorflush(so);
1099 		break;
1100 	case SHUT_RDWR:
1101 		sorflush(so);
1102 		/* FALLTHROUGH */
1103 	case SHUT_WR:
1104 		error = (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL, NULL, NULL,
1105 		    curproc);
1106 		break;
1107 	default:
1108 		error = EINVAL;
1109 		break;
1110 	}
1111 	sounlock(so, s);
1112 
1113 	return (error);
1114 }
1115 
1116 void
1117 sorflush(struct socket *so)
1118 {
1119 	struct sockbuf *sb = &so->so_rcv;
1120 	struct mbuf *m;
1121 	const struct protosw *pr = so->so_proto;
1122 	int error;
1123 
1124 	sb->sb_flags |= SB_NOINTR;
1125 	error = sblock(so, sb, M_WAITOK);
1126 	/* with SB_NOINTR and M_WAITOK sblock() must not fail */
1127 	KASSERT(error == 0);
1128 	socantrcvmore(so);
1129 	sbunlock(so, sb);
1130 	m = sb->sb_mb;
1131 	memset(&sb->sb_startzero, 0,
1132 	     (caddr_t)&sb->sb_endzero - (caddr_t)&sb->sb_startzero);
1133 	sb->sb_timeo_nsecs = INFSLP;
1134 	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
1135 		(*pr->pr_domain->dom_dispose)(m);
1136 	m_purge(m);
1137 }
1138 
1139 #ifdef SOCKET_SPLICE
1140 
1141 #define so_splicelen	so_sp->ssp_len
1142 #define so_splicemax	so_sp->ssp_max
1143 #define so_idletv	so_sp->ssp_idletv
1144 #define so_idleto	so_sp->ssp_idleto
1145 #define so_splicetask	so_sp->ssp_task
1146 
1147 int
1148 sosplice(struct socket *so, int fd, off_t max, struct timeval *tv)
1149 {
1150 	struct file	*fp;
1151 	struct socket	*sosp;
1152 	struct sosplice	*sp;
1153 	struct taskq	*tq;
1154 	int		 error = 0;
1155 
1156 	soassertlocked(so);
1157 
1158 	if (sosplice_taskq == NULL) {
1159 		rw_enter_write(&sosplice_lock);
1160 		if (sosplice_taskq == NULL) {
1161 			tq = taskq_create("sosplice", 1, IPL_SOFTNET,
1162 			    TASKQ_MPSAFE);
1163 			/* Ensure the taskq is fully visible to other CPUs. */
1164 			membar_producer();
1165 			sosplice_taskq = tq;
1166 		}
1167 		rw_exit_write(&sosplice_lock);
1168 	}
1169 	if (sosplice_taskq == NULL)
1170 		return (ENOMEM);
1171 
1172 	if ((so->so_proto->pr_flags & PR_SPLICE) == 0)
1173 		return (EPROTONOSUPPORT);
1174 	if (so->so_options & SO_ACCEPTCONN)
1175 		return (EOPNOTSUPP);
1176 	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1177 	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
1178 		return (ENOTCONN);
1179 	if (so->so_sp == NULL) {
1180 		sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
1181 		if (so->so_sp == NULL)
1182 			so->so_sp = sp;
1183 		else
1184 			pool_put(&sosplice_pool, sp);
1185 	}
1186 
1187 	/* If no fd is given, unsplice by removing existing link. */
1188 	if (fd < 0) {
1189 		/* Lock receive buffer. */
1190 		if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) {
1191 			return (error);
1192 		}
1193 		if (so->so_sp->ssp_socket)
1194 			sounsplice(so, so->so_sp->ssp_socket, 0);
1195 		sbunlock(so, &so->so_rcv);
1196 		return (0);
1197 	}
1198 
1199 	if (max && max < 0)
1200 		return (EINVAL);
1201 
1202 	if (tv && (tv->tv_sec < 0 || !timerisvalid(tv)))
1203 		return (EINVAL);
1204 
1205 	/* Find sosp, the drain socket into which data will be spliced. */
1206 	if ((error = getsock(curproc, fd, &fp)) != 0)
1207 		return (error);
1208 	sosp = fp->f_data;
1209 	if (sosp->so_proto->pr_usrreq != so->so_proto->pr_usrreq) {
1210 		error = EPROTONOSUPPORT;
1211 		goto frele;
1212 	}
1213 	if (sosp->so_sp == NULL) {
1214 		sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
1215 		if (sosp->so_sp == NULL)
1216 			sosp->so_sp = sp;
1217 		else
1218 			pool_put(&sosplice_pool, sp);
1219 	}
1220 
1221 	/* Lock both receive and send buffer. */
1222 	if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) {
1223 		goto frele;
1224 	}
1225 	if ((error = sblock(so, &sosp->so_snd, M_WAITOK)) != 0) {
1226 		sbunlock(so, &so->so_rcv);
1227 		goto frele;
1228 	}
1229 
1230 	if (so->so_sp->ssp_socket || sosp->so_sp->ssp_soback) {
1231 		error = EBUSY;
1232 		goto release;
1233 	}
1234 	if (sosp->so_options & SO_ACCEPTCONN) {
1235 		error = EOPNOTSUPP;
1236 		goto release;
1237 	}
1238 	if ((sosp->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0) {
1239 		error = ENOTCONN;
1240 		goto release;
1241 	}
1242 
1243 	/* Splice so and sosp together. */
1244 	so->so_sp->ssp_socket = sosp;
1245 	sosp->so_sp->ssp_soback = so;
1246 	so->so_splicelen = 0;
1247 	so->so_splicemax = max;
1248 	if (tv)
1249 		so->so_idletv = *tv;
1250 	else
1251 		timerclear(&so->so_idletv);
1252 	timeout_set_proc(&so->so_idleto, soidle, so);
1253 	task_set(&so->so_splicetask, sotask, so);
1254 
1255 	/*
1256 	 * To prevent the softnet interrupt from calling somove() while
1257 	 * we sleep, the socket buffers are not marked as spliced yet.
1258 	 */
1259 	if (somove(so, M_WAIT)) {
1260 		so->so_rcv.sb_flags |= SB_SPLICE;
1261 		sosp->so_snd.sb_flags |= SB_SPLICE;
1262 	}
1263 
1264  release:
1265 	sbunlock(sosp, &sosp->so_snd);
1266 	sbunlock(so, &so->so_rcv);
1267  frele:
1268 	/*
1269 	 * FRELE() must not be called with the socket lock held. It is safe to
1270 	 * release the lock here as long as no other operations happen on the
1271 	 * socket when sosplice() returns. The dance could be avoided by
1272 	 * grabbing the socket lock inside this function.
1273 	 */
1274 	sounlock(so, SL_LOCKED);
1275 	FRELE(fp, curproc);
1276 	solock(so);
1277 	return (error);
1278 }
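/*
 * Userland sketch of the interface implemented above: splice everything
 * arriving on descriptor "from" into descriptor "to", moving at most
 * 1MB and giving up after 5 seconds of idle time.  Passing an int of -1
 * instead dissolves an existing splice.
 *
 *	struct splice sp = {
 *		.sp_fd = to,
 *		.sp_max = 1024 * 1024,
 *		.sp_idle = { .tv_sec = 5, .tv_usec = 0 },
 *	};
 *	if (setsockopt(from, SOL_SOCKET, SO_SPLICE, &sp, sizeof(sp)) == -1)
 *		err(1, "SO_SPLICE");
 */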
1279 
1280 void
1281 sounsplice(struct socket *so, struct socket *sosp, int freeing)
1282 {
1283 	soassertlocked(so);
1284 
1285 	task_del(sosplice_taskq, &so->so_splicetask);
1286 	timeout_del(&so->so_idleto);
1287 	sosp->so_snd.sb_flags &= ~SB_SPLICE;
1288 	so->so_rcv.sb_flags &= ~SB_SPLICE;
1289 	so->so_sp->ssp_socket = sosp->so_sp->ssp_soback = NULL;
1290 	/* Do not wakeup a socket that is about to be freed. */
1291 	if ((freeing & SOSP_FREEING_READ) == 0 && soreadable(so))
1292 		sorwakeup(so);
1293 	if ((freeing & SOSP_FREEING_WRITE) == 0 && sowriteable(sosp))
1294 		sowwakeup(sosp);
1295 }
1296 
1297 void
1298 soidle(void *arg)
1299 {
1300 	struct socket *so = arg;
1301 	int s;
1302 
1303 	s = solock(so);
1304 	if (so->so_rcv.sb_flags & SB_SPLICE) {
1305 		so->so_error = ETIMEDOUT;
1306 		sounsplice(so, so->so_sp->ssp_socket, 0);
1307 	}
1308 	sounlock(so, s);
1309 }
1310 
1311 void
1312 sotask(void *arg)
1313 {
1314 	struct socket *so = arg;
1315 	int s;
1316 
1317 	s = solock(so);
1318 	if (so->so_rcv.sb_flags & SB_SPLICE) {
1319 		/*
1320 		 * We may not sleep here as sofree() and unsplice() may be
1321 		 * called from softnet interrupt context.  This would remove
1322 		 * the socket during somove().
1323 		 */
1324 		somove(so, M_DONTWAIT);
1325 	}
1326 	sounlock(so, s);
1327 
1328 	/* Avoid user land starvation. */
1329 	yield();
1330 }
1331 
1332 /*
1333  * The socket splicing task or idle timeout may sleep while grabbing the net
1334  * lock.  As sofree() can be called anytime, sotask() or soidle() could access
1335  * the socket memory of a freed socket after wakeup.  So delay the pool_put()
1336  * until all pending socket splicing tasks or timeouts have finished.  Do this
1337  * by scheduling it on the same threads.
1338  */
1339 void
1340 soreaper(void *arg)
1341 {
1342 	struct socket *so = arg;
1343 
1344 	/* Reuse splice task, sounsplice() has been called before. */
1345 	task_set(&so->so_sp->ssp_task, soput, so);
1346 	task_add(sosplice_taskq, &so->so_sp->ssp_task);
1347 }
1348 
1349 void
1350 soput(void *arg)
1351 {
1352 	struct socket *so = arg;
1353 
1354 	pool_put(&sosplice_pool, so->so_sp);
1355 	pool_put(&socket_pool, so);
1356 }
1357 
1358 /*
1359  * Move data from receive buffer of spliced source socket to send
1360  * buffer of drain socket.  Try to move as much as possible in one
1361  * big chunk.  It is a TCP-only implementation.
1362  * A return value of 0 means splicing has finished, 1 that it should continue.
1363  */
1364 int
1365 somove(struct socket *so, int wait)
1366 {
1367 	struct socket	*sosp = so->so_sp->ssp_socket;
1368 	struct mbuf	*m, **mp, *nextrecord;
1369 	u_long		 len, off, oobmark;
1370 	long		 space;
1371 	int		 error = 0, maxreached = 0;
1372 	unsigned int	 state;
1373 
1374 	soassertlocked(so);
1375 
1376  nextpkt:
1377 	if (so->so_error) {
1378 		error = so->so_error;
1379 		goto release;
1380 	}
1381 	if (sosp->so_state & SS_CANTSENDMORE) {
1382 		error = EPIPE;
1383 		goto release;
1384 	}
1385 	if (sosp->so_error && sosp->so_error != ETIMEDOUT &&
1386 	    sosp->so_error != EFBIG && sosp->so_error != ELOOP) {
1387 		error = sosp->so_error;
1388 		goto release;
1389 	}
1390 	if ((sosp->so_state & SS_ISCONNECTED) == 0)
1391 		goto release;
1392 
1393 	/* Calculate how many bytes can be copied now. */
1394 	len = so->so_rcv.sb_datacc;
1395 	if (so->so_splicemax) {
1396 		KASSERT(so->so_splicelen < so->so_splicemax);
1397 		if (so->so_splicemax <= so->so_splicelen + len) {
1398 			len = so->so_splicemax - so->so_splicelen;
1399 			maxreached = 1;
1400 		}
1401 	}
1402 	space = sbspace(sosp, &sosp->so_snd);
1403 	if (so->so_oobmark && so->so_oobmark < len &&
1404 	    so->so_oobmark < space + 1024)
1405 		space += 1024;
1406 	if (space <= 0) {
1407 		maxreached = 0;
1408 		goto release;
1409 	}
1410 	if (space < len) {
1411 		maxreached = 0;
1412 		if (space < sosp->so_snd.sb_lowat)
1413 			goto release;
1414 		len = space;
1415 	}
1416 	sosp->so_state |= SS_ISSENDING;
1417 
1418 	SBLASTRECORDCHK(&so->so_rcv, "somove 1");
1419 	SBLASTMBUFCHK(&so->so_rcv, "somove 1");
1420 	m = so->so_rcv.sb_mb;
1421 	if (m == NULL)
1422 		goto release;
1423 	nextrecord = m->m_nextpkt;
1424 
1425 	/* Drop address and control information not used with splicing. */
1426 	if (so->so_proto->pr_flags & PR_ADDR) {
1427 #ifdef DIAGNOSTIC
1428 		if (m->m_type != MT_SONAME)
1429 			panic("somove soname: so %p, so_type %d, m %p, "
1430 			    "m_type %d", so, so->so_type, m, m->m_type);
1431 #endif
1432 		m = m->m_next;
1433 	}
1434 	while (m && m->m_type == MT_CONTROL)
1435 		m = m->m_next;
1436 	if (m == NULL) {
1437 		sbdroprecord(&so->so_rcv);
1438 		if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb)
1439 			(so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL,
1440 			    NULL, NULL, NULL);
1441 		goto nextpkt;
1442 	}
1443 
1444 	/*
1445 	 * By splicing sockets connected to localhost, userland might create a
1446 	 * loop.  Dissolve the splicing with an error if the counter detects a loop.
1447 	 *
1448 	 * If we deal with a looped broadcast/multicast packet we bail out with
1449 	 * no error to suppress splice termination.
1450 	 */
1451 	if ((m->m_flags & M_PKTHDR) &&
1452 	    ((m->m_pkthdr.ph_loopcnt++ >= M_MAXLOOP) ||
1453 	    ((m->m_flags & M_LOOP) && (m->m_flags & (M_BCAST|M_MCAST))))) {
1454 		error = ELOOP;
1455 		goto release;
1456 	}
1457 
1458 	if (so->so_proto->pr_flags & PR_ATOMIC) {
1459 		if ((m->m_flags & M_PKTHDR) == 0)
1460 			panic("somove !PKTHDR: so %p, so_type %d, m %p, "
1461 			    "m_type %d", so, so->so_type, m, m->m_type);
1462 		if (sosp->so_snd.sb_hiwat < m->m_pkthdr.len) {
1463 			error = EMSGSIZE;
1464 			goto release;
1465 		}
1466 		if (len < m->m_pkthdr.len)
1467 			goto release;
1468 		if (m->m_pkthdr.len < len) {
1469 			maxreached = 0;
1470 			len = m->m_pkthdr.len;
1471 		}
1472 		/*
1473 		 * Throw away the name mbuf after it has been assured
1474 		 * that the whole first record can be processed.
1475 		 */
1476 		m = so->so_rcv.sb_mb;
1477 		sbfree(&so->so_rcv, m);
1478 		so->so_rcv.sb_mb = m_free(m);
1479 		sbsync(&so->so_rcv, nextrecord);
1480 	}
1481 	/*
1482 	 * Throw away the control mbufs after it has been assured
1483 	 * that the whole first record can be processed.
1484 	 */
1485 	m = so->so_rcv.sb_mb;
1486 	while (m && m->m_type == MT_CONTROL) {
1487 		sbfree(&so->so_rcv, m);
1488 		so->so_rcv.sb_mb = m_free(m);
1489 		m = so->so_rcv.sb_mb;
1490 		sbsync(&so->so_rcv, nextrecord);
1491 	}
1492 
1493 	SBLASTRECORDCHK(&so->so_rcv, "somove 2");
1494 	SBLASTMBUFCHK(&so->so_rcv, "somove 2");
1495 
1496 	/* Take at most len mbufs out of receive buffer. */
1497 	for (off = 0, mp = &m; off <= len && *mp;
1498 	    off += (*mp)->m_len, mp = &(*mp)->m_next) {
1499 		u_long size = len - off;
1500 
1501 #ifdef DIAGNOSTIC
1502 		if ((*mp)->m_type != MT_DATA && (*mp)->m_type != MT_HEADER)
1503 			panic("somove type: so %p, so_type %d, m %p, "
1504 			    "m_type %d", so, so->so_type, *mp, (*mp)->m_type);
1505 #endif
1506 		if ((*mp)->m_len > size) {
1507 			/*
1508 			 * Move only a partial mbuf at maximum splice length or
1509 			 * if the drain buffer is too small for this large mbuf.
1510 			 */
1511 			if (!maxreached && so->so_snd.sb_datacc > 0) {
1512 				len -= size;
1513 				break;
1514 			}
1515 			*mp = m_copym(so->so_rcv.sb_mb, 0, size, wait);
1516 			if (*mp == NULL) {
1517 				len -= size;
1518 				break;
1519 			}
1520 			so->so_rcv.sb_mb->m_data += size;
1521 			so->so_rcv.sb_mb->m_len -= size;
1522 			so->so_rcv.sb_cc -= size;
1523 			so->so_rcv.sb_datacc -= size;
1524 		} else {
1525 			*mp = so->so_rcv.sb_mb;
1526 			sbfree(&so->so_rcv, *mp);
1527 			so->so_rcv.sb_mb = (*mp)->m_next;
1528 			sbsync(&so->so_rcv, nextrecord);
1529 		}
1530 	}
1531 	*mp = NULL;
1532 
1533 	SBLASTRECORDCHK(&so->so_rcv, "somove 3");
1534 	SBLASTMBUFCHK(&so->so_rcv, "somove 3");
1535 	SBCHECK(&so->so_rcv);
1536 	if (m == NULL)
1537 		goto release;
1538 	m->m_nextpkt = NULL;
1539 	if (m->m_flags & M_PKTHDR) {
1540 		m_resethdr(m);
1541 		m->m_pkthdr.len = len;
1542 	}
1543 
1544 	/* Send window update to source peer as receive buffer has changed. */
1545 	if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb)
1546 		(so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL,
1547 		    NULL, NULL, NULL);
1548 
1549 	/* The receive buffer shrank by len bytes, adjust oob. */
1550 	state = so->so_state;
1551 	so->so_state &= ~SS_RCVATMARK;
1552 	oobmark = so->so_oobmark;
1553 	so->so_oobmark = oobmark > len ? oobmark - len : 0;
1554 	if (oobmark) {
1555 		if (oobmark == len)
1556 			so->so_state |= SS_RCVATMARK;
1557 		if (oobmark >= len)
1558 			oobmark = 0;
1559 	}
1560 
1561 	/*
1562 	 * Handle oob data.  If any malloc fails, ignore error.
1563 	 * TCP urgent data is not very reliable anyway.
1564 	 */
1565 	while (((state & SS_RCVATMARK) || oobmark) &&
1566 	    (so->so_options & SO_OOBINLINE)) {
1567 		struct mbuf *o = NULL;
1568 
1569 		if (state & SS_RCVATMARK) {
1570 			o = m_get(wait, MT_DATA);
1571 			state &= ~SS_RCVATMARK;
1572 		} else if (oobmark) {
1573 			o = m_split(m, oobmark, wait);
1574 			if (o) {
1575 				error = (*sosp->so_proto->pr_usrreq)(sosp,
1576 				    PRU_SEND, m, NULL, NULL, NULL);
1577 				if (error) {
1578 					if (sosp->so_state & SS_CANTSENDMORE)
1579 						error = EPIPE;
1580 					m_freem(o);
1581 					goto release;
1582 				}
1583 				len -= oobmark;
1584 				so->so_splicelen += oobmark;
1585 				m = o;
1586 				o = m_get(wait, MT_DATA);
1587 			}
1588 			oobmark = 0;
1589 		}
1590 		if (o) {
1591 			o->m_len = 1;
1592 			*mtod(o, caddr_t) = *mtod(m, caddr_t);
1593 			error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SENDOOB,
1594 			    o, NULL, NULL, NULL);
1595 			if (error) {
1596 				if (sosp->so_state & SS_CANTSENDMORE)
1597 					error = EPIPE;
1598 				m_freem(m);
1599 				goto release;
1600 			}
1601 			len -= 1;
1602 			so->so_splicelen += 1;
1603 			if (oobmark) {
1604 				oobmark -= 1;
1605 				if (oobmark == 0)
1606 					state |= SS_RCVATMARK;
1607 			}
1608 			m_adj(m, 1);
1609 		}
1610 	}
1611 
1612 	/* Append all remaining data to drain socket. */
1613 	if (so->so_rcv.sb_cc == 0 || maxreached)
1614 		sosp->so_state &= ~SS_ISSENDING;
1615 	error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SEND, m, NULL, NULL,
1616 	    NULL);
1617 	if (error) {
1618 		if (sosp->so_state & SS_CANTSENDMORE)
1619 			error = EPIPE;
1620 		goto release;
1621 	}
1622 	so->so_splicelen += len;
1623 
1624 	/* Move several packets if possible. */
1625 	if (!maxreached && nextrecord)
1626 		goto nextpkt;
1627 
1628  release:
1629 	sosp->so_state &= ~SS_ISSENDING;
1630 	if (!error && maxreached && so->so_splicemax == so->so_splicelen)
1631 		error = EFBIG;
1632 	if (error)
1633 		so->so_error = error;
1634 	if (((so->so_state & SS_CANTRCVMORE) && so->so_rcv.sb_cc == 0) ||
1635 	    (sosp->so_state & SS_CANTSENDMORE) || maxreached || error) {
1636 		sounsplice(so, sosp, 0);
1637 		return (0);
1638 	}
1639 	if (timerisset(&so->so_idletv))
1640 		timeout_add_tv(&so->so_idleto, &so->so_idletv);
1641 	return (1);
1642 }
1643 
1644 #endif /* SOCKET_SPLICE */
1645 
1646 void
1647 sorwakeup(struct socket *so)
1648 {
1649 	soassertlocked(so);
1650 
1651 #ifdef SOCKET_SPLICE
1652 	if (so->so_rcv.sb_flags & SB_SPLICE) {
1653 		/*
1654 		 * TCP has a send buffer that can handle multiple packets
1655 		 * at once.  So queue the stream a bit to accumulate data.
1656 		 * The sosplice thread will call somove() later and send
1657 		 * the packets, calling tcp_output() only once.
1658 		 * In the UDP case, send out the packets immediately.
1659 		 * Using a thread would make things slower.
1660 		 */
1661 		if (so->so_proto->pr_flags & PR_WANTRCVD)
1662 			task_add(sosplice_taskq, &so->so_splicetask);
1663 		else
1664 			somove(so, M_DONTWAIT);
1665 	}
1666 	if (isspliced(so))
1667 		return;
1668 #endif
1669 	sowakeup(so, &so->so_rcv);
1670 	if (so->so_upcall)
1671 		(*(so->so_upcall))(so, so->so_upcallarg, M_DONTWAIT);
1672 }
1673 
1674 void
1675 sowwakeup(struct socket *so)
1676 {
1677 	soassertlocked(so);
1678 
1679 #ifdef SOCKET_SPLICE
1680 	if (so->so_snd.sb_flags & SB_SPLICE)
1681 		task_add(sosplice_taskq, &so->so_sp->ssp_soback->so_splicetask);
1682 	if (issplicedback(so))
1683 		return;
1684 #endif
1685 	sowakeup(so, &so->so_snd);
1686 }
1687 
1688 int
1689 sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
1690 {
1691 	int error = 0;
1692 
1693 	soassertlocked(so);
1694 
1695 	if (level != SOL_SOCKET) {
1696 		if (so->so_proto->pr_ctloutput) {
1697 			error = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
1698 			    level, optname, m);
1699 			return (error);
1700 		}
1701 		error = ENOPROTOOPT;
1702 	} else {
1703 		switch (optname) {
1704 		case SO_BINDANY:
1705 			if ((error = suser(curproc)) != 0)	/* XXX */
1706 				return (error);
1707 			break;
1708 		}
1709 
1710 		switch (optname) {
1711 
1712 		case SO_LINGER:
1713 			if (m == NULL || m->m_len != sizeof (struct linger) ||
1714 			    mtod(m, struct linger *)->l_linger < 0 ||
1715 			    mtod(m, struct linger *)->l_linger > SHRT_MAX)
1716 				return (EINVAL);
1717 			so->so_linger = mtod(m, struct linger *)->l_linger;
1718 			/* FALLTHROUGH */
1719 
1720 		case SO_BINDANY:
1721 		case SO_DEBUG:
1722 		case SO_KEEPALIVE:
1723 		case SO_USELOOPBACK:
1724 		case SO_BROADCAST:
1725 		case SO_REUSEADDR:
1726 		case SO_REUSEPORT:
1727 		case SO_OOBINLINE:
1728 		case SO_TIMESTAMP:
1729 		case SO_ZEROIZE:
1730 			if (m == NULL || m->m_len < sizeof (int))
1731 				return (EINVAL);
1732 			if (*mtod(m, int *))
1733 				so->so_options |= optname;
1734 			else
1735 				so->so_options &= ~optname;
1736 			break;
1737 
1738 		case SO_DONTROUTE:
1739 			if (m == NULL || m->m_len < sizeof (int))
1740 				return (EINVAL);
1741 			if (*mtod(m, int *))
1742 				error = EOPNOTSUPP;
1743 			break;
1744 
1745 		case SO_SNDBUF:
1746 		case SO_RCVBUF:
1747 		case SO_SNDLOWAT:
1748 		case SO_RCVLOWAT:
1749 		    {
1750 			u_long cnt;
1751 
1752 			if (m == NULL || m->m_len < sizeof (int))
1753 				return (EINVAL);
1754 			cnt = *mtod(m, int *);
1755 			if ((long)cnt <= 0)
1756 				cnt = 1;
1757 			switch (optname) {
1758 
1759 			case SO_SNDBUF:
1760 				if (so->so_state & SS_CANTSENDMORE)
1761 					return (EINVAL);
1762 				if (sbcheckreserve(cnt, so->so_snd.sb_wat) ||
1763 				    sbreserve(so, &so->so_snd, cnt))
1764 					return (ENOBUFS);
1765 				so->so_snd.sb_wat = cnt;
1766 				break;
1767 
1768 			case SO_RCVBUF:
1769 				if (so->so_state & SS_CANTRCVMORE)
1770 					return (EINVAL);
1771 				if (sbcheckreserve(cnt, so->so_rcv.sb_wat) ||
1772 				    sbreserve(so, &so->so_rcv, cnt))
1773 					return (ENOBUFS);
1774 				so->so_rcv.sb_wat = cnt;
1775 				break;
1776 
1777 			case SO_SNDLOWAT:
1778 				so->so_snd.sb_lowat =
1779 				    (cnt > so->so_snd.sb_hiwat) ?
1780 				    so->so_snd.sb_hiwat : cnt;
1781 				break;
1782 			case SO_RCVLOWAT:
1783 				so->so_rcv.sb_lowat =
1784 				    (cnt > so->so_rcv.sb_hiwat) ?
1785 				    so->so_rcv.sb_hiwat : cnt;
1786 				break;
1787 			}
1788 			break;
1789 		    }
1790 
1791 		case SO_SNDTIMEO:
1792 		case SO_RCVTIMEO:
1793 		    {
1794 			struct timeval tv;
1795 			uint64_t nsecs;
1796 
1797 			if (m == NULL || m->m_len < sizeof (tv))
1798 				return (EINVAL);
1799 			memcpy(&tv, mtod(m, struct timeval *), sizeof tv);
1800 			if (!timerisvalid(&tv))
1801 				return (EINVAL);
1802 			nsecs = TIMEVAL_TO_NSEC(&tv);
1803 			if (nsecs == UINT64_MAX)
1804 				return (EDOM);
1805 			if (nsecs == 0)
1806 				nsecs = INFSLP;
1807 			switch (optname) {
1808 
1809 			case SO_SNDTIMEO:
1810 				so->so_snd.sb_timeo_nsecs = nsecs;
1811 				break;
1812 			case SO_RCVTIMEO:
1813 				so->so_rcv.sb_timeo_nsecs = nsecs;
1814 				break;
1815 			}
1816 			break;
1817 		    }
1818 
1819 		case SO_RTABLE:
1820 			if (so->so_proto->pr_domain &&
1821 			    so->so_proto->pr_domain->dom_protosw &&
1822 			    so->so_proto->pr_ctloutput) {
1823 				struct domain *dom = so->so_proto->pr_domain;
1824 
1825 				level = dom->dom_protosw->pr_protocol;
1826 				error = (*so->so_proto->pr_ctloutput)
1827 				    (PRCO_SETOPT, so, level, optname, m);
1828 				return (error);
1829 			}
1830 			error = ENOPROTOOPT;
1831 			break;
1832 
1833 #ifdef SOCKET_SPLICE
1834 		case SO_SPLICE:
1835 			if (m == NULL) {
1836 				error = sosplice(so, -1, 0, NULL);
1837 			} else if (m->m_len < sizeof(int)) {
1838 				return (EINVAL);
1839 			} else if (m->m_len < sizeof(struct splice)) {
1840 				error = sosplice(so, *mtod(m, int *), 0, NULL);
1841 			} else {
1842 				error = sosplice(so,
1843 				    mtod(m, struct splice *)->sp_fd,
1844 				    mtod(m, struct splice *)->sp_max,
1845 				   &mtod(m, struct splice *)->sp_idle);
1846 			}
1847 			break;
1848 #endif /* SOCKET_SPLICE */
1849 
1850 		default:
1851 			error = ENOPROTOOPT;
1852 			break;
1853 		}
1854 		if (error == 0 && so->so_proto->pr_ctloutput) {
1855 			(*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
1856 			    level, optname, m);
1857 		}
1858 	}
1859 
1860 	return (error);
1861 }
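/*
 * Userland sketch for the SO_SNDTIMEO/SO_RCVTIMEO handling above: the
 * timeval is converted to nanoseconds, a zero timeout meaning "wait
 * forever" (INFSLP).
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *	if (setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) == -1)
 *		err(1, "SO_RCVTIMEO");
 */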
1862 
1863 int
1864 sogetopt(struct socket *so, int level, int optname, struct mbuf *m)
1865 {
1866 	int error = 0;
1867 
1868 	soassertlocked(so);
1869 
1870 	if (level != SOL_SOCKET) {
1871 		if (so->so_proto->pr_ctloutput) {
1872 			m->m_len = 0;
1873 
1874 			error = (*so->so_proto->pr_ctloutput)(PRCO_GETOPT, so,
1875 			    level, optname, m);
1876 			if (error)
1877 				return (error);
1878 			return (0);
1879 		} else
1880 			return (ENOPROTOOPT);
1881 	} else {
1882 		m->m_len = sizeof (int);
1883 
1884 		switch (optname) {
1885 
1886 		case SO_LINGER:
1887 			m->m_len = sizeof (struct linger);
1888 			mtod(m, struct linger *)->l_onoff =
1889 				so->so_options & SO_LINGER;
1890 			mtod(m, struct linger *)->l_linger = so->so_linger;
1891 			break;
1892 
1893 		case SO_BINDANY:
1894 		case SO_USELOOPBACK:
1895 		case SO_DEBUG:
1896 		case SO_KEEPALIVE:
1897 		case SO_REUSEADDR:
1898 		case SO_REUSEPORT:
1899 		case SO_BROADCAST:
1900 		case SO_OOBINLINE:
1901 		case SO_TIMESTAMP:
1902 		case SO_ZEROIZE:
1903 			*mtod(m, int *) = so->so_options & optname;
1904 			break;
1905 
1906 		case SO_DONTROUTE:
1907 			*mtod(m, int *) = 0;
1908 			break;
1909 
1910 		case SO_TYPE:
1911 			*mtod(m, int *) = so->so_type;
1912 			break;
1913 
1914 		case SO_ERROR:
1915 			*mtod(m, int *) = so->so_error;
1916 			so->so_error = 0;
1917 			break;
1918 
1919 		case SO_DOMAIN:
1920 			*mtod(m, int *) = so->so_proto->pr_domain->dom_family;
1921 			break;
1922 
1923 		case SO_PROTOCOL:
1924 			*mtod(m, int *) = so->so_proto->pr_protocol;
1925 			break;
1926 
1927 		case SO_SNDBUF:
1928 			*mtod(m, int *) = so->so_snd.sb_hiwat;
1929 			break;
1930 
1931 		case SO_RCVBUF:
1932 			*mtod(m, int *) = so->so_rcv.sb_hiwat;
1933 			break;
1934 
1935 		case SO_SNDLOWAT:
1936 			*mtod(m, int *) = so->so_snd.sb_lowat;
1937 			break;
1938 
1939 		case SO_RCVLOWAT:
1940 			*mtod(m, int *) = so->so_rcv.sb_lowat;
1941 			break;
1942 
1943 		case SO_SNDTIMEO:
1944 		case SO_RCVTIMEO:
1945 		    {
1946 			struct timeval tv;
1947 			uint64_t nsecs = (optname == SO_SNDTIMEO ?
1948 			    so->so_snd.sb_timeo_nsecs :
1949 			    so->so_rcv.sb_timeo_nsecs);
1950 
1951 			m->m_len = sizeof(struct timeval);
1952 			memset(&tv, 0, sizeof(tv));
1953 			if (nsecs != INFSLP)
1954 				NSEC_TO_TIMEVAL(nsecs, &tv);
1955 			memcpy(mtod(m, struct timeval *), &tv, sizeof tv);
1956 			break;
1957 		    }
1958 
1959 		case SO_RTABLE:
1960 			if (so->so_proto->pr_domain &&
1961 			    so->so_proto->pr_domain->dom_protosw &&
1962 			    so->so_proto->pr_ctloutput) {
1963 				struct domain *dom = so->so_proto->pr_domain;
1964 
1965 				level = dom->dom_protosw->pr_protocol;
1966 				error = (*so->so_proto->pr_ctloutput)
1967 				    (PRCO_GETOPT, so, level, optname, m);
1968 				if (error)
1969 					return (error);
1970 				break;
1971 			}
1972 			return (ENOPROTOOPT);
1973 
1974 #ifdef SOCKET_SPLICE
1975 		case SO_SPLICE:
1976 		    {
1977 			off_t len;
1978 
1979 			m->m_len = sizeof(off_t);
1980 			len = so->so_sp ? so->so_sp->ssp_len : 0;
1981 			memcpy(mtod(m, off_t *), &len, sizeof(off_t));
1982 			break;
1983 		    }
1984 #endif /* SOCKET_SPLICE */
1985 
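		/*
		 * SO_PEERCRED is only supported on AF_UNIX sockets and
		 * only once the peer's credentials have been recorded
		 * (UNP_FEIDS); until then ENOTCONN is returned.
		 */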
1986 		case SO_PEERCRED:
1987 			if (so->so_proto->pr_protocol == AF_UNIX) {
1988 				struct unpcb *unp = sotounpcb(so);
1989 
1990 				if (unp->unp_flags & UNP_FEIDS) {
1991 					m->m_len = sizeof(unp->unp_connid);
1992 					memcpy(mtod(m, caddr_t),
1993 					    &(unp->unp_connid), m->m_len);
1994 					break;
1995 				}
1996 				return (ENOTCONN);
1997 			}
1998 			return (EOPNOTSUPP);
1999 
2000 		default:
2001 			return (ENOPROTOOPT);
2002 		}
2003 		return (0);
2004 	}
2005 }
2006 
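/*
 * Called by the protocol when out-of-band data arrives: deliver
 * SIGURG to the socket's signal recipients and wake up any
 * select(2)/poll(2) waiters on the receive buffer.
 */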
2007 void
2008 sohasoutofband(struct socket *so)
2009 {
2010 	pgsigio(&so->so_sigio, SIGURG, 0);
2011 	selwakeup(&so->so_rcv.sb_sel);
2012 }
2013 
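/*
 * kqueue(2) attach hook for sockets: pick the filterops matching the
 * requested filter (listening sockets get solisten_filtops for reads)
 * and hook the knote into the corresponding sockbuf's klist.  An
 * illustrative userland registration (not part of this file):
 *
 *	struct kevent kev;
 *	EV_SET(&kev, sockfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */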
2014 int
2015 soo_kqfilter(struct file *fp, struct knote *kn)
2016 {
2017 	struct socket *so = kn->kn_fp->f_data;
2018 	struct sockbuf *sb;
2019 
2020 	KERNEL_ASSERT_LOCKED();
2021 
2022 	switch (kn->kn_filter) {
2023 	case EVFILT_READ:
2024 		if (so->so_options & SO_ACCEPTCONN)
2025 			kn->kn_fop = &solisten_filtops;
2026 		else
2027 			kn->kn_fop = &soread_filtops;
2028 		sb = &so->so_rcv;
2029 		break;
2030 	case EVFILT_WRITE:
2031 		kn->kn_fop = &sowrite_filtops;
2032 		sb = &so->so_snd;
2033 		break;
2034 	case EVFILT_EXCEPT:
2035 		kn->kn_fop = &soexcept_filtops;
2036 		sb = &so->so_rcv;
2037 		break;
2038 	default:
2039 		return (EINVAL);
2040 	}
2041 
2042 	klist_insert_locked(&sb->sb_sel.si_note, kn);
2043 
2044 	return (0);
2045 }
2046 
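/*
 * Knote detach hooks remove the knote from the owning sockbuf's
 * klist; the kernel lock serializes klist access here.
 */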
2047 void
2048 filt_sordetach(struct knote *kn)
2049 {
2050 	struct socket *so = kn->kn_fp->f_data;
2051 
2052 	KERNEL_ASSERT_LOCKED();
2053 
2054 	klist_remove_locked(&so->so_rcv.sb_sel.si_note, kn);
2055 }
2056 
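/*
 * Read filter.  When called from the socket layer the NOTE_SUBMIT
 * hint is set and the socket lock is already held; otherwise it is
 * taken here.  A spliced socket never reports readable, as its data
 * is consumed by the splice.  Readability otherwise means EOF, a
 * pending error, pending out-of-band data (NOTE_OOB), or at least
 * the low-water mark's worth of buffered data.
 */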
2057 int
2058 filt_soread(struct knote *kn, long hint)
2059 {
2060 	struct socket *so = kn->kn_fp->f_data;
2061 	int s, rv = 0;
2062 
2063 	if ((hint & NOTE_SUBMIT) == 0)
2064 		s = solock(so);
2065 	kn->kn_data = so->so_rcv.sb_cc;
2066 #ifdef SOCKET_SPLICE
2067 	if (isspliced(so)) {
2068 		rv = 0;
2069 	} else
2070 #endif /* SOCKET_SPLICE */
2071 	if (kn->kn_sfflags & NOTE_OOB) {
2072 		if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) {
2073 			kn->kn_fflags |= NOTE_OOB;
2074 			kn->kn_data -= so->so_oobmark;
2075 			rv = 1;
2076 		}
2077 	} else if (so->so_state & SS_CANTRCVMORE) {
2078 		kn->kn_flags |= EV_EOF;
2079 		if (kn->kn_flags & __EV_POLL) {
2080 			if (so->so_state & SS_ISDISCONNECTED)
2081 				kn->kn_flags |= __EV_HUP;
2082 		}
2083 		kn->kn_fflags = so->so_error;
2084 		rv = 1;
2085 	} else if (so->so_error) {	/* temporary udp error */
2086 		rv = 1;
2087 	} else if (kn->kn_sfflags & NOTE_LOWAT) {
2088 		rv = (kn->kn_data >= kn->kn_sdata);
2089 	} else {
2090 		rv = (kn->kn_data >= so->so_rcv.sb_lowat);
2091 	}
2092 	if ((hint & NOTE_SUBMIT) == 0)
2093 		sounlock(so, s);
2094 
2095 	return rv;
2096 }
2097 
2098 void
2099 filt_sowdetach(struct knote *kn)
2100 {
2101 	struct socket *so = kn->kn_fp->f_data;
2102 
2103 	KERNEL_ASSERT_LOCKED();
2104 
2105 	klist_remove_locked(&so->so_snd.sb_sel.si_note, kn);
2106 }
2107 
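/*
 * Write filter: the socket is writable once buffer space reaches the
 * low-water mark, at EOF, or on a pending error; a socket that
 * requires a connection reports unwritable until it is connected.
 */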
2108 int
2109 filt_sowrite(struct knote *kn, long hint)
2110 {
2111 	struct socket *so = kn->kn_fp->f_data;
2112 	int s, rv;
2113 
2114 	if ((hint & NOTE_SUBMIT) == 0)
2115 		s = solock(so);
2116 	kn->kn_data = sbspace(so, &so->so_snd);
2117 	if (so->so_state & SS_CANTSENDMORE) {
2118 		kn->kn_flags |= EV_EOF;
2119 		if (kn->kn_flags & __EV_POLL) {
2120 			if (so->so_state & SS_ISDISCONNECTED)
2121 				kn->kn_flags |= __EV_HUP;
2122 		}
2123 		kn->kn_fflags = so->so_error;
2124 		rv = 1;
2125 	} else if (so->so_error) {	/* temporary udp error */
2126 		rv = 1;
2127 	} else if (((so->so_state & SS_ISCONNECTED) == 0) &&
2128 	    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
2129 		rv = 0;
2130 	} else if (kn->kn_sfflags & NOTE_LOWAT) {
2131 		rv = (kn->kn_data >= kn->kn_sdata);
2132 	} else {
2133 		rv = (kn->kn_data >= so->so_snd.sb_lowat);
2134 	}
2135 	if ((hint & NOTE_SUBMIT) == 0)
2136 		sounlock(so, s);
2137 
2138 	return (rv);
2139 }
2140 
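/*
 * Listen filter: a listening socket becomes readable when the
 * completed connection queue is non-empty, i.e. accept(2) will not
 * block.  kn_data reports the queue length.
 */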
2141 int
2142 filt_solisten(struct knote *kn, long hint)
2143 {
2144 	struct socket *so = kn->kn_fp->f_data;
2145 	int s;
2146 
2147 	if ((hint & NOTE_SUBMIT) == 0)
2148 		s = solock(so);
2149 	kn->kn_data = so->so_qlen;
2150 	if ((hint & NOTE_SUBMIT) == 0)
2151 		sounlock(so, s);
2152 
2153 	return (kn->kn_data != 0);
2154 }
2155 
2156 #ifdef DDB
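/*
 * ddb(4) helpers that dump the state of a sockbuf and a socket from
 * the kernel debugger.
 */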
2157 void
2158 sobuf_print(struct sockbuf *,
2159     int (*)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))));
2160 
2161 void
2162 sobuf_print(struct sockbuf *sb,
2163     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2164 {
2165 	(*pr)("\tsb_cc: %lu\n", sb->sb_cc);
2166 	(*pr)("\tsb_datacc: %lu\n", sb->sb_datacc);
2167 	(*pr)("\tsb_hiwat: %lu\n", sb->sb_hiwat);
2168 	(*pr)("\tsb_wat: %lu\n", sb->sb_wat);
2169 	(*pr)("\tsb_mbcnt: %lu\n", sb->sb_mbcnt);
2170 	(*pr)("\tsb_mbmax: %lu\n", sb->sb_mbmax);
2171 	(*pr)("\tsb_lowat: %ld\n", sb->sb_lowat);
2172 	(*pr)("\tsb_mb: %p\n", sb->sb_mb);
2173 	(*pr)("\tsb_mbtail: %p\n", sb->sb_mbtail);
2174 	(*pr)("\tsb_lastrecord: %p\n", sb->sb_lastrecord);
2175 	(*pr)("\tsb_sel: ...\n");
2176 	(*pr)("\tsb_flags: %i\n", sb->sb_flags);
2177 	(*pr)("\tsb_timeo_nsecs: %llu\n", sb->sb_timeo_nsecs);
2178 }
2179 
2180 void
2181 so_print(void *v,
2182     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
2183 {
2184 	struct socket *so = v;
2185 
2186 	(*pr)("socket %p\n", so);
2187 	(*pr)("so_type: %i\n", so->so_type);
2188 	(*pr)("so_options: 0x%04x\n", so->so_options); /* %b */
2189 	(*pr)("so_linger: %i\n", so->so_linger);
2190 	(*pr)("so_state: 0x%04x\n", so->so_state);
2191 	(*pr)("so_pcb: %p\n", so->so_pcb);
2192 	(*pr)("so_proto: %p\n", so->so_proto);
2193 	(*pr)("so_sigio: %p\n", so->so_sigio.sir_sigio);
2194 
2195 	(*pr)("so_head: %p\n", so->so_head);
2196 	(*pr)("so_onq: %p\n", so->so_onq);
2197 	(*pr)("so_q0: @%p first: %p\n", &so->so_q0, TAILQ_FIRST(&so->so_q0));
2198 	(*pr)("so_q: @%p first: %p\n", &so->so_q, TAILQ_FIRST(&so->so_q));
2199 	(*pr)("so_eq: next: %p\n", TAILQ_NEXT(so, so_qe));
2200 	(*pr)("so_q0len: %i\n", so->so_q0len);
2201 	(*pr)("so_qlen: %i\n", so->so_qlen);
2202 	(*pr)("so_qlimit: %i\n", so->so_qlimit);
2203 	(*pr)("so_timeo: %i\n", so->so_timeo);
2204 	(*pr)("so_oobmark: %lu\n", so->so_oobmark);
2205 
2206 	(*pr)("so_sp: %p\n", so->so_sp);
2207 	if (so->so_sp != NULL) {
2208 		(*pr)("\tssp_socket: %p\n", so->so_sp->ssp_socket);
2209 		(*pr)("\tssp_soback: %p\n", so->so_sp->ssp_soback);
2210 		(*pr)("\tssp_len: %lld\n",
2211 		    (unsigned long long)so->so_sp->ssp_len);
2212 		(*pr)("\tssp_max: %lld\n",
2213 		    (unsigned long long)so->so_sp->ssp_max);
2214 		(*pr)("\tssp_idletv: %lld %ld\n", so->so_sp->ssp_idletv.tv_sec,
2215 		    so->so_sp->ssp_idletv.tv_usec);
2216 		(*pr)("\tssp_idleto: %spending (@%i)\n",
2217 		    timeout_pending(&so->so_sp->ssp_idleto) ? "" : "not ",
2218 		    so->so_sp->ssp_idleto.to_time);
2219 	}
2220 
2221 	(*pr)("so_rcv:\n");
2222 	sobuf_print(&so->so_rcv, pr);
2223 	(*pr)("so_snd:\n");
2224 	sobuf_print(&so->so_snd, pr);
2225 
2226 	(*pr)("so_upcall: %p so_upcallarg: %p\n",
2227 	    so->so_upcall, so->so_upcallarg);
2228 
2229 	(*pr)("so_euid: %d so_ruid: %d\n", so->so_euid, so->so_ruid);
2230 	(*pr)("so_egid: %d so_rgid: %d\n", so->so_egid, so->so_rgid);
2231 	(*pr)("so_cpid: %d\n", so->so_cpid);
2232 }
2233 #endif
2234