xref: /csrg-svn/sys/kern/uipc_socket.c (revision 31810)
/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)uipc_socket.c	7.3 (Berkeley) 07/10/87
 */

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "file.h"
#include "inode.h"
#include "buf.h"
#include "mbuf.h"
#include "un.h"
#include "domain.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"
#include "stat.h"
#include "ioctl.h"
#include "uio.h"
#include "../net/route.h"
#include "../netinet/in.h"
#include "../net/if.h"

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 *
 * TODO:
 *	test socketpair
 *	clean up async
 *	out-of-band is a kludge
 */
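/*
 * Illustrative sketch, not part of the original source: every routine
 * below reaches the protocol through the same five-argument pr_usrreq
 * call, varying only the request code and which arguments are used.
 * A hypothetical wrapper (the name soprotoreq is invented here) would
 * look like the following; it is kept under "notdef" so nothing in the
 * file changes.
 */
#ifdef notdef
soprotoreq(so, req, m, nam, rights)
	struct socket *so;
	int req;
	struct mbuf *m, *nam, *rights;
{

	return ((*so->so_proto->pr_usrreq)(so, req, m, nam, rights));
}
#endif /* notdef */
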
/*ARGSUSED*/
socreate(dom, aso, type, proto)
	struct socket **aso;
	register int type;
	int proto;
{
	register struct protosw *prp;
	register struct socket *so;
	register struct mbuf *m;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	m = m_getclr(M_WAIT, MT_SOCKET);
	so = mtod(m, struct socket *);
	so->so_options = 0;
	so->so_state = 0;
	so->so_type = type;
	if (u.u_uid == 0)
		so->so_state = SS_PRIV;
	so->so_proto = prp;
	error =
	    (*prp->pr_usrreq)(so, PRU_ATTACH,
		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

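/*
 * Bind a local name (address) to a socket; the work is done by the
 * protocol's PRU_BIND request, run at splnet.
 */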
sobind(so, nam)
	struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_BIND,
		(struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

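/*
 * Prepare a socket to accept connections: ask the protocol via
 * PRU_LISTEN, mark the socket SO_ACCEPTCONN, and clamp the backlog
 * to SOMAXCONN.
 */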
solisten(so, backlog)
	register struct socket *so;
	int backlog;
{
	int s = splnet(), error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
	if (error) {
		splx(s);
		return (error);
	}
	if (so->so_q == 0) {
		so->so_q = so;
		so->so_q0 = so;
		so->so_options |= SO_ACCEPTCONN;
	}
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = MIN(backlog, SOMAXCONN);
	splx(s);
	return (0);
}

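/*
 * Free a socket that has no protocol control block and no file
 * reference: remove it from any accept queue, release its buffers,
 * and return its mbuf.
 */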
sofree(so)
	register struct socket *so;
{

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_head) {
		if (!soqremque(so, 0) && !soqremque(so, 1))
			panic("sofree dq");
		so->so_head = 0;
	}
	sbrelease(&so->so_snd);
	sorflush(so);
	(void) m_free(dtom(so));
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	if (so->so_options & SO_ACCEPTCONN) {
		while (so->so_q0 != so)
			(void) soabort(so->so_q0);
		while (so->so_q != so)
			(void) soabort(so->so_q);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED)
				sleep((caddr_t)&so->so_timeo, PZERO+1);
		}
	}
drop:
	if (so->so_pcb) {
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splnet...
 */
soabort(so)
	struct socket *so;
{

	return (
	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

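/*
 * Accept a connection: clear SS_NOFDREF (the socket is gaining a file
 * reference) and let the protocol fill in the peer's name via
 * PRU_ACCEPT.
 */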
soaccept(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

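/*
 * Initiate a connection to the address in nam; refused on a listening
 * socket.  The in-line comment below explains the reconnect rules.
 */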
soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

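/*
 * Connect two sockets to each other without a name (used by
 * socketpair), via the first socket's PRU_CONNECT2 request.
 */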
soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
	splx(s);
	return (error);
}

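/*
 * Disconnect a connected socket via PRU_DISCONNECT; fails with
 * ENOTCONN if not connected and EALREADY if a disconnect is already
 * in progress.
 */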
sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
	splx(s);
	return (error);
}

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 */
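/*
 * Outline of the loop below (descriptive only): each pass locks the
 * send buffer, checks for errors and shutdown at splnet, and waits
 * with sbwait until the buffer has room (or fails with EWOULDBLOCK if
 * non-blocking).  It then copies user data into an mbuf chain with
 * uiomove, taking a cluster when at least CLBYTES/2 of data remain and
 * a full cluster of space is available, and hands the chain to the
 * protocol as one PRU_SEND (or PRU_SENDOOB) request.  Access rights
 * are passed only with the first request of the call.
 */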
sosend(so, nam, uio, flags, rights)
	register struct socket *so;
	struct mbuf *nam;
	register struct uio *uio;
	int flags;
	struct mbuf *rights;
{
	struct mbuf *top = 0;
	register struct mbuf *m, **mp;
	register int space;
	int len, rlen = 0, error = 0, s, dontroute, first = 1;

	if (sosendallatonce(so) && uio->uio_resid > so->so_snd.sb_hiwat)
		return (EMSGSIZE);
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	u.u_ru.ru_msgsnd++;
	if (rights)
		rlen = rights->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	sblock(&so->so_snd);
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;			/* ??? */
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				snderr(ENOTCONN);
			if (nam == 0)
				snderr(EDESTADDRREQ);
		}
		if (flags & MSG_OOB)
			space = 1024;
		else {
			space = sbspace(&so->so_snd);
			if (space <= rlen ||
			   (sosendallatonce(so) &&
				space < uio->uio_resid + rlen) ||
			   (uio->uio_resid >= CLBYTES && space < CLBYTES &&
			   so->so_snd.sb_cc >= CLBYTES &&
			   (so->so_state & SS_NBIO) == 0)) {
				if (so->so_state & SS_NBIO) {
					if (first)
						error = EWOULDBLOCK;
					splx(s);
					goto release;
				}
				sbunlock(&so->so_snd);
				sbwait(&so->so_snd);
				splx(s);
				goto restart;
			}
		}
		splx(s);
		mp = &top;
		space -= rlen;
		while (space > 0) {
			MGET(m, M_WAIT, MT_DATA);
			if (uio->uio_resid >= CLBYTES / 2 && space >= CLBYTES) {
				MCLGET(m);
				if (m->m_len != CLBYTES)
					goto nopages;
				len = MIN(CLBYTES, uio->uio_resid);
				space -= CLBYTES;
			} else {
nopages:
				len = MIN(MIN(MLEN, uio->uio_resid), space);
				space -= len;
			}
			error = uiomove(mtod(m, caddr_t), len, UIO_WRITE, uio);
			m->m_len = len;
			*mp = m;
			if (error)
				goto release;
			mp = &m->m_next;
			if (uio->uio_resid <= 0)
				break;
		}
		if (dontroute)
			so->so_options |= SO_DONTROUTE;
		s = splnet();					/* XXX */
		error = (*so->so_proto->pr_usrreq)(so,
		    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
		    top, (caddr_t)nam, rights);
		splx(s);
		if (dontroute)
			so->so_options &= ~SO_DONTROUTE;
		rights = 0;
		rlen = 0;
		top = 0;
		first = 0;
		if (error)
			break;
	} while (uio->uio_resid);

release:
	sbunlock(&so->so_snd);
	if (top)
		m_freem(top);
	if (error == EPIPE)
		psignal(u.u_procp, SIGPIPE);
	return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf containing access rights if supported
 * by the protocol, and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 */
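/*
 * Layout of one receive-buffer record as consumed below (m_next links
 * mbufs within a record, m_act links records):
 *
 *	so_rcv.sb_mb -> [MT_SONAME] -> [MT_RIGHTS] -> [MT_DATA] ...
 *	                     |
 *	                   m_act ------> first mbuf of next record
 *
 * The MT_SONAME mbuf is present only for PR_ADDR protocols, the
 * MT_RIGHTS mbuf only for protocols that support rights and only when
 * rights accompany the data, and there may be no data mbufs at all.
 */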
soreceive(so, aname, uio, flags, rightsp)
	register struct socket *so;
	struct mbuf **aname;
	register struct uio *uio;
	int flags;
	struct mbuf **rightsp;
{
	register struct mbuf *m;
	register int len, error = 0, s, tomark;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff;

	if (rightsp)
		*rightsp = 0;
	if (aname)
		*aname = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
		if (error)
			goto bad;
		do {
			len = uio->uio_resid;
			if (len > m->m_len)
				len = m->m_len;
			error =
			    uiomove(mtod(m, caddr_t), (int)len, UIO_READ, uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}

restart:
	sblock(&so->so_rcv);
	s = splnet();

#define	rcverr(errno)	{ error = errno; splx(s); goto release; }
	if (so->so_rcv.sb_cc == 0) {
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED))
			rcverr(ENOTCONN);
		if (uio->uio_resid == 0)
			goto release;
		if (so->so_state & SS_NBIO)
			rcverr(EWOULDBLOCK);
		sbunlock(&so->so_rcv);
		sbwait(&so->so_rcv);
		splx(s);
		goto restart;
	}
	u.u_ru.ru_msgrcv++;
	m = so->so_rcv.sb_mb;
	if (m == 0)
		panic("receive 1");
	nextrecord = m->m_act;
	if (pr->pr_flags & PR_ADDR) {
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
		if (flags & MSG_PEEK) {
			if (aname)
				*aname = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (aname) {
				*aname = m;
				m = m->m_next;
				(*aname)->m_next = 0;
				so->so_rcv.sb_mb = m;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
			if (m)
				m->m_act = nextrecord;
		}
	}
	if (m && m->m_type == MT_RIGHTS) {
		if ((pr->pr_flags & PR_RIGHTS) == 0)
			panic("receive 2");
		if (flags & MSG_PEEK) {
			if (rightsp)
				*rightsp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (rightsp) {
				*rightsp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
			if (m)
				m->m_act = nextrecord;
		}
	}
	moff = 0;
	tomark = so->so_oobmark;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
		len = uio->uio_resid;
		so->so_state &= ~SS_RCVATMARK;
		if (tomark && len > tomark)
			len = tomark;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		splx(s);
		error =
		    uiomove(mtod(m, caddr_t) + moff, (int)len, UIO_READ, uio);
		s = splnet();
		if (len == m->m_len - moff) {
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_act;
				sbfree(&so->so_rcv, m);
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
				if (m)
					m->m_act = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				m->m_off += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if ((flags & MSG_PEEK) == 0 && so->so_oobmark) {
			so->so_oobmark -= len;
			if (so->so_oobmark == 0) {
				so->so_state |= SS_RCVATMARK;
				break;
			}
		}
		if (tomark) {
			tomark -= len;
			if (tomark == 0)
				break;
		}
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		else if (pr->pr_flags & PR_ATOMIC)
			(void) sbdroprecord(&so->so_rcv);
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0 && rightsp && *rightsp &&
		    pr->pr_domain->dom_externalize)
			error = (*pr->pr_domain->dom_externalize)(*rightsp);
	}
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}

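/*
 * Shut down part of a full-duplex connection.  The how argument is
 * 0 (no more receives), 1 (no more sends) or 2 (both); how+1 maps it
 * onto the FREAD/FWRITE bits tested below.
 */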
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	how++;
	if (how & FREAD)
		sorflush(so);
	if (how & FWRITE)
		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
	return (0);
}

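/*
 * Throw away the contents of a receive buffer: mark the socket unable
 * to receive more, take a local copy of the buffer, zero the original,
 * dispose of any rights in transit, and release the copied mbufs.
 */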
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sblock(sb);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}

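/*
 * Set a socket option.  Options above the SOL_SOCKET level are handed
 * to the protocol's ctloutput routine; the option mbuf m0 is consumed
 * in either case.
 */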
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_OOBINLINE:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ? &so->so_snd :
				    &so->so_rcv, *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = *mtod(m, int *);
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = *mtod(m, int *);
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}

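/*
 * Get a socket option.  For SOL_SOCKET options the result is returned
 * in a freshly allocated mbuf through *mp; other levels are handed to
 * the protocol's ctloutput routine.
 */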
sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
			*mtod(m, int *) = so->so_snd.sb_timeo;
			break;

		case SO_RCVTIMEO:
			*mtod(m, int *) = so->so_rcv.sb_timeo;
			break;

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}

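/*
 * Notify a socket's owner that out-of-band data has arrived: signal
 * the owning process or process group with SIGURG and wake up any
 * selector waiting on the receive buffer.
 */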
sohasoutofband(so)
	register struct socket *so;
{
	struct proc *p;

	if (so->so_pgrp < 0)
		gsignal(-so->so_pgrp, SIGURG);
	else if (so->so_pgrp > 0 && (p = pfind(so->so_pgrp)) != 0)
		psignal(p, SIGURG);
	if (so->so_rcv.sb_sel) {
		selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL);
		so->so_rcv.sb_sel = 0;
		so->so_rcv.sb_flags &= ~SB_COLL;
	}
}
806