xref: /csrg-svn/sys/kern/uipc_socket.c (revision 26228)
1 /*
2  * Copyright (c) 1982 Regents of the University of California.
3  * All rights reserved.  The Berkeley software License Agreement
4  * specifies the terms and conditions for redistribution.
5  *
6  *	@(#)uipc_socket.c	6.21 (Berkeley) 02/18/86
7  */
8 
9 #include "param.h"
10 #include "systm.h"
11 #include "dir.h"
12 #include "user.h"
13 #include "proc.h"
14 #include "file.h"
15 #include "inode.h"
16 #include "buf.h"
17 #include "mbuf.h"
18 #include "un.h"
19 #include "domain.h"
20 #include "protosw.h"
21 #include "socket.h"
22 #include "socketvar.h"
23 #include "stat.h"
24 #include "ioctl.h"
25 #include "uio.h"
26 #include "../net/route.h"
27 #include "../netinet/in.h"
28 #include "../net/if.h"
29 
30 /*
31  * Socket operation routines.
32  * These routines are called by the routines in
33  * sys_socket.c or from a system process, and
34  * implement the semantics of socket operations by
35  * switching out to the protocol specific routines.
36  *
37  * TODO:
38  *	test socketpair
39  *	clean up async
40  *	out-of-band is a kludge
41  */
42 /*ARGSUSED*/
43 socreate(dom, aso, type, proto)
44 	struct socket **aso;
45 	register int type;
46 	int proto;
47 {
48 	register struct protosw *prp;
49 	register struct socket *so;
50 	register struct mbuf *m;
51 	register int error;
52 
53 	if (proto)
54 		prp = pffindproto(dom, proto, type);
55 	else
56 		prp = pffindtype(dom, type);
57 	if (prp == 0)
58 		return (EPROTONOSUPPORT);
59 	if (prp->pr_type != type)
60 		return (EPROTOTYPE);
61 	m = m_getclr(M_WAIT, MT_SOCKET);
62 	so = mtod(m, struct socket *);
63 	so->so_options = 0;
64 	so->so_state = 0;
65 	so->so_type = type;
66 	if (u.u_uid == 0)
67 		so->so_state = SS_PRIV;
68 	so->so_proto = prp;
69 	error =
70 	    (*prp->pr_usrreq)(so, PRU_ATTACH,
71 		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
72 	if (error) {
73 		so->so_state |= SS_NOFDREF;
74 		sofree(so);
75 		return (error);
76 	}
77 	*aso = so;
78 	return (0);
79 }
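
/*
 * Illustrative sketch: the routines in this file funnel every operation
 * through the protocol's pr_usrreq entry using the five-argument
 * convention seen in socreate() above, (so, req, m, nam, rights), with
 * unused arguments passed as null mbuf pointers.  The skeleton below is
 * for a hypothetical protocol; example_usrreq and its comments are
 * illustrative only, not part of any protocol switch.
 */
#ifdef notdef
example_usrreq(so, req, m, nam, rights)
	struct socket *so;
	int req;
	struct mbuf *m, *nam, *rights;
{
	int error = 0;

	switch (req) {

	case PRU_ATTACH:	/* from socreate(): set up a control block */
		break;

	case PRU_DETACH:	/* from soclose(): tear the control block down */
		break;

	case PRU_BIND:		/* from sobind(); nam holds the local address */
	case PRU_CONNECT:	/* from soconnect(); nam holds the peer address */
	case PRU_DISCONNECT:	/* from sodisconnect() */
	case PRU_SEND:		/* from sosend(); m is the chain of data mbufs */
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
#endif /* notdef */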
80 
81 sobind(so, nam)
82 	struct socket *so;
83 	struct mbuf *nam;
84 {
85 	int s = splnet();
86 	int error;
87 
88 	error =
89 	    (*so->so_proto->pr_usrreq)(so, PRU_BIND,
90 		(struct mbuf *)0, nam, (struct mbuf *)0);
91 	splx(s);
92 	return (error);
93 }
94 
95 solisten(so, backlog)
96 	register struct socket *so;
97 	int backlog;
98 {
99 	int s = splnet(), error;
100 
101 	error =
102 	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
103 		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
104 	if (error) {
105 		splx(s);
106 		return (error);
107 	}
108 	if (so->so_q == 0) {
109 		so->so_q = so;
110 		so->so_q0 = so;
111 		so->so_options |= SO_ACCEPTCONN;
112 	}
113 	if (backlog < 0)
114 		backlog = 0;
115 	so->so_qlimit = MIN(backlog, SOMAXCONN);
116 	splx(s);
117 	return (0);
118 }
119 
120 sofree(so)
121 	register struct socket *so;
122 {
123 
124 	if (so->so_head) {
125 		if (!soqremque(so, 0) && !soqremque(so, 1))
126 			panic("sofree dq");
127 		so->so_head = 0;
128 	}
129 	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
130 		return;
131 	sbrelease(&so->so_snd);
132 	sorflush(so);
133 	(void) m_free(dtom(so));
134 }
135 
136 /*
137  * Close a socket on last file table reference removal.
138  * Initiate disconnect if connected.
139  * Free socket when disconnect complete.
140  */
141 soclose(so)
142 	register struct socket *so;
143 {
144 	int s = splnet();		/* conservative */
145 	int error = 0;
146 
147 	if (so->so_options & SO_ACCEPTCONN) {
148 		while (so->so_q0 != so)
149 			(void) soabort(so->so_q0);
150 		while (so->so_q != so)
151 			(void) soabort(so->so_q);
152 	}
153 	if (so->so_pcb == 0)
154 		goto discard;
155 	if (so->so_state & SS_ISCONNECTED) {
156 		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
157 			error = sodisconnect(so, (struct mbuf *)0);
158 			if (error)
159 				goto drop;
160 		}
161 		if (so->so_options & SO_LINGER) {
162 			if ((so->so_state & SS_ISDISCONNECTING) &&
163 			    (so->so_state & SS_NBIO))
164 				goto drop;
165 			while (so->so_state & SS_ISCONNECTED)
166 				sleep((caddr_t)&so->so_timeo, PZERO+1);
167 		}
168 	}
169 drop:
170 	if (so->so_pcb) {
171 		int error2 =
172 		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
173 			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
174 		if (error == 0)
175 			error = error2;
176 	}
177 discard:
178 	if (so->so_state & SS_NOFDREF)
179 		panic("soclose: NOFDREF");
180 	so->so_state |= SS_NOFDREF;
181 	sofree(so);
182 	splx(s);
183 	return (error);
184 }
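
/*
 * Illustrative sketch: the comment above soclose() describes how closing
 * a connected socket initiates a disconnect, and how SO_LINGER makes the
 * close wait until the disconnect completes (unless the socket is marked
 * non-blocking).  The user-level fragment below shows one way to request
 * that behavior; linger_close and the linger interval are illustrative
 * only, and error handling is minimal.
 */
#ifdef notdef
#include <sys/types.h>
#include <sys/socket.h>

linger_close(s)
	int s;				/* a connected SOCK_STREAM descriptor */
{
	struct linger l;

	l.l_onoff = 1;			/* turn SO_LINGER on */
	l.l_linger = 10;		/* advisory linger interval, seconds */
	if (setsockopt(s, SOL_SOCKET, SO_LINGER, (char *)&l, sizeof (l)) < 0)
		return (-1);
	return (close(s));		/* blocks in soclose() while connected */
}
#endif /* notdef */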
185 
186 /*
187  * Must be called at splnet...
188  */
189 soabort(so)
190 	struct socket *so;
191 {
192 
193 	return (
194 	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
195 		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
196 }
197 
198 soaccept(so, nam)
199 	register struct socket *so;
200 	struct mbuf *nam;
201 {
202 	int s = splnet();
203 	int error;
204 
205 	if ((so->so_state & SS_NOFDREF) == 0)
206 		panic("soaccept: !NOFDREF");
207 	so->so_state &= ~SS_NOFDREF;
208 	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
209 	    (struct mbuf *)0, nam, (struct mbuf *)0);
210 	splx(s);
211 	return (error);
212 }
213 
214 soconnect(so, nam)
215 	register struct socket *so;
216 	struct mbuf *nam;
217 {
218 	int s = splnet();
219 	int error;
220 
221 	/*
222 	 * If protocol is connection-based, can only connect once.
223 	 * Otherwise, if connected, try to disconnect first.
224 	 * This allows user to disconnect by connecting to, e.g.,
225 	 * a null address.
226 	 */
227 	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
228 	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
229 	    (error = sodisconnect(so, (struct mbuf *)0))))
230 		error = EISCONN;
231 	else
232 		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
233 		    (struct mbuf *)0, nam, (struct mbuf *)0);
234 	splx(s);
235 	return (error);
236 }
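
/*
 * Illustrative sketch: as the comment in soconnect() above notes, a
 * connectionless (datagram) socket may be connected more than once;
 * each new connect() dissolves the previous association before forming
 * the next, and a null address may be used purely to disconnect.  The
 * user-level fragment below is illustrative only; reconnect_dgram and
 * the peer addresses are placeholders.
 */
#ifdef notdef
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

reconnect_dgram(s, peer1, peer2)
	int s;				/* a SOCK_DGRAM descriptor */
	struct sockaddr_in *peer1, *peer2;
{
	if (connect(s, (struct sockaddr *)peer1, sizeof (*peer1)) < 0)
		return (-1);
	/* ... exchange datagrams with peer1 via send() and recv() ... */
	if (connect(s, (struct sockaddr *)peer2, sizeof (*peer2)) < 0)
		return (-1);
	/* the old association is gone; the socket now talks to peer2 */
	return (0);
}
#endif /* notdef */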
237 
238 soconnect2(so1, so2)
239 	register struct socket *so1;
240 	struct socket *so2;
241 {
242 	int s = splnet();
243 	int error;
244 
245 	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
246 	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
247 	splx(s);
248 	return (error);
249 }
250 
251 sodisconnect(so, nam)
252 	register struct socket *so;
253 	struct mbuf *nam;
254 {
255 	int s = splnet();
256 	int error;
257 
258 	if ((so->so_state & SS_ISCONNECTED) == 0) {
259 		error = ENOTCONN;
260 		goto bad;
261 	}
262 	if (so->so_state & SS_ISDISCONNECTING) {
263 		error = EALREADY;
264 		goto bad;
265 	}
266 	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
267 	    (struct mbuf *)0, nam, (struct mbuf *)0);
268 bad:
269 	splx(s);
270 	return (error);
271 }
272 
273 /*
274  * Send on a socket.
275  * If send must go all at once and message is larger than
276  * send buffering, then hard error.
277  * Lock against other senders.
278  * If must go all at once and not enough room now, then
279  * inform user that this would block and do nothing.
280  * Otherwise, if nonblocking, send as much as possible.
281  */
282 sosend(so, nam, uio, flags, rights)
283 	register struct socket *so;
284 	struct mbuf *nam;
285 	register struct uio *uio;
286 	int flags;
287 	struct mbuf *rights;
288 {
289 	struct mbuf *top = 0;
290 	register struct mbuf *m, **mp;
291 	register int space;
292 	int len, rlen = 0, error = 0, s, dontroute, first = 1;
293 
294 	if (sosendallatonce(so) && uio->uio_resid > so->so_snd.sb_hiwat)
295 		return (EMSGSIZE);
296 	dontroute =
297 	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
298 	    (so->so_proto->pr_flags & PR_ATOMIC);
299 	u.u_ru.ru_msgsnd++;
300 	if (rights)
301 		rlen = rights->m_len;
302 #define	snderr(errno)	{ error = errno; splx(s); goto release; }
303 
304 restart:
305 	sblock(&so->so_snd);
306 	do {
307 		s = splnet();
308 		if (so->so_state & SS_CANTSENDMORE)
309 			snderr(EPIPE);
310 		if (so->so_error) {
311 			error = so->so_error;
312 			so->so_error = 0;			/* ??? */
313 			splx(s);
314 			goto release;
315 		}
316 		if ((so->so_state & SS_ISCONNECTED) == 0) {
317 			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
318 				snderr(ENOTCONN);
319 			if (nam == 0)
320 				snderr(EDESTADDRREQ);
321 		}
322 		if (flags & MSG_OOB)
323 			space = 1024;
324 		else {
325 			space = sbspace(&so->so_snd);
326 			if (space <= rlen ||
327 			   (sosendallatonce(so) &&
328 				space < uio->uio_resid + rlen) ||
329 			   (uio->uio_resid >= CLBYTES && space < CLBYTES &&
330 			   so->so_snd.sb_cc >= CLBYTES &&
331 			   (so->so_state & SS_NBIO) == 0)) {
332 				if (so->so_state & SS_NBIO) {
333 					if (first)
334 						error = EWOULDBLOCK;
335 					splx(s);
336 					goto release;
337 				}
338 				sbunlock(&so->so_snd);
339 				sbwait(&so->so_snd);
340 				splx(s);
341 				goto restart;
342 			}
343 		}
344 		splx(s);
345 		mp = &top;
346 		space -= rlen;
347 		while (space > 0) {
348 			register struct iovec *iov = uio->uio_iov;
349 
350 			MGET(m, M_WAIT, MT_DATA);
351 			if (iov->iov_len >= NBPG && space >= CLBYTES) {
352 				MCLGET(m);
353 				if (m->m_len != CLBYTES)
354 					goto nopages;
355 				len = MIN(CLBYTES, iov->iov_len);
356 				space -= CLBYTES;
357 			} else {
358 nopages:
359 				len = MIN(MIN(MLEN, iov->iov_len), space);
360 				space -= len;
361 			}
362 			error = uiomove(mtod(m, caddr_t), len, UIO_WRITE, uio);
363 			m->m_len = len;
364 			*mp = m;
365 			if (error)
366 				goto release;
367 			mp = &m->m_next;
368 			if (uio->uio_resid <= 0)
369 				break;
370 			while (uio->uio_iov->iov_len == 0) {
371 				uio->uio_iov++;
372 				uio->uio_iovcnt--;
373 				if (uio->uio_iovcnt <= 0)
374 					panic("sosend");
375 			}
376 		}
377 		if (dontroute)
378 			so->so_options |= SO_DONTROUTE;
379 		s = splnet();					/* XXX */
380 		error = (*so->so_proto->pr_usrreq)(so,
381 		    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
382 		    top, nam, rights);
383 		splx(s);
384 		if (dontroute)
385 			so->so_options &= ~SO_DONTROUTE;
386 		rights = 0;
387 		rlen = 0;
388 		top = 0;
389 		first = 0;
390 		if (error)
391 			break;
392 	} while (uio->uio_resid);
393 
394 release:
395 	sbunlock(&so->so_snd);
396 	if (top)
397 		m_freem(top);
398 	if (error == EPIPE)
399 		psignal(u.u_procp, SIGPIPE);
400 	return (error);
401 }
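
/*
 * Illustrative sketch: the header comment for sosend() above, together
 * with the code, shows the errors a sender can see: EMSGSIZE when an
 * atomic-protocol message exceeds the send buffer, EWOULDBLOCK when a
 * non-blocking socket finds no room, and EPIPE (with SIGPIPE) once the
 * peer can receive no more.  The user-level fragment below is
 * illustrative only; send_some is a placeholder name.
 */
#ifdef notdef
#include <sys/types.h>
#include <sys/socket.h>
#include <errno.h>

send_some(s, buf, len)
	int s;
	char *buf;
	int len;
{
	int n;

	n = send(s, buf, len, 0);
	if (n >= 0)
		return (n);		/* bytes accepted by the socket layer */
	switch (errno) {

	case EMSGSIZE:			/* message cannot fit the send buffer */
		return (-1);
	case EWOULDBLOCK:		/* SS_NBIO set and no space right now */
		return (0);		/* caller should try again later */
	case EPIPE:			/* peer shut down; SIGPIPE also sent */
	default:
		return (-1);
	}
}
#endif /* notdef */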
402 
403 /*
404  * Implement receive operations on a socket.
405  * We depend on the way that records are added to the sockbuf
406  * by sbappend*.  In particular, each record (mbufs linked through m_next)
407  * must begin with an address if the protocol so specifies,
408  * followed by an optional mbuf containing access rights if supported
409  * by the protocol, and then zero or more mbufs of data.
410  * In order to avoid blocking network interrupts for the entire time here,
411  * we splx() while doing the actual copy to user space.
412  * Although the sockbuf is locked, new data may still be appended,
413  * and thus we must maintain consistency of the sockbuf during that time.
414  */
415 soreceive(so, aname, uio, flags, rightsp)
416 	register struct socket *so;
417 	struct mbuf **aname;
418 	register struct uio *uio;
419 	int flags;
420 	struct mbuf **rightsp;
421 {
422 	register struct mbuf *m, *n;
423 	register int len, error = 0, s, tomark;
424 	struct protosw *pr = so->so_proto;
425 	struct mbuf *nextrecord;
426 	int moff;
427 
428 	if (rightsp)
429 		*rightsp = 0;
430 	if (aname)
431 		*aname = 0;
432 	if (flags & MSG_OOB) {
433 		m = m_get(M_WAIT, MT_DATA);
434 		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
435 		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
436 		if (error)
437 			goto bad;
438 		do {
439 			len = uio->uio_resid;
440 			if (len > m->m_len)
441 				len = m->m_len;
442 			error =
443 			    uiomove(mtod(m, caddr_t), (int)len, UIO_READ, uio);
444 			m = m_free(m);
445 		} while (uio->uio_resid && error == 0 && m);
446 bad:
447 		if (m)
448 			m_freem(m);
449 		return (error);
450 	}
451 
452 restart:
453 	sblock(&so->so_rcv);
454 	s = splnet();
455 
456 #define	rcverr(errno)	{ error = errno; splx(s); goto release; }
457 	if (so->so_rcv.sb_cc == 0) {
458 		if (so->so_error) {
459 			error = so->so_error;
460 			so->so_error = 0;
461 			splx(s);
462 			goto release;
463 		}
464 		if (so->so_state & SS_CANTRCVMORE) {
465 			splx(s);
466 			goto release;
467 		}
468 		if ((so->so_state & SS_ISCONNECTED) == 0 &&
469 		    (so->so_proto->pr_flags & PR_CONNREQUIRED))
470 			rcverr(ENOTCONN);
471 		if (uio->uio_resid == 0)
472 			goto release;
473 		if (so->so_state & SS_NBIO)
474 			rcverr(EWOULDBLOCK);
475 		sbunlock(&so->so_rcv);
476 		sbwait(&so->so_rcv);
477 		splx(s);
478 		goto restart;
479 	}
480 	u.u_ru.ru_msgrcv++;
481 	m = so->so_rcv.sb_mb;
482 	if (m == 0)
483 		panic("receive 1");
484 	nextrecord = m->m_act;
485 	if (pr->pr_flags & PR_ADDR) {
486 		if (m->m_type != MT_SONAME)
487 			panic("receive 1a");
488 		if (flags & MSG_PEEK) {
489 			if (aname)
490 				*aname = m_copy(m, 0, m->m_len);
491 			m = m->m_next;
492 		} else {
493 			sbfree(&so->so_rcv, m);
494 			if (aname) {
495 				*aname = m;
496 				m = m->m_next;
497 				(*aname)->m_next = 0;
498 			} else {
499 				nextrecord = m->m_act;
500 				MFREE(m, n);
501 				m = n;
502 			}
503 		}
504 	}
505 	if (m && m->m_type == MT_RIGHTS) {
506 		if ((pr->pr_flags & PR_RIGHTS) == 0)
507 			panic("receive 2a");
508 		if (flags & MSG_PEEK) {
509 			if (rightsp)
510 				*rightsp = m_copy(m, 0, m->m_len);
511 			m = m->m_next;
512 		} else {
513 			sbfree(&so->so_rcv, m);
514 			if (rightsp) {
515 				*rightsp = m;
516 				n = m->m_next;
517 				m->m_next = 0;
518 				m = n;
519 			} else {
520 				MFREE(m, n);
521 				m = n;
522 			}
523 		}
524 	}
525 	moff = 0;
526 	tomark = so->so_oobmark;
527 	while (m && uio->uio_resid > 0 && error == 0) {
528 		if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
529 			panic("receive 3");
530 		len = uio->uio_resid;
531 		so->so_state &= ~SS_RCVATMARK;
532 		if (tomark && len > tomark)
533 			len = tomark;
534 		if (len > m->m_len - moff)
535 			len = m->m_len - moff;
536 		if ((flags & MSG_PEEK) == 0) {
537 			so->so_rcv.sb_mb = m;
538 			m->m_act = nextrecord;
539 		}
540 		splx(s);
541 		error =
542 		    uiomove(mtod(m, caddr_t) + moff, (int)len, UIO_READ, uio);
543 		s = splnet();
544 		if (len == m->m_len - moff) {
545 			if (flags & MSG_PEEK) {
546 				m = m->m_next;
547 				moff = 0;
548 			} else {
549 				sbfree(&so->so_rcv, m);
550 				nextrecord = m->m_act;
551 				MFREE(m, n);
552 				so->so_rcv.sb_mb = m = n;
553 			}
554 		} else {
555 			if (flags & MSG_PEEK)
556 				moff += len;
557 			else {
558 				m->m_off += len;
559 				m->m_len -= len;
560 				so->so_rcv.sb_cc -= len;
561 			}
562 		}
563 		if ((flags & MSG_PEEK) == 0 && so->so_oobmark) {
564 			so->so_oobmark -= len;
565 			if (so->so_oobmark == 0) {
566 				so->so_state |= SS_RCVATMARK;
567 				break;
568 			}
569 		}
570 		if (tomark) {
571 			tomark -= len;
572 			if (tomark == 0)
573 				break;
574 		}
575 	}
576 	if ((flags & MSG_PEEK) == 0) {
577 		if (so->so_rcv.sb_mb == 0)
578 			so->so_rcv.sb_mb = nextrecord;
579 		else if (pr->pr_flags & PR_ATOMIC)
580 			(void) sbdroprecord(&so->so_rcv);
581 		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
582 			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
583 			    (struct mbuf *)0, (struct mbuf *)0);
584 		if (error == 0 && rightsp && *rightsp &&
585 		    pr->pr_domain->dom_externalize)
586 			error = (*pr->pr_domain->dom_externalize)(*rightsp);
587 	}
588 release:
589 	sbunlock(&so->so_rcv);
590 	splx(s);
591 	return (error);
592 }
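
/*
 * Illustrative sketch: the header comment for soreceive() above describes
 * each receive-buffer record as an optional address mbuf, an optional
 * access-rights mbuf, and then data, with records linked through m_act.
 * At user level that layout is what lets recvfrom() report the sender's
 * address, and what MSG_PEEK examines without consuming.  The fragment
 * below is illustrative only; peek_then_read is a placeholder name.
 */
#ifdef notdef
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

peek_then_read(s, buf, len)
	int s;				/* a SOCK_DGRAM descriptor */
	char *buf;
	int len;
{
	struct sockaddr_in from;
	int fromlen, n;

	fromlen = sizeof (from);
	n = recvfrom(s, buf, len, MSG_PEEK,
	    (struct sockaddr *)&from, &fromlen);
	if (n < 0)
		return (-1);
	/* the record is still queued; read it again, this time for keeps */
	fromlen = sizeof (from);
	n = recvfrom(s, buf, len, 0, (struct sockaddr *)&from, &fromlen);
	return (n);
}
#endif /* notdef */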
593 
594 soshutdown(so, how)
595 	register struct socket *so;
596 	register int how;
597 {
598 	register struct protosw *pr = so->so_proto;
599 
600 	how++;
601 	if (how & FREAD)
602 		sorflush(so);
603 	if (how & FWRITE)
604 		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
605 		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
606 	return (0);
607 }
608 
609 sorflush(so)
610 	register struct socket *so;
611 {
612 	register struct sockbuf *sb = &so->so_rcv;
613 	register struct protosw *pr = so->so_proto;
614 	register int s;
615 	struct sockbuf asb;
616 
617 	sblock(sb);
618 	s = splimp();
619 	socantrcvmore(so);
620 	sbunlock(sb);
621 	asb = *sb;
622 	bzero((caddr_t)sb, sizeof (*sb));
623 	splx(s);
624 	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
625 		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
626 	sbrelease(&asb);
627 }
628 
629 sosetopt(so, level, optname, m0)
630 	register struct socket *so;
631 	int level, optname;
632 	struct mbuf *m0;
633 {
634 	int error = 0;
635 	register struct mbuf *m = m0;
636 
637 	if (level != SOL_SOCKET) {
638 		if (so->so_proto && so->so_proto->pr_ctloutput)
639 			return ((*so->so_proto->pr_ctloutput)
640 				  (PRCO_SETOPT, so, level, optname, &m0));
641 		error = ENOPROTOOPT;
642 	} else {
643 		switch (optname) {
644 
645 		case SO_LINGER:
646 			if (m == NULL || m->m_len != sizeof (struct linger)) {
647 				error = EINVAL;
648 				goto bad;
649 			}
650 			so->so_linger = mtod(m, struct linger *)->l_linger;
651 			/* fall thru... */
652 
653 		case SO_DEBUG:
654 		case SO_KEEPALIVE:
655 		case SO_DONTROUTE:
656 		case SO_USELOOPBACK:
657 		case SO_BROADCAST:
658 		case SO_REUSEADDR:
659 			if (m == NULL || m->m_len < sizeof (int)) {
660 				error = EINVAL;
661 				goto bad;
662 			}
663 			if (*mtod(m, int *))
664 				so->so_options |= optname;
665 			else
666 				so->so_options &= ~optname;
667 			break;
668 
669 		case SO_SNDBUF:
670 		case SO_RCVBUF:
671 		case SO_SNDLOWAT:
672 		case SO_RCVLOWAT:
673 		case SO_SNDTIMEO:
674 		case SO_RCVTIMEO:
675 			if (m == NULL || m->m_len < sizeof (int)) {
676 				error = EINVAL;
677 				goto bad;
678 			}
679 			switch (optname) {
680 
681 			case SO_SNDBUF:
682 			case SO_RCVBUF:
683 				if (sbreserve(optname == SO_SNDBUF ? &so->so_snd :
684 				    &so->so_rcv, *mtod(m, int *)) == 0) {
685 					error = ENOBUFS;
686 					goto bad;
687 				}
688 				break;
689 
690 			case SO_SNDLOWAT:
691 				so->so_snd.sb_lowat = *mtod(m, int *);
692 				break;
693 			case SO_RCVLOWAT:
694 				so->so_rcv.sb_lowat = *mtod(m, int *);
695 				break;
696 			case SO_SNDTIMEO:
697 				so->so_snd.sb_timeo = *mtod(m, int *);
698 				break;
699 			case SO_RCVTIMEO:
700 				so->so_rcv.sb_timeo = *mtod(m, int *);
701 				break;
702 			}
703 			break;
704 
705 		default:
706 			error = ENOPROTOOPT;
707 			break;
708 		}
709 	}
710 bad:
711 	if (m)
712 		(void) m_free(m);
713 	return (error);
714 }
715 
716 sogetopt(so, level, optname, mp)
717 	register struct socket *so;
718 	int level, optname;
719 	struct mbuf **mp;
720 {
721 	register struct mbuf *m;
722 
723 	if (level != SOL_SOCKET) {
724 		if (so->so_proto && so->so_proto->pr_ctloutput) {
725 			return ((*so->so_proto->pr_ctloutput)
726 				  (PRCO_GETOPT, so, level, optname, mp));
727 		} else
728 			return (ENOPROTOOPT);
729 	} else {
730 		m = m_get(M_WAIT, MT_SOOPTS);
731 		m->m_len = sizeof (int);
732 
733 		switch (optname) {
734 
735 		case SO_LINGER:
736 			m->m_len = sizeof (struct linger);
737 			mtod(m, struct linger *)->l_onoff =
738 				so->so_options & SO_LINGER;
739 			mtod(m, struct linger *)->l_linger = so->so_linger;
740 			break;
741 
742 		case SO_USELOOPBACK:
743 		case SO_DONTROUTE:
744 		case SO_DEBUG:
745 		case SO_KEEPALIVE:
746 		case SO_REUSEADDR:
747 		case SO_BROADCAST:
748 			*mtod(m, int *) = so->so_options & optname;
749 			break;
750 
751 		case SO_TYPE:
752 			*mtod(m, int *) = so->so_type;
753 			break;
754 
755 		case SO_ERROR:
756 			*mtod(m, int *) = so->so_error;
757 			so->so_error = 0;
758 			break;
759 
760 		case SO_SNDBUF:
761 			*mtod(m, int *) = so->so_snd.sb_hiwat;
762 			break;
763 
764 		case SO_RCVBUF:
765 			*mtod(m, int *) = so->so_rcv.sb_hiwat;
766 			break;
767 
768 		case SO_SNDLOWAT:
769 			*mtod(m, int *) = so->so_snd.sb_lowat;
770 			break;
771 
772 		case SO_RCVLOWAT:
773 			*mtod(m, int *) = so->so_rcv.sb_lowat;
774 			break;
775 
776 		case SO_SNDTIMEO:
777 			*mtod(m, int *) = so->so_snd.sb_timeo;
778 			break;
779 
780 		case SO_RCVTIMEO:
781 			*mtod(m, int *) = so->so_rcv.sb_timeo;
782 			break;
783 
784 		default:
785 			m_free(m);
786 			return (ENOPROTOOPT);
787 		}
788 		*mp = m;
789 		return (0);
790 	}
791 }
792 
793 sohasoutofband(so)
794 	register struct socket *so;
795 {
796 	struct proc *p;
797 
798 	if (so->so_pgrp < 0)
799 		gsignal(-so->so_pgrp, SIGURG);
800 	else if (so->so_pgrp > 0 && (p = pfind(so->so_pgrp)) != 0)
801 		psignal(p, SIGURG);
802 	if (so->so_rcv.sb_sel) {
803 		selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL);
804 		so->so_rcv.sb_sel = 0;
805 		so->so_rcv.sb_flags &= ~SB_COLL;
806 	}
807 }
808