xref: /dflybsd-src/sys/kern/uipc_socket2.c (revision 31c068aaf635ad9fa72dbc4c65b32d890ff7544d)
1 /*
2  * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
3  * Copyright (c) 1982, 1986, 1988, 1990, 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of the University nor the names of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
31  * $FreeBSD: src/sys/kern/uipc_socket2.c,v 1.55.2.17 2002/08/31 19:04:55 dwmalone Exp $
32  * $DragonFly: src/sys/kern/uipc_socket2.c,v 1.33 2008/09/02 16:17:52 dillon Exp $
33  */
34 
35 #include "opt_param.h"
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/domain.h>
39 #include <sys/file.h>	/* for maxfiles */
40 #include <sys/kernel.h>
41 #include <sys/ktr.h>
42 #include <sys/proc.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/protosw.h>
46 #include <sys/resourcevar.h>
47 #include <sys/stat.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/socketops.h>
51 #include <sys/signalvar.h>
52 #include <sys/sysctl.h>
53 #include <sys/event.h>
54 
55 #include <sys/thread2.h>
56 #include <sys/msgport2.h>
57 #include <sys/socketvar2.h>
58 
59 #include <net/netisr2.h>
60 
61 #ifndef KTR_SOWAKEUP
62 #define KTR_SOWAKEUP	KTR_ALL
63 #endif
64 KTR_INFO_MASTER(sowakeup);
65 KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_start, 0, "newconn sorwakeup start");
66 KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_end, 1, "newconn sorwakeup end");
67 KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_wakeupstart, 2, "newconn wakeup start");
68 KTR_INFO(KTR_SOWAKEUP, sowakeup, nconn_wakeupend, 3, "newconn wakeup end");
69 #define logsowakeup(name)	KTR_LOG(sowakeup_ ## name)
70 
71 int	maxsockets;
72 
73 /*
74  * Primitive routines for operating on sockets and socket buffers
75  */
76 
77 u_long	sb_max = SB_MAX;
78 u_long	sb_max_adj =
79     SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */
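
/*
 * Worked example of the adjustment above (illustrative only, assuming the
 * common values MSIZE = 256 and MCLBYTES = 2048): sb_max_adj becomes
 * sb_max * 2048 / 2304, i.e. roughly 89% of sb_max.  Each full cluster
 * stores MCLBYTES of data but is charged MSIZE + MCLBYTES of buffer space,
 * so this converts the memory limit into the achievable data limit.
 */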
80 
81 static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */
82 
83 /************************************************************************
84  * signalsockbuf procedures						*
85  ************************************************************************/
86 
87 /*
88  * Wait for data to arrive at/drain from a socket buffer.
89  *
90  * NOTE: Caller must generally hold the ssb_lock (client side lock) since
91  *	 WAIT/WAKEUP only works for one client at a time.
92  *
93  * NOTE: Caller always retries whatever operation it was waiting on.
94  */
95 int
96 ssb_wait(struct signalsockbuf *ssb)
97 {
98 	uint32_t flags;
99 	int pflags;
100 	int error;
101 
102 	pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;
103 
104 	for (;;) {
105 		flags = ssb->ssb_flags;
106 		cpu_ccfence();
107 
108 		/*
109 		 * WAKEUP and WAIT interlock each other.  We can catch the
110 		 * race by checking to see if WAKEUP has already been set,
111 		 * and only setting WAIT if WAKEUP is clear.
112 		 */
113 		if (flags & SSB_WAKEUP) {
114 			if (atomic_cmpset_int(&ssb->ssb_flags, flags,
115 					      flags & ~SSB_WAKEUP)) {
116 				error = 0;
117 				break;
118 			}
119 			continue;
120 		}
121 
122 		/*
123 		 * Only set WAIT if WAKEUP is clear.
124 		 */
125 		tsleep_interlock(&ssb->ssb_cc, pflags);
126 		if (atomic_cmpset_int(&ssb->ssb_flags, flags,
127 				      flags | SSB_WAIT)) {
128 			error = tsleep(&ssb->ssb_cc, pflags | PINTERLOCKED,
129 				       "sbwait", ssb->ssb_timeo);
130 			break;
131 		}
132 	}
133 	return (error);
134 }
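
/*
 * Illustrative sketch, not compiled into the kernel: the caller-side retry
 * pattern ssb_wait() is written for.  The caller holds ssb_lock, re-tests
 * its own condition after every wakeup, and gives up on error.  The
 * predicate ready_to_proceed() is hypothetical and stands in for whatever
 * the caller is actually waiting on (data arrived, space drained, etc.).
 */
#if 0
static int
example_wait_loop(struct socket *so)
{
	int error = 0;

	while (!ready_to_proceed(&so->so_rcv) && error == 0)
		error = ssb_wait(&so->so_rcv);
	return (error);
}
#endif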
135 
136 /*
137  * Lock a sockbuf which was found to already be locked by another thread;
138  * sleep until it can be acquired; return any error from the sleep (EINTR).
139  */
140 int
141 _ssb_lock(struct signalsockbuf *ssb)
142 {
143 	uint32_t flags;
144 	int pflags;
145 	int error;
146 
147 	pflags = (ssb->ssb_flags & SSB_NOINTR) ? 0 : PCATCH;
148 
149 	for (;;) {
150 		flags = ssb->ssb_flags;
151 		cpu_ccfence();
152 		if (flags & SSB_LOCK) {
153 			tsleep_interlock(&ssb->ssb_flags, pflags);
154 			if (atomic_cmpset_int(&ssb->ssb_flags, flags,
155 					      flags | SSB_WANT)) {
156 				error = tsleep(&ssb->ssb_flags,
157 					       pflags | PINTERLOCKED,
158 					       "sblock", 0);
159 				if (error)
160 					break;
161 			}
162 		} else {
163 			if (atomic_cmpset_int(&ssb->ssb_flags, flags,
164 					      flags | SSB_LOCK)) {
165 				lwkt_gettoken(&ssb->ssb_token);
166 				error = 0;
167 				break;
168 			}
169 		}
170 	}
171 	return (error);
172 }
173 
174 /*
175  * Fill in an external-format (``xsockbuf'') structure from a signalsockbuf.
176  * Note that the xsockbuf structure, since it is always embedded in a socket,
177  * does not include a self pointer nor a length.  We make this entry point
178  * public in case some other mechanism needs it.
179  */
180 void
181 ssbtoxsockbuf(struct signalsockbuf *ssb, struct xsockbuf *xsb)
182 {
183 	xsb->sb_cc = ssb->ssb_cc;
184 	xsb->sb_hiwat = ssb->ssb_hiwat;
185 	xsb->sb_mbcnt = ssb->ssb_mbcnt;
186 	xsb->sb_mbmax = ssb->ssb_mbmax;
187 	xsb->sb_lowat = ssb->ssb_lowat;
188 	xsb->sb_flags = ssb->ssb_flags;
189 	xsb->sb_timeo = ssb->ssb_timeo;
190 }
191 
192 
193 /************************************************************************
194  * Procedures which manipulate socket state flags, wakeups, etc.	*
195  ************************************************************************
196  *
197  * Normal sequence from the active (originating) side is that
198  * soisconnecting() is called during processing of a connect() call, resulting
199  * in an eventual call to soisconnected() if/when the connection is
200  * established.  When the connection is torn down soisdisconnecting() is
201  * called during processing of a disconnect() call, and soisdisconnected() is
202  * called when the connection to the peer is totally severed.
203  *
204  * The semantics of these routines are such that connectionless protocols
205  * can call soisconnected() and soisdisconnected() only, bypassing the
206  * in-progress calls when setting up a ``connection'' takes no time.
207  *
208  * From the passive side, a socket is created with two queues of sockets:
209  * so_incomp for connections in progress and so_comp for connections
210  * already made and awaiting user acceptance.  As a protocol is preparing
211  * incoming connections, it creates a socket structure queued on so_incomp
212  * by calling sonewconn().  When the connection is established,
213  * soisconnected() is called, and transfers the socket structure to so_comp,
214  * making it available to accept().
215  *
216  * If a socket is closed with sockets on either so_incomp or so_comp, these
217  * sockets are dropped.
218  *
219  * If higher level protocols are implemented in the kernel, the wakeups
220  * done here will sometimes cause software-interrupt process scheduling.
221  */
222 
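/*
 * Illustrative sketch, not compiled into the kernel: the rough passive-side
 * call sequence described above, as a connection-oriented protocol would
 * issue it against a listening socket.  The handshake step is protocol
 * specific and only hinted at here; the routines used are from this file.
 */
#if 0
static void
example_passive_open(struct socket *head, const struct sockaddr *faddr)
{
	struct socket *so;

	/* Connection request arrives: queue an embryonic socket on so_incomp */
	so = sonewconn_faddr(head, 0, faddr);
	if (so == NULL)
		return;		/* listen queue full, drop the request */

	/* ... protocol completes its handshake ... */

	/* Handshake done: move so to so_comp and wake up accept()ers */
	soisconnected(so);
}
#endif
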
223 void
224 soisconnecting(struct socket *so)
225 {
226 	soclrstate(so, SS_ISCONNECTED | SS_ISDISCONNECTING);
227 	sosetstate(so, SS_ISCONNECTING);
228 }
229 
230 void
231 soisconnected(struct socket *so)
232 {
233 	struct socket *head;
234 
235 	while ((head = so->so_head) != NULL) {
236 		lwkt_getpooltoken(head);
237 		if (so->so_head == head)
238 			break;
239 		lwkt_relpooltoken(head);
240 	}
241 
242 	soclrstate(so, SS_ISCONNECTING | SS_ISDISCONNECTING | SS_ISCONFIRMING);
243 	sosetstate(so, SS_ISCONNECTED);
244 	if (head && (so->so_state & SS_INCOMP)) {
245 		if ((so->so_options & SO_ACCEPTFILTER) != 0) {
246 			so->so_upcall = head->so_accf->so_accept_filter->accf_callback;
247 			so->so_upcallarg = head->so_accf->so_accept_filter_arg;
248 			atomic_set_int(&so->so_rcv.ssb_flags, SSB_UPCALL);
249 			so->so_options &= ~SO_ACCEPTFILTER;
250 			so->so_upcall(so, so->so_upcallarg, 0);
251 			lwkt_relpooltoken(head);
252 			return;
253 		}
254 
255 		/*
256 		 * Listen sockets are not per-cpu.
257 		 */
258 		TAILQ_REMOVE(&head->so_incomp, so, so_list);
259 		head->so_incqlen--;
260 		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
261 		head->so_qlen++;
262 		sosetstate(so, SS_COMP);
263 		soclrstate(so, SS_INCOMP);
264 
265 		/*
266 		 * XXX head may be on a different protocol thread.
267 		 *     sorwakeup()->sowakeup() is hacked atm.
268 		 */
269 		sorwakeup(head);
270 		wakeup_one(&head->so_timeo);
271 	} else {
272 		wakeup(&so->so_timeo);
273 		sorwakeup(so);
274 		sowwakeup(so);
275 	}
276 	if (head)
277 		lwkt_relpooltoken(head);
278 }
279 
280 void
281 soisdisconnecting(struct socket *so)
282 {
283 	soclrstate(so, SS_ISCONNECTING);
284 	sosetstate(so, SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE);
285 	wakeup((caddr_t)&so->so_timeo);
286 	sowwakeup(so);
287 	sorwakeup(so);
288 }
289 
290 void
291 soisdisconnected(struct socket *so)
292 {
293 	soclrstate(so, SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
294 	sosetstate(so, SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED);
295 	wakeup((caddr_t)&so->so_timeo);
296 	sbdrop(&so->so_snd.sb, so->so_snd.ssb_cc);
297 	sowwakeup(so);
298 	sorwakeup(so);
299 }
300 
301 void
302 soisreconnecting(struct socket *so)
303 {
304 	soclrstate(so, SS_ISDISCONNECTING | SS_ISDISCONNECTED |
305 		       SS_CANTRCVMORE | SS_CANTSENDMORE);
306 	sosetstate(so, SS_ISCONNECTING);
307 }
308 
309 void
310 soisreconnected(struct socket *so)
311 {
312 	soclrstate(so, SS_ISDISCONNECTED | SS_CANTRCVMORE | SS_CANTSENDMORE);
313 	soisconnected(so);
314 }
315 
316 /*
317  * Set or change the message port a socket receives commands on.
318  *
319  * XXX
320  */
321 void
322 sosetport(struct socket *so, lwkt_port_t port)
323 {
324 	so->so_port = port;
325 }
326 
327 /*
328  * When an attempt at a new connection is noted on a socket
329  * which accepts connections, sonewconn is called.  If the
330  * connection is possible (subject to space constraints, etc.)
331  * then we allocate a new structure, properly linked into the
332  * data structure of the original socket, and return this.
333  * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
334  *
335  * The new socket is returned with one ref and so_pcb assigned.
336  * The reference is implied by so_pcb.
337  */
338 struct socket *
339 sonewconn_faddr(struct socket *head, int connstatus,
340     const struct sockaddr *faddr)
341 {
342 	struct socket *so;
343 	struct socket *sp;
344 	struct pru_attach_info ai;
345 
346 	if (head->so_qlen > 3 * head->so_qlimit / 2)
347 		return (NULL);
348 	so = soalloc(1, head->so_proto);
349 	if (so == NULL)
350 		return (NULL);
351 
352 	/*
353 	 * Set the port prior to attaching the inpcb to the current
354 	 * cpu's protocol thread (which should be the current thread
355 	 * but might not be in all cases).  This serializes any pcb ops
356 	 * which occur on our cpu, allowing us to complete the attachment
357 	 * without racing anything.
358 	 */
359 	if (head->so_proto->pr_flags & PR_SYNC_PORT)
360 		sosetport(so, &netisr_sync_port);
361 	else
362 		sosetport(so, netisr_cpuport(mycpuid));
363 	if ((head->so_options & SO_ACCEPTFILTER) != 0)
364 		connstatus = 0;
365 	so->so_head = head;
366 	so->so_type = head->so_type;
367 	so->so_options = head->so_options &~ SO_ACCEPTCONN;
368 	so->so_linger = head->so_linger;
369 
370 	/*
371 	 * NOTE: Clearing NOFDREF implies referencing the so with
372 	 *	 soreference().
373 	 */
374 	so->so_state = head->so_state | SS_NOFDREF | SS_ASSERTINPROG;
375 	so->so_cred = crhold(head->so_cred);
376 	ai.sb_rlimit = NULL;
377 	ai.p_ucred = NULL;
378 	ai.fd_rdir = NULL;		/* jail code cruft XXX JH */
379 
380 	/*
381 	 * Reserve space and call pru_attach.  We can direct-call the
382 	 * function since we're already in the protocol thread.
383 	 */
384 	if (soreserve(so, head->so_snd.ssb_hiwat,
385 		      head->so_rcv.ssb_hiwat, NULL) ||
386 	    so_pru_attach_direct(so, 0, &ai)) {
387 		so->so_head = NULL;
388 		soclrstate(so, SS_ASSERTINPROG);
389 		sofree(so);		/* remove implied pcb ref */
390 		return (NULL);
391 	}
392 	KKASSERT(((so->so_proto->pr_flags & PR_ASYNC_RCVD) == 0 &&
393 	    so->so_refs == 2) ||	/* attach + our base ref */
394 	   ((so->so_proto->pr_flags & PR_ASYNC_RCVD) &&
395 	    so->so_refs == 3));		/* + async rcvd ref */
396 	sofree(so);
397 	KKASSERT(so->so_port != NULL);
398 	so->so_rcv.ssb_lowat = head->so_rcv.ssb_lowat;
399 	so->so_snd.ssb_lowat = head->so_snd.ssb_lowat;
400 	so->so_rcv.ssb_timeo = head->so_rcv.ssb_timeo;
401 	so->so_snd.ssb_timeo = head->so_snd.ssb_timeo;
402 
403 	if (head->so_rcv.ssb_flags & SSB_AUTOLOWAT)
404 		so->so_rcv.ssb_flags |= SSB_AUTOLOWAT;
405 	else
406 		so->so_rcv.ssb_flags &= ~SSB_AUTOLOWAT;
407 
408 	if (head->so_snd.ssb_flags & SSB_AUTOLOWAT)
409 		so->so_snd.ssb_flags |= SSB_AUTOLOWAT;
410 	else
411 		so->so_snd.ssb_flags &= ~SSB_AUTOLOWAT;
412 
413 	if (head->so_rcv.ssb_flags & SSB_AUTOSIZE)
414 		so->so_rcv.ssb_flags |= SSB_AUTOSIZE;
415 	else
416 		so->so_rcv.ssb_flags &= ~SSB_AUTOSIZE;
417 
418 	if (head->so_snd.ssb_flags & SSB_AUTOSIZE)
419 		so->so_snd.ssb_flags |= SSB_AUTOSIZE;
420 	else
421 		so->so_snd.ssb_flags &= ~SSB_AUTOSIZE;
422 
423 	/*
424 	 * Save the faddr, if the information is provided and
425 	 * the protocol can perform the saving operation.
426 	 */
427 	if (faddr != NULL && so->so_proto->pr_usrreqs->pru_savefaddr != NULL)
428 		so->so_proto->pr_usrreqs->pru_savefaddr(so, faddr);
429 
430 	lwkt_getpooltoken(head);
431 	if (connstatus) {
432 		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
433 		sosetstate(so, SS_COMP);
434 		head->so_qlen++;
435 	} else {
436 		if (head->so_incqlen > head->so_qlimit) {
437 			sp = TAILQ_FIRST(&head->so_incomp);
438 			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
439 			head->so_incqlen--;
440 			soclrstate(sp, SS_INCOMP);
441 			sp->so_head = NULL;
442 			soabort_async(sp);
443 		}
444 		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
445 		sosetstate(so, SS_INCOMP);
446 		head->so_incqlen++;
447 	}
448 	lwkt_relpooltoken(head);
449 	if (connstatus) {
450 		/*
451 		 * XXX head may be on a different protocol thread.
452 		 *     sorwakeup()->sowakeup() is hacked atm.
453 		 */
454 		logsowakeup(nconn_start);
455 		sorwakeup(head);
456 		logsowakeup(nconn_end);
457 
458 		logsowakeup(nconn_wakeupstart);
459 		wakeup((caddr_t)&head->so_timeo);
460 		logsowakeup(nconn_wakeupend);
461 
462 		sosetstate(so, connstatus);
463 	}
464 	soclrstate(so, SS_ASSERTINPROG);
465 	return (so);
466 }
467 
468 struct socket *
469 sonewconn(struct socket *head, int connstatus)
470 {
471 	return sonewconn_faddr(head, connstatus, NULL);
472 }
473 
474 /*
475  * Socantsendmore indicates that no more data will be sent on the
476  * socket; it is normally applied to a socket by the protocol code when
477  * the user informs the system that no more data is to be sent (in the
478  * case of PRU_SHUTDOWN).  Socantrcvmore indicates that no more data
479  * will be received, and will normally be applied to the socket by a
480  * protocol when it detects that the peer will send no more data.
481  * Data queued for reading in the socket may yet be read.
482  */
483 void
484 socantsendmore(struct socket *so)
485 {
486 	sosetstate(so, SS_CANTSENDMORE);
487 	sowwakeup(so);
488 }
489 
490 void
491 socantrcvmore(struct socket *so)
492 {
493 	sosetstate(so, SS_CANTRCVMORE);
494 	sorwakeup(so);
495 }
496 
497 /*
498  * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
499  * via SIGIO if the socket has the SS_ASYNC flag set.
500  *
501  * For users waiting on send/recv try to avoid unnecessary context switch
502  * thrashing.  Particularly for senders of large buffers (needs to be
503  * extended to sel and aio? XXX)
504  *
505  * WARNING!  Can be called on a foreign socket from the wrong protocol
506  *	     thread; e.g. it is called on the 'head' listen socket when
507  *	     a new connection comes in.
508  */
509 
510 void
511 sowakeup(struct socket *so, struct signalsockbuf *ssb)
512 {
513 	struct kqinfo *kqinfo = &ssb->ssb_kq;
514 	uint32_t flags;
515 
516 	/*
517 	 * Atomically check the flags.  When no special features are being
518 	 * used, WAIT is clear, and WAKEUP is already set, we can simply
519 	 * return.  The upcoming synchronous waiter will not block.
520 	 */
521 	flags = atomic_fetchadd_int(&ssb->ssb_flags, 0);
522 	if ((flags & SSB_NOTIFY_MASK) == 0) {
523 		if (flags & SSB_WAKEUP)
524 			return;
525 	}
526 
527 	/*
528 	 * Check conditions, set the WAKEUP flag, and clear and signal if
529 	 * the WAIT flag is found to be set.  This interlocks against the
530 	 * client side.
531 	 */
532 	for (;;) {
533 		long space;
534 
535 		flags = ssb->ssb_flags;
536 		cpu_ccfence();
537 		if (ssb->ssb_flags & SSB_PREALLOC)
538 			space = ssb_space_prealloc(ssb);
539 		else
540 			space = ssb_space(ssb);
541 
542 		if ((ssb == &so->so_snd && space >= ssb->ssb_lowat) ||
543 		    (ssb == &so->so_rcv && ssb->ssb_cc >= ssb->ssb_lowat) ||
544 		    (ssb == &so->so_snd && (so->so_state & SS_CANTSENDMORE)) ||
545 		    (ssb == &so->so_rcv && (so->so_state & SS_CANTRCVMORE))
546 		) {
547 			if (atomic_cmpset_int(&ssb->ssb_flags, flags,
548 					  (flags | SSB_WAKEUP) & ~SSB_WAIT)) {
549 				if (flags & SSB_WAIT)
550 					wakeup(&ssb->ssb_cc);
551 				break;
552 			}
553 		} else {
554 			break;
555 		}
556 	}
557 
558 	/*
559 	 * Misc other events
560 	 */
561 	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
562 		pgsigio(so->so_sigio, SIGIO, 0);
563 	if (ssb->ssb_flags & SSB_UPCALL)
564 		(*so->so_upcall)(so, so->so_upcallarg, MB_DONTWAIT);
565 	KNOTE(&kqinfo->ki_note, 0);
566 
567 	/*
568 	 * This is a bit of a hack.  Multiple threads can wind up scanning
569 	 * ki_mlist concurrently due to the fact that this function can be
570 	 * called on a foreign socket, so we can't afford to block here.
571 	 *
572 	 * We need the pool token for (so) (likely the listen socket if
573 	 * SSB_MEVENT is set) because the predicate function may have
574 	 * to access the accept queue.
575 	 */
576 	if (ssb->ssb_flags & SSB_MEVENT) {
577 		struct netmsg_so_notify *msg, *nmsg;
578 
579 		lwkt_getpooltoken(so);
580 		TAILQ_FOREACH_MUTABLE(msg, &kqinfo->ki_mlist, nm_list, nmsg) {
581 			if (msg->nm_predicate(msg)) {
582 				TAILQ_REMOVE(&kqinfo->ki_mlist, msg, nm_list);
583 				lwkt_replymsg(&msg->base.lmsg,
584 					      msg->base.lmsg.ms_error);
585 			}
586 		}
587 		if (TAILQ_EMPTY(&ssb->ssb_kq.ki_mlist))
588 			atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
589 		lwkt_relpooltoken(so);
590 	}
591 }
592 
593 /*
594  * Socket buffer (struct signalsockbuf) utility routines.
595  *
596  * Each socket contains two socket buffers: one for sending data and
597  * one for receiving data.  Each buffer contains a queue of mbufs,
598  * information about the number of mbufs and amount of data in the
599  * queue, and other fields allowing kevent()/select()/poll() statements
600  * and notification on data availability to be implemented.
601  *
602  * Data stored in a socket buffer is maintained as a list of records.
603  * Each record is a list of mbufs chained together with the m_next
604  * field.  Records are chained together with the m_nextpkt field. The upper
605  * level routine soreceive() expects the following conventions to be
606  * observed when placing information in the receive buffer:
607  *
608  * 1. If the protocol requires each message be preceded by the sender's
609  *    name, then a record containing that name must be present before
610  *    any associated data (mbuf's must be of type MT_SONAME).
611  * 2. If the protocol supports the exchange of ``access rights'' (really
612  *    just additional data associated with the message), and there are
613  *    ``rights'' to be received, then a record containing this data
614  *    should be present (mbuf's must be of type MT_RIGHTS).
615  * 3. If a name or rights record exists, then it must be followed by
616  *    a data record, perhaps of zero length.
617  *
618  * Before using a new socket structure it is first necessary to reserve
619  * buffer space to the socket, by calling soreserve().  This should commit
620  * some of the available buffer space in the system buffer pool for the
621  * socket (currently, it does nothing but enforce limits).  The space
622  * should be released by calling ssb_release() when the socket is destroyed.
623  */
624 int
625 soreserve(struct socket *so, u_long sndcc, u_long rcvcc, struct rlimit *rl)
626 {
627 	if (so->so_snd.ssb_lowat == 0)
628 		atomic_set_int(&so->so_snd.ssb_flags, SSB_AUTOLOWAT);
629 	if (ssb_reserve(&so->so_snd, sndcc, so, rl) == 0)
630 		goto bad;
631 	if (ssb_reserve(&so->so_rcv, rcvcc, so, rl) == 0)
632 		goto bad2;
633 	if (so->so_rcv.ssb_lowat == 0)
634 		so->so_rcv.ssb_lowat = 1;
635 	if (so->so_snd.ssb_lowat == 0)
636 		so->so_snd.ssb_lowat = MCLBYTES;
637 	if (so->so_snd.ssb_lowat > so->so_snd.ssb_hiwat)
638 		so->so_snd.ssb_lowat = so->so_snd.ssb_hiwat;
639 	return (0);
640 bad2:
641 	ssb_release(&so->so_snd, so);
642 bad:
643 	return (ENOBUFS);
644 }
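
/*
 * Illustrative sketch, not compiled into the kernel: how a protocol attach
 * routine typically uses soreserve() before touching the new socket.  The
 * 32KB sizes are arbitrary placeholders; real protocols use their own
 * tunables (e.g. tcp_sendspace/tcp_recvspace), and the pcb setup is elided.
 */
#if 0
static int
example_attach(struct socket *so, struct pru_attach_info *ai)
{
	int error;

	/* Commit send/receive buffer space, honoring the caller's rlimit */
	error = soreserve(so, 32 * 1024, 32 * 1024, ai->sb_rlimit);
	if (error)
		return (error);

	/* ... allocate and link the protocol control block here ... */
	return (0);
}
#endif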
645 
646 static int
647 sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
648 {
649 	int error = 0;
650 	u_long old_sb_max = sb_max;
651 
652 	error = SYSCTL_OUT(req, arg1, sizeof(int));
653 	if (error || !req->newptr)
654 		return (error);
655 	error = SYSCTL_IN(req, arg1, sizeof(int));
656 	if (error)
657 		return (error);
658 	if (sb_max < MSIZE + MCLBYTES) {
659 		sb_max = old_sb_max;
660 		return (EINVAL);
661 	}
662 	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
663 	return (0);
664 }
665 
666 /*
667  * Allot mbufs to a signalsockbuf.
668  *
669  * Attempt to scale mbmax so that mbcnt doesn't become limiting
670  * if buffering efficiency is near the normal case.
671  *
672  * sb_max only applies to user-sockets (where rl != NULL).  It does
673  * not apply to kernel sockets or kernel-controlled sockets.  Note
674  * that NFS overrides the sockbuf limits created when nfsd creates
675  * a socket.
676  */
677 int
678 ssb_reserve(struct signalsockbuf *ssb, u_long cc, struct socket *so,
679 	    struct rlimit *rl)
680 {
681 	/*
682 	 * rl will only be NULL when we're in an interrupt (eg, in tcp_input)
683 	 * or when called from netgraph (ie, ngd_attach)
684 	 */
685 	if (rl && cc > sb_max_adj)
686 		cc = sb_max_adj;
687 	if (!chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, cc,
688 		       rl ? rl->rlim_cur : RLIM_INFINITY)) {
689 		return (0);
690 	}
691 	if (rl)
692 		ssb->ssb_mbmax = min(cc * sb_efficiency, sb_max);
693 	else
694 		ssb->ssb_mbmax = cc * sb_efficiency;
695 
696 	/*
697 	 * AUTOLOWAT is set on send buffers and prevents large writes
698 	 * from generating a huge number of context switches.
699 	 */
700 	if (ssb->ssb_flags & SSB_AUTOLOWAT) {
701 		ssb->ssb_lowat = ssb->ssb_hiwat / 4;
702 		if (ssb->ssb_lowat < MCLBYTES)
703 			ssb->ssb_lowat = MCLBYTES;
704 	}
705 	if (ssb->ssb_lowat > ssb->ssb_hiwat)
706 		ssb->ssb_lowat = ssb->ssb_hiwat;
707 	return (1);
708 }
709 
710 /*
711  * Free mbufs held by a socket, and reserved mbuf space.
712  */
713 void
714 ssb_release(struct signalsockbuf *ssb, struct socket *so)
715 {
716 	sbflush(&ssb->sb);
717 	(void)chgsbsize(so->so_cred->cr_uidinfo, &ssb->ssb_hiwat, 0,
718 	    RLIM_INFINITY);
719 	ssb->ssb_mbmax = 0;
720 }
721 
722 /*
723  * Some routines that return EOPNOTSUPP for entry points that are not
724  * supported by a protocol.  Fill in as needed.
725  */
726 void
727 pr_generic_notsupp(netmsg_t msg)
728 {
729 	lwkt_replymsg(&msg->lmsg, EOPNOTSUPP);
730 }
731 
732 int
733 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
734 	   struct mbuf *top, struct mbuf *control, int flags,
735 	   struct thread *td)
736 {
737 	if (top)
738 		m_freem(top);
739 	if (control)
740 		m_freem(control);
741 	return (EOPNOTSUPP);
742 }
743 
744 int
745 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
746 		      struct uio *uio, struct sockbuf *sio,
747 		      struct mbuf **controlp, int *flagsp)
748 {
749 	return (EOPNOTSUPP);
750 }
751 
752 /*
753  * This isn't really a ``null'' operation, but it's the default one
754  * and doesn't do anything destructive.
755  */
756 void
757 pru_sense_null(netmsg_t msg)
758 {
759 	msg->sense.nm_stat->st_blksize = msg->base.nm_so->so_snd.ssb_hiwat;
760 	lwkt_replymsg(&msg->lmsg, 0);
761 }
762 
763 /*
764  * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.  Callers
765  * of this routine assume that it always succeeds, so we have to use a
766  * blockable allocation even though we might be called from a critical thread.
767  */
768 struct sockaddr *
769 dup_sockaddr(const struct sockaddr *sa)
770 {
771 	struct sockaddr *sa2;
772 
773 	sa2 = kmalloc(sa->sa_len, M_SONAME, M_INTWAIT);
774 	bcopy(sa, sa2, sa->sa_len);
775 	return (sa2);
776 }
777 
778 /*
779  * Create an external-format (``xsocket'') structure using the information
780  * in the kernel-format socket structure pointed to by so.  This is done
781  * to reduce the spew of irrelevant information over this interface,
782  * to isolate user code from changes in the kernel structure, and
783  * potentially to provide information-hiding if we decide that
784  * some of this information should be hidden from users.
785  */
786 void
787 sotoxsocket(struct socket *so, struct xsocket *xso)
788 {
789 	xso->xso_len = sizeof *xso;
790 	xso->xso_so = so;
791 	xso->so_type = so->so_type;
792 	xso->so_options = so->so_options;
793 	xso->so_linger = so->so_linger;
794 	xso->so_state = so->so_state;
795 	xso->so_pcb = so->so_pcb;
796 	xso->xso_protocol = so->so_proto->pr_protocol;
797 	xso->xso_family = so->so_proto->pr_domain->dom_family;
798 	xso->so_qlen = so->so_qlen;
799 	xso->so_incqlen = so->so_incqlen;
800 	xso->so_qlimit = so->so_qlimit;
801 	xso->so_timeo = so->so_timeo;
802 	xso->so_error = so->so_error;
803 	xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
804 	xso->so_oobmark = so->so_oobmark;
805 	ssbtoxsockbuf(&so->so_snd, &xso->so_snd);
806 	ssbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
807 	xso->so_uid = so->so_cred->cr_uid;
808 }
809 
810 /*
811  * Here is the definition of some of the basic objects in the kern.ipc
812  * branch of the MIB.
813  */
814 SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
815 
816 /*
817  * This takes the place of kern.maxsockbuf, which moved to kern.ipc.
818  *
819  * NOTE! sb_max only applies to user-created socket buffers.
820  */
821 static int dummy;
822 SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
823 SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_INT|CTLFLAG_RW,
824     &sb_max, 0, sysctl_handle_sb_max, "I", "Maximum socket buffer size");
825 SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD,
826     &maxsockets, 0, "Maximum number of sockets available");
827 SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
828     &sb_efficiency, 0,
829     "Socket buffer limit scaler");
830 
831 /*
832  * Initialize maxsockets
833  */
834 static void
835 init_maxsockets(void *ignored)
836 {
837     TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
838     maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
839 }
840 SYSINIT(param, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
841 	init_maxsockets, NULL);
842 
843