xref: /dflybsd-src/sys/kern/uipc_syscalls.c (revision 10cf3bfcde2ee9c50d77a153397b93d8026b03e1)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1990, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * sendfile(2) and related extensions:
6  * Copyright (c) 1998, David Greenman. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
33  * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
34  */
35 
36 #include "opt_ktrace.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/sysproto.h>
42 #include <sys/malloc.h>
43 #include <sys/filedesc.h>
44 #include <sys/event.h>
45 #include <sys/proc.h>
46 #include <sys/fcntl.h>
47 #include <sys/file.h>
48 #include <sys/filio.h>
49 #include <sys/kern_syscall.h>
50 #include <sys/mbuf.h>
51 #include <sys/protosw.h>
52 #include <sys/sfbuf.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 #include <sys/socketops.h>
56 #include <sys/uio.h>
57 #include <sys/vnode.h>
58 #include <sys/lock.h>
59 #include <sys/mount.h>
60 #ifdef KTRACE
61 #include <sys/ktrace.h>
62 #endif
63 #include <vm/vm.h>
64 #include <vm/vm_object.h>
65 #include <vm/vm_page.h>
66 #include <vm/vm_pageout.h>
67 #include <vm/vm_kern.h>
68 #include <vm/vm_extern.h>
69 #include <sys/file2.h>
70 #include <sys/signalvar.h>
71 #include <sys/serialize.h>
72 
73 #include <sys/thread2.h>
74 #include <sys/msgport2.h>
75 #include <sys/socketvar2.h>
76 #include <net/netmsg2.h>
77 #include <vm/vm_page2.h>
78 
79 extern int use_soaccept_pred_fast;
80 extern int use_sendfile_async;
81 extern int use_soconnect_async;
82 
83 /*
84  * System call interface to the socket abstraction.
85  */
86 
87 extern	struct fileops socketops;
88 
89 /*
90  * socket_args(int domain, int type, int protocol)
91  */
92 int
93 kern_socket(int domain, int type, int protocol, int *res)
94 {
95 	struct thread *td = curthread;
96 	struct filedesc *fdp = td->td_proc->p_fd;
97 	struct socket *so;
98 	struct file *fp;
99 	int fd, error;
100 
101 	KKASSERT(td->td_lwp);
102 
103 	error = falloc(td->td_lwp, &fp, &fd);
104 	if (error)
105 		return (error);
106 	error = socreate(domain, &so, type, protocol, td);
107 	if (error) {
108 		fsetfd(fdp, NULL, fd);
109 	} else {
110 		fp->f_type = DTYPE_SOCKET;
111 		fp->f_flag = FREAD | FWRITE;
112 		fp->f_ops = &socketops;
113 		fp->f_data = so;
114 		*res = fd;
115 		fsetfd(fdp, fp, fd);
116 	}
117 	fdrop(fp);
118 	return (error);
119 }
120 
121 /*
122  * MPALMOSTSAFE
123  */
124 int
125 sys_socket(struct socket_args *uap)
126 {
127 	int error;
128 
129 	error = kern_socket(uap->domain, uap->type, uap->protocol,
130 			    &uap->sysmsg_iresult);
131 
132 	return (error);
133 }
134 
135 int
136 kern_bind(int s, struct sockaddr *sa)
137 {
138 	struct thread *td = curthread;
139 	struct proc *p = td->td_proc;
140 	struct file *fp;
141 	int error;
142 
143 	KKASSERT(p);
144 	error = holdsock(p->p_fd, s, &fp);
145 	if (error)
146 		return (error);
147 	error = sobind((struct socket *)fp->f_data, sa, td);
148 	fdrop(fp);
149 	return (error);
150 }
151 
152 /*
153  * bind_args(int s, caddr_t name, int namelen)
154  *
155  * MPALMOSTSAFE
156  */
157 int
158 sys_bind(struct bind_args *uap)
159 {
160 	struct sockaddr *sa;
161 	int error;
162 
163 	error = getsockaddr(&sa, uap->name, uap->namelen);
164 	if (error)
165 		return (error);
166 	error = kern_bind(uap->s, sa);
167 	kfree(sa, M_SONAME);
168 
169 	return (error);
170 }
171 
172 int
173 kern_listen(int s, int backlog)
174 {
175 	struct thread *td = curthread;
176 	struct proc *p = td->td_proc;
177 	struct file *fp;
178 	int error;
179 
180 	KKASSERT(p);
181 	error = holdsock(p->p_fd, s, &fp);
182 	if (error)
183 		return (error);
184 	error = solisten((struct socket *)fp->f_data, backlog, td);
185 	fdrop(fp);
186 	return(error);
187 }
188 
189 /*
190  * listen_args(int s, int backlog)
191  *
192  * MPALMOSTSAFE
193  */
194 int
195 sys_listen(struct listen_args *uap)
196 {
197 	int error;
198 
199 	error = kern_listen(uap->s, uap->backlog);
200 	return (error);
201 }
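
/*
 * Userland view of the path above: a minimal sketch of the
 * socket/bind/listen sequence serviced by kern_socket(), kern_bind()
 * and kern_listen().  The helper name and the port/backlog values are
 * illustrative only.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>

static int
make_listener(int port)
{
	struct sockaddr_in sin;
	int s;

	s = socket(AF_INET, SOCK_STREAM, 0);
	if (s < 0)
		return (-1);
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) < 0 ||
	    listen(s, 128) < 0) {
		close(s);
		return (-1);
	}
	return (s);
}
#endif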
202 
203 /*
204  * Returns the accepted socket as well.
205  *
206  * NOTE!  The sockets sitting on so_comp/so_incomp might have 0 refs, the
207  *	  pool token is absolutely required to avoid a sofree() race,
208  *	  as well as to avoid tailq handling races.
209  */
210 static boolean_t
211 soaccept_predicate(struct netmsg_so_notify *msg)
212 {
213 	struct socket *head = msg->base.nm_so;
214 	struct socket *so;
215 
216 	if (head->so_error != 0) {
217 		msg->base.lmsg.ms_error = head->so_error;
218 		return (TRUE);
219 	}
220 	lwkt_getpooltoken(head);
221 	if (!TAILQ_EMPTY(&head->so_comp)) {
222 		/* Abuse nm_so field as copy in/copy out parameter. XXX JH */
223 		so = TAILQ_FIRST(&head->so_comp);
224 		TAILQ_REMOVE(&head->so_comp, so, so_list);
225 		head->so_qlen--;
226 		soclrstate(so, SS_COMP);
227 		so->so_head = NULL;
228 		soreference(so);
229 
230 		lwkt_relpooltoken(head);
231 
232 		msg->base.lmsg.ms_error = 0;
233 		msg->base.nm_so = so;
234 		return (TRUE);
235 	}
236 	lwkt_relpooltoken(head);
237 	if (head->so_state & SS_CANTRCVMORE) {
238 		msg->base.lmsg.ms_error = ECONNABORTED;
239 		return (TRUE);
240 	}
241 	if (msg->nm_fflags & FNONBLOCK) {
242 		msg->base.lmsg.ms_error = EWOULDBLOCK;
243 		return (TRUE);
244 	}
245 
246 	return (FALSE);
247 }
248 
249 /*
250  * The second argument to kern_accept() is a handle to a struct sockaddr.
251  * This allows kern_accept() to return a pointer to an allocated struct
252  * sockaddr which must be freed later with kfree(..., M_SONAME).  The caller must
253  * initialize *name to NULL.
254  */
255 int
256 kern_accept(int s, int fflags, struct sockaddr **name, int *namelen, int *res)
257 {
258 	struct thread *td = curthread;
259 	struct filedesc *fdp = td->td_proc->p_fd;
260 	struct file *lfp = NULL;
261 	struct file *nfp = NULL;
262 	struct sockaddr *sa;
263 	struct socket *head, *so;
264 	struct netmsg_so_notify msg;
265 	int fd;
266 	u_int fflag;		/* type must match fp->f_flag */
267 	int error, tmp;
268 
269 	*res = -1;
270 	if (name && namelen && *namelen < 0)
271 		return (EINVAL);
272 
273 	error = holdsock(td->td_proc->p_fd, s, &lfp);
274 	if (error)
275 		return (error);
276 
277 	error = falloc(td->td_lwp, &nfp, &fd);
278 	if (error) {		/* Probably ran out of file descriptors. */
279 		fdrop(lfp);
280 		return (error);
281 	}
282 	head = (struct socket *)lfp->f_data;
283 	if ((head->so_options & SO_ACCEPTCONN) == 0) {
284 		error = EINVAL;
285 		goto done;
286 	}
287 
288 	if (fflags & O_FBLOCKING)
289 		fflags |= lfp->f_flag & ~FNONBLOCK;
290 	else if (fflags & O_FNONBLOCKING)
291 		fflags |= lfp->f_flag | FNONBLOCK;
292 	else
293 		fflags = lfp->f_flag;
294 
295 	if (use_soaccept_pred_fast) {
296 		boolean_t pred;
297 
298 		/* Initialize necessary parts for soaccept_predicate() */
299 		netmsg_init(&msg.base, head, &netisr_apanic_rport, 0, NULL);
300 		msg.nm_fflags = fflags;
301 
302 		lwkt_getpooltoken(head);
303 		pred = soaccept_predicate(&msg);
304 		lwkt_relpooltoken(head);
305 
306 		if (pred) {
307 			error = msg.base.lmsg.ms_error;
308 			if (error)
309 				goto done;
310 			else
311 				goto accepted;
312 		}
313 	}
314 
315 	/* optimize for uniprocessor case later XXX JH */
316 	netmsg_init_abortable(&msg.base, head, &curthread->td_msgport,
317 			      0, netmsg_so_notify, netmsg_so_notify_doabort);
318 	msg.nm_predicate = soaccept_predicate;
319 	msg.nm_fflags = fflags;
320 	msg.nm_etype = NM_REVENT;
321 	error = lwkt_domsg(head->so_port, &msg.base.lmsg, PCATCH);
322 	if (error)
323 		goto done;
324 
325 accepted:
326 	/*
327 	 * At this point we have the connection that's ready to be accepted.
328 	 *
329 	 * NOTE! soaccept_predicate() ref'd so for us, and soaccept() expects
330 	 * 	 to eat the ref and turn it into a descriptor.
331 	 */
332 	so = msg.base.nm_so;
333 
334 	fflag = lfp->f_flag;
335 
336 	/* connection has been removed from the listen queue */
337 	KNOTE(&head->so_rcv.ssb_kq.ki_note, 0);
338 
339 	if (head->so_sigio != NULL)
340 		fsetown(fgetown(&head->so_sigio), &so->so_sigio);
341 
342 	nfp->f_type = DTYPE_SOCKET;
343 	nfp->f_flag = fflag;
344 	nfp->f_ops = &socketops;
345 	nfp->f_data = so;
346 	/* Sync socket nonblocking/async state with file flags */
347 	tmp = fflag & FNONBLOCK;
348 	fo_ioctl(nfp, FIONBIO, (caddr_t)&tmp, td->td_ucred, NULL);
349 	tmp = fflag & FASYNC;
350 	fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, td->td_ucred, NULL);
351 
352 	sa = NULL;
353 	if (so->so_faddr != NULL) {
354 		sa = so->so_faddr;
355 		so->so_faddr = NULL;
356 
357 		soaccept_generic(so);
358 		error = 0;
359 	} else {
360 		error = soaccept(so, &sa);
361 	}
362 
363 	/*
364 	 * Set the returned name and namelen as applicable.  Set the returned
365 	 * namelen to 0 for older code which might ignore the return value
366 	 * from accept.
367 	 */
368 	if (error == 0) {
369 		if (sa && name && namelen) {
370 			if (*namelen > sa->sa_len)
371 				*namelen = sa->sa_len;
372 			*name = sa;
373 		} else {
374 			if (sa)
375 				kfree(sa, M_SONAME);
376 		}
377 	}
378 
379 done:
380 	/*
381 	 * If an error occurred clear the reserved descriptor, else associate
382 	 * nfp with it.
383 	 *
384 	 * Note that *res is normally ignored if an error is returned but
385 	 * a syscall message will still have access to the result code.
386 	 */
387 	if (error) {
388 		fsetfd(fdp, NULL, fd);
389 	} else {
390 		*res = fd;
391 		fsetfd(fdp, nfp, fd);
392 	}
393 	fdrop(nfp);
394 	fdrop(lfp);
395 	return (error);
396 }
397 
398 /*
399  * accept(int s, caddr_t name, int *anamelen)
400  *
401  * MPALMOSTSAFE
402  */
403 int
404 sys_accept(struct accept_args *uap)
405 {
406 	struct sockaddr *sa = NULL;
407 	int sa_len;
408 	int error;
409 
410 	if (uap->name) {
411 		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
412 		if (error)
413 			return (error);
414 
415 		error = kern_accept(uap->s, 0, &sa, &sa_len,
416 				    &uap->sysmsg_iresult);
417 
418 		if (error == 0)
419 			error = copyout(sa, uap->name, sa_len);
420 		if (error == 0) {
421 			error = copyout(&sa_len, uap->anamelen,
422 			    sizeof(*uap->anamelen));
423 		}
424 		if (sa)
425 			kfree(sa, M_SONAME);
426 	} else {
427 		error = kern_accept(uap->s, 0, NULL, 0,
428 				    &uap->sysmsg_iresult);
429 	}
430 	return (error);
431 }
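
/*
 * Minimal userland sketch of the value/result convention implemented
 * by kern_accept()/sys_accept() above: *anamelen is read on entry and
 * rewritten with the (possibly truncated) address length.  'lsock' is
 * assumed to be a bound, listening descriptor.
 */
#if 0
#include <sys/socket.h>

static int
accept_one(int lsock)
{
	struct sockaddr_storage ss;
	socklen_t slen = sizeof(ss);

	/* on success slen holds MIN(original slen, sa_len) */
	return (accept(lsock, (struct sockaddr *)&ss, &slen));
}
#endif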
432 
433 /*
434  * extaccept(int s, int fflags, caddr_t name, int *anamelen)
435  *
436  * MPALMOSTSAFE
437  */
438 int
439 sys_extaccept(struct extaccept_args *uap)
440 {
441 	struct sockaddr *sa = NULL;
442 	int sa_len;
443 	int error;
444 	int fflags = uap->flags & O_FMASK;
445 
446 	if (uap->name) {
447 		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
448 		if (error)
449 			return (error);
450 
451 		error = kern_accept(uap->s, fflags, &sa, &sa_len,
452 				    &uap->sysmsg_iresult);
453 
454 		if (error == 0)
455 			error = copyout(sa, uap->name, sa_len);
456 		if (error == 0) {
457 			error = copyout(&sa_len, uap->anamelen,
458 			    sizeof(*uap->anamelen));
459 		}
460 		if (sa)
461 			kfree(sa, M_SONAME);
462 	} else {
463 		error = kern_accept(uap->s, fflags, NULL, 0,
464 				    &uap->sysmsg_iresult);
465 	}
466 	return (error);
467 }
468 
469 
470 /*
471  * Returns TRUE if predicate satisfied.
472  */
473 static boolean_t
474 soconnected_predicate(struct netmsg_so_notify *msg)
475 {
476 	struct socket *so = msg->base.nm_so;
477 
478 	/* check predicate */
479 	if (!(so->so_state & SS_ISCONNECTING) || so->so_error != 0) {
480 		msg->base.lmsg.ms_error = so->so_error;
481 		return (TRUE);
482 	}
483 
484 	return (FALSE);
485 }
486 
487 int
488 kern_connect(int s, int fflags, struct sockaddr *sa)
489 {
490 	struct thread *td = curthread;
491 	struct proc *p = td->td_proc;
492 	struct file *fp;
493 	struct socket *so;
494 	int error, interrupted = 0;
495 
496 	error = holdsock(p->p_fd, s, &fp);
497 	if (error)
498 		return (error);
499 	so = (struct socket *)fp->f_data;
500 
501 	if (fflags & O_FBLOCKING)
502 		/* fflags &= ~FNONBLOCK; */;
503 	else if (fflags & O_FNONBLOCKING)
504 		fflags |= FNONBLOCK;
505 	else
506 		fflags = fp->f_flag;
507 
508 	if (so->so_state & SS_ISCONNECTING) {
509 		error = EALREADY;
510 		goto done;
511 	}
512 	error = soconnect(so, sa, td, use_soconnect_async ? FALSE : TRUE);
513 	if (error)
514 		goto bad;
515 	if ((fflags & FNONBLOCK) && (so->so_state & SS_ISCONNECTING)) {
516 		error = EINPROGRESS;
517 		goto done;
518 	}
519 	if ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
520 		struct netmsg_so_notify msg;
521 
522 		netmsg_init_abortable(&msg.base, so,
523 				      &curthread->td_msgport,
524 				      0,
525 				      netmsg_so_notify,
526 				      netmsg_so_notify_doabort);
527 		msg.nm_predicate = soconnected_predicate;
528 		msg.nm_etype = NM_REVENT;
529 		error = lwkt_domsg(so->so_port, &msg.base.lmsg, PCATCH);
530 		if (error == EINTR || error == ERESTART)
531 			interrupted = 1;
532 	}
533 	if (error == 0) {
534 		error = so->so_error;
535 		so->so_error = 0;
536 	}
537 bad:
538 	if (!interrupted)
539 		soclrstate(so, SS_ISCONNECTING);
540 	if (error == ERESTART)
541 		error = EINTR;
542 done:
543 	fdrop(fp);
544 	return (error);
545 }
546 
547 /*
548  * connect_args(int s, caddr_t name, int namelen)
549  *
550  * MPALMOSTSAFE
551  */
552 int
553 sys_connect(struct connect_args *uap)
554 {
555 	struct sockaddr *sa;
556 	int error;
557 
558 	error = getsockaddr(&sa, uap->name, uap->namelen);
559 	if (error)
560 		return (error);
561 	error = kern_connect(uap->s, 0, sa);
562 	kfree(sa, M_SONAME);
563 
564 	return (error);
565 }
566 
567 /*
568  * extconnect_args(int s, int fflags, caddr_t name, int namelen)
569  *
570  * MPALMOSTSAFE
571  */
572 int
573 sys_extconnect(struct extconnect_args *uap)
574 {
575 	struct sockaddr *sa;
576 	int error;
577 	int fflags = uap->flags & O_FMASK;
578 
579 	error = getsockaddr(&sa, uap->name, uap->namelen);
580 	if (error)
581 		return (error);
582 	error = kern_connect(uap->s, fflags, sa);
583 	kfree(sa, M_SONAME);
584 
585 	return (error);
586 }
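
/*
 * Userland sketch of the non-blocking connect path handled above:
 * kern_connect() returns EINPROGRESS, the caller waits for
 * writability and then collects the final status via SO_ERROR.
 * Address setup is omitted; 'sa'/'salen' are assumed valid.
 */
#if 0
#include <sys/socket.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>

static int
nb_connect(int s, const struct sockaddr *sa, socklen_t salen)
{
	struct pollfd pfd;
	socklen_t elen;
	int err;

	fcntl(s, F_SETFL, fcntl(s, F_GETFL, 0) | O_NONBLOCK);
	if (connect(s, sa, salen) == 0)
		return (0);		/* connected immediately */
	if (errno != EINPROGRESS)
		return (-1);
	pfd.fd = s;
	pfd.events = POLLOUT;
	if (poll(&pfd, 1, -1) <= 0)
		return (-1);
	err = 0;
	elen = sizeof(err);
	if (getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &elen) < 0 || err != 0)
		return (-1);
	return (0);
}
#endif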
587 
588 int
589 kern_socketpair(int domain, int type, int protocol, int *sv)
590 {
591 	struct thread *td = curthread;
592 	struct filedesc *fdp;
593 	struct file *fp1, *fp2;
594 	struct socket *so1, *so2;
595 	int fd1, fd2, error;
596 
597 	fdp = td->td_proc->p_fd;
598 	error = socreate(domain, &so1, type, protocol, td);
599 	if (error)
600 		return (error);
601 	error = socreate(domain, &so2, type, protocol, td);
602 	if (error)
603 		goto free1;
604 	error = falloc(td->td_lwp, &fp1, &fd1);
605 	if (error)
606 		goto free2;
607 	sv[0] = fd1;
608 	fp1->f_data = so1;
609 	error = falloc(td->td_lwp, &fp2, &fd2);
610 	if (error)
611 		goto free3;
612 	fp2->f_data = so2;
613 	sv[1] = fd2;
614 	error = soconnect2(so1, so2);
615 	if (error)
616 		goto free4;
617 	if (type == SOCK_DGRAM) {
618 		/*
619 		 * Datagram socket connection is asymmetric.
620 		 */
621 		 error = soconnect2(so2, so1);
622 		 if (error)
623 			goto free4;
624 	}
625 	fp1->f_type = fp2->f_type = DTYPE_SOCKET;
626 	fp1->f_flag = fp2->f_flag = FREAD|FWRITE;
627 	fp1->f_ops = fp2->f_ops = &socketops;
628 	fsetfd(fdp, fp1, fd1);
629 	fsetfd(fdp, fp2, fd2);
630 	fdrop(fp1);
631 	fdrop(fp2);
632 	return (error);
633 free4:
634 	fsetfd(fdp, NULL, fd2);
635 	fdrop(fp2);
636 free3:
637 	fsetfd(fdp, NULL, fd1);
638 	fdrop(fp1);
639 free2:
640 	(void)soclose(so2, 0);
641 free1:
642 	(void)soclose(so1, 0);
643 	return (error);
644 }
645 
646 /*
647  * socketpair(int domain, int type, int protocol, int *rsv)
648  */
649 int
650 sys_socketpair(struct socketpair_args *uap)
651 {
652 	int error, sockv[2];
653 
654 	error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);
655 
656 	if (error == 0) {
657 		error = copyout(sockv, uap->rsv, sizeof(sockv));
658 
659 		if (error != 0) {
660 			kern_close(sockv[0]);
661 			kern_close(sockv[1]);
662 		}
663 	}
664 
665 	return (error);
666 }
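
/*
 * Minimal sketch of the interface serviced above.  Because
 * kern_socketpair() connects a SOCK_DGRAM pair in both directions,
 * either end may send to the other.
 */
#if 0
#include <sys/socket.h>

static int
make_pair(int sv[2])
{
	return (socketpair(AF_LOCAL, SOCK_DGRAM, 0, sv));
}
#endif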
667 
668 int
669 kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
670 	     struct mbuf *control, int flags, size_t *res)
671 {
672 	struct thread *td = curthread;
673 	struct lwp *lp = td->td_lwp;
674 	struct proc *p = td->td_proc;
675 	struct file *fp;
676 	size_t len;
677 	int error;
678 	struct socket *so;
679 #ifdef KTRACE
680 	struct iovec *ktriov = NULL;
681 	struct uio ktruio;
682 #endif
683 
684 	error = holdsock(p->p_fd, s, &fp);
685 	if (error)
686 		return (error);
687 #ifdef KTRACE
688 	if (KTRPOINT(td, KTR_GENIO)) {
689 		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
690 
691 		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
692 		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
693 		ktruio = *auio;
694 	}
695 #endif
696 	len = auio->uio_resid;
697 	so = (struct socket *)fp->f_data;
698 	if ((flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
699 		if (fp->f_flag & FNONBLOCK)
700 			flags |= MSG_FNONBLOCKING;
701 	}
702 	error = so_pru_sosend(so, sa, auio, NULL, control, flags, td);
703 	if (error) {
704 		if (auio->uio_resid != len && (error == ERESTART ||
705 		    error == EINTR || error == EWOULDBLOCK))
706 			error = 0;
707 		if (error == EPIPE && !(flags & MSG_NOSIGNAL) &&
708 		    !(so->so_options & SO_NOSIGPIPE))
709 			lwpsignal(p, lp, SIGPIPE);
710 	}
711 #ifdef KTRACE
712 	if (ktriov != NULL) {
713 		if (error == 0) {
714 			ktruio.uio_iov = ktriov;
715 			ktruio.uio_resid = len - auio->uio_resid;
716 			ktrgenio(lp, s, UIO_WRITE, &ktruio, error);
717 		}
718 		kfree(ktriov, M_TEMP);
719 	}
720 #endif
721 	if (error == 0)
722 		*res  = len - auio->uio_resid;
723 	fdrop(fp);
724 	return (error);
725 }
726 
727 /*
728  * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
729  *
730  * MPALMOSTSAFE
731  */
732 int
733 sys_sendto(struct sendto_args *uap)
734 {
735 	struct thread *td = curthread;
736 	struct uio auio;
737 	struct iovec aiov;
738 	struct sockaddr *sa = NULL;
739 	int error;
740 
741 	if (uap->to) {
742 		error = getsockaddr(&sa, uap->to, uap->tolen);
743 		if (error)
744 			return (error);
745 	}
746 	aiov.iov_base = uap->buf;
747 	aiov.iov_len = uap->len;
748 	auio.uio_iov = &aiov;
749 	auio.uio_iovcnt = 1;
750 	auio.uio_offset = 0;
751 	auio.uio_resid = uap->len;
752 	auio.uio_segflg = UIO_USERSPACE;
753 	auio.uio_rw = UIO_WRITE;
754 	auio.uio_td = td;
755 
756 	error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
757 			     &uap->sysmsg_szresult);
758 
759 	if (sa)
760 		kfree(sa, M_SONAME);
761 	return (error);
762 }
763 
764 /*
765  * sendmsg_args(int s, caddr_t msg, int flags)
766  *
767  * MPALMOSTSAFE
768  */
769 int
770 sys_sendmsg(struct sendmsg_args *uap)
771 {
772 	struct thread *td = curthread;
773 	struct msghdr msg;
774 	struct uio auio;
775 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
776 	struct sockaddr *sa = NULL;
777 	struct mbuf *control = NULL;
778 	int error;
779 
780 	error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
781 	if (error)
782 		return (error);
783 
784 	/*
785 	 * Conditionally copyin msg.msg_name.
786 	 */
787 	if (msg.msg_name) {
788 		error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
789 		if (error)
790 			return (error);
791 	}
792 
793 	/*
794 	 * Populate auio.
795 	 */
796 	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
797 			     &auio.uio_resid);
798 	if (error)
799 		goto cleanup2;
800 	auio.uio_iov = iov;
801 	auio.uio_iovcnt = msg.msg_iovlen;
802 	auio.uio_offset = 0;
803 	auio.uio_segflg = UIO_USERSPACE;
804 	auio.uio_rw = UIO_WRITE;
805 	auio.uio_td = td;
806 
807 	/*
808 	 * Conditionally copyin msg.msg_control.
809 	 */
810 	if (msg.msg_control) {
811 		if (msg.msg_controllen < sizeof(struct cmsghdr) ||
812 		    msg.msg_controllen > MLEN) {
813 			error = EINVAL;
814 			goto cleanup;
815 		}
816 		control = m_get(M_WAITOK, MT_CONTROL);
817 		if (control == NULL) {
818 			error = ENOBUFS;
819 			goto cleanup;
820 		}
821 		control->m_len = msg.msg_controllen;
822 		error = copyin(msg.msg_control, mtod(control, caddr_t),
823 			       msg.msg_controllen);
824 		if (error) {
825 			m_free(control);
826 			goto cleanup;
827 		}
828 	}
829 
830 	error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags,
831 			     &uap->sysmsg_szresult);
832 
833 cleanup:
834 	iovec_free(&iov, aiov);
835 cleanup2:
836 	if (sa)
837 		kfree(sa, M_SONAME);
838 	return (error);
839 }
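
/*
 * Minimal userland sketch of a sendmsg(2) call exercising the
 * msg_control path validated above (the control payload must be at
 * least a cmsghdr and no larger than MLEN): passing a descriptor over
 * a unix-domain socket with SCM_RIGHTS.  The helper name is
 * illustrative only.
 */
#if 0
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static int
send_fd(int s, int fd_to_pass)
{
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cmsg;
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} cmsgbuf;
	char byte = 0;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = &byte;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsgbuf.buf;
	msg.msg_controllen = sizeof(cmsgbuf.buf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

	return (sendmsg(s, &msg, 0) < 0 ? -1 : 0);
}
#endif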
840 
841 /*
842  * kern_recvmsg() takes a handle to sa and control.  If the handle is non-
843  * null, it returns a dynamically allocated struct sockaddr and an mbuf.
844  * Don't forget to kfree() and m_freem() these if they are returned.
845  */
846 int
847 kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio,
848 	     struct mbuf **control, int *flags, size_t *res)
849 {
850 	struct thread *td = curthread;
851 	struct proc *p = td->td_proc;
852 	struct file *fp;
853 	size_t len;
854 	int error;
855 	int lflags;
856 	struct socket *so;
857 #ifdef KTRACE
858 	struct iovec *ktriov = NULL;
859 	struct uio ktruio;
860 #endif
861 
862 	error = holdsock(p->p_fd, s, &fp);
863 	if (error)
864 		return (error);
865 #ifdef KTRACE
866 	if (KTRPOINT(td, KTR_GENIO)) {
867 		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
868 
869 		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
870 		bcopy(auio->uio_iov, ktriov, iovlen);
871 		ktruio = *auio;
872 	}
873 #endif
874 	len = auio->uio_resid;
875 	so = (struct socket *)fp->f_data;
876 
877 	if (flags == NULL || (*flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
878 		if (fp->f_flag & FNONBLOCK) {
879 			if (flags) {
880 				*flags |= MSG_FNONBLOCKING;
881 			} else {
882 				lflags = MSG_FNONBLOCKING;
883 				flags = &lflags;
884 			}
885 		}
886 	}
887 
888 	error = so_pru_soreceive(so, sa, auio, NULL, control, flags);
889 	if (error) {
890 		if (auio->uio_resid != len && (error == ERESTART ||
891 		    error == EINTR || error == EWOULDBLOCK))
892 			error = 0;
893 	}
894 #ifdef KTRACE
895 	if (ktriov != NULL) {
896 		if (error == 0) {
897 			ktruio.uio_iov = ktriov;
898 			ktruio.uio_resid = len - auio->uio_resid;
899 			ktrgenio(td->td_lwp, s, UIO_READ, &ktruio, error);
900 		}
901 		kfree(ktriov, M_TEMP);
902 	}
903 #endif
904 	if (error == 0)
905 		*res = len - auio->uio_resid;
906 	fdrop(fp);
907 	return (error);
908 }
909 
910 /*
911  * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
912  *			caddr_t from, int *fromlenaddr)
913  *
914  * MPALMOSTSAFE
915  */
916 int
917 sys_recvfrom(struct recvfrom_args *uap)
918 {
919 	struct thread *td = curthread;
920 	struct uio auio;
921 	struct iovec aiov;
922 	struct sockaddr *sa = NULL;
923 	int error, fromlen;
924 
925 	if (uap->from && uap->fromlenaddr) {
926 		error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
927 		if (error)
928 			return (error);
929 		if (fromlen < 0)
930 			return (EINVAL);
931 	} else {
932 		fromlen = 0;
933 	}
934 	aiov.iov_base = uap->buf;
935 	aiov.iov_len = uap->len;
936 	auio.uio_iov = &aiov;
937 	auio.uio_iovcnt = 1;
938 	auio.uio_offset = 0;
939 	auio.uio_resid = uap->len;
940 	auio.uio_segflg = UIO_USERSPACE;
941 	auio.uio_rw = UIO_READ;
942 	auio.uio_td = td;
943 
944 	error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL,
945 			     &uap->flags, &uap->sysmsg_szresult);
946 
947 	if (error == 0 && uap->from) {
948 		/* note: sa may still be NULL */
949 		if (sa) {
950 			fromlen = MIN(fromlen, sa->sa_len);
951 			error = copyout(sa, uap->from, fromlen);
952 		} else {
953 			fromlen = 0;
954 		}
955 		if (error == 0) {
956 			error = copyout(&fromlen, uap->fromlenaddr,
957 					sizeof(fromlen));
958 		}
959 	}
960 	if (sa)
961 		kfree(sa, M_SONAME);
962 
963 	return (error);
964 }
965 
966 /*
967  * recvmsg_args(int s, struct msghdr *msg, int flags)
968  *
969  * MPALMOSTSAFE
970  */
971 int
972 sys_recvmsg(struct recvmsg_args *uap)
973 {
974 	struct thread *td = curthread;
975 	struct msghdr msg;
976 	struct uio auio;
977 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
978 	struct mbuf *m, *control = NULL;
979 	struct sockaddr *sa = NULL;
980 	caddr_t ctlbuf;
981 	socklen_t *ufromlenp, *ucontrollenp;
982 	int error, fromlen, controllen, len, flags, *uflagsp;
983 
984 	/*
985 	 * This copyin handles everything except the iovec.
986 	 */
987 	error = copyin(uap->msg, &msg, sizeof(msg));
988 	if (error)
989 		return (error);
990 
991 	if (msg.msg_name && msg.msg_namelen < 0)
992 		return (EINVAL);
993 	if (msg.msg_control && msg.msg_controllen < 0)
994 		return (EINVAL);
995 
996 	ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
997 		    msg_namelen));
998 	ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
999 		       msg_controllen));
1000 	uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr,
1001 							msg_flags));
1002 
1003 	/*
1004 	 * Populate auio.
1005 	 */
1006 	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
1007 			     &auio.uio_resid);
1008 	if (error)
1009 		return (error);
1010 	auio.uio_iov = iov;
1011 	auio.uio_iovcnt = msg.msg_iovlen;
1012 	auio.uio_offset = 0;
1013 	auio.uio_segflg = UIO_USERSPACE;
1014 	auio.uio_rw = UIO_READ;
1015 	auio.uio_td = td;
1016 
1017 	flags = uap->flags;
1018 
1019 	error = kern_recvmsg(uap->s,
1020 			     (msg.msg_name ? &sa : NULL), &auio,
1021 			     (msg.msg_control ? &control : NULL), &flags,
1022 			     &uap->sysmsg_szresult);
1023 
1024 	/*
1025 	 * Conditionally copyout the name and populate the namelen field.
1026 	 */
1027 	if (error == 0 && msg.msg_name) {
1028 		/* note: sa may still be NULL */
1029 		if (sa != NULL) {
1030 			fromlen = MIN(msg.msg_namelen, sa->sa_len);
1031 			error = copyout(sa, msg.msg_name, fromlen);
1032 		} else {
1033 			fromlen = 0;
1034 		}
1035 		if (error == 0)
1036 			error = copyout(&fromlen, ufromlenp,
1037 			    sizeof(*ufromlenp));
1038 	}
1039 
1040 	/*
1041 	 * Copyout msg.msg_control and msg.msg_controllen.
1042 	 */
1043 	if (error == 0 && msg.msg_control) {
1044 		len = msg.msg_controllen;
1045 		m = control;
1046 		ctlbuf = (caddr_t)msg.msg_control;
1047 
1048 		while(m && len > 0) {
1049 			unsigned int tocopy;
1050 
1051 			if (len >= m->m_len) {
1052 				tocopy = m->m_len;
1053 			} else {
1054 				msg.msg_flags |= MSG_CTRUNC;
1055 				tocopy = len;
1056 			}
1057 
1058 			error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
1059 			if (error)
1060 				goto cleanup;
1061 
1062 			ctlbuf += tocopy;
1063 			len -= tocopy;
1064 			m = m->m_next;
1065 		}
1066 		controllen = ctlbuf - (caddr_t)msg.msg_control;
1067 		error = copyout(&controllen, ucontrollenp,
1068 		    sizeof(*ucontrollenp));
1069 	}
1070 
1071 	if (error == 0)
1072 		error = copyout(&flags, uflagsp, sizeof(*uflagsp));
1073 
1074 cleanup:
1075 	if (sa)
1076 		kfree(sa, M_SONAME);
1077 	iovec_free(&iov, aiov);
1078 	if (control)
1079 		m_freem(control);
1080 	return (error);
1081 }
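
/*
 * Minimal userland sketch of the matching receive side for the control
 * copyout loop above; checking MSG_CTRUNC guards against truncated
 * control data.  The helper name is illustrative only.
 */
#if 0
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static int
recv_fd(int s)
{
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cmsg;
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} cmsgbuf;
	char byte;
	int fd = -1;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = &byte;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsgbuf.buf;
	msg.msg_controllen = sizeof(cmsgbuf.buf);

	if (recvmsg(s, &msg, 0) < 0 || (msg.msg_flags & MSG_CTRUNC))
		return (-1);
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_RIGHTS)
			memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
	}
	return (fd);
}
#endif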
1082 
1083 /*
1084  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1085  * in kernel pointer instead of a userland pointer.  This allows us
1086  * to manipulate socket options in the emulation code.
1087  */
1088 int
1089 kern_setsockopt(int s, struct sockopt *sopt)
1090 {
1091 	struct thread *td = curthread;
1092 	struct proc *p = td->td_proc;
1093 	struct file *fp;
1094 	int error;
1095 
1096 	if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1097 		return (EFAULT);
1098 	if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1099 		return (EINVAL);
1100 	if (sopt->sopt_valsize > SOMAXOPT_SIZE)	/* unsigned */
1101 		return (EINVAL);
1102 
1103 	error = holdsock(p->p_fd, s, &fp);
1104 	if (error)
1105 		return (error);
1106 
1107 	error = sosetopt((struct socket *)fp->f_data, sopt);
1108 	fdrop(fp);
1109 	return (error);
1110 }
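
/*
 * In-kernel sketch of the convention described above (and shared by
 * kern_getsockopt() below): with sopt_td left NULL, sopt_val is
 * treated as a kernel pointer and no userland copyin is done.  The
 * helper name and the option chosen are illustrative only.
 */
#if 0
static int
emul_set_reuseaddr(int s)
{
	struct sockopt sopt;
	int on = 1;

	bzero(&sopt, sizeof(sopt));
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_REUSEADDR;
	sopt.sopt_val = &on;		/* kernel pointer ... */
	sopt.sopt_valsize = sizeof(on);
	sopt.sopt_td = NULL;		/* ... because sopt_td is NULL */

	return (kern_setsockopt(s, &sopt));
}
#endif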
1111 
1112 /*
1113  * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
1114  *
1115  * MPALMOSTSAFE
1116  */
1117 int
1118 sys_setsockopt(struct setsockopt_args *uap)
1119 {
1120 	struct thread *td = curthread;
1121 	struct sockopt sopt;
1122 	int error;
1123 
1124 	sopt.sopt_level = uap->level;
1125 	sopt.sopt_name = uap->name;
1126 	sopt.sopt_valsize = uap->valsize;
1127 	sopt.sopt_td = td;
1128 	sopt.sopt_val = NULL;
1129 
1130 	if (sopt.sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1131 		return (EINVAL);
1132 	if (uap->val) {
1133 		sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1134 		error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1135 		if (error)
1136 			goto out;
1137 	}
1138 
1139 	error = kern_setsockopt(uap->s, &sopt);
1140 out:
1141 	if (uap->val)
1142 		kfree(sopt.sopt_val, M_TEMP);
1143 	return(error);
1144 }
1145 
1146 /*
1147  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1148  * in kernel pointer instead of a userland pointer.  This allows us
1149  * to manipulate socket options in the emulation code.
1150  */
1151 int
1152 kern_getsockopt(int s, struct sockopt *sopt)
1153 {
1154 	struct thread *td = curthread;
1155 	struct proc *p = td->td_proc;
1156 	struct file *fp;
1157 	int error;
1158 
1159 	if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1160 		return (EFAULT);
1161 	if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1162 		return (EINVAL);
1163 	if (sopt->sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1164 		return (EINVAL);
1165 
1166 	error = holdsock(p->p_fd, s, &fp);
1167 	if (error)
1168 		return (error);
1169 
1170 	error = sogetopt((struct socket *)fp->f_data, sopt);
1171 	fdrop(fp);
1172 	return (error);
1173 }
1174 
1175 /*
1176  * getsockopt_args(int s, int level, int name, caddr_t val, int *avalsize)
1177  *
1178  * MPALMOSTSAFE
1179  */
1180 int
1181 sys_getsockopt(struct getsockopt_args *uap)
1182 {
1183 	struct thread *td = curthread;
1184 	struct	sockopt sopt;
1185 	int	error, valsize;
1186 
1187 	if (uap->val) {
1188 		error = copyin(uap->avalsize, &valsize, sizeof(valsize));
1189 		if (error)
1190 			return (error);
1191 	} else {
1192 		valsize = 0;
1193 	}
1194 
1195 	sopt.sopt_level = uap->level;
1196 	sopt.sopt_name = uap->name;
1197 	sopt.sopt_valsize = valsize;
1198 	sopt.sopt_td = td;
1199 	sopt.sopt_val = NULL;
1200 
1201 	if (sopt.sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1202 		return (EINVAL);
1203 	if (uap->val) {
1204 		sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1205 		error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1206 		if (error)
1207 			goto out;
1208 	}
1209 
1210 	error = kern_getsockopt(uap->s, &sopt);
1211 	if (error)
1212 		goto out;
1213 	valsize = sopt.sopt_valsize;
1214 	error = copyout(&valsize, uap->avalsize, sizeof(valsize));
1215 	if (error)
1216 		goto out;
1217 	if (uap->val)
1218 		error = copyout(sopt.sopt_val, uap->val, sopt.sopt_valsize);
1219 out:
1220 	if (uap->val)
1221 		kfree(sopt.sopt_val, M_TEMP);
1222 	return (error);
1223 }
1224 
1225 /*
1226  * The second argument to kern_getsockname() is a handle to a struct sockaddr.
1227  * This allows kern_getsockname() to return a pointer to an allocated struct
1228  * sockaddr which must be freed later with kfree(..., M_SONAME).  The caller must
1229  * initialize *name to NULL.
1230  */
1231 int
1232 kern_getsockname(int s, struct sockaddr **name, int *namelen)
1233 {
1234 	struct thread *td = curthread;
1235 	struct proc *p = td->td_proc;
1236 	struct file *fp;
1237 	struct socket *so;
1238 	struct sockaddr *sa = NULL;
1239 	int error;
1240 
1241 	error = holdsock(p->p_fd, s, &fp);
1242 	if (error)
1243 		return (error);
1244 	if (*namelen < 0) {
1245 		fdrop(fp);
1246 		return (EINVAL);
1247 	}
1248 	so = (struct socket *)fp->f_data;
1249 	error = so_pru_sockaddr(so, &sa);
1250 	if (error == 0) {
1251 		if (sa == NULL) {
1252 			*namelen = 0;
1253 		} else {
1254 			*namelen = MIN(*namelen, sa->sa_len);
1255 			*name = sa;
1256 		}
1257 	}
1258 
1259 	fdrop(fp);
1260 	return (error);
1261 }
1262 
1263 /*
1264  * getsockname_args(int fdes, caddr_t asa, int *alen)
1265  *
1266  * Get socket name.
1267  *
1268  * MPALMOSTSAFE
1269  */
1270 int
1271 sys_getsockname(struct getsockname_args *uap)
1272 {
1273 	struct sockaddr *sa = NULL;
1274 	int error, sa_len;
1275 
1276 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1277 	if (error)
1278 		return (error);
1279 
1280 	error = kern_getsockname(uap->fdes, &sa, &sa_len);
1281 
1282 	if (error == 0)
1283 		error = copyout(sa, uap->asa, sa_len);
1284 	if (error == 0)
1285 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1286 	if (sa)
1287 		kfree(sa, M_SONAME);
1288 	return (error);
1289 }
1290 
1291 /*
1292  * The second argument to kern_getpeername() is a handle to a struct sockaddr.
1293  * This allows kern_getpeername() to return a pointer to an allocated struct
1294  * sockaddr which must be freed later with kfree(..., M_SONAME).  The caller must
1295  * initialize *name to NULL.
1296  */
1297 int
1298 kern_getpeername(int s, struct sockaddr **name, int *namelen)
1299 {
1300 	struct thread *td = curthread;
1301 	struct proc *p = td->td_proc;
1302 	struct file *fp;
1303 	struct socket *so;
1304 	struct sockaddr *sa = NULL;
1305 	int error;
1306 
1307 	error = holdsock(p->p_fd, s, &fp);
1308 	if (error)
1309 		return (error);
1310 	if (*namelen < 0) {
1311 		fdrop(fp);
1312 		return (EINVAL);
1313 	}
1314 	so = (struct socket *)fp->f_data;
1315 	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1316 		fdrop(fp);
1317 		return (ENOTCONN);
1318 	}
1319 	error = so_pru_peeraddr(so, &sa);
1320 	if (error == 0) {
1321 		if (sa == NULL) {
1322 			*namelen = 0;
1323 		} else {
1324 			*namelen = MIN(*namelen, sa->sa_len);
1325 			*name = sa;
1326 		}
1327 	}
1328 
1329 	fdrop(fp);
1330 	return (error);
1331 }
1332 
1333 /*
1334  * getpeername_args(int fdes, caddr_t asa, int *alen)
1335  *
1336  * Get name of peer for connected socket.
1337  *
1338  * MPALMOSTSAFE
1339  */
1340 int
1341 sys_getpeername(struct getpeername_args *uap)
1342 {
1343 	struct sockaddr *sa = NULL;
1344 	int error, sa_len;
1345 
1346 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1347 	if (error)
1348 		return (error);
1349 
1350 	error = kern_getpeername(uap->fdes, &sa, &sa_len);
1351 
1352 	if (error == 0)
1353 		error = copyout(sa, uap->asa, sa_len);
1354 	if (error == 0)
1355 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1356 	if (sa)
1357 		kfree(sa, M_SONAME);
1358 	return (error);
1359 }
1360 
1361 int
1362 getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
1363 {
1364 	struct sockaddr *sa;
1365 	int error;
1366 
1367 	*namp = NULL;
1368 	if (len > SOCK_MAXADDRLEN)
1369 		return ENAMETOOLONG;
1370 	if (len < offsetof(struct sockaddr, sa_data[0]))
1371 		return EDOM;
1372 	sa = kmalloc(len, M_SONAME, M_WAITOK);
1373 	error = copyin(uaddr, sa, len);
1374 	if (error) {
1375 		kfree(sa, M_SONAME);
1376 	} else {
1377 #if BYTE_ORDER != BIG_ENDIAN
1378 		/*
1379 		 * The bind(), connect(), and sendto() syscalls were not
1380 		 * versioned for COMPAT_43.  Thus, this check must stay.
1381 		 */
1382 		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1383 			sa->sa_family = sa->sa_len;
1384 #endif
1385 		sa->sa_len = len;
1386 		*namp = sa;
1387 	}
1388 	return error;
1389 }
1390 
1391 /*
1392  * Detach a mapped page and release resources back to the system.
1393  * We must release our hold and if the object is ripped out
1394  * from under the vm_page we become responsible for freeing the
1395  * page.
1396  *
1397  * MPSAFE
1398  */
1399 static void
1400 sf_buf_mfree(void *arg)
1401 {
1402 	struct sf_buf *sf = arg;
1403 	vm_page_t m;
1404 
1405 	m = sf_buf_page(sf);
1406 	if (sf_buf_free(sf)) {
1407 		/* sf invalid now */
1408 		/*
1409 		vm_page_busy_wait(m, FALSE, "sockpgf");
1410 		vm_page_wakeup(m);
1411 		*/
1412 		vm_page_unhold(m);
1413 #if 0
1414 		if (m->object == NULL &&
1415 		    m->wire_count == 0 &&
1416 		    (m->flags & PG_NEED_COMMIT) == 0) {
1417 			vm_page_free(m);
1418 		} else {
1419 			vm_page_wakeup(m);
1420 		}
1421 #endif
1422 	}
1423 }
1424 
1425 /*
1426  * sendfile(2).
1427  * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1428  *	 struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1429  *
1430  * Send a file specified by 'fd' and starting at 'offset' to a socket
1431  * specified by 's'. Send only 'nbytes' of the file or until EOF if
1432  * nbytes == 0. Optionally add a header and/or trailer to the socket
1433  * output. If specified, write the total number of bytes sent into *sbytes.
1434  *
1435  * In FreeBSD kern/uipc_syscalls.c,v 1.103, a bug was fixed that caused
1436  * the headers to count against the remaining bytes to be sent from
1437  * the file descriptor.  We may wish to implement a compatibility syscall
1438  * in the future.
1439  *
1440  * MPALMOSTSAFE
1441  */
1442 int
1443 sys_sendfile(struct sendfile_args *uap)
1444 {
1445 	struct thread *td = curthread;
1446 	struct proc *p = td->td_proc;
1447 	struct file *fp;
1448 	struct vnode *vp = NULL;
1449 	struct sf_hdtr hdtr;
1450 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1451 	struct uio auio;
1452 	struct mbuf *mheader = NULL;
1453 	size_t hbytes = 0;
1454 	size_t tbytes;
1455 	off_t hdtr_size = 0;
1456 	off_t sbytes;
1457 	int error;
1458 
1459 	KKASSERT(p);
1460 
1461 	/*
1462 	 * Do argument checking. Must be a regular file in, stream
1463 	 * type and connected socket out, positive offset.
1464 	 */
1465 	fp = holdfp(p->p_fd, uap->fd, FREAD);
1466 	if (fp == NULL) {
1467 		return (EBADF);
1468 	}
1469 	if (fp->f_type != DTYPE_VNODE) {
1470 		fdrop(fp);
1471 		return (EINVAL);
1472 	}
1473 	vp = (struct vnode *)fp->f_data;
1474 	vref(vp);
1475 	fdrop(fp);
1476 
1477 	/*
1478 	 * If specified, get the pointer to the sf_hdtr struct for
1479 	 * any headers/trailers.
1480 	 */
1481 	if (uap->hdtr) {
1482 		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1483 		if (error)
1484 			goto done;
1485 		/*
1486 		 * Send any headers.
1487 		 */
1488 		if (hdtr.headers) {
1489 			error = iovec_copyin(hdtr.headers, &iov, aiov,
1490 					     hdtr.hdr_cnt, &hbytes);
1491 			if (error)
1492 				goto done;
1493 			auio.uio_iov = iov;
1494 			auio.uio_iovcnt = hdtr.hdr_cnt;
1495 			auio.uio_offset = 0;
1496 			auio.uio_segflg = UIO_USERSPACE;
1497 			auio.uio_rw = UIO_WRITE;
1498 			auio.uio_td = td;
1499 			auio.uio_resid = hbytes;
1500 
1501 			mheader = m_uiomove(&auio);
1502 
1503 			iovec_free(&iov, aiov);
1504 			if (mheader == NULL)
1505 				goto done;
1506 		}
1507 	}
1508 
1509 	error = kern_sendfile(vp, uap->s, uap->offset, uap->nbytes, mheader,
1510 			      &sbytes, uap->flags);
1511 	if (error)
1512 		goto done;
1513 
1514 	/*
1515 	 * Send trailers. Wimp out and use writev(2).
1516 	 */
1517 	if (uap->hdtr != NULL && hdtr.trailers != NULL) {
1518 		error = iovec_copyin(hdtr.trailers, &iov, aiov,
1519 				     hdtr.trl_cnt, &auio.uio_resid);
1520 		if (error)
1521 			goto done;
1522 		auio.uio_iov = iov;
1523 		auio.uio_iovcnt = hdtr.trl_cnt;
1524 		auio.uio_offset = 0;
1525 		auio.uio_segflg = UIO_USERSPACE;
1526 		auio.uio_rw = UIO_WRITE;
1527 		auio.uio_td = td;
1528 
1529 		error = kern_sendmsg(uap->s, NULL, &auio, NULL, 0, &tbytes);
1530 
1531 		iovec_free(&iov, aiov);
1532 		if (error)
1533 			goto done;
1534 		hdtr_size += tbytes;	/* trailer bytes successfully sent */
1535 	}
1536 
1537 done:
1538 	if (vp)
1539 		vrele(vp);
1540 	if (uap->sbytes != NULL) {
1541 		sbytes += hdtr_size;
1542 		copyout(&sbytes, uap->sbytes, sizeof(off_t));
1543 	}
1544 	return (error);
1545 }
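
/*
 * Minimal userland sketch of the interface documented above: send an
 * entire regular file (nbytes == 0 means "until EOF") over a connected
 * stream socket, prepending a single header iovec.  'filefd' and
 * 'sock' are assumed to be set up already; the helper name is
 * illustrative only.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int
send_file_with_header(int filefd, int sock, void *hdr, size_t hdrlen)
{
	struct iovec hiov;
	struct sf_hdtr hdtr;
	off_t sbytes;

	hiov.iov_base = hdr;
	hiov.iov_len = hdrlen;
	hdtr.headers = &hiov;
	hdtr.hdr_cnt = 1;
	hdtr.trailers = NULL;
	hdtr.trl_cnt = 0;

	if (sendfile(filefd, sock, 0, 0, &hdtr, &sbytes, 0) < 0)
		return (-1);
	/* sbytes now holds the total number of bytes sent */
	return (0);
}
#endif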
1546 
1547 int
1548 kern_sendfile(struct vnode *vp, int sfd, off_t offset, size_t nbytes,
1549 	      struct mbuf *mheader, off_t *sbytes, int flags)
1550 {
1551 	struct thread *td = curthread;
1552 	struct proc *p = td->td_proc;
1553 	struct vm_object *obj;
1554 	struct socket *so;
1555 	struct file *fp;
1556 	struct mbuf *m, *mp;
1557 	struct sf_buf *sf;
1558 	struct vm_page *pg;
1559 	off_t off, xfsize, xbytes;
1560 	off_t hbytes = 0;
1561 	int error = 0;
1562 
1563 	if (vp->v_type != VREG) {
1564 		error = EINVAL;
1565 		goto done0;
1566 	}
1567 	if ((obj = vp->v_object) == NULL) {
1568 		error = EINVAL;
1569 		goto done0;
1570 	}
1571 	error = holdsock(p->p_fd, sfd, &fp);
1572 	if (error)
1573 		goto done0;
1574 	so = (struct socket *)fp->f_data;
1575 	if (so->so_type != SOCK_STREAM) {
1576 		error = EINVAL;
1577 		goto done;
1578 	}
1579 	if ((so->so_state & SS_ISCONNECTED) == 0) {
1580 		error = ENOTCONN;
1581 		goto done;
1582 	}
1583 	if (offset < 0) {
1584 		error = EINVAL;
1585 		goto done;
1586 	}
1587 
1588 	/*
1589 	 * preallocation is required for asynchronous passing of mbufs,
1590 	 * otherwise we can wind up building up an infinite number of
1591 	 * mbufs during the asynchronous latency.
1592 	 */
1593 	if ((so->so_snd.ssb_flags & (SSB_PREALLOC | SSB_STOPSUPP)) == 0) {
1594 		error = EINVAL;
1595 		goto done;
1596 	}
1597 
1598 	*sbytes = 0;
1599 	xbytes = 0;
1600 	/*
1601 	 * Protect against multiple writers to the socket.
1602 	 */
1603 	ssb_lock(&so->so_snd, M_WAITOK);
1604 
1605 	/*
1606 	 * Loop through the pages in the file, starting with the requested
1607 	 * offset. Get a file page (do I/O if necessary), map the file page
1608 	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1609 	 * it on the socket.
1610 	 */
1611 	for (off = offset; ; off += xfsize, *sbytes += xfsize + hbytes, xbytes += xfsize) {
1612 		vm_pindex_t pindex;
1613 		vm_offset_t pgoff;
1614 		long space;
1615 
1616 		pindex = OFF_TO_IDX(off);
1617 retry_lookup:
1618 		/*
1619 		 * Calculate the amount to transfer. Not to exceed a page,
1620 		 * the EOF, or the passed in nbytes.
1621 		 */
1622 		xfsize = vp->v_filesize - off;
1623 		if (xfsize > PAGE_SIZE)
1624 			xfsize = PAGE_SIZE;
1625 		pgoff = (vm_offset_t)(off & PAGE_MASK);
1626 		if (PAGE_SIZE - pgoff < xfsize)
1627 			xfsize = PAGE_SIZE - pgoff;
1628 		if (nbytes && xfsize > (nbytes - xbytes))
1629 			xfsize = nbytes - xbytes;
1630 		if (xfsize <= 0)
1631 			break;
1632 		/*
1633 		 * Optimize the non-blocking case by looking at the socket space
1634 		 * before going to the extra work of constituting the sf_buf.
1635 		 */
1636 		if (so->so_snd.ssb_flags & SSB_PREALLOC)
1637 			space = ssb_space_prealloc(&so->so_snd);
1638 		else
1639 			space = ssb_space(&so->so_snd);
1640 
1641 		if ((fp->f_flag & FNONBLOCK) && space <= 0) {
1642 			if (so->so_state & SS_CANTSENDMORE)
1643 				error = EPIPE;
1644 			else
1645 				error = EAGAIN;
1646 			ssb_unlock(&so->so_snd);
1647 			goto done;
1648 		}
1649 		/*
1650 		 * Attempt to look up the page.
1651 		 *
1652 		 * Allocate if not found, wait and loop if busy, then hold the page.
1653 		 * We hold rather than wire the page because we do not want to prevent
1654 		 * filesystem truncation operations from occurring on the file.  This
1655 		 * can happen even under normal operation if the file being sent is
1656 		 * remove()d after the sendfile() call completes, because the socket buffer
1657 		 * may still be draining.  tmpfs will crash if we try to use wire.
1658 		 */
1659 		vm_object_hold(obj);
1660 		pg = vm_page_lookup_busy_try(obj, pindex, TRUE, &error);
1661 		if (error) {
1662 			vm_page_sleep_busy(pg, TRUE, "sfpbsy");
1663 			vm_object_drop(obj);
1664 			goto retry_lookup;
1665 		}
1666 		if (pg == NULL) {
1667 			pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL |
1668 							VM_ALLOC_NULL_OK);
1669 			if (pg == NULL) {
1670 				vm_wait(0);
1671 				vm_object_drop(obj);
1672 				goto retry_lookup;
1673 			}
1674 		}
1675 		vm_page_hold(pg);
1676 		vm_object_drop(obj);
1677 
1678 		/*
1679 		 * If page is not valid for what we need, initiate I/O
1680 		 */
1681 
1682 		if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
1683 			struct uio auio;
1684 			struct iovec aiov;
1685 			int bsize;
1686 
1687 			/*
1688 			 * Ensure that our page is still around when the I/O
1689 			 * completes.
1690 			 *
1691 			 * Ensure that our page is not modified while part of
1692 			 * a mbuf as this could mess up tcp checksums, DMA,
1693 			 * etc (XXX NEEDS WORK).  The softbusy is supposed to
1694 			 * help here but it actually doesn't.
1695 			 *
1696 			 * XXX THIS HAS MULTIPLE PROBLEMS.  The underlying
1697 			 *     VM pages are not protected by the soft-busy
1698 			 *     unless we vm_page_protect... READ them, and
1699 			 *     they STILL aren't protected against
1700 			 *     modification via the buffer cache (VOP_WRITE).
1701 			 *
1702 			 *     Fixing the second issue is particularly
1703 			 *     difficult.
1704 			 *
1705 			 * XXX We also can't soft-busy anyway because it can
1706 			 *     deadlock against the syncer doing a vfs_msync(),
1707 			 *     vfs_msync->vmntvnodescan->vfs_msync_scan2->
1708 			 *     vm_object_page_clean->(scan)-> ... page
1709 			 *     busy-wait.
1710 			 */
1711 			/*vm_page_io_start(pg);*/
1712 			vm_page_wakeup(pg);
1713 
1714 			/*
1715 			 * Get the page from backing store.
1716 			 */
1717 			bsize = vp->v_mount->mnt_stat.f_iosize;
1718 			auio.uio_iov = &aiov;
1719 			auio.uio_iovcnt = 1;
1720 			aiov.iov_base = 0;
1721 			aiov.iov_len = MAXBSIZE;
1722 			auio.uio_resid = MAXBSIZE;
1723 			auio.uio_offset = trunc_page(off);
1724 			auio.uio_segflg = UIO_NOCOPY;
1725 			auio.uio_rw = UIO_READ;
1726 			auio.uio_td = td;
1727 			vn_lock(vp, LK_SHARED | LK_RETRY);
1728 			error = VOP_READ(vp, &auio,
1729 				    IO_VMIO | ((MAXBSIZE / bsize) << 16),
1730 				    td->td_ucred);
1731 			vn_unlock(vp);
1732 			vm_page_flag_clear(pg, PG_ZERO);
1733 			vm_page_busy_wait(pg, FALSE, "sockpg");
1734 			/*vm_page_io_finish(pg);*/
1735 			if (error) {
1736 				vm_page_wakeup(pg);
1737 				vm_page_unhold(pg);
1738 				/* vm_page_try_to_free(pg); */
1739 				ssb_unlock(&so->so_snd);
1740 				goto done;
1741 			}
1742 		}
1743 
1744 
1745 		/*
1746 		 * Get a sendfile buf. We usually wait as long as necessary,
1747 		 * but this wait can be interrupted.
1748 		 */
1749 		if ((sf = sf_buf_alloc(pg)) == NULL) {
1750 			vm_page_wakeup(pg);
1751 			vm_page_unhold(pg);
1752 			/* vm_page_try_to_free(pg); */
1753 			ssb_unlock(&so->so_snd);
1754 			error = EINTR;
1755 			goto done;
1756 		}
1757 
1758 		/*
1759 		 * Get an mbuf header and set it up as having external storage.
1760 		 */
1761 		MGETHDR(m, M_WAITOK, MT_DATA);
1762 		if (m == NULL) {
1763 			error = ENOBUFS;
1764 			vm_page_wakeup(pg);
1765 			vm_page_unhold(pg);
1766 			/* vm_page_try_to_free(pg); */
1767 			sf_buf_free(sf);
1768 			ssb_unlock(&so->so_snd);
1769 			goto done;
1770 		}
1771 
1772 		vm_page_wakeup(pg);
1773 
1774 		m->m_ext.ext_free = sf_buf_mfree;
1775 		m->m_ext.ext_ref = sf_buf_ref;
1776 		m->m_ext.ext_arg = sf;
1777 		m->m_ext.ext_buf = (void *)sf_buf_kva(sf);
1778 		m->m_ext.ext_size = PAGE_SIZE;
1779 		m->m_data = (char *)sf_buf_kva(sf) + pgoff;
1780 		m->m_flags |= M_EXT;
1781 		m->m_pkthdr.len = m->m_len = xfsize;
1782 		KKASSERT((m->m_flags & (M_EXT_CLUSTER)) == 0);
1783 
1784 		if (mheader != NULL) {
1785 			hbytes = mheader->m_pkthdr.len;
1786 			mheader->m_pkthdr.len += m->m_pkthdr.len;
1787 			m_cat(mheader, m);
1788 			m = mheader;
1789 			mheader = NULL;
1790 		} else
1791 			hbytes = 0;
1792 
1793 		/*
1794 		 * Add the buffer to the socket buffer chain.
1795 		 */
1796 		crit_enter();
1797 retry_space:
1798 		/*
1799 		 * Make sure that the socket is still able to take more data.
1800 		 * CANTSENDMORE being true usually means that the connection
1801 		 * was closed. so_error is true when an error was sensed after
1802 		 * a previous send.
1803 		 * The state is checked after the page mapping and buffer
1804 		 * allocation above since those operations may block and make
1805 		 * any socket checks stale. From this point forward, nothing
1806 		 * blocks before the pru_send (or more accurately, any blocking
1807 		 * results in a loop back to here to re-check).
1808 		 */
1809 		if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
1810 			if (so->so_state & SS_CANTSENDMORE) {
1811 				error = EPIPE;
1812 			} else {
1813 				error = so->so_error;
1814 				so->so_error = 0;
1815 			}
1816 			m_freem(m);
1817 			ssb_unlock(&so->so_snd);
1818 			crit_exit();
1819 			goto done;
1820 		}
1821 		/*
1822 		 * Wait for socket space to become available. We do this just
1823 		 * after checking the connection state above in order to avoid
1824 		 * a race condition with ssb_wait().
1825 		 */
1826 		if (so->so_snd.ssb_flags & SSB_PREALLOC)
1827 			space = ssb_space_prealloc(&so->so_snd);
1828 		else
1829 			space = ssb_space(&so->so_snd);
1830 
1831 		if (space < m->m_pkthdr.len && space < so->so_snd.ssb_lowat) {
1832 			if (fp->f_flag & FNONBLOCK) {
1833 				m_freem(m);
1834 				ssb_unlock(&so->so_snd);
1835 				crit_exit();
1836 				error = EAGAIN;
1837 				goto done;
1838 			}
1839 			error = ssb_wait(&so->so_snd);
1840 			/*
1841 			 * An error from ssb_wait usually indicates that we've
1842 			 * been interrupted by a signal. If we've sent anything
1843 			 * then return bytes sent, otherwise return the error.
1844 			 */
1845 			if (error) {
1846 				m_freem(m);
1847 				ssb_unlock(&so->so_snd);
1848 				crit_exit();
1849 				goto done;
1850 			}
1851 			goto retry_space;
1852 		}
1853 
1854 		if (so->so_snd.ssb_flags & SSB_PREALLOC) {
1855 			for (mp = m; mp != NULL; mp = mp->m_next)
1856 				ssb_preallocstream(&so->so_snd, mp);
1857 		}
1858 		if (use_sendfile_async)
1859 			error = so_pru_senda(so, 0, m, NULL, NULL, td);
1860 		else
1861 			error = so_pru_send(so, 0, m, NULL, NULL, td);
1862 
1863 		crit_exit();
1864 		if (error) {
1865 			ssb_unlock(&so->so_snd);
1866 			goto done;
1867 		}
1868 	}
1869 	if (mheader != NULL) {
1870 		*sbytes += mheader->m_pkthdr.len;
1871 
1872 		if (so->so_snd.ssb_flags & SSB_PREALLOC) {
1873 			for (mp = mheader; mp != NULL; mp = mp->m_next)
1874 				ssb_preallocstream(&so->so_snd, mp);
1875 		}
1876 		if (use_sendfile_async)
1877 			error = so_pru_senda(so, 0, mheader, NULL, NULL, td);
1878 		else
1879 			error = so_pru_send(so, 0, mheader, NULL, NULL, td);
1880 
1881 		mheader = NULL;
1882 	}
1883 	ssb_unlock(&so->so_snd);
1884 
1885 done:
1886 	fdrop(fp);
1887 done0:
1888 	if (mheader != NULL)
1889 		m_freem(mheader);
1890 	return (error);
1891 }
1892