xref: /onnv-gate/usr/src/uts/common/fs/sockfs/socksyscalls.c (revision 3415:2c8fda3c38c1)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51548Srshoaib  * Common Development and Distribution License (the "License").
61548Srshoaib  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
211548Srshoaib 
220Sstevel@tonic-gate /*
23*3415Samehta  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
240Sstevel@tonic-gate  * Use is subject to license terms.
250Sstevel@tonic-gate  */
260Sstevel@tonic-gate 
270Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
280Sstevel@tonic-gate 
290Sstevel@tonic-gate #include <sys/types.h>
300Sstevel@tonic-gate #include <sys/t_lock.h>
310Sstevel@tonic-gate #include <sys/param.h>
320Sstevel@tonic-gate #include <sys/systm.h>
330Sstevel@tonic-gate #include <sys/buf.h>
340Sstevel@tonic-gate #include <sys/conf.h>
350Sstevel@tonic-gate #include <sys/cred.h>
360Sstevel@tonic-gate #include <sys/kmem.h>
370Sstevel@tonic-gate #include <sys/sysmacros.h>
380Sstevel@tonic-gate #include <sys/vfs.h>
390Sstevel@tonic-gate #include <sys/vnode.h>
400Sstevel@tonic-gate #include <sys/debug.h>
410Sstevel@tonic-gate #include <sys/errno.h>
420Sstevel@tonic-gate #include <sys/time.h>
430Sstevel@tonic-gate #include <sys/file.h>
440Sstevel@tonic-gate #include <sys/user.h>
450Sstevel@tonic-gate #include <sys/stream.h>
460Sstevel@tonic-gate #include <sys/strsubr.h>
470Sstevel@tonic-gate #include <sys/strsun.h>
480Sstevel@tonic-gate #include <sys/esunddi.h>
490Sstevel@tonic-gate #include <sys/flock.h>
500Sstevel@tonic-gate #include <sys/modctl.h>
510Sstevel@tonic-gate #include <sys/cmn_err.h>
520Sstevel@tonic-gate #include <sys/vmsystm.h>
530Sstevel@tonic-gate #include <sys/policy.h>
540Sstevel@tonic-gate 
550Sstevel@tonic-gate #include <sys/socket.h>
560Sstevel@tonic-gate #include <sys/socketvar.h>
570Sstevel@tonic-gate 
580Sstevel@tonic-gate #include <sys/isa_defs.h>
590Sstevel@tonic-gate #include <sys/inttypes.h>
600Sstevel@tonic-gate #include <sys/systm.h>
610Sstevel@tonic-gate #include <sys/cpuvar.h>
620Sstevel@tonic-gate #include <sys/filio.h>
630Sstevel@tonic-gate #include <sys/sendfile.h>
640Sstevel@tonic-gate #include <sys/ddi.h>
650Sstevel@tonic-gate #include <vm/seg.h>
660Sstevel@tonic-gate #include <vm/seg_map.h>
670Sstevel@tonic-gate #include <vm/seg_kpm.h>
680Sstevel@tonic-gate #include <fs/sockfs/nl7c.h>
690Sstevel@tonic-gate 
700Sstevel@tonic-gate #ifdef SOCK_TEST
710Sstevel@tonic-gate int do_useracc = 1;		/* Controlled by setting SO_DEBUG to 4 */
720Sstevel@tonic-gate #else
730Sstevel@tonic-gate #define	do_useracc	1
740Sstevel@tonic-gate #endif /* SOCK_TEST */
750Sstevel@tonic-gate 
760Sstevel@tonic-gate extern int xnet_truncate_print;
770Sstevel@tonic-gate 
780Sstevel@tonic-gate /*
790Sstevel@tonic-gate  * Note: MSG_MAXIOVLEN is defined here much as DEF_IOV_MAX is in
800Sstevel@tonic-gate  *	 "fs/vncalls.c", since there is no formal kernel definition of IOV_MAX.
810Sstevel@tonic-gate  */
820Sstevel@tonic-gate #define	MSG_MAXIOVLEN	16
830Sstevel@tonic-gate 
840Sstevel@tonic-gate /*
850Sstevel@tonic-gate  * Kernel component of socket creation.
860Sstevel@tonic-gate  *
870Sstevel@tonic-gate  * The socket library determines which version number to use.
880Sstevel@tonic-gate  * First the library calls this with a NULL devpath. If this fails
890Sstevel@tonic-gate  * to find a transport (using solookup) the library will look in /etc/netconfig
900Sstevel@tonic-gate  * for the appropriate transport. If one is found it will pass in the
910Sstevel@tonic-gate  * devpath for the kernel to use.
920Sstevel@tonic-gate  */
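/*
 * Illustrative sketch of the userland flow described above; the wrapper
 * name and exact fallback condition are assumptions, not taken from the
 * library source:
 *
 *	fd = _so_socket(domain, type, protocol, NULL, version);
 *	if (fd == -1 && no transport was found) {
 *		devpath = device path for the transport from /etc/netconfig;
 *		fd = _so_socket(domain, type, protocol, devpath, version);
 *	}
 */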
930Sstevel@tonic-gate int
940Sstevel@tonic-gate so_socket(int domain, int type, int protocol, char *devpath, int version)
950Sstevel@tonic-gate {
960Sstevel@tonic-gate 	vnode_t *accessvp;
970Sstevel@tonic-gate 	struct sonode *so;
980Sstevel@tonic-gate 	vnode_t *vp;
990Sstevel@tonic-gate 	struct file *fp;
1000Sstevel@tonic-gate 	int fd;
1010Sstevel@tonic-gate 	int error;
1020Sstevel@tonic-gate 	boolean_t wildcard = B_FALSE;
1030Sstevel@tonic-gate 	int saved_error = 0;
1040Sstevel@tonic-gate 	int sdomain = domain;
1050Sstevel@tonic-gate 
1060Sstevel@tonic-gate 	dprint(1, ("so_socket(%d,%d,%d,%p,%d)\n",
1070Sstevel@tonic-gate 		domain, type, protocol, devpath, version));
1080Sstevel@tonic-gate 
1090Sstevel@tonic-gate 	if (domain == AF_NCA) {
1100Sstevel@tonic-gate 		/*
1110Sstevel@tonic-gate 		 * The request is for an NCA socket, so for NL7C use the
1120Sstevel@tonic-gate 		 * INET domain instead and mark NL7C_AF_NCA below.
1130Sstevel@tonic-gate 		 */
1140Sstevel@tonic-gate 		domain = AF_INET;
1150Sstevel@tonic-gate 		/*
1160Sstevel@tonic-gate 		 * NL7C is not supported in non-global zones;
1170Sstevel@tonic-gate 		 * we enforce this restriction here.
1180Sstevel@tonic-gate 		 */
1190Sstevel@tonic-gate 		if (getzoneid() != GLOBAL_ZONEID) {
1200Sstevel@tonic-gate 			return (set_errno(ENOTSUP));
1210Sstevel@tonic-gate 		}
1220Sstevel@tonic-gate 	}
1230Sstevel@tonic-gate 
1240Sstevel@tonic-gate 	accessvp = solookup(domain, type, protocol, devpath, &error);
1250Sstevel@tonic-gate 	if (accessvp == NULL) {
1260Sstevel@tonic-gate 		/*
1270Sstevel@tonic-gate 		 * If the error is either EPROTONOSUPPORT or EPROTOTYPE,
1280Sstevel@tonic-gate 		 * it makes sense to do the wildcard lookup since the
1290Sstevel@tonic-gate 		 * protocol might not be in the table.
1300Sstevel@tonic-gate 		 */
1310Sstevel@tonic-gate 		if (devpath != NULL || protocol == 0 ||
1320Sstevel@tonic-gate 		    !(error == EPROTONOSUPPORT || error == EPROTOTYPE))
1330Sstevel@tonic-gate 			return (set_errno(error));
1340Sstevel@tonic-gate 
1350Sstevel@tonic-gate 		saved_error = error;
1360Sstevel@tonic-gate 
1370Sstevel@tonic-gate 		/*
1380Sstevel@tonic-gate 		 * Try wildcard lookup. Never use devpath for wildcards.
1390Sstevel@tonic-gate 		 */
1400Sstevel@tonic-gate 		accessvp = solookup(domain, type, 0, NULL, &error);
1410Sstevel@tonic-gate 		if (accessvp == NULL) {
1420Sstevel@tonic-gate 			/*
1430Sstevel@tonic-gate 			 * Can't find in kernel table - have library
1440Sstevel@tonic-gate 			 * fall back to /etc/netconfig and tell us
1450Sstevel@tonic-gate 			 * the devpath (The library will do this if it didn't
1460Sstevel@tonic-gate 			 * already pass in a devpath).
1470Sstevel@tonic-gate 			 */
1480Sstevel@tonic-gate 			if (saved_error != 0)
1490Sstevel@tonic-gate 				error = saved_error;
1500Sstevel@tonic-gate 			return (set_errno(error));
1510Sstevel@tonic-gate 		}
1520Sstevel@tonic-gate 		wildcard = B_TRUE;
1530Sstevel@tonic-gate 	}
1540Sstevel@tonic-gate 
1550Sstevel@tonic-gate 	/* Check the device policy */
1560Sstevel@tonic-gate 	if ((error = secpolicy_spec_open(CRED(),
1570Sstevel@tonic-gate 	    accessvp, FREAD|FWRITE)) != 0) {
1580Sstevel@tonic-gate 		return (set_errno(error));
1590Sstevel@tonic-gate 	}
1600Sstevel@tonic-gate 
1611548Srshoaib 	if (protocol == IPPROTO_SCTP) {
1620Sstevel@tonic-gate 		so = sosctp_create(accessvp, domain, type, protocol, version,
1630Sstevel@tonic-gate 		    NULL, &error);
1640Sstevel@tonic-gate 	} else {
1650Sstevel@tonic-gate 		so = sotpi_create(accessvp, domain, type, protocol, version,
1660Sstevel@tonic-gate 		    NULL, &error);
1670Sstevel@tonic-gate 	}
1680Sstevel@tonic-gate 	if (so == NULL) {
1690Sstevel@tonic-gate 		return (set_errno(error));
1700Sstevel@tonic-gate 	}
1710Sstevel@tonic-gate 	if (sdomain == AF_NCA && domain == AF_INET) {
1720Sstevel@tonic-gate 		so->so_nl7c_flags = NL7C_AF_NCA;
1730Sstevel@tonic-gate 	}
1740Sstevel@tonic-gate 	vp = SOTOV(so);
1750Sstevel@tonic-gate 
1760Sstevel@tonic-gate 	if (wildcard) {
1770Sstevel@tonic-gate 		/*
1780Sstevel@tonic-gate 		 * Issue SO_PROTOTYPE setsockopt.
1790Sstevel@tonic-gate 		 */
1800Sstevel@tonic-gate 		error = SOP_SETSOCKOPT(so, SOL_SOCKET, SO_PROTOTYPE,
1810Sstevel@tonic-gate 				&protocol,
1820Sstevel@tonic-gate 				(t_uscalar_t)sizeof (protocol));
1830Sstevel@tonic-gate 		if (error) {
1840Sstevel@tonic-gate 			(void) VOP_CLOSE(vp, 0, 1, 0, CRED());
1850Sstevel@tonic-gate 			VN_RELE(vp);
1860Sstevel@tonic-gate 			/*
1870Sstevel@tonic-gate 			 * Setsockopt often fails with ENOPROTOOPT but socket()
1880Sstevel@tonic-gate 			 * should fail with EPROTONOSUPPORT/EPROTOTYPE.
1890Sstevel@tonic-gate 			 */
1900Sstevel@tonic-gate 			if (saved_error != 0 && error == ENOPROTOOPT)
1910Sstevel@tonic-gate 				error = saved_error;
1920Sstevel@tonic-gate 			else
1930Sstevel@tonic-gate 				error = EPROTONOSUPPORT;
1940Sstevel@tonic-gate 			return (set_errno(error));
1950Sstevel@tonic-gate 		}
1960Sstevel@tonic-gate 	}
1970Sstevel@tonic-gate 	if (error = falloc(vp, FWRITE|FREAD, &fp, &fd)) {
1980Sstevel@tonic-gate 		(void) VOP_CLOSE(vp, 0, 1, 0, CRED());
1990Sstevel@tonic-gate 		VN_RELE(vp);
2000Sstevel@tonic-gate 		return (set_errno(error));
2010Sstevel@tonic-gate 	}
2020Sstevel@tonic-gate 
2030Sstevel@tonic-gate 	/*
2040Sstevel@tonic-gate 	 * Now fill in the entries that falloc reserved
2050Sstevel@tonic-gate 	 */
2060Sstevel@tonic-gate 	mutex_exit(&fp->f_tlock);
2070Sstevel@tonic-gate 	setf(fd, fp);
2080Sstevel@tonic-gate 
2090Sstevel@tonic-gate 	return (fd);
2100Sstevel@tonic-gate }
2110Sstevel@tonic-gate 
2120Sstevel@tonic-gate /*
2130Sstevel@tonic-gate  * Map from a file descriptor to a socket node.
2140Sstevel@tonic-gate  * Returns with the file descriptor held, i.e., the caller has to
2150Sstevel@tonic-gate  * call releasef() when done with the file descriptor.
2160Sstevel@tonic-gate  */
2170Sstevel@tonic-gate static struct sonode *
2180Sstevel@tonic-gate getsonode(int sock, int *errorp, file_t **fpp)
2190Sstevel@tonic-gate {
2200Sstevel@tonic-gate 	file_t *fp;
2210Sstevel@tonic-gate 	vnode_t *vp;
2220Sstevel@tonic-gate 	struct sonode *so;
2230Sstevel@tonic-gate 
2240Sstevel@tonic-gate 	if ((fp = getf(sock)) == NULL) {
2250Sstevel@tonic-gate 		*errorp = EBADF;
2260Sstevel@tonic-gate 		eprintline(*errorp);
2270Sstevel@tonic-gate 		return (NULL);
2280Sstevel@tonic-gate 	}
2290Sstevel@tonic-gate 	vp = fp->f_vnode;
2300Sstevel@tonic-gate 	/* Check if it is a socket */
2310Sstevel@tonic-gate 	if (vp->v_type != VSOCK) {
2320Sstevel@tonic-gate 		releasef(sock);
2330Sstevel@tonic-gate 		*errorp = ENOTSOCK;
2340Sstevel@tonic-gate 		eprintline(*errorp);
2350Sstevel@tonic-gate 		return (NULL);
2360Sstevel@tonic-gate 	}
2370Sstevel@tonic-gate 	/*
2380Sstevel@tonic-gate 	 * Use the stream head to find the real socket vnode.
2390Sstevel@tonic-gate 	 * This is needed when namefs sits above sockfs.
2400Sstevel@tonic-gate 	 */
2410Sstevel@tonic-gate 	if (vp->v_stream) {
2420Sstevel@tonic-gate 		ASSERT(vp->v_stream->sd_vnode);
2430Sstevel@tonic-gate 		vp = vp->v_stream->sd_vnode;
2440Sstevel@tonic-gate 
2450Sstevel@tonic-gate 		so = VTOSO(vp);
2460Sstevel@tonic-gate 		if (so->so_version == SOV_STREAM) {
2470Sstevel@tonic-gate 			releasef(sock);
2480Sstevel@tonic-gate 			*errorp = ENOTSOCK;
2490Sstevel@tonic-gate 			eprintsoline(so, *errorp);
2500Sstevel@tonic-gate 			return (NULL);
2510Sstevel@tonic-gate 		}
2520Sstevel@tonic-gate 	} else {
2530Sstevel@tonic-gate 		so = VTOSO(vp);
2540Sstevel@tonic-gate 	}
2550Sstevel@tonic-gate 	if (fpp)
2560Sstevel@tonic-gate 		*fpp = fp;
2570Sstevel@tonic-gate 	return (so);
2580Sstevel@tonic-gate }
2590Sstevel@tonic-gate 
2600Sstevel@tonic-gate /*
2610Sstevel@tonic-gate  * Allocate and copyin a sockaddr.
2620Sstevel@tonic-gate  * Ensures NULL termination for AF_UNIX addresses by extending them
2630Sstevel@tonic-gate  * with one NULL byte if need be. Verifies that the length is not
2640Sstevel@tonic-gate  * excessive to prevent an application from consuming all of kernel
2650Sstevel@tonic-gate  * memory. Returns NULL when an error occurs.
2660Sstevel@tonic-gate  */
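/*
 * Example (illustrative): an AF_UNIX address of { AF_UNIX, 'a', 'b', 'c' }
 * with *namelenp == 5 contains no NUL after sa_family, so the copy below is
 * reallocated one byte larger, NUL terminated, and *namelenp becomes 6.
 */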
2670Sstevel@tonic-gate static struct sockaddr *
2680Sstevel@tonic-gate copyin_name(struct sonode *so, struct sockaddr *name, socklen_t *namelenp,
2690Sstevel@tonic-gate 	    int *errorp)
2700Sstevel@tonic-gate {
2710Sstevel@tonic-gate 	char	*faddr;
2720Sstevel@tonic-gate 	size_t	namelen = (size_t)*namelenp;
2730Sstevel@tonic-gate 
2740Sstevel@tonic-gate 	ASSERT(namelen != 0);
2750Sstevel@tonic-gate 	if (namelen > SO_MAXARGSIZE) {
2760Sstevel@tonic-gate 		*errorp = EINVAL;
2770Sstevel@tonic-gate 		eprintsoline(so, *errorp);
2780Sstevel@tonic-gate 		return (NULL);
2790Sstevel@tonic-gate 	}
2800Sstevel@tonic-gate 
2810Sstevel@tonic-gate 	faddr = (char *)kmem_alloc(namelen, KM_SLEEP);
2820Sstevel@tonic-gate 	if (copyin(name, faddr, namelen)) {
2830Sstevel@tonic-gate 		kmem_free(faddr, namelen);
2840Sstevel@tonic-gate 		*errorp = EFAULT;
2850Sstevel@tonic-gate 		eprintsoline(so, *errorp);
2860Sstevel@tonic-gate 		return (NULL);
2870Sstevel@tonic-gate 	}
2880Sstevel@tonic-gate 
2890Sstevel@tonic-gate 	/*
2900Sstevel@tonic-gate 	 * Add space for NULL termination if needed.
2910Sstevel@tonic-gate 	 * Do a quick check if the last byte is NUL.
2920Sstevel@tonic-gate 	 */
2930Sstevel@tonic-gate 	if (so->so_family == AF_UNIX && faddr[namelen - 1] != '\0') {
2940Sstevel@tonic-gate 		/* Check if there is any NULL termination */
2950Sstevel@tonic-gate 		size_t	i;
2960Sstevel@tonic-gate 		int foundnull = 0;
2970Sstevel@tonic-gate 
2980Sstevel@tonic-gate 		for (i = sizeof (name->sa_family); i < namelen; i++) {
2990Sstevel@tonic-gate 			if (faddr[i] == '\0') {
3000Sstevel@tonic-gate 				foundnull = 1;
3010Sstevel@tonic-gate 				break;
3020Sstevel@tonic-gate 			}
3030Sstevel@tonic-gate 		}
3040Sstevel@tonic-gate 		if (!foundnull) {
3050Sstevel@tonic-gate 			/* Add extra byte for NUL padding */
3060Sstevel@tonic-gate 			char *nfaddr;
3070Sstevel@tonic-gate 
3080Sstevel@tonic-gate 			nfaddr = (char *)kmem_alloc(namelen + 1, KM_SLEEP);
3090Sstevel@tonic-gate 			bcopy(faddr, nfaddr, namelen);
3100Sstevel@tonic-gate 			kmem_free(faddr, namelen);
3110Sstevel@tonic-gate 
3120Sstevel@tonic-gate 			/* NUL terminate */
3130Sstevel@tonic-gate 			nfaddr[namelen] = '\0';
3140Sstevel@tonic-gate 			namelen++;
3150Sstevel@tonic-gate 			ASSERT((socklen_t)namelen == namelen);
3160Sstevel@tonic-gate 			*namelenp = (socklen_t)namelen;
3170Sstevel@tonic-gate 			faddr = nfaddr;
3180Sstevel@tonic-gate 		}
3190Sstevel@tonic-gate 	}
3200Sstevel@tonic-gate 	return ((struct sockaddr *)faddr);
3210Sstevel@tonic-gate }
3220Sstevel@tonic-gate 
3230Sstevel@tonic-gate /*
3240Sstevel@tonic-gate  * Copy from kaddr/klen to uaddr/ulen. Updates ulenp if non-NULL.
3250Sstevel@tonic-gate  */
3260Sstevel@tonic-gate static int
3270Sstevel@tonic-gate copyout_arg(void *uaddr, socklen_t ulen, void *ulenp,
3280Sstevel@tonic-gate 		void *kaddr, socklen_t klen)
3290Sstevel@tonic-gate {
3300Sstevel@tonic-gate 	if (uaddr != NULL) {
3310Sstevel@tonic-gate 		if (ulen > klen)
3320Sstevel@tonic-gate 			ulen = klen;
3330Sstevel@tonic-gate 
3340Sstevel@tonic-gate 		if (ulen != 0) {
3350Sstevel@tonic-gate 			if (copyout(kaddr, uaddr, ulen))
3360Sstevel@tonic-gate 				return (EFAULT);
3370Sstevel@tonic-gate 		}
3380Sstevel@tonic-gate 	} else
3390Sstevel@tonic-gate 		ulen = 0;
3400Sstevel@tonic-gate 
3410Sstevel@tonic-gate 	if (ulenp != NULL) {
3420Sstevel@tonic-gate 		if (copyout(&ulen, ulenp, sizeof (ulen)))
3430Sstevel@tonic-gate 			return (EFAULT);
3440Sstevel@tonic-gate 	}
3450Sstevel@tonic-gate 	return (0);
3460Sstevel@tonic-gate }
3470Sstevel@tonic-gate 
3480Sstevel@tonic-gate /*
3490Sstevel@tonic-gate  * Copy from kaddr/klen to uaddr/ulen. Updates ulenp if non-NULL.
3500Sstevel@tonic-gate  * If klen is greater than ulen it still uses the non-truncated
3510Sstevel@tonic-gate  * klen to update ulenp.
3520Sstevel@tonic-gate  */
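/*
 * Example (illustrative): with klen == 16 (e.g. a sockaddr_in) and a user
 * buffer of ulen == 8, only 8 bytes are copied to uaddr but the full 16 is
 * written back through ulenp, so the caller can detect the truncation.
 * copyout_arg() above instead writes back the truncated length.
 */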
3530Sstevel@tonic-gate static int
3540Sstevel@tonic-gate copyout_name(void *uaddr, socklen_t ulen, void *ulenp,
3550Sstevel@tonic-gate 		void *kaddr, socklen_t klen)
3560Sstevel@tonic-gate {
3570Sstevel@tonic-gate 	if (uaddr != NULL) {
3580Sstevel@tonic-gate 		if (ulen >= klen)
3590Sstevel@tonic-gate 			ulen = klen;
3600Sstevel@tonic-gate 		else if (ulen != 0 && xnet_truncate_print) {
3610Sstevel@tonic-gate 			printf("sockfs: truncating copyout of address using "
3620Sstevel@tonic-gate 			    "XNET semantics for pid = %d. Lengths %d, %d\n",
3630Sstevel@tonic-gate 			    curproc->p_pid, klen, ulen);
3640Sstevel@tonic-gate 		}
3650Sstevel@tonic-gate 
3660Sstevel@tonic-gate 		if (ulen != 0) {
3670Sstevel@tonic-gate 			if (copyout(kaddr, uaddr, ulen))
3680Sstevel@tonic-gate 				return (EFAULT);
3690Sstevel@tonic-gate 		} else
3700Sstevel@tonic-gate 			klen = 0;
3710Sstevel@tonic-gate 	} else
3720Sstevel@tonic-gate 		klen = 0;
3730Sstevel@tonic-gate 
3740Sstevel@tonic-gate 	if (ulenp != NULL) {
3750Sstevel@tonic-gate 		if (copyout(&klen, ulenp, sizeof (klen)))
3760Sstevel@tonic-gate 			return (EFAULT);
3770Sstevel@tonic-gate 	}
3780Sstevel@tonic-gate 	return (0);
3790Sstevel@tonic-gate }
3800Sstevel@tonic-gate 
3810Sstevel@tonic-gate /*
3820Sstevel@tonic-gate  * The socketpair() code in libsocket creates two sockets (using
3830Sstevel@tonic-gate  * the /etc/netconfig fallback if needed) before calling this routine
3840Sstevel@tonic-gate  * to connect the two sockets together.
3850Sstevel@tonic-gate  *
3860Sstevel@tonic-gate  * For a SOCK_STREAM socketpair a listener is needed - in that case this
3870Sstevel@tonic-gate  * routine will create a new file descriptor as part of accepting the
3880Sstevel@tonic-gate  * connection. The library socketpair() will check if sv[] has changed,
3890Sstevel@tonic-gate  * in which case it will close the changed fd.
3900Sstevel@tonic-gate  *
3910Sstevel@tonic-gate  * Note that this code could use the TPI feature of accepting the connection
3920Sstevel@tonic-gate  * on the listening endpoint. However, that would require significant changes
3930Sstevel@tonic-gate  * to soaccept.
3940Sstevel@tonic-gate  */
3950Sstevel@tonic-gate int
3960Sstevel@tonic-gate so_socketpair(int sv[2])
3970Sstevel@tonic-gate {
3980Sstevel@tonic-gate 	int svs[2];
3990Sstevel@tonic-gate 	struct sonode *so1, *so2;
4000Sstevel@tonic-gate 	int error;
4010Sstevel@tonic-gate 	struct sockaddr_ux *name;
4020Sstevel@tonic-gate 	size_t namelen;
4030Sstevel@tonic-gate 
4040Sstevel@tonic-gate 	dprint(1, ("so_socketpair(%p)\n", sv));
4050Sstevel@tonic-gate 
4060Sstevel@tonic-gate 	error = useracc(sv, sizeof (svs), B_WRITE);
4070Sstevel@tonic-gate 	if (error && do_useracc)
4080Sstevel@tonic-gate 		return (set_errno(EFAULT));
4090Sstevel@tonic-gate 
4100Sstevel@tonic-gate 	if (copyin(sv, svs, sizeof (svs)))
4110Sstevel@tonic-gate 		return (set_errno(EFAULT));
4120Sstevel@tonic-gate 
4130Sstevel@tonic-gate 	if ((so1 = getsonode(svs[0], &error, NULL)) == NULL)
4140Sstevel@tonic-gate 		return (set_errno(error));
4150Sstevel@tonic-gate 
4160Sstevel@tonic-gate 	if ((so2 = getsonode(svs[1], &error, NULL)) == NULL) {
4170Sstevel@tonic-gate 		releasef(svs[0]);
4180Sstevel@tonic-gate 		return (set_errno(error));
4190Sstevel@tonic-gate 	}
4200Sstevel@tonic-gate 
4210Sstevel@tonic-gate 	if (so1->so_family != AF_UNIX || so2->so_family != AF_UNIX) {
4220Sstevel@tonic-gate 		error = EOPNOTSUPP;
4230Sstevel@tonic-gate 		goto done;
4240Sstevel@tonic-gate 	}
4250Sstevel@tonic-gate 
4260Sstevel@tonic-gate 	/*
4270Sstevel@tonic-gate 	 * The code below makes assumptions about the "sockfs" implementation.
4280Sstevel@tonic-gate 	 * So make sure that the correct implementation is really used.
4290Sstevel@tonic-gate 	 */
4300Sstevel@tonic-gate 	ASSERT(so1->so_ops == &sotpi_sonodeops);
4310Sstevel@tonic-gate 	ASSERT(so2->so_ops == &sotpi_sonodeops);
4320Sstevel@tonic-gate 
4330Sstevel@tonic-gate 	if (so1->so_type == SOCK_DGRAM) {
4340Sstevel@tonic-gate 		/*
4350Sstevel@tonic-gate 		 * Bind both sockets and connect them with each other.
4360Sstevel@tonic-gate 		 * Need to allocate name/namelen for soconnect.
4370Sstevel@tonic-gate 		 */
4380Sstevel@tonic-gate 		error = SOP_BIND(so1, NULL, 0, _SOBIND_UNSPEC);
4390Sstevel@tonic-gate 		if (error) {
4400Sstevel@tonic-gate 			eprintsoline(so1, error);
4410Sstevel@tonic-gate 			goto done;
4420Sstevel@tonic-gate 		}
4430Sstevel@tonic-gate 		error = SOP_BIND(so2, NULL, 0, _SOBIND_UNSPEC);
4440Sstevel@tonic-gate 		if (error) {
4450Sstevel@tonic-gate 			eprintsoline(so2, error);
4460Sstevel@tonic-gate 			goto done;
4470Sstevel@tonic-gate 		}
4480Sstevel@tonic-gate 		namelen = sizeof (struct sockaddr_ux);
4490Sstevel@tonic-gate 		name = kmem_alloc(namelen, KM_SLEEP);
4500Sstevel@tonic-gate 		name->sou_family = AF_UNIX;
4510Sstevel@tonic-gate 		name->sou_addr = so2->so_ux_laddr;
4520Sstevel@tonic-gate 		error = SOP_CONNECT(so1,
4530Sstevel@tonic-gate 				(struct sockaddr *)name,
4540Sstevel@tonic-gate 				(socklen_t)namelen,
4550Sstevel@tonic-gate 				0, _SOCONNECT_NOXLATE);
4560Sstevel@tonic-gate 		if (error) {
4570Sstevel@tonic-gate 			kmem_free(name, namelen);
4580Sstevel@tonic-gate 			eprintsoline(so1, error);
4590Sstevel@tonic-gate 			goto done;
4600Sstevel@tonic-gate 		}
4610Sstevel@tonic-gate 		name->sou_addr = so1->so_ux_laddr;
4620Sstevel@tonic-gate 		error = SOP_CONNECT(so2,
4630Sstevel@tonic-gate 				(struct sockaddr *)name,
4640Sstevel@tonic-gate 				(socklen_t)namelen,
4650Sstevel@tonic-gate 				0, _SOCONNECT_NOXLATE);
4660Sstevel@tonic-gate 		kmem_free(name, namelen);
4670Sstevel@tonic-gate 		if (error) {
4680Sstevel@tonic-gate 			eprintsoline(so2, error);
4690Sstevel@tonic-gate 			goto done;
4700Sstevel@tonic-gate 		}
4710Sstevel@tonic-gate 		releasef(svs[0]);
4720Sstevel@tonic-gate 		releasef(svs[1]);
4730Sstevel@tonic-gate 	} else {
4740Sstevel@tonic-gate 		/*
4750Sstevel@tonic-gate 		 * Bind both sockets, with so1 being a listener.
4760Sstevel@tonic-gate 		 * Connect so2 to so1 - nonblocking to avoid waiting for
4770Sstevel@tonic-gate 		 * soaccept to complete.
4780Sstevel@tonic-gate 		 * Accept a connection on so1. Pass out the new fd as sv[0].
4790Sstevel@tonic-gate 		 * The library will detect the changed fd and close
4800Sstevel@tonic-gate 		 * the original one.
4810Sstevel@tonic-gate 		 */
4820Sstevel@tonic-gate 		struct sonode *nso;
4830Sstevel@tonic-gate 		struct vnode *nvp;
4840Sstevel@tonic-gate 		struct file *nfp;
4850Sstevel@tonic-gate 		int nfd;
4860Sstevel@tonic-gate 
4870Sstevel@tonic-gate 		/*
4880Sstevel@tonic-gate 		 * We could simply call SOP_LISTEN() here (which would do the
4890Sstevel@tonic-gate 		 * binding automatically) if the code didn't rely on passing
4900Sstevel@tonic-gate 		 * _SOBIND_NOXLATE to the TPI implementation of SOP_BIND().
4910Sstevel@tonic-gate 		 */
4920Sstevel@tonic-gate 		error = SOP_BIND(so1, NULL, 0, _SOBIND_UNSPEC|_SOBIND_NOXLATE|
4930Sstevel@tonic-gate 		    _SOBIND_LISTEN|_SOBIND_SOCKETPAIR);
4940Sstevel@tonic-gate 		if (error) {
4950Sstevel@tonic-gate 			eprintsoline(so1, error);
4960Sstevel@tonic-gate 			goto done;
4970Sstevel@tonic-gate 		}
4980Sstevel@tonic-gate 		error = SOP_BIND(so2, NULL, 0, _SOBIND_UNSPEC);
4990Sstevel@tonic-gate 		if (error) {
5000Sstevel@tonic-gate 			eprintsoline(so2, error);
5010Sstevel@tonic-gate 			goto done;
5020Sstevel@tonic-gate 		}
5030Sstevel@tonic-gate 
5040Sstevel@tonic-gate 		namelen = sizeof (struct sockaddr_ux);
5050Sstevel@tonic-gate 		name = kmem_alloc(namelen, KM_SLEEP);
5060Sstevel@tonic-gate 		name->sou_family = AF_UNIX;
5070Sstevel@tonic-gate 		name->sou_addr = so1->so_ux_laddr;
5080Sstevel@tonic-gate 		error = SOP_CONNECT(so2,
5090Sstevel@tonic-gate 				(struct sockaddr *)name,
5100Sstevel@tonic-gate 				(socklen_t)namelen,
5110Sstevel@tonic-gate 				FNONBLOCK, _SOCONNECT_NOXLATE);
5120Sstevel@tonic-gate 		kmem_free(name, namelen);
5130Sstevel@tonic-gate 		if (error) {
5140Sstevel@tonic-gate 			if (error != EINPROGRESS) {
5150Sstevel@tonic-gate 				eprintsoline(so2, error);
5160Sstevel@tonic-gate 				goto done;
5170Sstevel@tonic-gate 			}
5180Sstevel@tonic-gate 		}
5190Sstevel@tonic-gate 
5200Sstevel@tonic-gate 		error = SOP_ACCEPT(so1, 0, &nso);
5210Sstevel@tonic-gate 		if (error) {
5220Sstevel@tonic-gate 			eprintsoline(so1, error);
5230Sstevel@tonic-gate 			goto done;
5240Sstevel@tonic-gate 		}
5250Sstevel@tonic-gate 
5260Sstevel@tonic-gate 		/* wait for so2 to become SS_CONNECTED, ignoring signals */
5270Sstevel@tonic-gate 		mutex_enter(&so2->so_lock);
5280Sstevel@tonic-gate 		error = sowaitconnected(so2, 0, 1);
5290Sstevel@tonic-gate 		mutex_exit(&so2->so_lock);
5300Sstevel@tonic-gate 		nvp = SOTOV(nso);
5310Sstevel@tonic-gate 		if (error != 0) {
5320Sstevel@tonic-gate 			(void) VOP_CLOSE(nvp, 0, 1, 0, CRED());
5330Sstevel@tonic-gate 			VN_RELE(nvp);
5340Sstevel@tonic-gate 			eprintsoline(so2, error);
5350Sstevel@tonic-gate 			goto done;
5360Sstevel@tonic-gate 		}
5370Sstevel@tonic-gate 
5380Sstevel@tonic-gate 		if (error = falloc(nvp, FWRITE|FREAD, &nfp, &nfd)) {
5390Sstevel@tonic-gate 			(void) VOP_CLOSE(nvp, 0, 1, 0, CRED());
5400Sstevel@tonic-gate 			VN_RELE(nvp);
5410Sstevel@tonic-gate 			eprintsoline(nso, error);
5420Sstevel@tonic-gate 			goto done;
5430Sstevel@tonic-gate 		}
5440Sstevel@tonic-gate 		/*
5450Sstevel@tonic-gate 		 * fill in the entries that falloc reserved
5460Sstevel@tonic-gate 		 */
5470Sstevel@tonic-gate 		mutex_exit(&nfp->f_tlock);
5480Sstevel@tonic-gate 		setf(nfd, nfp);
5490Sstevel@tonic-gate 
5500Sstevel@tonic-gate 		releasef(svs[0]);
5510Sstevel@tonic-gate 		releasef(svs[1]);
5520Sstevel@tonic-gate 		svs[0] = nfd;
5530Sstevel@tonic-gate 
5540Sstevel@tonic-gate 		/*
5550Sstevel@tonic-gate 		 * The socketpair library routine will close the original
5560Sstevel@tonic-gate 		 * svs[0] when this code passes out a different file
5570Sstevel@tonic-gate 		 * descriptor.
5580Sstevel@tonic-gate 		 */
5590Sstevel@tonic-gate 		if (copyout(svs, sv, sizeof (svs))) {
5600Sstevel@tonic-gate 			(void) closeandsetf(nfd, NULL);
5610Sstevel@tonic-gate 			eprintline(EFAULT);
5620Sstevel@tonic-gate 			return (set_errno(EFAULT));
5630Sstevel@tonic-gate 		}
5640Sstevel@tonic-gate 	}
5650Sstevel@tonic-gate 	return (0);
5660Sstevel@tonic-gate 
5670Sstevel@tonic-gate done:
5680Sstevel@tonic-gate 	releasef(svs[0]);
5690Sstevel@tonic-gate 	releasef(svs[1]);
5700Sstevel@tonic-gate 	return (set_errno(error));
5710Sstevel@tonic-gate }
5720Sstevel@tonic-gate 
5730Sstevel@tonic-gate int
5740Sstevel@tonic-gate bind(int sock, struct sockaddr *name, socklen_t namelen, int version)
5750Sstevel@tonic-gate {
5760Sstevel@tonic-gate 	struct sonode *so;
5770Sstevel@tonic-gate 	int error;
5780Sstevel@tonic-gate 
5790Sstevel@tonic-gate 	dprint(1, ("bind(%d, %p, %d)\n",
5800Sstevel@tonic-gate 		sock, name, namelen));
5810Sstevel@tonic-gate 
5820Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
5830Sstevel@tonic-gate 		return (set_errno(error));
5840Sstevel@tonic-gate 
5850Sstevel@tonic-gate 	/* Allocate and copyin name */
5860Sstevel@tonic-gate 	/*
5870Sstevel@tonic-gate 	 * X/Open test does not expect EFAULT with NULL name and non-zero
5880Sstevel@tonic-gate 	 * namelen.
5890Sstevel@tonic-gate 	 */
5900Sstevel@tonic-gate 	if (name != NULL && namelen != 0) {
5910Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
5920Sstevel@tonic-gate 		name = copyin_name(so, name, &namelen, &error);
5930Sstevel@tonic-gate 		if (name == NULL) {
5940Sstevel@tonic-gate 			releasef(sock);
5950Sstevel@tonic-gate 			return (set_errno(error));
5960Sstevel@tonic-gate 		}
5970Sstevel@tonic-gate 	} else {
5980Sstevel@tonic-gate 		name = NULL;
5990Sstevel@tonic-gate 		namelen = 0;
6000Sstevel@tonic-gate 	}
6010Sstevel@tonic-gate 
6020Sstevel@tonic-gate 	switch (version) {
6030Sstevel@tonic-gate 	default:
6040Sstevel@tonic-gate 		error = SOP_BIND(so, name, namelen, 0);
6050Sstevel@tonic-gate 		break;
6060Sstevel@tonic-gate 	case SOV_XPG4_2:
6070Sstevel@tonic-gate 		error = SOP_BIND(so, name, namelen, _SOBIND_XPG4_2);
6080Sstevel@tonic-gate 		break;
6090Sstevel@tonic-gate 	case SOV_SOCKBSD:
6100Sstevel@tonic-gate 		error = SOP_BIND(so, name, namelen, _SOBIND_SOCKBSD);
6110Sstevel@tonic-gate 		break;
6120Sstevel@tonic-gate 	}
6130Sstevel@tonic-gate done:
6140Sstevel@tonic-gate 	releasef(sock);
6150Sstevel@tonic-gate 	if (name != NULL)
6160Sstevel@tonic-gate 		kmem_free(name, (size_t)namelen);
6170Sstevel@tonic-gate 
6180Sstevel@tonic-gate 	if (error)
6190Sstevel@tonic-gate 		return (set_errno(error));
6200Sstevel@tonic-gate 	return (0);
6210Sstevel@tonic-gate }
6220Sstevel@tonic-gate 
6230Sstevel@tonic-gate /* ARGSUSED2 */
6240Sstevel@tonic-gate int
6250Sstevel@tonic-gate listen(int sock, int backlog, int version)
6260Sstevel@tonic-gate {
6270Sstevel@tonic-gate 	struct sonode *so;
6280Sstevel@tonic-gate 	int error;
6290Sstevel@tonic-gate 
6300Sstevel@tonic-gate 	dprint(1, ("listen(%d, %d)\n",
6310Sstevel@tonic-gate 		sock, backlog));
6320Sstevel@tonic-gate 
6330Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
6340Sstevel@tonic-gate 		return (set_errno(error));
6350Sstevel@tonic-gate 
6360Sstevel@tonic-gate 	error = SOP_LISTEN(so, backlog);
6370Sstevel@tonic-gate 
6380Sstevel@tonic-gate 	releasef(sock);
6390Sstevel@tonic-gate 	if (error)
6400Sstevel@tonic-gate 		return (set_errno(error));
6410Sstevel@tonic-gate 	return (0);
6420Sstevel@tonic-gate }
6430Sstevel@tonic-gate 
6440Sstevel@tonic-gate /*ARGSUSED3*/
6450Sstevel@tonic-gate int
6460Sstevel@tonic-gate accept(int sock, struct sockaddr *name, socklen_t *namelenp, int version)
6470Sstevel@tonic-gate {
6480Sstevel@tonic-gate 	struct sonode *so;
6490Sstevel@tonic-gate 	file_t *fp;
6500Sstevel@tonic-gate 	int error;
6510Sstevel@tonic-gate 	socklen_t namelen;
6520Sstevel@tonic-gate 	struct sonode *nso;
6530Sstevel@tonic-gate 	struct vnode *nvp;
6540Sstevel@tonic-gate 	struct file *nfp;
6550Sstevel@tonic-gate 	int nfd;
6560Sstevel@tonic-gate 
6570Sstevel@tonic-gate 	dprint(1, ("accept(%d, %p, %p)\n",
6580Sstevel@tonic-gate 		sock, name, namelenp));
6590Sstevel@tonic-gate 
6600Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, &fp)) == NULL)
6610Sstevel@tonic-gate 		return (set_errno(error));
6620Sstevel@tonic-gate 
6630Sstevel@tonic-gate 	if (name != NULL) {
6640Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
6650Sstevel@tonic-gate 		if (copyin(namelenp, &namelen, sizeof (namelen))) {
6660Sstevel@tonic-gate 			releasef(sock);
6670Sstevel@tonic-gate 			return (set_errno(EFAULT));
6680Sstevel@tonic-gate 		}
6690Sstevel@tonic-gate 		if (namelen != 0) {
6700Sstevel@tonic-gate 			error = useracc(name, (size_t)namelen, B_WRITE);
6710Sstevel@tonic-gate 			if (error && do_useracc) {
6720Sstevel@tonic-gate 				releasef(sock);
6730Sstevel@tonic-gate 				return (set_errno(EFAULT));
6740Sstevel@tonic-gate 			}
6750Sstevel@tonic-gate 		} else
6760Sstevel@tonic-gate 			name = NULL;
6770Sstevel@tonic-gate 	} else {
6780Sstevel@tonic-gate 		namelen = 0;
6790Sstevel@tonic-gate 	}
6800Sstevel@tonic-gate 
6810Sstevel@tonic-gate 	/*
6820Sstevel@tonic-gate 	 * Allocate the user fd before SOP_ACCEPT() so that any
6830Sstevel@tonic-gate 	 * EMFILE error is caught before the connection is accepted.
6840Sstevel@tonic-gate 	 */
6850Sstevel@tonic-gate 	if ((nfd = ufalloc(0)) == -1) {
6860Sstevel@tonic-gate 		eprintsoline(so, EMFILE);
6870Sstevel@tonic-gate 		releasef(sock);
6880Sstevel@tonic-gate 		return (set_errno(EMFILE));
6890Sstevel@tonic-gate 	}
6900Sstevel@tonic-gate 	error = SOP_ACCEPT(so, fp->f_flag, &nso);
6910Sstevel@tonic-gate 	releasef(sock);
6920Sstevel@tonic-gate 	if (error) {
6930Sstevel@tonic-gate 		setf(nfd, NULL);
6940Sstevel@tonic-gate 		return (set_errno(error));
6950Sstevel@tonic-gate 	}
6960Sstevel@tonic-gate 
6970Sstevel@tonic-gate 	nvp = SOTOV(nso);
6980Sstevel@tonic-gate 
6990Sstevel@tonic-gate 	/*
7000Sstevel@tonic-gate 	 * so_faddr_sa cannot go away even though we are not holding so_lock.
7010Sstevel@tonic-gate 	 * However, in theory its content could change from underneath us.
7020Sstevel@tonic-gate 	 * But this is not possible in practice since it can only
7030Sstevel@tonic-gate 	 * change due to either some socket system call
7040Sstevel@tonic-gate 	 * or due to a T_CONN_CON being received from the stream head.
7050Sstevel@tonic-gate 	 * Since the falloc/setf have not yet been done no thread
7060Sstevel@tonic-gate 	 * can do any system call on nso and T_CONN_CON can not arrive
7070Sstevel@tonic-gate 	 * on a socket that is already connected.
7080Sstevel@tonic-gate 	 * Thus there is no reason to hold so_lock here.
7090Sstevel@tonic-gate 	 *
7100Sstevel@tonic-gate 	 * SOP_ACCEPT() is required to have set the valid bit for the faddr,
7110Sstevel@tonic-gate 	 * but it could be instantly cleared by a disconnect from the transport.
7120Sstevel@tonic-gate 	 * For that reason we ignore it here.
7130Sstevel@tonic-gate 	 */
7140Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&nso->so_lock));
7150Sstevel@tonic-gate 	error = copyout_name(name, namelen, namelenp,
7160Sstevel@tonic-gate 	    nso->so_faddr_sa, (socklen_t)nso->so_faddr_len);
7170Sstevel@tonic-gate 	if (error) {
7180Sstevel@tonic-gate 		setf(nfd, NULL);
7190Sstevel@tonic-gate 		(void) VOP_CLOSE(nvp, 0, 1, 0, CRED());
7200Sstevel@tonic-gate 		VN_RELE(nvp);
7210Sstevel@tonic-gate 		return (set_errno(error));
7220Sstevel@tonic-gate 	}
7230Sstevel@tonic-gate 	if (error = falloc(NULL, FWRITE|FREAD, &nfp, NULL)) {
7240Sstevel@tonic-gate 		setf(nfd, NULL);
7250Sstevel@tonic-gate 		(void) VOP_CLOSE(nvp, 0, 1, 0, CRED());
7260Sstevel@tonic-gate 		VN_RELE(nvp);
7270Sstevel@tonic-gate 		eprintsoline(so, error);
7280Sstevel@tonic-gate 		return (set_errno(error));
7290Sstevel@tonic-gate 	}
7300Sstevel@tonic-gate 	/*
7310Sstevel@tonic-gate 	 * fill in the entries that falloc reserved
7320Sstevel@tonic-gate 	 */
7330Sstevel@tonic-gate 	nfp->f_vnode = nvp;
7340Sstevel@tonic-gate 	mutex_exit(&nfp->f_tlock);
7350Sstevel@tonic-gate 	setf(nfd, nfp);
7360Sstevel@tonic-gate 
7370Sstevel@tonic-gate 	/*
7380Sstevel@tonic-gate 	 * Copy FNDELAY and FNONBLOCK from listener to acceptor
7390Sstevel@tonic-gate 	 */
7400Sstevel@tonic-gate 	if (so->so_state & (SS_NDELAY|SS_NONBLOCK)) {
7410Sstevel@tonic-gate 		uint_t oflag = nfp->f_flag;
7420Sstevel@tonic-gate 		int arg = 0;
7430Sstevel@tonic-gate 
7440Sstevel@tonic-gate 		if (so->so_state & SS_NONBLOCK)
7450Sstevel@tonic-gate 			arg |= FNONBLOCK;
7460Sstevel@tonic-gate 		else if (so->so_state & SS_NDELAY)
7470Sstevel@tonic-gate 			arg |= FNDELAY;
7480Sstevel@tonic-gate 
7490Sstevel@tonic-gate 		/*
7500Sstevel@tonic-gate 		 * This code is a simplification of the F_SETFL code in fcntl().
7510Sstevel@tonic-gate 		 * Ignore any errors from VOP_SETFL.
7520Sstevel@tonic-gate 		 */
7530Sstevel@tonic-gate 		if ((error = VOP_SETFL(nvp, oflag, arg, nfp->f_cred)) != 0) {
7540Sstevel@tonic-gate 			eprintsoline(so, error);
7550Sstevel@tonic-gate 			error = 0;
7560Sstevel@tonic-gate 		} else {
7570Sstevel@tonic-gate 			mutex_enter(&nfp->f_tlock);
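			/*
			 * Keep any bits outside FMASK plus FREAD/FWRITE,
			 * then OR in the new FNDELAY/FNONBLOCK setting.
			 */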
7580Sstevel@tonic-gate 			nfp->f_flag &= ~FMASK | (FREAD|FWRITE);
7590Sstevel@tonic-gate 			nfp->f_flag |= arg;
7600Sstevel@tonic-gate 			mutex_exit(&nfp->f_tlock);
7610Sstevel@tonic-gate 		}
7620Sstevel@tonic-gate 	}
7630Sstevel@tonic-gate 	return (nfd);
7640Sstevel@tonic-gate }
7650Sstevel@tonic-gate 
7660Sstevel@tonic-gate int
7670Sstevel@tonic-gate connect(int sock, struct sockaddr *name, socklen_t namelen, int version)
7680Sstevel@tonic-gate {
7690Sstevel@tonic-gate 	struct sonode *so;
7700Sstevel@tonic-gate 	file_t *fp;
7710Sstevel@tonic-gate 	int error;
7720Sstevel@tonic-gate 
7730Sstevel@tonic-gate 	dprint(1, ("connect(%d, %p, %d)\n",
7740Sstevel@tonic-gate 		sock, name, namelen));
7750Sstevel@tonic-gate 
7760Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, &fp)) == NULL)
7770Sstevel@tonic-gate 		return (set_errno(error));
7780Sstevel@tonic-gate 
7790Sstevel@tonic-gate 	/* Allocate and copyin name */
7800Sstevel@tonic-gate 	if (namelen != 0) {
7810Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
7820Sstevel@tonic-gate 		name = copyin_name(so, name, &namelen, &error);
7830Sstevel@tonic-gate 		if (name == NULL) {
7840Sstevel@tonic-gate 			releasef(sock);
7850Sstevel@tonic-gate 			return (set_errno(error));
7860Sstevel@tonic-gate 		}
7870Sstevel@tonic-gate 	} else
7880Sstevel@tonic-gate 		name = NULL;
7890Sstevel@tonic-gate 
7900Sstevel@tonic-gate 	error = SOP_CONNECT(so, name, namelen, fp->f_flag,
7910Sstevel@tonic-gate 	    (version != SOV_XPG4_2) ? 0 : _SOCONNECT_XPG4_2);
7920Sstevel@tonic-gate 	releasef(sock);
7930Sstevel@tonic-gate 	if (name)
7940Sstevel@tonic-gate 		kmem_free(name, (size_t)namelen);
7950Sstevel@tonic-gate 	if (error)
7960Sstevel@tonic-gate 		return (set_errno(error));
7970Sstevel@tonic-gate 	return (0);
7980Sstevel@tonic-gate }
7990Sstevel@tonic-gate 
8000Sstevel@tonic-gate /*ARGSUSED2*/
8010Sstevel@tonic-gate int
8020Sstevel@tonic-gate shutdown(int sock, int how, int version)
8030Sstevel@tonic-gate {
8040Sstevel@tonic-gate 	struct sonode *so;
8050Sstevel@tonic-gate 	int error;
8060Sstevel@tonic-gate 
8070Sstevel@tonic-gate 	dprint(1, ("shutdown(%d, %d)\n",
8080Sstevel@tonic-gate 		sock, how));
8090Sstevel@tonic-gate 
8100Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
8110Sstevel@tonic-gate 		return (set_errno(error));
8120Sstevel@tonic-gate 
8130Sstevel@tonic-gate 	error = SOP_SHUTDOWN(so, how);
8140Sstevel@tonic-gate 
8150Sstevel@tonic-gate 	releasef(sock);
8160Sstevel@tonic-gate 	if (error)
8170Sstevel@tonic-gate 		return (set_errno(error));
8180Sstevel@tonic-gate 	return (0);
8190Sstevel@tonic-gate }
8200Sstevel@tonic-gate 
8210Sstevel@tonic-gate /*
8220Sstevel@tonic-gate  * Common receive routine.
8230Sstevel@tonic-gate  */
8240Sstevel@tonic-gate static ssize_t
8250Sstevel@tonic-gate recvit(int sock,
8260Sstevel@tonic-gate 	struct nmsghdr *msg,
8270Sstevel@tonic-gate 	struct uio *uiop,
8280Sstevel@tonic-gate 	int flags,
8290Sstevel@tonic-gate 	socklen_t *namelenp,
8300Sstevel@tonic-gate 	socklen_t *controllenp,
8310Sstevel@tonic-gate 	int *flagsp)
8320Sstevel@tonic-gate {
8330Sstevel@tonic-gate 	struct sonode *so;
8340Sstevel@tonic-gate 	file_t *fp;
8350Sstevel@tonic-gate 	void *name;
8360Sstevel@tonic-gate 	socklen_t namelen;
8370Sstevel@tonic-gate 	void *control;
8380Sstevel@tonic-gate 	socklen_t controllen;
8390Sstevel@tonic-gate 	ssize_t len;
8400Sstevel@tonic-gate 	int error;
8410Sstevel@tonic-gate 
8420Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, &fp)) == NULL)
8430Sstevel@tonic-gate 		return (set_errno(error));
8440Sstevel@tonic-gate 
8450Sstevel@tonic-gate 	len = uiop->uio_resid;
8460Sstevel@tonic-gate 	uiop->uio_fmode = fp->f_flag;
8470Sstevel@tonic-gate 	uiop->uio_extflg = UIO_COPY_CACHED;
8480Sstevel@tonic-gate 
8490Sstevel@tonic-gate 	name = msg->msg_name;
8500Sstevel@tonic-gate 	namelen = msg->msg_namelen;
8510Sstevel@tonic-gate 	control = msg->msg_control;
8520Sstevel@tonic-gate 	controllen = msg->msg_controllen;
8530Sstevel@tonic-gate 
8540Sstevel@tonic-gate 	msg->msg_flags = flags & (MSG_OOB | MSG_PEEK | MSG_WAITALL |
8550Sstevel@tonic-gate 	    MSG_DONTWAIT | MSG_XPG4_2);
8560Sstevel@tonic-gate 
8570Sstevel@tonic-gate 	error = SOP_RECVMSG(so, msg, uiop);
8580Sstevel@tonic-gate 	if (error) {
8590Sstevel@tonic-gate 		releasef(sock);
8600Sstevel@tonic-gate 		return (set_errno(error));
8610Sstevel@tonic-gate 	}
8620Sstevel@tonic-gate 	lwp_stat_update(LWP_STAT_MSGRCV, 1);
8630Sstevel@tonic-gate 	so_update_attrs(so, SOACC);
8640Sstevel@tonic-gate 	releasef(sock);
8650Sstevel@tonic-gate 
8660Sstevel@tonic-gate 	error = copyout_name(name, namelen, namelenp,
8670Sstevel@tonic-gate 	    msg->msg_name, msg->msg_namelen);
8680Sstevel@tonic-gate 	if (error)
8690Sstevel@tonic-gate 		goto err;
8700Sstevel@tonic-gate 
8710Sstevel@tonic-gate 	if (flagsp != NULL) {
8720Sstevel@tonic-gate 		/*
8730Sstevel@tonic-gate 		 * Clear internal flag.
8740Sstevel@tonic-gate 		 */
8750Sstevel@tonic-gate 		msg->msg_flags &= ~MSG_XPG4_2;
8760Sstevel@tonic-gate 
8770Sstevel@tonic-gate 		/*
8780Sstevel@tonic-gate 		 * Determine MSG_CTRUNC. sorecvmsg sets MSG_CTRUNC only
8790Sstevel@tonic-gate 		 * when controllen is zero and there is control data to
8800Sstevel@tonic-gate 		 * copy out.
8810Sstevel@tonic-gate 		 */
8820Sstevel@tonic-gate 		if (controllen != 0 &&
8830Sstevel@tonic-gate 		    (msg->msg_controllen > controllen || control == NULL)) {
8840Sstevel@tonic-gate 			dprint(1, ("recvit: CTRUNC %d %d %p\n",
8850Sstevel@tonic-gate 			    msg->msg_controllen, controllen, control));
8860Sstevel@tonic-gate 
8870Sstevel@tonic-gate 			msg->msg_flags |= MSG_CTRUNC;
8880Sstevel@tonic-gate 		}
8890Sstevel@tonic-gate 		if (copyout(&msg->msg_flags, flagsp,
8900Sstevel@tonic-gate 		    sizeof (msg->msg_flags))) {
8910Sstevel@tonic-gate 			error = EFAULT;
8920Sstevel@tonic-gate 			goto err;
8930Sstevel@tonic-gate 		}
8940Sstevel@tonic-gate 	}
8950Sstevel@tonic-gate 	/*
8960Sstevel@tonic-gate 	 * Note: This MUST be done last. There can be no "goto err" after this
8970Sstevel@tonic-gate 	 * point since it could make so_closefds run twice on some part
8980Sstevel@tonic-gate 	 * of the file descriptor array.
8990Sstevel@tonic-gate 	 */
9000Sstevel@tonic-gate 	if (controllen != 0) {
9010Sstevel@tonic-gate 		if (!(flags & MSG_XPG4_2)) {
9020Sstevel@tonic-gate 			/*
9030Sstevel@tonic-gate 			 * Good old msg_accrights can only return a multiple
9040Sstevel@tonic-gate 			 * of 4 bytes.
9050Sstevel@tonic-gate 			 */
9060Sstevel@tonic-gate 			controllen &= ~((int)sizeof (uint32_t) - 1);
9070Sstevel@tonic-gate 		}
9080Sstevel@tonic-gate 		error = copyout_arg(control, controllen, controllenp,
9090Sstevel@tonic-gate 		    msg->msg_control, msg->msg_controllen);
9100Sstevel@tonic-gate 		if (error)
9110Sstevel@tonic-gate 			goto err;
9120Sstevel@tonic-gate 
9130Sstevel@tonic-gate 		if (msg->msg_controllen > controllen || control == NULL) {
9140Sstevel@tonic-gate 			if (control == NULL)
9150Sstevel@tonic-gate 				controllen = 0;
9160Sstevel@tonic-gate 			so_closefds(msg->msg_control, msg->msg_controllen,
9170Sstevel@tonic-gate 			    !(flags & MSG_XPG4_2), controllen);
9180Sstevel@tonic-gate 		}
9190Sstevel@tonic-gate 	}
9200Sstevel@tonic-gate 	if (msg->msg_namelen != 0)
9210Sstevel@tonic-gate 		kmem_free(msg->msg_name, (size_t)msg->msg_namelen);
9220Sstevel@tonic-gate 	if (msg->msg_controllen != 0)
9230Sstevel@tonic-gate 		kmem_free(msg->msg_control, (size_t)msg->msg_controllen);
9240Sstevel@tonic-gate 	return (len - uiop->uio_resid);
9250Sstevel@tonic-gate 
9260Sstevel@tonic-gate err:
9270Sstevel@tonic-gate 	/*
9280Sstevel@tonic-gate 	 * If we fail and the control part contains file descriptors
9290Sstevel@tonic-gate 	 * we have to close the fd's.
9300Sstevel@tonic-gate 	 */
9310Sstevel@tonic-gate 	if (msg->msg_controllen != 0)
9320Sstevel@tonic-gate 		so_closefds(msg->msg_control, msg->msg_controllen,
9330Sstevel@tonic-gate 		    !(flags & MSG_XPG4_2), 0);
9340Sstevel@tonic-gate 	if (msg->msg_namelen != 0)
9350Sstevel@tonic-gate 		kmem_free(msg->msg_name, (size_t)msg->msg_namelen);
9360Sstevel@tonic-gate 	if (msg->msg_controllen != 0)
9370Sstevel@tonic-gate 		kmem_free(msg->msg_control, (size_t)msg->msg_controllen);
9380Sstevel@tonic-gate 	return (set_errno(error));
9390Sstevel@tonic-gate }
9400Sstevel@tonic-gate 
9410Sstevel@tonic-gate /*
9420Sstevel@tonic-gate  * Native system call
9430Sstevel@tonic-gate  */
9440Sstevel@tonic-gate ssize_t
9450Sstevel@tonic-gate recv(int sock, void *buffer, size_t len, int flags)
9460Sstevel@tonic-gate {
9470Sstevel@tonic-gate 	struct nmsghdr lmsg;
9480Sstevel@tonic-gate 	struct uio auio;
9490Sstevel@tonic-gate 	struct iovec aiov[1];
9500Sstevel@tonic-gate 
9510Sstevel@tonic-gate 	dprint(1, ("recv(%d, %p, %ld, %d)\n",
9520Sstevel@tonic-gate 		sock, buffer, len, flags));
9530Sstevel@tonic-gate 
9540Sstevel@tonic-gate 	if ((ssize_t)len < 0) {
9550Sstevel@tonic-gate 		return (set_errno(EINVAL));
9560Sstevel@tonic-gate 	}
9570Sstevel@tonic-gate 
9580Sstevel@tonic-gate 	aiov[0].iov_base = buffer;
9590Sstevel@tonic-gate 	aiov[0].iov_len = len;
9600Sstevel@tonic-gate 	auio.uio_loffset = 0;
9610Sstevel@tonic-gate 	auio.uio_iov = aiov;
9620Sstevel@tonic-gate 	auio.uio_iovcnt = 1;
9630Sstevel@tonic-gate 	auio.uio_resid = len;
9640Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
9650Sstevel@tonic-gate 	auio.uio_limit = 0;
9660Sstevel@tonic-gate 
9670Sstevel@tonic-gate 	lmsg.msg_namelen = 0;
9680Sstevel@tonic-gate 	lmsg.msg_controllen = 0;
9690Sstevel@tonic-gate 	lmsg.msg_flags = 0;
9700Sstevel@tonic-gate 	return (recvit(sock, &lmsg, &auio, flags, NULL, NULL, NULL));
9710Sstevel@tonic-gate }
9720Sstevel@tonic-gate 
9730Sstevel@tonic-gate ssize_t
9740Sstevel@tonic-gate recvfrom(int sock, void *buffer, size_t len, int flags,
9750Sstevel@tonic-gate 	struct sockaddr *name, socklen_t *namelenp)
9760Sstevel@tonic-gate {
9770Sstevel@tonic-gate 	struct nmsghdr lmsg;
9780Sstevel@tonic-gate 	struct uio auio;
9790Sstevel@tonic-gate 	struct iovec aiov[1];
9800Sstevel@tonic-gate 
9810Sstevel@tonic-gate 	dprint(1, ("recvfrom(%d, %p, %ld, %d, %p, %p)\n",
9820Sstevel@tonic-gate 		sock, buffer, len, flags, name, namelenp));
9830Sstevel@tonic-gate 
9840Sstevel@tonic-gate 	if ((ssize_t)len < 0) {
9850Sstevel@tonic-gate 		return (set_errno(EINVAL));
9860Sstevel@tonic-gate 	}
9870Sstevel@tonic-gate 
9880Sstevel@tonic-gate 	aiov[0].iov_base = buffer;
9890Sstevel@tonic-gate 	aiov[0].iov_len = len;
9900Sstevel@tonic-gate 	auio.uio_loffset = 0;
9910Sstevel@tonic-gate 	auio.uio_iov = aiov;
9920Sstevel@tonic-gate 	auio.uio_iovcnt = 1;
9930Sstevel@tonic-gate 	auio.uio_resid = len;
9940Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
9950Sstevel@tonic-gate 	auio.uio_limit = 0;
9960Sstevel@tonic-gate 
9970Sstevel@tonic-gate 	lmsg.msg_name = (char *)name;
9980Sstevel@tonic-gate 	if (namelenp != NULL) {
9990Sstevel@tonic-gate 		if (copyin(namelenp, &lmsg.msg_namelen,
10000Sstevel@tonic-gate 		    sizeof (lmsg.msg_namelen)))
10010Sstevel@tonic-gate 			return (set_errno(EFAULT));
10020Sstevel@tonic-gate 	} else {
10030Sstevel@tonic-gate 		lmsg.msg_namelen = 0;
10040Sstevel@tonic-gate 	}
10050Sstevel@tonic-gate 	lmsg.msg_controllen = 0;
10060Sstevel@tonic-gate 	lmsg.msg_flags = 0;
10070Sstevel@tonic-gate 
10080Sstevel@tonic-gate 	return (recvit(sock, &lmsg, &auio, flags, namelenp, NULL, NULL));
10090Sstevel@tonic-gate }
10100Sstevel@tonic-gate 
10110Sstevel@tonic-gate /*
10120Sstevel@tonic-gate  * Uses the MSG_XPG4_2 flag to determine if the caller is using
10130Sstevel@tonic-gate  * struct omsghdr or struct nmsghdr.
10140Sstevel@tonic-gate  */
10150Sstevel@tonic-gate ssize_t
10160Sstevel@tonic-gate recvmsg(int sock, struct nmsghdr *msg, int flags)
10170Sstevel@tonic-gate {
10180Sstevel@tonic-gate 	STRUCT_DECL(nmsghdr, u_lmsg);
10190Sstevel@tonic-gate 	STRUCT_HANDLE(nmsghdr, umsgptr);
10200Sstevel@tonic-gate 	struct nmsghdr lmsg;
10210Sstevel@tonic-gate 	struct uio auio;
10220Sstevel@tonic-gate 	struct iovec aiov[MSG_MAXIOVLEN];
10230Sstevel@tonic-gate 	int iovcnt;
10240Sstevel@tonic-gate 	ssize_t len;
10250Sstevel@tonic-gate 	int i;
10260Sstevel@tonic-gate 	int *flagsp;
10270Sstevel@tonic-gate 	model_t	model;
10280Sstevel@tonic-gate 
10290Sstevel@tonic-gate 	dprint(1, ("recvmsg(%d, %p, %d)\n",
10300Sstevel@tonic-gate 		sock, msg, flags));
10310Sstevel@tonic-gate 
10320Sstevel@tonic-gate 	model = get_udatamodel();
10330Sstevel@tonic-gate 	STRUCT_INIT(u_lmsg, model);
10340Sstevel@tonic-gate 	STRUCT_SET_HANDLE(umsgptr, model, msg);
10350Sstevel@tonic-gate 
10360Sstevel@tonic-gate 	if (flags & MSG_XPG4_2) {
10370Sstevel@tonic-gate 		if (copyin(msg, STRUCT_BUF(u_lmsg), STRUCT_SIZE(u_lmsg)))
10380Sstevel@tonic-gate 			return (set_errno(EFAULT));
10390Sstevel@tonic-gate 		flagsp = STRUCT_FADDR(umsgptr, msg_flags);
10400Sstevel@tonic-gate 	} else {
10410Sstevel@tonic-gate 		/*
10420Sstevel@tonic-gate 		 * Assumes that nmsghdr and omsghdr are identically shaped
10430Sstevel@tonic-gate 		 * except for the added msg_flags field.
10440Sstevel@tonic-gate 		 */
10450Sstevel@tonic-gate 		if (copyin(msg, STRUCT_BUF(u_lmsg),
10460Sstevel@tonic-gate 		    SIZEOF_STRUCT(omsghdr, model)))
10470Sstevel@tonic-gate 			return (set_errno(EFAULT));
10480Sstevel@tonic-gate 		STRUCT_FSET(u_lmsg, msg_flags, 0);
10490Sstevel@tonic-gate 		flagsp = NULL;
10500Sstevel@tonic-gate 	}
10510Sstevel@tonic-gate 
10520Sstevel@tonic-gate 	/*
10530Sstevel@tonic-gate 	 * Code below us will kmem_alloc memory and hang it
10540Sstevel@tonic-gate 	 * off msg_control and msg_name fields. This forces
10550Sstevel@tonic-gate 	 * us to copy the structure to its native form.
10560Sstevel@tonic-gate 	 */
10570Sstevel@tonic-gate 	lmsg.msg_name = STRUCT_FGETP(u_lmsg, msg_name);
10580Sstevel@tonic-gate 	lmsg.msg_namelen = STRUCT_FGET(u_lmsg, msg_namelen);
10590Sstevel@tonic-gate 	lmsg.msg_iov = STRUCT_FGETP(u_lmsg, msg_iov);
10600Sstevel@tonic-gate 	lmsg.msg_iovlen = STRUCT_FGET(u_lmsg, msg_iovlen);
10610Sstevel@tonic-gate 	lmsg.msg_control = STRUCT_FGETP(u_lmsg, msg_control);
10620Sstevel@tonic-gate 	lmsg.msg_controllen = STRUCT_FGET(u_lmsg, msg_controllen);
10630Sstevel@tonic-gate 	lmsg.msg_flags = STRUCT_FGET(u_lmsg, msg_flags);
10640Sstevel@tonic-gate 
10650Sstevel@tonic-gate 	iovcnt = lmsg.msg_iovlen;
10660Sstevel@tonic-gate 
10670Sstevel@tonic-gate 	if (iovcnt <= 0 || iovcnt > MSG_MAXIOVLEN) {
10680Sstevel@tonic-gate 		return (set_errno(EMSGSIZE));
10690Sstevel@tonic-gate 	}
10700Sstevel@tonic-gate 
10710Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL
10720Sstevel@tonic-gate 	/*
10730Sstevel@tonic-gate 	 * 32-bit callers need to have their iovec expanded, while ensuring
10740Sstevel@tonic-gate 	 * that they can't move more than 2Gbytes of data in a single call.
10750Sstevel@tonic-gate 	 */
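	/*
	 * For example (illustrative), two 32-bit iovecs of 0x60000000 bytes
	 * each would wrap the signed 32-bit running total negative, so the
	 * count32 check below rejects such a request with EINVAL.
	 */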
10760Sstevel@tonic-gate 	if (model == DATAMODEL_ILP32) {
10770Sstevel@tonic-gate 		struct iovec32 aiov32[MSG_MAXIOVLEN];
10780Sstevel@tonic-gate 		ssize32_t count32;
10790Sstevel@tonic-gate 
10800Sstevel@tonic-gate 		if (copyin((struct iovec32 *)lmsg.msg_iov, aiov32,
10810Sstevel@tonic-gate 		    iovcnt * sizeof (struct iovec32)))
10820Sstevel@tonic-gate 			return (set_errno(EFAULT));
10830Sstevel@tonic-gate 
10840Sstevel@tonic-gate 		count32 = 0;
10850Sstevel@tonic-gate 		for (i = 0; i < iovcnt; i++) {
10860Sstevel@tonic-gate 			ssize32_t iovlen32;
10870Sstevel@tonic-gate 
10880Sstevel@tonic-gate 			iovlen32 = aiov32[i].iov_len;
10890Sstevel@tonic-gate 			count32 += iovlen32;
10900Sstevel@tonic-gate 			if (iovlen32 < 0 || count32 < 0)
10910Sstevel@tonic-gate 				return (set_errno(EINVAL));
10920Sstevel@tonic-gate 			aiov[i].iov_len = iovlen32;
10930Sstevel@tonic-gate 			aiov[i].iov_base =
10940Sstevel@tonic-gate 			    (caddr_t)(uintptr_t)aiov32[i].iov_base;
10950Sstevel@tonic-gate 		}
10960Sstevel@tonic-gate 	} else
10970Sstevel@tonic-gate #endif /* _SYSCALL32_IMPL */
10980Sstevel@tonic-gate 	if (copyin(lmsg.msg_iov, aiov, iovcnt * sizeof (struct iovec))) {
10990Sstevel@tonic-gate 		return (set_errno(EFAULT));
11000Sstevel@tonic-gate 	}
11010Sstevel@tonic-gate 	len = 0;
11020Sstevel@tonic-gate 	for (i = 0; i < iovcnt; i++) {
11030Sstevel@tonic-gate 		ssize_t iovlen = aiov[i].iov_len;
11040Sstevel@tonic-gate 		len += iovlen;
11050Sstevel@tonic-gate 		if (iovlen < 0 || len < 0) {
11060Sstevel@tonic-gate 			return (set_errno(EINVAL));
11070Sstevel@tonic-gate 		}
11080Sstevel@tonic-gate 	}
11090Sstevel@tonic-gate 	auio.uio_loffset = 0;
11100Sstevel@tonic-gate 	auio.uio_iov = aiov;
11110Sstevel@tonic-gate 	auio.uio_iovcnt = iovcnt;
11120Sstevel@tonic-gate 	auio.uio_resid = len;
11130Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
11140Sstevel@tonic-gate 	auio.uio_limit = 0;
11150Sstevel@tonic-gate 
11160Sstevel@tonic-gate 	if (lmsg.msg_control != NULL &&
11170Sstevel@tonic-gate 	    (do_useracc == 0 ||
11180Sstevel@tonic-gate 	    useracc(lmsg.msg_control, lmsg.msg_controllen,
11190Sstevel@tonic-gate 			B_WRITE) != 0)) {
11200Sstevel@tonic-gate 		return (set_errno(EFAULT));
11210Sstevel@tonic-gate 	}
11220Sstevel@tonic-gate 
11230Sstevel@tonic-gate 	return (recvit(sock, &lmsg, &auio, flags,
11240Sstevel@tonic-gate 		STRUCT_FADDR(umsgptr, msg_namelen),
11250Sstevel@tonic-gate 		STRUCT_FADDR(umsgptr, msg_controllen), flagsp));
11260Sstevel@tonic-gate }
11270Sstevel@tonic-gate 
11280Sstevel@tonic-gate /*
11290Sstevel@tonic-gate  * Common send function.
11300Sstevel@tonic-gate  */
11310Sstevel@tonic-gate static ssize_t
11320Sstevel@tonic-gate sendit(int sock, struct nmsghdr *msg, struct uio *uiop, int flags)
11330Sstevel@tonic-gate {
11340Sstevel@tonic-gate 	struct sonode *so;
11350Sstevel@tonic-gate 	file_t *fp;
11360Sstevel@tonic-gate 	void *name;
11370Sstevel@tonic-gate 	socklen_t namelen;
11380Sstevel@tonic-gate 	void *control;
11390Sstevel@tonic-gate 	socklen_t controllen;
11400Sstevel@tonic-gate 	ssize_t len;
11410Sstevel@tonic-gate 	int error;
11420Sstevel@tonic-gate 
11430Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, &fp)) == NULL)
11440Sstevel@tonic-gate 		return (set_errno(error));
11450Sstevel@tonic-gate 
11460Sstevel@tonic-gate 	uiop->uio_fmode = fp->f_flag;
11470Sstevel@tonic-gate 
11480Sstevel@tonic-gate 	if (so->so_family == AF_UNIX)
11490Sstevel@tonic-gate 		uiop->uio_extflg = UIO_COPY_CACHED;
11500Sstevel@tonic-gate 	else
11510Sstevel@tonic-gate 		uiop->uio_extflg = UIO_COPY_DEFAULT;
11520Sstevel@tonic-gate 
11530Sstevel@tonic-gate 	/* Allocate and copyin name and control */
11540Sstevel@tonic-gate 	name = msg->msg_name;
11550Sstevel@tonic-gate 	namelen = msg->msg_namelen;
11560Sstevel@tonic-gate 	if (name != NULL && namelen != 0) {
11570Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
11580Sstevel@tonic-gate 		name = copyin_name(so,
11590Sstevel@tonic-gate 				(struct sockaddr *)name,
11600Sstevel@tonic-gate 				&namelen, &error);
11610Sstevel@tonic-gate 		if (name == NULL)
11620Sstevel@tonic-gate 			goto done3;
11630Sstevel@tonic-gate 		/* copyin_name null terminates addresses for AF_UNIX */
11640Sstevel@tonic-gate 		msg->msg_namelen = namelen;
11650Sstevel@tonic-gate 		msg->msg_name = name;
11660Sstevel@tonic-gate 	} else {
11670Sstevel@tonic-gate 		msg->msg_name = name = NULL;
11680Sstevel@tonic-gate 		msg->msg_namelen = namelen = 0;
11690Sstevel@tonic-gate 	}
11700Sstevel@tonic-gate 
11710Sstevel@tonic-gate 	control = msg->msg_control;
11720Sstevel@tonic-gate 	controllen = msg->msg_controllen;
11730Sstevel@tonic-gate 	if ((control != NULL) && (controllen != 0)) {
11740Sstevel@tonic-gate 		/*
11750Sstevel@tonic-gate 		 * Verify that the length is not excessive to prevent
11760Sstevel@tonic-gate 		 * an application from consuming all of kernel memory.
11770Sstevel@tonic-gate 		 */
11780Sstevel@tonic-gate 		if (controllen > SO_MAXARGSIZE) {
11790Sstevel@tonic-gate 			error = EINVAL;
11800Sstevel@tonic-gate 			goto done2;
11810Sstevel@tonic-gate 		}
11820Sstevel@tonic-gate 		control = kmem_alloc(controllen, KM_SLEEP);
11830Sstevel@tonic-gate 
11840Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
11850Sstevel@tonic-gate 		if (copyin(msg->msg_control, control, controllen)) {
11860Sstevel@tonic-gate 			error = EFAULT;
11870Sstevel@tonic-gate 			goto done1;
11880Sstevel@tonic-gate 		}
11890Sstevel@tonic-gate 		msg->msg_control = control;
11900Sstevel@tonic-gate 	} else {
11910Sstevel@tonic-gate 		msg->msg_control = control = NULL;
11920Sstevel@tonic-gate 		msg->msg_controllen = controllen = 0;
11930Sstevel@tonic-gate 	}
11940Sstevel@tonic-gate 
11950Sstevel@tonic-gate 	len = uiop->uio_resid;
11960Sstevel@tonic-gate 	msg->msg_flags = flags;
11970Sstevel@tonic-gate 
11980Sstevel@tonic-gate 	error = SOP_SENDMSG(so, msg, uiop);
11990Sstevel@tonic-gate done1:
12000Sstevel@tonic-gate 	if (control != NULL)
12010Sstevel@tonic-gate 		kmem_free(control, controllen);
12020Sstevel@tonic-gate done2:
12030Sstevel@tonic-gate 	if (name != NULL)
12040Sstevel@tonic-gate 		kmem_free(name, namelen);
12050Sstevel@tonic-gate done3:
12060Sstevel@tonic-gate 	if (error != 0) {
12070Sstevel@tonic-gate 		releasef(sock);
12080Sstevel@tonic-gate 		return (set_errno(error));
12090Sstevel@tonic-gate 	}
12100Sstevel@tonic-gate 	lwp_stat_update(LWP_STAT_MSGSND, 1);
12110Sstevel@tonic-gate 	so_update_attrs(so, SOMOD);
12120Sstevel@tonic-gate 	releasef(sock);
12130Sstevel@tonic-gate 	return (len - uiop->uio_resid);
12140Sstevel@tonic-gate }
12150Sstevel@tonic-gate 
12160Sstevel@tonic-gate /*
12170Sstevel@tonic-gate  * Native system call
12180Sstevel@tonic-gate  */
12190Sstevel@tonic-gate ssize_t
12200Sstevel@tonic-gate send(int sock, void *buffer, size_t len, int flags)
12210Sstevel@tonic-gate {
12220Sstevel@tonic-gate 	struct nmsghdr lmsg;
12230Sstevel@tonic-gate 	struct uio auio;
12240Sstevel@tonic-gate 	struct iovec aiov[1];
12250Sstevel@tonic-gate 
12260Sstevel@tonic-gate 	dprint(1, ("send(%d, %p, %ld, %d)\n",
12270Sstevel@tonic-gate 		sock, buffer, len, flags));
12280Sstevel@tonic-gate 
12290Sstevel@tonic-gate 	if ((ssize_t)len < 0) {
12300Sstevel@tonic-gate 		return (set_errno(EINVAL));
12310Sstevel@tonic-gate 	}
12320Sstevel@tonic-gate 
12330Sstevel@tonic-gate 	aiov[0].iov_base = buffer;
12340Sstevel@tonic-gate 	aiov[0].iov_len = len;
12350Sstevel@tonic-gate 	auio.uio_loffset = 0;
12360Sstevel@tonic-gate 	auio.uio_iov = aiov;
12370Sstevel@tonic-gate 	auio.uio_iovcnt = 1;
12380Sstevel@tonic-gate 	auio.uio_resid = len;
12390Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
12400Sstevel@tonic-gate 	auio.uio_limit = 0;
12410Sstevel@tonic-gate 
12420Sstevel@tonic-gate 	lmsg.msg_name = NULL;
12430Sstevel@tonic-gate 	lmsg.msg_control = NULL;
12440Sstevel@tonic-gate 	if (!(flags & MSG_XPG4_2)) {
12450Sstevel@tonic-gate 		/*
12460Sstevel@tonic-gate 		 * In order to be compatible with the libsocket/sockmod
12470Sstevel@tonic-gate 		 * implementation we set EOR for all send* calls.
12480Sstevel@tonic-gate 		 */
12490Sstevel@tonic-gate 		flags |= MSG_EOR;
12500Sstevel@tonic-gate 	}
12510Sstevel@tonic-gate 	return (sendit(sock, &lmsg, &auio, flags));
12520Sstevel@tonic-gate }
12530Sstevel@tonic-gate 
12540Sstevel@tonic-gate /*
12550Sstevel@tonic-gate  * Uses the MSG_XPG4_2 flag to determine if the caller is using
12560Sstevel@tonic-gate  * struct omsghdr or struct nmsghdr.
12570Sstevel@tonic-gate  */
12580Sstevel@tonic-gate ssize_t
12590Sstevel@tonic-gate sendmsg(int sock, struct nmsghdr *msg, int flags)
12600Sstevel@tonic-gate {
12610Sstevel@tonic-gate 	struct nmsghdr lmsg;
12620Sstevel@tonic-gate 	STRUCT_DECL(nmsghdr, u_lmsg);
12630Sstevel@tonic-gate 	struct uio auio;
12640Sstevel@tonic-gate 	struct iovec aiov[MSG_MAXIOVLEN];
12650Sstevel@tonic-gate 	int iovcnt;
12660Sstevel@tonic-gate 	ssize_t len;
12670Sstevel@tonic-gate 	int i;
12680Sstevel@tonic-gate 	model_t	model;
12690Sstevel@tonic-gate 
12700Sstevel@tonic-gate 	dprint(1, ("sendmsg(%d, %p, %d)\n", sock, msg, flags));
12710Sstevel@tonic-gate 
12720Sstevel@tonic-gate 	model = get_udatamodel();
12730Sstevel@tonic-gate 	STRUCT_INIT(u_lmsg, model);
12740Sstevel@tonic-gate 
12750Sstevel@tonic-gate 	if (flags & MSG_XPG4_2) {
12760Sstevel@tonic-gate 		if (copyin(msg, (char *)STRUCT_BUF(u_lmsg),
12770Sstevel@tonic-gate 		    STRUCT_SIZE(u_lmsg)))
12780Sstevel@tonic-gate 			return (set_errno(EFAULT));
12790Sstevel@tonic-gate 	} else {
12800Sstevel@tonic-gate 		/*
12810Sstevel@tonic-gate 		 * Assumes that nmsghdr and omsghdr are identically shaped
12820Sstevel@tonic-gate 		 * except for the added msg_flags field.
12830Sstevel@tonic-gate 		 */
12840Sstevel@tonic-gate 		if (copyin(msg, (char *)STRUCT_BUF(u_lmsg),
12850Sstevel@tonic-gate 		    SIZEOF_STRUCT(omsghdr, model)))
12860Sstevel@tonic-gate 			return (set_errno(EFAULT));
12870Sstevel@tonic-gate 		/*
12880Sstevel@tonic-gate 		 * In order to be compatible with the libsocket/sockmod
12890Sstevel@tonic-gate 		 * implementation we set EOR for all send* calls.
12900Sstevel@tonic-gate 		 */
12910Sstevel@tonic-gate 		flags |= MSG_EOR;
12920Sstevel@tonic-gate 	}
12930Sstevel@tonic-gate 
12940Sstevel@tonic-gate 	/*
12950Sstevel@tonic-gate 	 * The code below will kmem_alloc memory and hang it off
12960Sstevel@tonic-gate 	 * the msg_control and msg_name fields. This forces us
12970Sstevel@tonic-gate 	 * to copy the structure to its native form first.
12980Sstevel@tonic-gate 	 */
12990Sstevel@tonic-gate 	lmsg.msg_name = STRUCT_FGETP(u_lmsg, msg_name);
13000Sstevel@tonic-gate 	lmsg.msg_namelen = STRUCT_FGET(u_lmsg, msg_namelen);
13010Sstevel@tonic-gate 	lmsg.msg_iov = STRUCT_FGETP(u_lmsg, msg_iov);
13020Sstevel@tonic-gate 	lmsg.msg_iovlen = STRUCT_FGET(u_lmsg, msg_iovlen);
13030Sstevel@tonic-gate 	lmsg.msg_control = STRUCT_FGETP(u_lmsg, msg_control);
13040Sstevel@tonic-gate 	lmsg.msg_controllen = STRUCT_FGET(u_lmsg, msg_controllen);
13050Sstevel@tonic-gate 	lmsg.msg_flags = STRUCT_FGET(u_lmsg, msg_flags);
13060Sstevel@tonic-gate 
13070Sstevel@tonic-gate 	iovcnt = lmsg.msg_iovlen;
13080Sstevel@tonic-gate 
13090Sstevel@tonic-gate 	if (iovcnt <= 0 || iovcnt > MSG_MAXIOVLEN) {
13100Sstevel@tonic-gate 		/*
13110Sstevel@tonic-gate 		 * Unless this is XPG 4.2 we allow iovcnt == 0 to
13120Sstevel@tonic-gate 		 * be compatible with SunOS 4.X and 4.4BSD.
13130Sstevel@tonic-gate 		 */
13140Sstevel@tonic-gate 		if (iovcnt != 0 || (flags & MSG_XPG4_2))
13150Sstevel@tonic-gate 			return (set_errno(EMSGSIZE));
13160Sstevel@tonic-gate 	}
13170Sstevel@tonic-gate 
13180Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL
13190Sstevel@tonic-gate 	/*
13200Sstevel@tonic-gate 	 * 32-bit callers need to have their iovec expanded, while ensuring
13210Sstevel@tonic-gate 	 * that they can't move more than 2Gbytes of data in a single call.
13220Sstevel@tonic-gate 	 */
13230Sstevel@tonic-gate 	if (model == DATAMODEL_ILP32) {
13240Sstevel@tonic-gate 		struct iovec32 aiov32[MSG_MAXIOVLEN];
13250Sstevel@tonic-gate 		ssize32_t count32;
13260Sstevel@tonic-gate 
13270Sstevel@tonic-gate 		if (iovcnt != 0 &&
13280Sstevel@tonic-gate 		    copyin((struct iovec32 *)lmsg.msg_iov, aiov32,
13290Sstevel@tonic-gate 		    iovcnt * sizeof (struct iovec32)))
13300Sstevel@tonic-gate 			return (set_errno(EFAULT));
13310Sstevel@tonic-gate 
13320Sstevel@tonic-gate 		count32 = 0;
13330Sstevel@tonic-gate 		for (i = 0; i < iovcnt; i++) {
13340Sstevel@tonic-gate 			ssize32_t iovlen32;
13350Sstevel@tonic-gate 
13360Sstevel@tonic-gate 			iovlen32 = aiov32[i].iov_len;
13370Sstevel@tonic-gate 			count32 += iovlen32;
13380Sstevel@tonic-gate 			if (iovlen32 < 0 || count32 < 0)
13390Sstevel@tonic-gate 				return (set_errno(EINVAL));
13400Sstevel@tonic-gate 			aiov[i].iov_len = iovlen32;
13410Sstevel@tonic-gate 			aiov[i].iov_base =
13420Sstevel@tonic-gate 			    (caddr_t)(uintptr_t)aiov32[i].iov_base;
13430Sstevel@tonic-gate 		}
13440Sstevel@tonic-gate 	} else
13450Sstevel@tonic-gate #endif /* _SYSCALL32_IMPL */
13460Sstevel@tonic-gate 	if (iovcnt != 0 &&
13470Sstevel@tonic-gate 	    copyin(lmsg.msg_iov, aiov,
13480Sstevel@tonic-gate 	    (unsigned)iovcnt * sizeof (struct iovec))) {
13490Sstevel@tonic-gate 		return (set_errno(EFAULT));
13500Sstevel@tonic-gate 	}
13510Sstevel@tonic-gate 	len = 0;
13520Sstevel@tonic-gate 	for (i = 0; i < iovcnt; i++) {
13530Sstevel@tonic-gate 		ssize_t iovlen = aiov[i].iov_len;
13540Sstevel@tonic-gate 		len += iovlen;
13550Sstevel@tonic-gate 		if (iovlen < 0 || len < 0) {
13560Sstevel@tonic-gate 			return (set_errno(EINVAL));
13570Sstevel@tonic-gate 		}
13580Sstevel@tonic-gate 	}
13590Sstevel@tonic-gate 	auio.uio_loffset = 0;
13600Sstevel@tonic-gate 	auio.uio_iov = aiov;
13610Sstevel@tonic-gate 	auio.uio_iovcnt = iovcnt;
13620Sstevel@tonic-gate 	auio.uio_resid = len;
13630Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
13640Sstevel@tonic-gate 	auio.uio_limit = 0;
13650Sstevel@tonic-gate 
13660Sstevel@tonic-gate 	return (sendit(sock, &lmsg, &auio, flags));
13670Sstevel@tonic-gate }
13680Sstevel@tonic-gate 
13690Sstevel@tonic-gate ssize_t
13700Sstevel@tonic-gate sendto(int sock, void *buffer, size_t len, int flags,
13710Sstevel@tonic-gate     struct sockaddr *name, socklen_t namelen)
13720Sstevel@tonic-gate {
13730Sstevel@tonic-gate 	struct nmsghdr lmsg;
13740Sstevel@tonic-gate 	struct uio auio;
13750Sstevel@tonic-gate 	struct iovec aiov[1];
13760Sstevel@tonic-gate 
13770Sstevel@tonic-gate 	dprint(1, ("sendto(%d, %p, %ld, %d, %p, %d)\n",
13780Sstevel@tonic-gate 		sock, buffer, len, flags, name, namelen));
13790Sstevel@tonic-gate 
13800Sstevel@tonic-gate 	if ((ssize_t)len < 0) {
13810Sstevel@tonic-gate 		return (set_errno(EINVAL));
13820Sstevel@tonic-gate 	}
13830Sstevel@tonic-gate 
13840Sstevel@tonic-gate 	aiov[0].iov_base = buffer;
13850Sstevel@tonic-gate 	aiov[0].iov_len = len;
13860Sstevel@tonic-gate 	auio.uio_loffset = 0;
13870Sstevel@tonic-gate 	auio.uio_iov = aiov;
13880Sstevel@tonic-gate 	auio.uio_iovcnt = 1;
13890Sstevel@tonic-gate 	auio.uio_resid = len;
13900Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
13910Sstevel@tonic-gate 	auio.uio_limit = 0;
13920Sstevel@tonic-gate 
13930Sstevel@tonic-gate 	lmsg.msg_name = (char *)name;
13940Sstevel@tonic-gate 	lmsg.msg_namelen = namelen;
13950Sstevel@tonic-gate 	lmsg.msg_control = NULL;
13960Sstevel@tonic-gate 	if (!(flags & MSG_XPG4_2)) {
13970Sstevel@tonic-gate 		/*
13980Sstevel@tonic-gate 		 * In order to be compatible with the libsocket/sockmod
13990Sstevel@tonic-gate 		 * implementation we set EOR for all send* calls.
14000Sstevel@tonic-gate 		 */
14010Sstevel@tonic-gate 		flags |= MSG_EOR;
14020Sstevel@tonic-gate 	}
14030Sstevel@tonic-gate 	return (sendit(sock, &lmsg, &auio, flags));
14040Sstevel@tonic-gate }
14050Sstevel@tonic-gate 
14060Sstevel@tonic-gate /*ARGSUSED3*/
14070Sstevel@tonic-gate int
14080Sstevel@tonic-gate getpeername(int sock, struct sockaddr *name, socklen_t *namelenp, int version)
14090Sstevel@tonic-gate {
14100Sstevel@tonic-gate 	struct sonode *so;
14110Sstevel@tonic-gate 	int error;
14120Sstevel@tonic-gate 	socklen_t namelen;
14130Sstevel@tonic-gate 	union {
14140Sstevel@tonic-gate 		struct sockaddr_in sin;
14150Sstevel@tonic-gate 		struct sockaddr_in6 sin6;
14160Sstevel@tonic-gate 	} sin;			/* Temporary buffer, common case */
14170Sstevel@tonic-gate 	void *addr;		/* Temporary buffer, uncommon case */
14180Sstevel@tonic-gate 	socklen_t addrlen, size;
14190Sstevel@tonic-gate 
14200Sstevel@tonic-gate 	dprint(1, ("getpeername(%d, %p, %p)\n",
14210Sstevel@tonic-gate 		sock, name, namelenp));
14220Sstevel@tonic-gate 
14230Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
14240Sstevel@tonic-gate 		goto bad;
14250Sstevel@tonic-gate 
14260Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
14270Sstevel@tonic-gate 	if (copyin(namelenp, &namelen, sizeof (namelen)) ||
14280Sstevel@tonic-gate 	    (name == NULL && namelen != 0)) {
14290Sstevel@tonic-gate 		error = EFAULT;
14300Sstevel@tonic-gate 		goto rel_out;
14310Sstevel@tonic-gate 	}
14320Sstevel@tonic-gate 	/*
14330Sstevel@tonic-gate 	 * If a connect or accept has been done, unless we're an Xnet socket,
14340Sstevel@tonic-gate 	 * the remote address has already been updated in so_faddr_sa.
14350Sstevel@tonic-gate 	 */
14360Sstevel@tonic-gate 	if (so->so_version != SOV_SOCKSTREAM && so->so_version != SOV_SOCKBSD ||
14370Sstevel@tonic-gate 	    !(so->so_state & SS_FADDR_VALID)) {
14380Sstevel@tonic-gate 		if ((error = SOP_GETPEERNAME(so)) != 0)
14390Sstevel@tonic-gate 			goto rel_out;
14400Sstevel@tonic-gate 	}
14410Sstevel@tonic-gate 
14420Sstevel@tonic-gate 	if (so->so_faddr_maxlen <= sizeof (sin)) {
14430Sstevel@tonic-gate 		size = 0;
14440Sstevel@tonic-gate 		addr = &sin;
14450Sstevel@tonic-gate 	} else {
14460Sstevel@tonic-gate 		/*
14470Sstevel@tonic-gate 		 * Allocate a temporary buffer to avoid holding so_lock
14480Sstevel@tonic-gate 		 * across the copyout.
14490Sstevel@tonic-gate 		 */
14500Sstevel@tonic-gate 		size = so->so_faddr_maxlen;
14510Sstevel@tonic-gate 		addr = kmem_alloc(size, KM_SLEEP);
14520Sstevel@tonic-gate 	}
14530Sstevel@tonic-gate 	/* Prevent so_faddr_sa/len from changing while accessed */
14540Sstevel@tonic-gate 	mutex_enter(&so->so_lock);
14550Sstevel@tonic-gate 	if (!(so->so_state & SS_ISCONNECTED)) {
14560Sstevel@tonic-gate 		mutex_exit(&so->so_lock);
14570Sstevel@tonic-gate 		error = ENOTCONN;
14580Sstevel@tonic-gate 		goto free_out;
14590Sstevel@tonic-gate 	}
14600Sstevel@tonic-gate 	addrlen = so->so_faddr_len;
14610Sstevel@tonic-gate 	bcopy(so->so_faddr_sa, addr, addrlen);
14620Sstevel@tonic-gate 	mutex_exit(&so->so_lock);
14630Sstevel@tonic-gate 
14640Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
14650Sstevel@tonic-gate 	error = copyout_name(name, namelen, namelenp, addr,
14660Sstevel@tonic-gate 	    (so->so_state & SS_FADDR_NOXLATE) ? 0 : addrlen);
14670Sstevel@tonic-gate free_out:
14680Sstevel@tonic-gate 	if (size != 0)
14690Sstevel@tonic-gate 		kmem_free(addr, size);
14700Sstevel@tonic-gate rel_out:
14710Sstevel@tonic-gate 	releasef(sock);
14720Sstevel@tonic-gate bad:	return (error != 0 ? set_errno(error) : 0);
14730Sstevel@tonic-gate }
14740Sstevel@tonic-gate 
14750Sstevel@tonic-gate /*ARGSUSED3*/
14760Sstevel@tonic-gate int
14770Sstevel@tonic-gate getsockname(int sock, struct sockaddr *name,
14780Sstevel@tonic-gate 		socklen_t *namelenp, int version)
14790Sstevel@tonic-gate {
14800Sstevel@tonic-gate 	struct sonode *so;
14810Sstevel@tonic-gate 	int error;
14820Sstevel@tonic-gate 	socklen_t namelen;
14830Sstevel@tonic-gate 	union {
14840Sstevel@tonic-gate 		struct sockaddr_in sin;
14850Sstevel@tonic-gate 		struct sockaddr_in6 sin6;
14860Sstevel@tonic-gate 	} sin;			/* Temporary buffer, common case */
14870Sstevel@tonic-gate 	void *addr;		/* Temporary buffer, uncommon case */
14880Sstevel@tonic-gate 	socklen_t addrlen, size;
14890Sstevel@tonic-gate 
14900Sstevel@tonic-gate 	dprint(1, ("getsockname(%d, %p, %p)\n",
14910Sstevel@tonic-gate 		sock, name, namelenp));
14920Sstevel@tonic-gate 
14930Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
14940Sstevel@tonic-gate 		goto bad;
14950Sstevel@tonic-gate 
14960Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
14970Sstevel@tonic-gate 	if (copyin(namelenp, &namelen, sizeof (namelen)) ||
14980Sstevel@tonic-gate 	    (name == NULL && namelen != 0)) {
14990Sstevel@tonic-gate 		error = EFAULT;
15000Sstevel@tonic-gate 		goto rel_out;
15010Sstevel@tonic-gate 	}
15020Sstevel@tonic-gate 
15030Sstevel@tonic-gate 	/*
15040Sstevel@tonic-gate 	 * If a bind or accept has been done, unless we're an Xnet endpoint,
15050Sstevel@tonic-gate 	 * the local address has already been updated in so_laddr_sa.
15060Sstevel@tonic-gate 	 */
15070Sstevel@tonic-gate 	if ((so->so_version != SOV_SOCKSTREAM &&
15080Sstevel@tonic-gate 	    so->so_version != SOV_SOCKBSD) ||
15090Sstevel@tonic-gate 	    !(so->so_state & SS_LADDR_VALID)) {
15100Sstevel@tonic-gate 		if ((error = SOP_GETSOCKNAME(so)) != 0)
15110Sstevel@tonic-gate 			goto rel_out;
15120Sstevel@tonic-gate 	}
15130Sstevel@tonic-gate 
15140Sstevel@tonic-gate 	if (so->so_laddr_maxlen <= sizeof (sin)) {
15150Sstevel@tonic-gate 		size = 0;
15160Sstevel@tonic-gate 		addr = &sin;
15170Sstevel@tonic-gate 	} else {
15180Sstevel@tonic-gate 		/*
15190Sstevel@tonic-gate 		 * Allocate a temporary buffer to avoid holding so_lock
15200Sstevel@tonic-gate 		 * across the copyout.
15210Sstevel@tonic-gate 		 */
15220Sstevel@tonic-gate 		size = so->so_laddr_maxlen;
15230Sstevel@tonic-gate 		addr = kmem_alloc(size, KM_SLEEP);
15240Sstevel@tonic-gate 	}
15250Sstevel@tonic-gate 	/* Prevent so_laddr_sa/len from changing while accessed */
15260Sstevel@tonic-gate 	mutex_enter(&so->so_lock);
15270Sstevel@tonic-gate 	addrlen = so->so_laddr_len;
15280Sstevel@tonic-gate 	bcopy(so->so_laddr_sa, addr, addrlen);
15290Sstevel@tonic-gate 	mutex_exit(&so->so_lock);
15300Sstevel@tonic-gate 
15310Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
15320Sstevel@tonic-gate 	error = copyout_name(name, namelen, namelenp,
15330Sstevel@tonic-gate 	    addr, addrlen);
15340Sstevel@tonic-gate 	if (size != 0)
15350Sstevel@tonic-gate 		kmem_free(addr, size);
15360Sstevel@tonic-gate rel_out:
15370Sstevel@tonic-gate 	releasef(sock);
15380Sstevel@tonic-gate bad:	return (error != 0 ? set_errno(error) : 0);
15390Sstevel@tonic-gate }
15400Sstevel@tonic-gate 
15410Sstevel@tonic-gate /*ARGSUSED5*/
15420Sstevel@tonic-gate int
15430Sstevel@tonic-gate getsockopt(int sock,
15440Sstevel@tonic-gate 	int level,
15450Sstevel@tonic-gate 	int option_name,
15460Sstevel@tonic-gate 	void *option_value,
15470Sstevel@tonic-gate 	socklen_t *option_lenp,
15480Sstevel@tonic-gate 	int version)
15490Sstevel@tonic-gate {
15500Sstevel@tonic-gate 	struct sonode *so;
15510Sstevel@tonic-gate 	socklen_t optlen, optlen_res;
15520Sstevel@tonic-gate 	void *optval;
15530Sstevel@tonic-gate 	int error;
15540Sstevel@tonic-gate 
15550Sstevel@tonic-gate 	dprint(1, ("getsockopt(%d, %d, %d, %p, %p)\n",
15560Sstevel@tonic-gate 		sock, level, option_name, option_value, option_lenp));
15570Sstevel@tonic-gate 
15580Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
15590Sstevel@tonic-gate 		return (set_errno(error));
15600Sstevel@tonic-gate 
15610Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
15620Sstevel@tonic-gate 	if (copyin(option_lenp, &optlen, sizeof (optlen))) {
15630Sstevel@tonic-gate 		releasef(sock);
15640Sstevel@tonic-gate 		return (set_errno(EFAULT));
15650Sstevel@tonic-gate 	}
15660Sstevel@tonic-gate 	/*
15670Sstevel@tonic-gate 	 * Verify that the length is not excessive to prevent
15680Sstevel@tonic-gate 	 * an application from consuming all of kernel memory.
15690Sstevel@tonic-gate 	 */
15700Sstevel@tonic-gate 	if (optlen > SO_MAXARGSIZE) {
15710Sstevel@tonic-gate 		error = EINVAL;
15720Sstevel@tonic-gate 		releasef(sock);
15730Sstevel@tonic-gate 		return (set_errno(error));
15740Sstevel@tonic-gate 	}
15750Sstevel@tonic-gate 	optval = kmem_alloc(optlen, KM_SLEEP);
15760Sstevel@tonic-gate 	optlen_res = optlen;
15770Sstevel@tonic-gate 	error = SOP_GETSOCKOPT(so, level, option_name, optval,
15780Sstevel@tonic-gate 	    &optlen_res, (version != SOV_XPG4_2) ? 0 : _SOGETSOCKOPT_XPG4_2);
15790Sstevel@tonic-gate 	releasef(sock);
15800Sstevel@tonic-gate 	if (error) {
15810Sstevel@tonic-gate 		kmem_free(optval, optlen);
15820Sstevel@tonic-gate 		return (set_errno(error));
15830Sstevel@tonic-gate 	}
15840Sstevel@tonic-gate 	error = copyout_arg(option_value, optlen, option_lenp,
15850Sstevel@tonic-gate 	    optval, optlen_res);
15860Sstevel@tonic-gate 	kmem_free(optval, optlen);
15870Sstevel@tonic-gate 	if (error)
15880Sstevel@tonic-gate 		return (set_errno(error));
15890Sstevel@tonic-gate 	return (0);
15900Sstevel@tonic-gate }
15910Sstevel@tonic-gate 
15920Sstevel@tonic-gate /*ARGSUSED5*/
15930Sstevel@tonic-gate int
15940Sstevel@tonic-gate setsockopt(int sock,
15950Sstevel@tonic-gate 	int level,
15960Sstevel@tonic-gate 	int option_name,
15970Sstevel@tonic-gate 	void *option_value,
15980Sstevel@tonic-gate 	socklen_t option_len,
15990Sstevel@tonic-gate 	int version)
16000Sstevel@tonic-gate {
16010Sstevel@tonic-gate 	struct sonode *so;
16020Sstevel@tonic-gate 	intptr_t buffer[2];
16030Sstevel@tonic-gate 	void *optval = NULL;
16040Sstevel@tonic-gate 	int error;
16050Sstevel@tonic-gate 
16060Sstevel@tonic-gate 	dprint(1, ("setsockopt(%d, %d, %d, %p, %d)\n",
16070Sstevel@tonic-gate 		sock, level, option_name, option_value, option_len));
16080Sstevel@tonic-gate 
16090Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
16100Sstevel@tonic-gate 		return (set_errno(error));
16110Sstevel@tonic-gate 
16120Sstevel@tonic-gate 	if (option_value != NULL) {
16130Sstevel@tonic-gate 		if (option_len != 0) {
16140Sstevel@tonic-gate 			/*
16150Sstevel@tonic-gate 			 * Verify that the length is not excessive to prevent
16160Sstevel@tonic-gate 			 * an application from consuming all of kernel memory.
16170Sstevel@tonic-gate 			 */
16180Sstevel@tonic-gate 			if (option_len > SO_MAXARGSIZE) {
16190Sstevel@tonic-gate 				error = EINVAL;
16200Sstevel@tonic-gate 				goto done2;
16210Sstevel@tonic-gate 			}
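			/*
			 * Small option values fit in the on-stack buffer;
			 * anything larger is kmem_alloc'ed here and freed
			 * in the done1 path below.
			 */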
16220Sstevel@tonic-gate 			optval = option_len <= sizeof (buffer) ?
16230Sstevel@tonic-gate 			    &buffer : kmem_alloc((size_t)option_len, KM_SLEEP);
16240Sstevel@tonic-gate 			ASSERT(MUTEX_NOT_HELD(&so->so_lock));
16250Sstevel@tonic-gate 			if (copyin(option_value, optval, (size_t)option_len)) {
16260Sstevel@tonic-gate 				error = EFAULT;
16270Sstevel@tonic-gate 				goto done1;
16280Sstevel@tonic-gate 			}
16290Sstevel@tonic-gate 		}
16300Sstevel@tonic-gate 	} else
16310Sstevel@tonic-gate 		option_len = 0;
16320Sstevel@tonic-gate 
16330Sstevel@tonic-gate 	error = SOP_SETSOCKOPT(so, level, option_name, optval,
16340Sstevel@tonic-gate 	    (t_uscalar_t)option_len);
16350Sstevel@tonic-gate done1:
16360Sstevel@tonic-gate 	if (optval != buffer)
16370Sstevel@tonic-gate 		kmem_free(optval, (size_t)option_len);
16380Sstevel@tonic-gate done2:
16390Sstevel@tonic-gate 	releasef(sock);
16400Sstevel@tonic-gate 	if (error)
16410Sstevel@tonic-gate 		return (set_errno(error));
16420Sstevel@tonic-gate 	return (0);
16430Sstevel@tonic-gate }
16440Sstevel@tonic-gate 
16450Sstevel@tonic-gate /*
16460Sstevel@tonic-gate  * Add config info when devpath is non-NULL; delete info when devpath is NULL.
16470Sstevel@tonic-gate  * devpath is a user address.
16480Sstevel@tonic-gate  */
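/*
 * For illustration only (a sketch, not shipped as part of this file):
 * soconfig(1M) drives this system call from entries in /etc/sock2path,
 * e.g. a line such as the following maps AF_INET (2), SOCK_STREAM (2),
 * IPPROTO_TCP (6) sockets onto /dev/tcp:
 *
 *	2	2	6	/dev/tcp
 */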
16490Sstevel@tonic-gate int
16500Sstevel@tonic-gate sockconfig(int domain, int type, int protocol, char *devpath)
16510Sstevel@tonic-gate {
16520Sstevel@tonic-gate 	char *kdevpath;		/* Copied in devpath string */
16530Sstevel@tonic-gate 	size_t kdevpathlen;
16540Sstevel@tonic-gate 	int error = 0;
16550Sstevel@tonic-gate 
16560Sstevel@tonic-gate 	dprint(1, ("sockconfig(%d, %d, %d, %p)\n",
16570Sstevel@tonic-gate 		domain, type, protocol, devpath));
16580Sstevel@tonic-gate 
16590Sstevel@tonic-gate 	if (secpolicy_net_config(CRED(), B_FALSE) != 0)
16600Sstevel@tonic-gate 		return (set_errno(EPERM));
16610Sstevel@tonic-gate 
16620Sstevel@tonic-gate 	if (devpath == NULL) {
16630Sstevel@tonic-gate 		/* Deleting an entry */
16640Sstevel@tonic-gate 		kdevpath = NULL;
16650Sstevel@tonic-gate 		kdevpathlen = 0;
16660Sstevel@tonic-gate 	} else {
16670Sstevel@tonic-gate 		/*
16680Sstevel@tonic-gate 		 * Adding an entry.
16690Sstevel@tonic-gate 		 * Copyin the devpath.
16700Sstevel@tonic-gate 		 * This also makes it possible to check for overly long pathnames.
16710Sstevel@tonic-gate 		 * Compress the space needed for the devpath before passing it
16720Sstevel@tonic-gate 		 * to soconfig - soconfig will store the string until
16730Sstevel@tonic-gate 		 * the configuration is removed.
16740Sstevel@tonic-gate 		 */
16750Sstevel@tonic-gate 		char *buf;
16760Sstevel@tonic-gate 
16770Sstevel@tonic-gate 		buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
16780Sstevel@tonic-gate 		if ((error = copyinstr(devpath, buf, MAXPATHLEN,
16790Sstevel@tonic-gate 		    &kdevpathlen)) != 0) {
16800Sstevel@tonic-gate 			kmem_free(buf, MAXPATHLEN);
16810Sstevel@tonic-gate 			goto done;
16820Sstevel@tonic-gate 		}
16830Sstevel@tonic-gate 
16840Sstevel@tonic-gate 		kdevpath = kmem_alloc(kdevpathlen, KM_SLEEP);
16850Sstevel@tonic-gate 		bcopy(buf, kdevpath, kdevpathlen);
16860Sstevel@tonic-gate 		kdevpath[kdevpathlen - 1] = '\0';
16870Sstevel@tonic-gate 
16880Sstevel@tonic-gate 		kmem_free(buf, MAXPATHLEN);
16890Sstevel@tonic-gate 	}
16900Sstevel@tonic-gate 	error = soconfig(domain, type, protocol, kdevpath, (int)kdevpathlen);
16910Sstevel@tonic-gate done:
16920Sstevel@tonic-gate 	if (error) {
16930Sstevel@tonic-gate 		eprintline(error);
16940Sstevel@tonic-gate 		return (set_errno(error));
16950Sstevel@tonic-gate 	}
16960Sstevel@tonic-gate 	return (0);
16970Sstevel@tonic-gate }
16980Sstevel@tonic-gate 
16990Sstevel@tonic-gate 
17000Sstevel@tonic-gate /*
17010Sstevel@tonic-gate  * Sendfile is implemented through two schemes: direct I/O, or by
17020Sstevel@tonic-gate  * caching in the filesystem page cache. We cache the input file by
17030Sstevel@tonic-gate  * default and use direct I/O only if sendfile_max_size is set
17040Sstevel@tonic-gate  * appropriately as explained below. Note that this logic is consistent
17050Sstevel@tonic-gate  * with other filesystems where caching is turned on by default
17060Sstevel@tonic-gate  * unless explicitly turned off by using the DIRECTIO ioctl.
17070Sstevel@tonic-gate  *
17080Sstevel@tonic-gate  * We choose a slightly different scheme here. One can turn off
17090Sstevel@tonic-gate  * caching by setting sendfile_max_size to 0. One can also enable
17100Sstevel@tonic-gate  * caching of files <= sendfile_max_size by setting sendfile_max_size
17110Sstevel@tonic-gate  * to an appropriate value. By default sendfile_max_size is set to the
17120Sstevel@tonic-gate  * maximum value so that all files are cached. In future, we may provide
17130Sstevel@tonic-gate  * better interfaces for caching the file.
17140Sstevel@tonic-gate  *
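 *
 * For example (an illustrative sketch only; the module prefix and the
 * cutoff value are assumptions, and the right cutoff is workload
 * dependent), caching could be limited to files of at most 1MB by
 * adding the following to /etc/system:
 *
 *	set sockfs:sendfile_max_size = 0x100000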
17150Sstevel@tonic-gate  * Sendfile through Direct I/O (Zero copy)
17160Sstevel@tonic-gate  * --------------------------------------
17170Sstevel@tonic-gate  *
17180Sstevel@tonic-gate  * As disks are normally slower than the network, we can't have a
17190Sstevel@tonic-gate  * single thread that reads the disk and writes to the network. We
17200Sstevel@tonic-gate  * need to have parallelism. This is done by having the sendfile
17210Sstevel@tonic-gate  * thread create another thread that reads from the filesystem
17220Sstevel@tonic-gate  * and queues it for network processing. In this scheme, the data
17230Sstevel@tonic-gate  * is never copied anywhere, i.e., it is zero-copy, unlike the other
17240Sstevel@tonic-gate  * scheme.
17250Sstevel@tonic-gate  *
17260Sstevel@tonic-gate  * We have a sendfile queue (snfq) where each sendfile
17270Sstevel@tonic-gate  * request (snf_req_t) is queued for processing by a thread. Number
17280Sstevel@tonic-gate  * request (snf_req_t) is queued for processing by a thread. The number
17290Sstevel@tonic-gate  * of threads is allocated dynamically, and threads exit if they idle
17300Sstevel@tonic-gate  * processed by a thread, it produces a number of mblk_t structures to
17310Sstevel@tonic-gate  * be consumed by the sendfile thread. snf_deque and snf_enque are
17320Sstevel@tonic-gate  * used for consuming and producing mblks. The size of each filesystem
17330Sstevel@tonic-gate  * read is determined by the tuneable (sendfile_read_size). A single
17340Sstevel@tonic-gate  * mblk holds sendfile_read_size worth of data (except the last
17350Sstevel@tonic-gate  * read of the file) which is sent down as a whole to the network.
17360Sstevel@tonic-gate  * sendfile_read_size is set to 1 MB as this seems to be the optimal
17370Sstevel@tonic-gate  * value for the UFS filesystem backed by a striped storage array.
17380Sstevel@tonic-gate  *
17390Sstevel@tonic-gate  * Synchronisation between read (producer) and write (consumer) threads.
17400Sstevel@tonic-gate  * --------------------------------------------------------------------
17410Sstevel@tonic-gate  *
17420Sstevel@tonic-gate  * sr_lock protects sr_ib_head and sr_ib_tail. The lock is held while
17430Sstevel@tonic-gate  * adding and deleting items in this list. An error can happen at any
17440Sstevel@tonic-gate  * time during the read or the write. There could be unprocessed mblks
17450Sstevel@tonic-gate  * in the sr_ib_XXX list when a read or write error occurs. Whenever an
17460Sstevel@tonic-gate  * error is encountered, we need two things to happen:
17470Sstevel@tonic-gate  *
17480Sstevel@tonic-gate  * a) One of the threads needs to clean up the mblks.
17490Sstevel@tonic-gate  * b) When one thread encounters an error, the other should stop.
17500Sstevel@tonic-gate  *
17510Sstevel@tonic-gate  * For (a), we don't want to penalise the reader thread as it could do
17520Sstevel@tonic-gate  * some useful work processing other requests. For (b), the error can
17530Sstevel@tonic-gate  * be detected by examining sr_read_error or sr_write_error.
17540Sstevel@tonic-gate  * sr_lock protects sr_read_error and sr_write_error. If both the reader
17550Sstevel@tonic-gate  * and the writer encounter errors, we report the write error back to
17560Sstevel@tonic-gate  * the application, as that's what would have happened if the operations
17570Sstevel@tonic-gate  * had been done sequentially. With this in mind, the following should work:
17580Sstevel@tonic-gate  *
17590Sstevel@tonic-gate  * 	- Check for errors before read or write.
17600Sstevel@tonic-gate  *	- If the reader encounters an error, set it in sr_read_error.
17610Sstevel@tonic-gate  *	  Check sr_write_error; if it is set, send cv_signal, as the writer
17620Sstevel@tonic-gate  *	  is waiting for the reader to complete. If it is not set, the writer
17630Sstevel@tonic-gate  *	  is either running, sinking data to the network, or blocked because
17640Sstevel@tonic-gate  *	  of flow control. To handle the latter case, we
17650Sstevel@tonic-gate  *	  always send a signal. In any case, it will examine sr_read_error
17660Sstevel@tonic-gate  *	  and return. sr_read_error is marked with SR_READ_DONE to tell
17670Sstevel@tonic-gate  *	  the writer that the reader is done in all the cases.
17680Sstevel@tonic-gate  *	- If the writer encounters an error, set it in sr_write_error.
17690Sstevel@tonic-gate  *	  The reader thread is either blocked because of flow control or
17700Sstevel@tonic-gate  *	  running, reading data from the disk. For the former, we need to
17710Sstevel@tonic-gate  *	  wake up the thread. Again, to keep it simple, we always wake up
17720Sstevel@tonic-gate  *	  the reader thread. Then wait for the read thread to complete
17730Sstevel@tonic-gate  *	  if it is not done yet. Clean up and return.
17740Sstevel@tonic-gate  *
17750Sstevel@tonic-gate  * High and low water marks for the read thread.
17760Sstevel@tonic-gate  * --------------------------------------------
17770Sstevel@tonic-gate  *
17780Sstevel@tonic-gate  * If sendfile() is used to send data over a slow network, we need to
17790Sstevel@tonic-gate  * make sure that the read thread does not produce data at a faster
17800Sstevel@tonic-gate  * rate than the network. This can happen if the disk is faster than
17810Sstevel@tonic-gate  * the network. In such a case, we don't want to build a very large queue.
17820Sstevel@tonic-gate  * But we would still like to get all of the network throughput possible.
17830Sstevel@tonic-gate  * This implies that the network should never block waiting for data.
17840Sstevel@tonic-gate  * As there are a lot of possible disk throughput/network throughput
17850Sstevel@tonic-gate  * combinations, it is difficult to come up with an accurate number.
17860Sstevel@tonic-gate  * A typical 10K RPM disk has a max seek latency of 17ms and a rotational
17870Sstevel@tonic-gate  * latency of 3ms for reading a disk block. Thus, the total latency to
17880Sstevel@tonic-gate  * initiate a new read, transfer data from the disk and queue for
17890Sstevel@tonic-gate  * transmission would take about a max of 25ms. Today's max transfer rate
17900Sstevel@tonic-gate  * for the network is 100MB/sec. If the thread is blocked because of flow
17910Sstevel@tonic-gate  * control, it would take 25ms to get new data ready for transmission.
17920Sstevel@tonic-gate  * We have to make sure that the network is not idling while we are
17930Sstevel@tonic-gate  * initiating new transfers. So, at 100MB/sec, to keep the network busy
17940Sstevel@tonic-gate  * we would need 2.5MB of data. Rounding off, we set the low water mark to 3MB.
17950Sstevel@tonic-gate  * We need to pick a high water mark so that the woken up thread would
17960Sstevel@tonic-gate  * do considerable work before blocking again to prevent thrashing. Currently,
17970Sstevel@tonic-gate  * we pick this to be 10 times that of the low water mark.
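 *
 * With the defaults below this works out to:
 *
 *	sendfile_req_lowat = 3MB	(~ 100MB/sec * 25ms, rounded up)
 *	sendfile_req_hiwat = 30MB	(10 * sendfile_req_lowat)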
17980Sstevel@tonic-gate  *
17990Sstevel@tonic-gate  * Sendfile with segmap caching (One copy from page cache to mblks).
18000Sstevel@tonic-gate  * ----------------------------------------------------------------
18010Sstevel@tonic-gate  *
18020Sstevel@tonic-gate  * We use the segmap cache for caching the file if the size of the file
18030Sstevel@tonic-gate  * is <= sendfile_max_size. In this case we don't use threads, as VM
18040Sstevel@tonic-gate  * is generally fast enough to keep up with the network. If the underlying
18050Sstevel@tonic-gate  * transport allows, we call segmap_getmapflt() to map MAXBSIZE (8K) worth
18060Sstevel@tonic-gate  * of data into segmap space, and use the virtual address from segmap
18070Sstevel@tonic-gate  * directly through desballoc() to avoid copy. Once the transport is done
18080Sstevel@tonic-gate  * with the data, the mapping will be released through segmap_release()
18090Sstevel@tonic-gate  * called by the call-back routine.
18100Sstevel@tonic-gate  *
18110Sstevel@tonic-gate  * If zero-copy is not allowed by the transport, we simply call VOP_READ()
18120Sstevel@tonic-gate  * to copy the data from the filesystem into our temporary network buffer.
18130Sstevel@tonic-gate  *
18140Sstevel@tonic-gate  * To disable caching, set sendfile_max_size to 0.
18150Sstevel@tonic-gate  */
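
/*
 * For reference, a minimal user-level sketch (not part of this file;
 * the descriptor names are hypothetical) of the sendfile(3EXT) call
 * that ultimately drives the code below:
 *
 *	#include <sys/sendfile.h>
 *
 *	off_t off = 0;
 *	ssize_t nbytes = sendfile(sock_fd, file_fd, &off, file_size);
 *	if (nbytes == -1)
 *		perror("sendfile");
 */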
18160Sstevel@tonic-gate 
18170Sstevel@tonic-gate uint_t sendfile_read_size = 1024 * 1024;
18180Sstevel@tonic-gate #define	SENDFILE_REQ_LOWAT	(3 * 1024 * 1024)
18190Sstevel@tonic-gate uint_t sendfile_req_lowat = SENDFILE_REQ_LOWAT;
18200Sstevel@tonic-gate uint_t sendfile_req_hiwat = 10 * SENDFILE_REQ_LOWAT;
18210Sstevel@tonic-gate struct sendfile_stats sf_stats;
18220Sstevel@tonic-gate struct sendfile_queue *snfq;
18230Sstevel@tonic-gate clock_t snfq_timeout;
18240Sstevel@tonic-gate off64_t sendfile_max_size;
18250Sstevel@tonic-gate 
18260Sstevel@tonic-gate static void snf_enque(snf_req_t *, mblk_t *);
18270Sstevel@tonic-gate static mblk_t *snf_deque(snf_req_t *);
18280Sstevel@tonic-gate 
18290Sstevel@tonic-gate void
18300Sstevel@tonic-gate sendfile_init(void)
18310Sstevel@tonic-gate {
18320Sstevel@tonic-gate 	snfq = kmem_zalloc(sizeof (struct sendfile_queue), KM_SLEEP);
18330Sstevel@tonic-gate 
18340Sstevel@tonic-gate 	mutex_init(&snfq->snfq_lock, NULL, MUTEX_DEFAULT, NULL);
18350Sstevel@tonic-gate 	cv_init(&snfq->snfq_cv, NULL, CV_DEFAULT, NULL);
18360Sstevel@tonic-gate 	snfq->snfq_max_threads = max_ncpus;
18370Sstevel@tonic-gate 	snfq_timeout = SNFQ_TIMEOUT;
18380Sstevel@tonic-gate 	/* Cache all files by default. */
18390Sstevel@tonic-gate 	sendfile_max_size = MAXOFFSET_T;
18400Sstevel@tonic-gate }
18410Sstevel@tonic-gate 
18420Sstevel@tonic-gate /*
18430Sstevel@tonic-gate  * Queues a mblk_t for network processing.
18440Sstevel@tonic-gate  */
18450Sstevel@tonic-gate static void
18460Sstevel@tonic-gate snf_enque(snf_req_t *sr, mblk_t *mp)
18470Sstevel@tonic-gate {
18480Sstevel@tonic-gate 	mp->b_next = NULL;
18490Sstevel@tonic-gate 	mutex_enter(&sr->sr_lock);
18500Sstevel@tonic-gate 	if (sr->sr_mp_head == NULL) {
18510Sstevel@tonic-gate 		sr->sr_mp_head = sr->sr_mp_tail = mp;
18520Sstevel@tonic-gate 		cv_signal(&sr->sr_cv);
18530Sstevel@tonic-gate 	} else {
18540Sstevel@tonic-gate 		sr->sr_mp_tail->b_next = mp;
18550Sstevel@tonic-gate 		sr->sr_mp_tail = mp;
18560Sstevel@tonic-gate 	}
18570Sstevel@tonic-gate 	sr->sr_qlen += MBLKL(mp);
18580Sstevel@tonic-gate 	while ((sr->sr_qlen > sr->sr_hiwat) &&
18590Sstevel@tonic-gate 	    (sr->sr_write_error == 0)) {
18600Sstevel@tonic-gate 		sf_stats.ss_full_waits++;
18610Sstevel@tonic-gate 		cv_wait(&sr->sr_cv, &sr->sr_lock);
18620Sstevel@tonic-gate 	}
18630Sstevel@tonic-gate 	mutex_exit(&sr->sr_lock);
18640Sstevel@tonic-gate }
18650Sstevel@tonic-gate 
18660Sstevel@tonic-gate /*
18670Sstevel@tonic-gate  * De-queues a mblk_t for network processing.
18680Sstevel@tonic-gate  */
18690Sstevel@tonic-gate static mblk_t *
18700Sstevel@tonic-gate snf_deque(snf_req_t *sr)
18710Sstevel@tonic-gate {
18720Sstevel@tonic-gate 	mblk_t *mp;
18730Sstevel@tonic-gate 
18740Sstevel@tonic-gate 	mutex_enter(&sr->sr_lock);
18750Sstevel@tonic-gate 	/*
18760Sstevel@tonic-gate 	 * If we have encountered a read error, or the read has
18770Sstevel@tonic-gate 	 * completed and there are no more mblks, return NULL.
18780Sstevel@tonic-gate 	 * We also need to check for a NULL sr_mp_head, as
18790Sstevel@tonic-gate 	 * the reads could have completed with nothing
18800Sstevel@tonic-gate 	 * more to come.
18810Sstevel@tonic-gate 	 */
18820Sstevel@tonic-gate 	if (((sr->sr_read_error & ~SR_READ_DONE) != 0) ||
18830Sstevel@tonic-gate 	    ((sr->sr_read_error & SR_READ_DONE) &&
18840Sstevel@tonic-gate 	    sr->sr_mp_head == NULL)) {
18850Sstevel@tonic-gate 		mutex_exit(&sr->sr_lock);
18860Sstevel@tonic-gate 		return (NULL);
18870Sstevel@tonic-gate 	}
18880Sstevel@tonic-gate 	/*
18890Sstevel@tonic-gate 	 * To start with, neither SR_READ_DONE is marked nor
18900Sstevel@tonic-gate 	 * the error is set. When we wake up from cv_wait,
18910Sstevel@tonic-gate 	 * the following are the possibilities:
18920Sstevel@tonic-gate 	 *
18930Sstevel@tonic-gate 	 *	a) sr_read_error is zero and mblks are queued.
18940Sstevel@tonic-gate 	 *	b) sr_read_error is set to SR_READ_DONE
18950Sstevel@tonic-gate 	 *	   and mblks are queued.
18960Sstevel@tonic-gate 	 *	c) sr_read_error is set to SR_READ_DONE
18970Sstevel@tonic-gate 	 *	   and no mblks.
18980Sstevel@tonic-gate 	 *	d) sr_read_error is set to some error other
18990Sstevel@tonic-gate 	 *	   than SR_READ_DONE.
19000Sstevel@tonic-gate 	 */
19010Sstevel@tonic-gate 
19020Sstevel@tonic-gate 	while ((sr->sr_read_error == 0) && (sr->sr_mp_head == NULL)) {
19030Sstevel@tonic-gate 		sf_stats.ss_empty_waits++;
19040Sstevel@tonic-gate 		cv_wait(&sr->sr_cv, &sr->sr_lock);
19050Sstevel@tonic-gate 	}
19060Sstevel@tonic-gate 	/* Handle (a) and (b) first  - the normal case. */
19070Sstevel@tonic-gate 	if (((sr->sr_read_error & ~SR_READ_DONE) == 0) &&
19080Sstevel@tonic-gate 	    (sr->sr_mp_head != NULL)) {
19090Sstevel@tonic-gate 		mp = sr->sr_mp_head;
19100Sstevel@tonic-gate 		sr->sr_mp_head = mp->b_next;
19110Sstevel@tonic-gate 		sr->sr_qlen -= MBLKL(mp);
19120Sstevel@tonic-gate 		if (sr->sr_qlen < sr->sr_lowat)
19130Sstevel@tonic-gate 			cv_signal(&sr->sr_cv);
19140Sstevel@tonic-gate 		mutex_exit(&sr->sr_lock);
19150Sstevel@tonic-gate 		mp->b_next = NULL;
19160Sstevel@tonic-gate 		return (mp);
19170Sstevel@tonic-gate 	}
19180Sstevel@tonic-gate 	/* Handle (c) and (d). */
19190Sstevel@tonic-gate 	mutex_exit(&sr->sr_lock);
19200Sstevel@tonic-gate 	return (NULL);
19210Sstevel@tonic-gate }
19220Sstevel@tonic-gate 
19230Sstevel@tonic-gate /*
19240Sstevel@tonic-gate  * Reads data from the filesystem and queues it for network processing.
19250Sstevel@tonic-gate  */
19260Sstevel@tonic-gate void
19270Sstevel@tonic-gate snf_async_read(snf_req_t *sr)
19280Sstevel@tonic-gate {
19290Sstevel@tonic-gate 	size_t iosize;
19300Sstevel@tonic-gate 	u_offset_t fileoff;
19310Sstevel@tonic-gate 	u_offset_t size;
19320Sstevel@tonic-gate 	int ret_size;
19330Sstevel@tonic-gate 	int error;
19340Sstevel@tonic-gate 	file_t *fp;
19350Sstevel@tonic-gate 	mblk_t *mp;
19360Sstevel@tonic-gate 
19370Sstevel@tonic-gate 	fp = sr->sr_fp;
19380Sstevel@tonic-gate 	size = sr->sr_file_size;
19390Sstevel@tonic-gate 	fileoff = sr->sr_file_off;
19400Sstevel@tonic-gate 
19410Sstevel@tonic-gate 	/*
19420Sstevel@tonic-gate 	 * Ignore the error for filesystems that don't support DIRECTIO.
19430Sstevel@tonic-gate 	 */
19440Sstevel@tonic-gate 	(void) VOP_IOCTL(fp->f_vnode, _FIODIRECTIO, DIRECTIO_ON, 0,
19450Sstevel@tonic-gate 	    kcred, NULL);
19460Sstevel@tonic-gate 
19470Sstevel@tonic-gate 	while ((size != 0) && (sr->sr_write_error == 0)) {
19480Sstevel@tonic-gate 
19490Sstevel@tonic-gate 		iosize = (int)MIN(sr->sr_maxpsz, size);
19500Sstevel@tonic-gate 
19510Sstevel@tonic-gate 		if ((mp = allocb(iosize, BPRI_MED)) == NULL) {
19520Sstevel@tonic-gate 			error = EAGAIN;
19530Sstevel@tonic-gate 			break;
19540Sstevel@tonic-gate 		}
19550Sstevel@tonic-gate 		ret_size = soreadfile(fp, mp->b_rptr, fileoff, &error, iosize);
19560Sstevel@tonic-gate 
19570Sstevel@tonic-gate 		/* Error or reached EOF? */
19580Sstevel@tonic-gate 		if ((error != 0) || (ret_size == 0)) {
19590Sstevel@tonic-gate 			freeb(mp);
19600Sstevel@tonic-gate 			break;
19610Sstevel@tonic-gate 		}
19620Sstevel@tonic-gate 		mp->b_wptr = mp->b_rptr + ret_size;
19630Sstevel@tonic-gate 
19640Sstevel@tonic-gate 		snf_enque(sr, mp);
19650Sstevel@tonic-gate 		size -= ret_size;
19660Sstevel@tonic-gate 		fileoff += ret_size;
19670Sstevel@tonic-gate 	}
19680Sstevel@tonic-gate 	(void) VOP_IOCTL(fp->f_vnode, _FIODIRECTIO, DIRECTIO_OFF, 0,
19690Sstevel@tonic-gate 	    kcred, NULL);
19700Sstevel@tonic-gate 	mutex_enter(&sr->sr_lock);
19710Sstevel@tonic-gate 	sr->sr_read_error = error;
19720Sstevel@tonic-gate 	sr->sr_read_error |= SR_READ_DONE;
19730Sstevel@tonic-gate 	cv_signal(&sr->sr_cv);
19740Sstevel@tonic-gate 	mutex_exit(&sr->sr_lock);
19750Sstevel@tonic-gate }
19760Sstevel@tonic-gate 
19770Sstevel@tonic-gate void
19780Sstevel@tonic-gate snf_async_thread(void)
19790Sstevel@tonic-gate {
19800Sstevel@tonic-gate 	snf_req_t *sr;
19810Sstevel@tonic-gate 	callb_cpr_t cprinfo;
19820Sstevel@tonic-gate 	clock_t time_left = 1;
19830Sstevel@tonic-gate 	clock_t now;
19840Sstevel@tonic-gate 
19850Sstevel@tonic-gate 	CALLB_CPR_INIT(&cprinfo, &snfq->snfq_lock, callb_generic_cpr, "snfq");
19860Sstevel@tonic-gate 
19870Sstevel@tonic-gate 	mutex_enter(&snfq->snfq_lock);
19880Sstevel@tonic-gate 	for (;;) {
19890Sstevel@tonic-gate 		/*
19900Sstevel@tonic-gate 		 * If we didn't find an entry, block until woken up
19910Sstevel@tonic-gate 		 * and then look through the queue again.
19920Sstevel@tonic-gate 		 */
19930Sstevel@tonic-gate 		while ((sr = snfq->snfq_req_head) == NULL) {
19940Sstevel@tonic-gate 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
19950Sstevel@tonic-gate 			if (time_left <= 0) {
19960Sstevel@tonic-gate 				snfq->snfq_svc_threads--;
19970Sstevel@tonic-gate 				CALLB_CPR_EXIT(&cprinfo);
19980Sstevel@tonic-gate 				thread_exit();
19990Sstevel@tonic-gate 				/* NOTREACHED */
20000Sstevel@tonic-gate 			}
20010Sstevel@tonic-gate 			snfq->snfq_idle_cnt++;
20020Sstevel@tonic-gate 
20030Sstevel@tonic-gate 			time_to_wait(&now, snfq_timeout);
20040Sstevel@tonic-gate 			time_left = cv_timedwait(&snfq->snfq_cv,
20050Sstevel@tonic-gate 			    &snfq->snfq_lock, now);
20060Sstevel@tonic-gate 			snfq->snfq_idle_cnt--;
20070Sstevel@tonic-gate 
20080Sstevel@tonic-gate 			CALLB_CPR_SAFE_END(&cprinfo, &snfq->snfq_lock);
20090Sstevel@tonic-gate 		}
20100Sstevel@tonic-gate 		snfq->snfq_req_head = sr->sr_next;
20110Sstevel@tonic-gate 		snfq->snfq_req_cnt--;
20120Sstevel@tonic-gate 		mutex_exit(&snfq->snfq_lock);
20130Sstevel@tonic-gate 		snf_async_read(sr);
20140Sstevel@tonic-gate 		mutex_enter(&snfq->snfq_lock);
20150Sstevel@tonic-gate 	}
20160Sstevel@tonic-gate }
20170Sstevel@tonic-gate 
20180Sstevel@tonic-gate 
20190Sstevel@tonic-gate snf_req_t *
20200Sstevel@tonic-gate create_thread(int operation, struct vnode *vp, file_t *fp,
20210Sstevel@tonic-gate     u_offset_t fileoff, u_offset_t size)
20220Sstevel@tonic-gate {
20230Sstevel@tonic-gate 	snf_req_t *sr;
20240Sstevel@tonic-gate 	stdata_t *stp;
20250Sstevel@tonic-gate 
20260Sstevel@tonic-gate 	sr = (snf_req_t *)kmem_zalloc(sizeof (snf_req_t), KM_SLEEP);
20270Sstevel@tonic-gate 
20280Sstevel@tonic-gate 	sr->sr_vp = vp;
20290Sstevel@tonic-gate 	sr->sr_fp = fp;
20300Sstevel@tonic-gate 	stp = vp->v_stream;
20310Sstevel@tonic-gate 
20320Sstevel@tonic-gate 	/*
20330Sstevel@tonic-gate 	 * Store sd_qn_maxpsz into sr_maxpsz while we have the stream head.
20340Sstevel@tonic-gate 	 * The stream might be closed before snf_async_read() completes.
20350Sstevel@tonic-gate 	 */
20360Sstevel@tonic-gate 	if (stp->sd_qn_maxpsz > 0) {
20370Sstevel@tonic-gate 		sr->sr_maxpsz = MIN(MAXBSIZE, stp->sd_qn_maxpsz);
20380Sstevel@tonic-gate 	} else {
20390Sstevel@tonic-gate 		sr->sr_maxpsz = MAXBSIZE;
20400Sstevel@tonic-gate 	}
20410Sstevel@tonic-gate 
20420Sstevel@tonic-gate 	sr->sr_operation = operation;
20430Sstevel@tonic-gate 	sr->sr_file_off = fileoff;
20440Sstevel@tonic-gate 	sr->sr_file_size = size;
20450Sstevel@tonic-gate 	sr->sr_hiwat = sendfile_req_hiwat;
20460Sstevel@tonic-gate 	sr->sr_lowat = sendfile_req_lowat;
20470Sstevel@tonic-gate 	mutex_init(&sr->sr_lock, NULL, MUTEX_DEFAULT, NULL);
20480Sstevel@tonic-gate 	cv_init(&sr->sr_cv, NULL, CV_DEFAULT, NULL);
20490Sstevel@tonic-gate 	/*
20500Sstevel@tonic-gate 	 * See whether we need another thread to service this
20510Sstevel@tonic-gate 	 * request. If there are at least as many requests queued
20520Sstevel@tonic-gate 	 * as there are idle threads, create a new one, provided we
20530Sstevel@tonic-gate 	 * do not exceed snfq_max_threads.
20540Sstevel@tonic-gate 	 */
20550Sstevel@tonic-gate 	mutex_enter(&snfq->snfq_lock);
20560Sstevel@tonic-gate 	if (snfq->snfq_req_cnt >= snfq->snfq_idle_cnt &&
20570Sstevel@tonic-gate 	    snfq->snfq_svc_threads < snfq->snfq_max_threads) {
20580Sstevel@tonic-gate 		(void) thread_create(NULL, 0, &snf_async_thread, 0, 0, &p0,
20590Sstevel@tonic-gate 		    TS_RUN, minclsyspri);
20600Sstevel@tonic-gate 		snfq->snfq_svc_threads++;
20610Sstevel@tonic-gate 	}
20620Sstevel@tonic-gate 	if (snfq->snfq_req_head == NULL) {
20630Sstevel@tonic-gate 		snfq->snfq_req_head = snfq->snfq_req_tail = sr;
20640Sstevel@tonic-gate 		cv_signal(&snfq->snfq_cv);
20650Sstevel@tonic-gate 	} else {
20660Sstevel@tonic-gate 		snfq->snfq_req_tail->sr_next = sr;
20670Sstevel@tonic-gate 		snfq->snfq_req_tail = sr;
20680Sstevel@tonic-gate 	}
20690Sstevel@tonic-gate 	snfq->snfq_req_cnt++;
20700Sstevel@tonic-gate 	mutex_exit(&snfq->snfq_lock);
20710Sstevel@tonic-gate 	return (sr);
20720Sstevel@tonic-gate }
20730Sstevel@tonic-gate 
20740Sstevel@tonic-gate int
20750Sstevel@tonic-gate snf_direct_io(file_t *fp, file_t *rfp, u_offset_t fileoff, u_offset_t size,
20760Sstevel@tonic-gate     ssize_t *count)
20770Sstevel@tonic-gate {
20780Sstevel@tonic-gate 	snf_req_t *sr;
20790Sstevel@tonic-gate 	mblk_t *mp;
20800Sstevel@tonic-gate 	int iosize;
20810Sstevel@tonic-gate 	int error = 0;
20820Sstevel@tonic-gate 	short fflag;
20830Sstevel@tonic-gate 	struct vnode *vp;
20840Sstevel@tonic-gate 	int ksize;
20850Sstevel@tonic-gate 
20860Sstevel@tonic-gate 	ksize = 0;
20870Sstevel@tonic-gate 	*count = 0;
20880Sstevel@tonic-gate 
20890Sstevel@tonic-gate 	vp = fp->f_vnode;
20900Sstevel@tonic-gate 	fflag = fp->f_flag;
20910Sstevel@tonic-gate 	if ((sr = create_thread(READ_OP, vp, rfp, fileoff, size)) == NULL)
20920Sstevel@tonic-gate 		return (EAGAIN);
20930Sstevel@tonic-gate 
20940Sstevel@tonic-gate 	/*
20950Sstevel@tonic-gate 	 * We check for read errors in snf_deque. It already has to
20960Sstevel@tonic-gate 	 * check for a successful SR_READ_DONE and return NULL, so we
20970Sstevel@tonic-gate 	 * might as well make the additional check there.
20980Sstevel@tonic-gate 	 */
20990Sstevel@tonic-gate 	while ((mp = snf_deque(sr)) != NULL) {
21000Sstevel@tonic-gate 
21010Sstevel@tonic-gate 		if (ISSIG(curthread, JUSTLOOKING)) {
21020Sstevel@tonic-gate 			freeb(mp);
21030Sstevel@tonic-gate 			error = EINTR;
21040Sstevel@tonic-gate 			break;
21050Sstevel@tonic-gate 		}
21060Sstevel@tonic-gate 		iosize = MBLKL(mp);
21070Sstevel@tonic-gate 
21080Sstevel@tonic-gate 		if ((error = kstrwritemp(vp, mp, fflag)) != 0) {
21090Sstevel@tonic-gate 			freeb(mp);
21100Sstevel@tonic-gate 			break;
21110Sstevel@tonic-gate 		}
21120Sstevel@tonic-gate 		ksize += iosize;
21130Sstevel@tonic-gate 	}
21140Sstevel@tonic-gate 	*count = ksize;
21150Sstevel@tonic-gate 
21160Sstevel@tonic-gate 	mutex_enter(&sr->sr_lock);
21170Sstevel@tonic-gate 	sr->sr_write_error = error;
21180Sstevel@tonic-gate 	/* See the big comment above for why we cv_signal here. */
21190Sstevel@tonic-gate 	cv_signal(&sr->sr_cv);
21200Sstevel@tonic-gate 
21210Sstevel@tonic-gate 	/* Always wait for the reader to complete. */
21220Sstevel@tonic-gate 	while (!(sr->sr_read_error & SR_READ_DONE)) {
21230Sstevel@tonic-gate 		cv_wait(&sr->sr_cv, &sr->sr_lock);
21240Sstevel@tonic-gate 	}
21250Sstevel@tonic-gate 	/* If there is no write error, check for read error. */
21260Sstevel@tonic-gate 	if (error == 0)
21270Sstevel@tonic-gate 		error = (sr->sr_read_error & ~SR_READ_DONE);
21280Sstevel@tonic-gate 
21290Sstevel@tonic-gate 	if (error != 0) {
21300Sstevel@tonic-gate 		mblk_t *next_mp;
21310Sstevel@tonic-gate 
21320Sstevel@tonic-gate 		mp = sr->sr_mp_head;
21330Sstevel@tonic-gate 		while (mp != NULL) {
21340Sstevel@tonic-gate 			next_mp = mp->b_next;
21350Sstevel@tonic-gate 			mp->b_next = NULL;
21360Sstevel@tonic-gate 			freeb(mp);
21370Sstevel@tonic-gate 			mp = next_mp;
21380Sstevel@tonic-gate 		}
21390Sstevel@tonic-gate 	}
21400Sstevel@tonic-gate 	mutex_exit(&sr->sr_lock);
21410Sstevel@tonic-gate 	kmem_free(sr, sizeof (snf_req_t));
21420Sstevel@tonic-gate 	return (error);
21430Sstevel@tonic-gate }
21440Sstevel@tonic-gate 
21450Sstevel@tonic-gate typedef struct {
21460Sstevel@tonic-gate 	frtn_t		snfi_frtn;
21470Sstevel@tonic-gate 	caddr_t		snfi_base;
21480Sstevel@tonic-gate 	uint_t		snfi_mapoff;
21490Sstevel@tonic-gate 	size_t		snfi_len;
21500Sstevel@tonic-gate 	vnode_t		*snfi_vp;
21510Sstevel@tonic-gate } snf_smap_desbinfo;
21520Sstevel@tonic-gate 
21530Sstevel@tonic-gate /*
21540Sstevel@tonic-gate  * The callback function invoked when the last reference to the mblk is
21550Sstevel@tonic-gate  * dropped, which normally occurs when TCP receives the ack. But it can
21560Sstevel@tonic-gate  * also be the driver, due to lazy reclaim.
21570Sstevel@tonic-gate  */
21580Sstevel@tonic-gate void
21590Sstevel@tonic-gate snf_smap_desbfree(snf_smap_desbinfo *snfi)
21600Sstevel@tonic-gate {
21610Sstevel@tonic-gate 	if (!segmap_kpm) {
21620Sstevel@tonic-gate 		/*
21630Sstevel@tonic-gate 		 * We don't need to call segmap_fault(F_SOFTUNLOCK) for
21640Sstevel@tonic-gate 		 * segmap_kpm as long as the latter never falls back to
21650Sstevel@tonic-gate 		 * "use_segmap_range". (See segmap_getmapflt().)
21660Sstevel@tonic-gate 		 *
21670Sstevel@tonic-gate 		 * Using S_OTHER saves a redundant hat_setref() in
21680Sstevel@tonic-gate 		 * segmap_unlock().
21690Sstevel@tonic-gate 		 */
21700Sstevel@tonic-gate 		(void) segmap_fault(kas.a_hat, segkmap,
2171408Skrgopi 		    (caddr_t)(uintptr_t)(((uintptr_t)snfi->snfi_base +
2172408Skrgopi 		    snfi->snfi_mapoff) & PAGEMASK), snfi->snfi_len,
2173408Skrgopi 		    F_SOFTUNLOCK, S_OTHER);
21740Sstevel@tonic-gate 	}
21750Sstevel@tonic-gate 	(void) segmap_release(segkmap, snfi->snfi_base, SM_DONTNEED);
21760Sstevel@tonic-gate 	VN_RELE(snfi->snfi_vp);
21770Sstevel@tonic-gate 	kmem_free(snfi, sizeof (*snfi));
21780Sstevel@tonic-gate }
21790Sstevel@tonic-gate 
21800Sstevel@tonic-gate /*
21810Sstevel@tonic-gate  * Use segmap instead of bcopy to send down a chain of desballoca'ed mblks.
21820Sstevel@tonic-gate  * Each mblk contains a segmap slot of no more than MAXBSIZE. The total
21830Sstevel@tonic-gate  * length of a chain is no more than sd_qn_maxpsz.
21840Sstevel@tonic-gate  *
21850Sstevel@tonic-gate  * At the end of the whole sendfile() operation, we wait till the data from
21860Sstevel@tonic-gate  * the last mblk is ack'ed by the transport before returning so that the
21870Sstevel@tonic-gate  * caller of sendfile() can safely modify the file content.
21880Sstevel@tonic-gate  */
21890Sstevel@tonic-gate int
21900Sstevel@tonic-gate snf_segmap(file_t *fp, vnode_t *fvp, u_offset_t fileoff, u_offset_t size,
21910Sstevel@tonic-gate     uint_t maxpsz, ssize_t *count, boolean_t nowait)
21920Sstevel@tonic-gate {
21930Sstevel@tonic-gate 	caddr_t base;
21940Sstevel@tonic-gate 	int mapoff;
21950Sstevel@tonic-gate 	vnode_t *vp;
21960Sstevel@tonic-gate 	mblk_t *mp, *mp1;
21970Sstevel@tonic-gate 	int iosize, iosize1;
21980Sstevel@tonic-gate 	int error;
21990Sstevel@tonic-gate 	short fflag;
22000Sstevel@tonic-gate 	int ksize;
22010Sstevel@tonic-gate 	snf_smap_desbinfo *snfi;
22020Sstevel@tonic-gate 	struct vattr va;
22030Sstevel@tonic-gate 	boolean_t dowait = B_FALSE;
22040Sstevel@tonic-gate 
22050Sstevel@tonic-gate 	vp = fp->f_vnode;
22060Sstevel@tonic-gate 	fflag = fp->f_flag;
22070Sstevel@tonic-gate 	ksize = 0;
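	/*
	 * Outer loop: build a chain of zero-copy mblks mapped from the
	 * file (up to about maxpsz bytes), send the chain down, then
	 * refresh the file size and continue until the requested size
	 * has been sent, EOF is reached, or an error occurs.
	 */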
22080Sstevel@tonic-gate 	for (;;) {
22090Sstevel@tonic-gate 		if (ISSIG(curthread, JUSTLOOKING)) {
22100Sstevel@tonic-gate 			error = EINTR;
22110Sstevel@tonic-gate 			break;
22120Sstevel@tonic-gate 		}
22130Sstevel@tonic-gate 		iosize = 0;
22140Sstevel@tonic-gate 		mp = NULL;
22150Sstevel@tonic-gate 		do {
22160Sstevel@tonic-gate 			mapoff = fileoff & MAXBOFFSET;
22170Sstevel@tonic-gate 			iosize1 = MAXBSIZE - mapoff;
22180Sstevel@tonic-gate 			if (iosize1 > size)
22190Sstevel@tonic-gate 				iosize1 = size;
22200Sstevel@tonic-gate 			/*
22210Sstevel@tonic-gate 			 * We don't forcefault because we'll call
22220Sstevel@tonic-gate 			 * segmap_fault(F_SOFTLOCK) next.
22230Sstevel@tonic-gate 			 *
22240Sstevel@tonic-gate 			 * S_READ will get the ref bit set (by either
22250Sstevel@tonic-gate 			 * segmap_getmapflt() or segmap_fault()) and page
22260Sstevel@tonic-gate 			 * shared locked.
22270Sstevel@tonic-gate 			 */
22280Sstevel@tonic-gate 			base = segmap_getmapflt(segkmap, fvp, fileoff, iosize1,
22290Sstevel@tonic-gate 			    segmap_kpm ? SM_FAULT : 0, S_READ);
22300Sstevel@tonic-gate 
22310Sstevel@tonic-gate 			snfi = kmem_alloc(sizeof (*snfi), KM_SLEEP);
22320Sstevel@tonic-gate 			snfi->snfi_len = (size_t)roundup(mapoff + iosize1,
22330Sstevel@tonic-gate 			    PAGESIZE) - (mapoff & PAGEMASK);
22340Sstevel@tonic-gate 			/*
22350Sstevel@tonic-gate 			 * We must call segmap_fault() even for segmap_kpm
22360Sstevel@tonic-gate 			 * because that's how error gets returned.
22370Sstevel@tonic-gate 			 * (segmap_getmapflt() never fails but segmap_fault()
22380Sstevel@tonic-gate 			 * does.)
22390Sstevel@tonic-gate 			 */
22400Sstevel@tonic-gate 			if (segmap_fault(kas.a_hat, segkmap,
2241408Skrgopi 			    (caddr_t)(uintptr_t)(((uintptr_t)base + mapoff) &
2242408Skrgopi 			    PAGEMASK), snfi->snfi_len, F_SOFTLOCK,
2243408Skrgopi 			    S_READ) != 0) {
22440Sstevel@tonic-gate 				(void) segmap_release(segkmap, base, 0);
22450Sstevel@tonic-gate 				kmem_free(snfi, sizeof (*snfi));
22460Sstevel@tonic-gate 				freemsg(mp);
22470Sstevel@tonic-gate 				error = EIO;
22480Sstevel@tonic-gate 				goto out;
22490Sstevel@tonic-gate 			}
22500Sstevel@tonic-gate 			snfi->snfi_frtn.free_func = snf_smap_desbfree;
22510Sstevel@tonic-gate 			snfi->snfi_frtn.free_arg = (caddr_t)snfi;
22520Sstevel@tonic-gate 			snfi->snfi_base = base;
22530Sstevel@tonic-gate 			snfi->snfi_mapoff = mapoff;
22542994Sss146032 			mp1 = esballoca((uchar_t *)base + mapoff,
22550Sstevel@tonic-gate 			    iosize1, BPRI_HI, &snfi->snfi_frtn);
22560Sstevel@tonic-gate 
22570Sstevel@tonic-gate 			if (mp1 == NULL) {
22580Sstevel@tonic-gate 				(void) segmap_fault(kas.a_hat, segkmap,
2259408Skrgopi 				    (caddr_t)(uintptr_t)(((uintptr_t)base +
2260408Skrgopi 				    mapoff) & PAGEMASK), snfi->snfi_len,
22610Sstevel@tonic-gate 				    F_SOFTUNLOCK, S_OTHER);
22620Sstevel@tonic-gate 				(void) segmap_release(segkmap, base, 0);
22630Sstevel@tonic-gate 				kmem_free(snfi, sizeof (*snfi));
22640Sstevel@tonic-gate 				freemsg(mp);
22650Sstevel@tonic-gate 				error = EAGAIN;
22660Sstevel@tonic-gate 				goto out;
22670Sstevel@tonic-gate 			}
22680Sstevel@tonic-gate 			VN_HOLD(fvp);
22690Sstevel@tonic-gate 			snfi->snfi_vp = fvp;
22700Sstevel@tonic-gate 			mp1->b_wptr += iosize1;
22710Sstevel@tonic-gate 
22720Sstevel@tonic-gate 			/* Mark this dblk with the zero-copy flag */
22730Sstevel@tonic-gate 			mp1->b_datap->db_struioflag |= STRUIO_ZC;
22740Sstevel@tonic-gate 			if (mp == NULL)
22750Sstevel@tonic-gate 				mp = mp1;
22760Sstevel@tonic-gate 			else
22770Sstevel@tonic-gate 				linkb(mp, mp1);
22780Sstevel@tonic-gate 			iosize += iosize1;
22790Sstevel@tonic-gate 			fileoff += iosize1;
22800Sstevel@tonic-gate 			size -= iosize1;
22810Sstevel@tonic-gate 		} while (iosize < maxpsz && size != 0);
22820Sstevel@tonic-gate 
22830Sstevel@tonic-gate 		if (size == 0 && !nowait) {
22840Sstevel@tonic-gate 			ASSERT(!dowait);
22850Sstevel@tonic-gate 			dowait = B_TRUE;
22860Sstevel@tonic-gate 			mp1->b_datap->db_struioflag |= STRUIO_ZCNOTIFY;
22870Sstevel@tonic-gate 		}
22880Sstevel@tonic-gate 		VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
22890Sstevel@tonic-gate 		if ((error = kstrwritemp(vp, mp, fflag)) != 0) {
22900Sstevel@tonic-gate 			*count = ksize;
22910Sstevel@tonic-gate 			freemsg(mp);
22920Sstevel@tonic-gate 			return (error);
22930Sstevel@tonic-gate 		}
22940Sstevel@tonic-gate 		ksize += iosize;
22950Sstevel@tonic-gate 		if (size == 0)
22960Sstevel@tonic-gate 			goto done;
22970Sstevel@tonic-gate 
22980Sstevel@tonic-gate 		(void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL);
22990Sstevel@tonic-gate 		va.va_mask = AT_SIZE;
23000Sstevel@tonic-gate 		error = VOP_GETATTR(fvp, &va, 0, kcred);
23010Sstevel@tonic-gate 		if (error)
23020Sstevel@tonic-gate 			break;
23030Sstevel@tonic-gate 		/* Read as much as possible. */
23040Sstevel@tonic-gate 		if (fileoff >= va.va_size)
23050Sstevel@tonic-gate 			break;
23060Sstevel@tonic-gate 		if (size + fileoff > va.va_size)
23070Sstevel@tonic-gate 			size = va.va_size - fileoff;
23080Sstevel@tonic-gate 	}
23090Sstevel@tonic-gate out:
23100Sstevel@tonic-gate 	VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
23110Sstevel@tonic-gate done:
23120Sstevel@tonic-gate 	*count = ksize;
23130Sstevel@tonic-gate 	if (dowait) {
23140Sstevel@tonic-gate 		stdata_t *stp;
23150Sstevel@tonic-gate 
23160Sstevel@tonic-gate 		stp = vp->v_stream;
23170Sstevel@tonic-gate 		mutex_enter(&stp->sd_lock);
23180Sstevel@tonic-gate 		while (!(stp->sd_flag & STZCNOTIFY)) {
2319*3415Samehta 			if (cv_wait_sig(&stp->sd_zcopy_wait,

2320*3415Samehta 			    &stp->sd_lock) == 0) {
2321*3415Samehta 				error = EINTR;
2322*3415Samehta 				break;
2323*3415Samehta 			}
23240Sstevel@tonic-gate 		}
23250Sstevel@tonic-gate 		stp->sd_flag &= ~STZCNOTIFY;
23260Sstevel@tonic-gate 		mutex_exit(&stp->sd_lock);
23270Sstevel@tonic-gate 	}
23280Sstevel@tonic-gate 	return (error);
23290Sstevel@tonic-gate }
23300Sstevel@tonic-gate 
23310Sstevel@tonic-gate int
23320Sstevel@tonic-gate snf_cache(file_t *fp, vnode_t *fvp, u_offset_t fileoff, u_offset_t size,
23330Sstevel@tonic-gate     uint_t maxpsz, ssize_t *count)
23340Sstevel@tonic-gate {
23350Sstevel@tonic-gate 	struct vnode *vp;
23360Sstevel@tonic-gate 	mblk_t *mp;
23370Sstevel@tonic-gate 	int iosize;
23380Sstevel@tonic-gate 	int error;
23390Sstevel@tonic-gate 	short fflag;
23400Sstevel@tonic-gate 	int ksize;
23410Sstevel@tonic-gate 	int ioflag;
23420Sstevel@tonic-gate 	struct uio auio;
23430Sstevel@tonic-gate 	struct iovec aiov;
23440Sstevel@tonic-gate 	struct vattr va;
23450Sstevel@tonic-gate 
23460Sstevel@tonic-gate 	vp = fp->f_vnode;
23470Sstevel@tonic-gate 	fflag = fp->f_flag;
23480Sstevel@tonic-gate 	ksize = 0;
23490Sstevel@tonic-gate 	auio.uio_iov = &aiov;
23500Sstevel@tonic-gate 	auio.uio_iovcnt = 1;
23510Sstevel@tonic-gate 	auio.uio_segflg = UIO_SYSSPACE;
23520Sstevel@tonic-gate 	auio.uio_llimit = MAXOFFSET_T;
23530Sstevel@tonic-gate 	auio.uio_fmode = fflag;
23540Sstevel@tonic-gate 	auio.uio_extflg = UIO_COPY_CACHED;
23550Sstevel@tonic-gate 	ioflag = auio.uio_fmode & (FSYNC|FDSYNC|FRSYNC);
23560Sstevel@tonic-gate 	/* If read sync is not asked for, filter sync flags */
23570Sstevel@tonic-gate 	if ((ioflag & FRSYNC) == 0)
23580Sstevel@tonic-gate 		ioflag &= ~(FSYNC|FDSYNC);
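	/*
	 * FSYNC and FDSYNC affect a read only in combination with FRSYNC,
	 * which is why they were masked off above when FRSYNC is absent.
	 */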
23590Sstevel@tonic-gate 	for (;;) {
23600Sstevel@tonic-gate 		if (ISSIG(curthread, JUSTLOOKING)) {
23610Sstevel@tonic-gate 			error = EINTR;
23620Sstevel@tonic-gate 			break;
23630Sstevel@tonic-gate 		}
23640Sstevel@tonic-gate 		iosize = (int)MIN(maxpsz, size);
23650Sstevel@tonic-gate 		if ((mp = allocb(iosize, BPRI_MED)) == NULL) {
23660Sstevel@tonic-gate 			error = EAGAIN;
23670Sstevel@tonic-gate 			break;
23680Sstevel@tonic-gate 		}
23690Sstevel@tonic-gate 		aiov.iov_base = (caddr_t)mp->b_rptr;
23700Sstevel@tonic-gate 		aiov.iov_len = iosize;
23710Sstevel@tonic-gate 		auio.uio_loffset = fileoff;
23720Sstevel@tonic-gate 		auio.uio_resid = iosize;
23730Sstevel@tonic-gate 
23740Sstevel@tonic-gate 		error = VOP_READ(fvp, &auio, ioflag, fp->f_cred, NULL);
23750Sstevel@tonic-gate 		iosize -= auio.uio_resid;
23760Sstevel@tonic-gate 
23770Sstevel@tonic-gate 		if (error == EINTR && iosize != 0)
23780Sstevel@tonic-gate 			error = 0;
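		/*
		 * A signal that interrupts a partial read does not discard
		 * the data already copied; the pending signal is picked up
		 * by the ISSIG() check at the top of the next iteration.
		 */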
23790Sstevel@tonic-gate 
23800Sstevel@tonic-gate 		if (error != 0 || iosize == 0) {
23810Sstevel@tonic-gate 			freeb(mp);
23820Sstevel@tonic-gate 			break;
23830Sstevel@tonic-gate 		}
23840Sstevel@tonic-gate 		mp->b_wptr = mp->b_rptr + iosize;
23850Sstevel@tonic-gate 
23860Sstevel@tonic-gate 		VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
23870Sstevel@tonic-gate 		if ((error = kstrwritemp(vp, mp, fflag)) != 0) {
23880Sstevel@tonic-gate 			*count = ksize;
23890Sstevel@tonic-gate 			freeb(mp);
23900Sstevel@tonic-gate 			return (error);
23910Sstevel@tonic-gate 		}
23920Sstevel@tonic-gate 		ksize += iosize;
23930Sstevel@tonic-gate 		size -= iosize;
23940Sstevel@tonic-gate 		if (size == 0)
23950Sstevel@tonic-gate 			goto done;
23960Sstevel@tonic-gate 
23970Sstevel@tonic-gate 		fileoff += iosize;
23980Sstevel@tonic-gate 		(void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL);
23990Sstevel@tonic-gate 		va.va_mask = AT_SIZE;
24000Sstevel@tonic-gate 		error = VOP_GETATTR(fvp, &va, 0, kcred);
24010Sstevel@tonic-gate 		if (error)
24020Sstevel@tonic-gate 			break;
24030Sstevel@tonic-gate 		/* Read as much as possible. */
24040Sstevel@tonic-gate 		if (fileoff >= va.va_size)
24050Sstevel@tonic-gate 			size = 0;
24060Sstevel@tonic-gate 		else if (size + fileoff > va.va_size)
24070Sstevel@tonic-gate 			size = va.va_size - fileoff;
24080Sstevel@tonic-gate 	}
24090Sstevel@tonic-gate 	VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
24100Sstevel@tonic-gate done:
24110Sstevel@tonic-gate 	*count = ksize;
24120Sstevel@tonic-gate 	return (error);
24130Sstevel@tonic-gate }
24140Sstevel@tonic-gate 
24150Sstevel@tonic-gate #if defined(_SYSCALL32_IMPL) || defined(_ILP32)
24160Sstevel@tonic-gate /*
24170Sstevel@tonic-gate  * Largefile support for 32 bit applications only.
24180Sstevel@tonic-gate  */
24190Sstevel@tonic-gate int
24200Sstevel@tonic-gate sosendfile64(file_t *fp, file_t *rfp, const struct ksendfilevec64 *sfv,
24210Sstevel@tonic-gate     ssize32_t *count32)
24220Sstevel@tonic-gate {
24230Sstevel@tonic-gate 	ssize32_t sfv_len;
24240Sstevel@tonic-gate 	u_offset_t sfv_off, va_size;
24250Sstevel@tonic-gate 	struct vnode *vp, *fvp, *realvp;
24260Sstevel@tonic-gate 	struct vattr va;
24270Sstevel@tonic-gate 	stdata_t *stp;
24280Sstevel@tonic-gate 	ssize_t count = 0;
24290Sstevel@tonic-gate 	int error = 0;
24300Sstevel@tonic-gate 	boolean_t dozcopy = B_FALSE;
24310Sstevel@tonic-gate 	uint_t maxpsz;
24320Sstevel@tonic-gate 
24330Sstevel@tonic-gate 	sfv_len = (ssize32_t)sfv->sfv_len;
24340Sstevel@tonic-gate 	if (sfv_len < 0) {
24350Sstevel@tonic-gate 		error = EINVAL;
24360Sstevel@tonic-gate 		goto out;
24370Sstevel@tonic-gate 	}
24380Sstevel@tonic-gate 
24390Sstevel@tonic-gate 	if (sfv_len == 0) goto out;
24400Sstevel@tonic-gate 
24410Sstevel@tonic-gate 	sfv_off = (u_offset_t)sfv->sfv_off;
24420Sstevel@tonic-gate 
24430Sstevel@tonic-gate 	/* Same checks as in pread */
24440Sstevel@tonic-gate 	if (sfv_off > MAXOFFSET_T) {
24450Sstevel@tonic-gate 		error = EINVAL;
24460Sstevel@tonic-gate 		goto out;
24470Sstevel@tonic-gate 	}
24480Sstevel@tonic-gate 	if (sfv_off + sfv_len > MAXOFFSET_T)
24490Sstevel@tonic-gate 		sfv_len = (ssize32_t)(MAXOFFSET_T - sfv_off);
24500Sstevel@tonic-gate 
24510Sstevel@tonic-gate 	/*
24520Sstevel@tonic-gate 	 * No further checks are done on sfv_len, so we cast it to
24530Sstevel@tonic-gate 	 * u_offset_t and share the snf_direct_io/snf_cache code between
24540Sstevel@tonic-gate 	 * the 32-bit and 64-bit paths.
24550Sstevel@tonic-gate 	 *
24560Sstevel@tonic-gate 	 * TODO: should do nbl_need_check() like read()?
24570Sstevel@tonic-gate 	 */
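	/*
	 * Requests larger than sendfile_max_size bypass the cached paths
	 * below and go through snf_direct_io(), presumably so that very
	 * large transfers do not flush more useful data out of the page
	 * cache (hence the ss_file_not_cached counter).
	 */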
24580Sstevel@tonic-gate 	if (sfv_len > sendfile_max_size) {
24590Sstevel@tonic-gate 		sf_stats.ss_file_not_cached++;
24600Sstevel@tonic-gate 		error = snf_direct_io(fp, rfp, sfv_off, (u_offset_t)sfv_len,
24610Sstevel@tonic-gate 		    &count);
24620Sstevel@tonic-gate 		goto out;
24630Sstevel@tonic-gate 	}
24640Sstevel@tonic-gate 	fvp = rfp->f_vnode;
24650Sstevel@tonic-gate 	if (VOP_REALVP(fvp, &realvp) == 0)
24660Sstevel@tonic-gate 		fvp = realvp;
24670Sstevel@tonic-gate 	/*
24680Sstevel@tonic-gate 	 * Grab the lock as a reader to prevent the file size
24690Sstevel@tonic-gate 	 * from changing underneath.
24700Sstevel@tonic-gate 	 */
24710Sstevel@tonic-gate 	(void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL);
24720Sstevel@tonic-gate 	va.va_mask = AT_SIZE;
24730Sstevel@tonic-gate 	error = VOP_GETATTR(fvp, &va, 0, kcred);
24740Sstevel@tonic-gate 	va_size = va.va_size;
24750Sstevel@tonic-gate 	if ((error != 0) || (va_size == 0) || (sfv_off >= va_size)) {
24760Sstevel@tonic-gate 		VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
24770Sstevel@tonic-gate 		goto out;
24780Sstevel@tonic-gate 	}
24790Sstevel@tonic-gate 	/* Read as much as possible. */
24800Sstevel@tonic-gate 	if (sfv_off + sfv_len > va_size)
24810Sstevel@tonic-gate 		sfv_len = va_size - sfv_off;
24820Sstevel@tonic-gate 
24830Sstevel@tonic-gate 	vp = fp->f_vnode;
24840Sstevel@tonic-gate 	stp = vp->v_stream;
24850Sstevel@tonic-gate 	if (stp->sd_qn_maxpsz == INFPSZ)
24860Sstevel@tonic-gate 		maxpsz = MAXOFF32_T;
24870Sstevel@tonic-gate 	else
24880Sstevel@tonic-gate 		maxpsz = roundup(stp->sd_qn_maxpsz, MAXBSIZE);
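	/*
	 * maxpsz caps how much file data is gathered into a single mblk
	 * chain per kstrwritemp() call; rounding it up to a MAXBSIZE
	 * multiple presumably keeps the per-pass reads block aligned.
	 */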
24890Sstevel@tonic-gate 	/*
24900Sstevel@tonic-gate 	 * When the NOWAIT flag is not set, we enable zero-copy only if the
24910Sstevel@tonic-gate 	 * transfer size is large enough. This prevents performance loss
24920Sstevel@tonic-gate 	 * when the caller sends the file piece by piece.
24930Sstevel@tonic-gate 	 */
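	/*
	 * For illustration (MAXBSIZE is 8K): a single 2 MB request against
	 * a 3 MB file qualifies for zero-copy, whereas 64 KB requests on
	 * the same file without SFV_NOWAIT do not, since 64 KB is below
	 * both half the file size and the 16 MB (0x1000000) cutoff.  Files
	 * with record locks never use zero-copy, presumably because locked
	 * regions could be modified while still mapped for transmission.
	 */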
24940Sstevel@tonic-gate 	if (sfv_len >= MAXBSIZE && (sfv_len >= (va_size >> 1) ||
24950Sstevel@tonic-gate 	    (sfv->sfv_flag & SFV_NOWAIT) || sfv_len >= 0x1000000) &&
24960Sstevel@tonic-gate 	    !vn_has_flocks(fvp)) {
24970Sstevel@tonic-gate 		if ((stp->sd_copyflag & (STZCVMSAFE|STZCVMUNSAFE)) == 0) {
24980Sstevel@tonic-gate 			int on = 1;
24990Sstevel@tonic-gate 
25000Sstevel@tonic-gate 			if (SOP_SETSOCKOPT(VTOSO(vp), SOL_SOCKET,
25010Sstevel@tonic-gate 			    SO_SND_COPYAVOID, &on, sizeof (on)) == 0)
25020Sstevel@tonic-gate 				dozcopy = B_TRUE;
25030Sstevel@tonic-gate 		} else {
25040Sstevel@tonic-gate 			dozcopy = (stp->sd_copyflag & STZCVMSAFE);
25050Sstevel@tonic-gate 		}
25060Sstevel@tonic-gate 	}
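	/*
	 * On the first pass neither STZCVMSAFE nor STZCVMUNSAFE is set, so
	 * SO_SND_COPYAVOID asks the transport to enable zero-copy; the
	 * verdict recorded in sd_copyflag is consulted directly on
	 * subsequent calls.
	 */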
25070Sstevel@tonic-gate 	if (dozcopy) {
25080Sstevel@tonic-gate 		sf_stats.ss_file_segmap++;
25090Sstevel@tonic-gate 		error = snf_segmap(fp, fvp, sfv_off, (u_offset_t)sfv_len,
25100Sstevel@tonic-gate 		    maxpsz, &count, ((sfv->sfv_flag & SFV_NOWAIT) != 0));
25110Sstevel@tonic-gate 	} else {
25120Sstevel@tonic-gate 		sf_stats.ss_file_cached++;
25130Sstevel@tonic-gate 		error = snf_cache(fp, fvp, sfv_off, (u_offset_t)sfv_len,
25140Sstevel@tonic-gate 		    maxpsz, &count);
25150Sstevel@tonic-gate 	}
25160Sstevel@tonic-gate out:
25170Sstevel@tonic-gate 	releasef(sfv->sfv_fd);
25180Sstevel@tonic-gate 	*count32 = (ssize32_t)count;
25190Sstevel@tonic-gate 	return (error);
25200Sstevel@tonic-gate }
25210Sstevel@tonic-gate #endif
25220Sstevel@tonic-gate 
25230Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL
25240Sstevel@tonic-gate /*
25250Sstevel@tonic-gate  * recv32(), recvfrom32(), send32(), sendto32(): intentionally return a
25260Sstevel@tonic-gate  * ssize_t rather than ssize32_t; see the comments above read32 for details.
25270Sstevel@tonic-gate  */
25280Sstevel@tonic-gate 
25290Sstevel@tonic-gate ssize_t
25300Sstevel@tonic-gate recv32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags)
25310Sstevel@tonic-gate {
25320Sstevel@tonic-gate 	return (recv(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags));
25330Sstevel@tonic-gate }
25340Sstevel@tonic-gate 
25350Sstevel@tonic-gate ssize_t
25360Sstevel@tonic-gate recvfrom32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags,
25370Sstevel@tonic-gate 	caddr32_t name, caddr32_t namelenp)
25380Sstevel@tonic-gate {
25390Sstevel@tonic-gate 	return (recvfrom(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags,
25400Sstevel@tonic-gate 	    (void *)(uintptr_t)name, (void *)(uintptr_t)namelenp));
25410Sstevel@tonic-gate }
25420Sstevel@tonic-gate 
25430Sstevel@tonic-gate ssize_t
25440Sstevel@tonic-gate send32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags)
25450Sstevel@tonic-gate {
25460Sstevel@tonic-gate 	return (send(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags));
25470Sstevel@tonic-gate }
25480Sstevel@tonic-gate 
25490Sstevel@tonic-gate ssize_t
25500Sstevel@tonic-gate sendto32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags,
25510Sstevel@tonic-gate 	caddr32_t name, socklen_t namelen)
25520Sstevel@tonic-gate {
25530Sstevel@tonic-gate 	return (sendto(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags,
25540Sstevel@tonic-gate 	    (void *)(uintptr_t)name, namelen));
25550Sstevel@tonic-gate }
25560Sstevel@tonic-gate #endif	/* _SYSCALL32_IMPL */
25570Sstevel@tonic-gate 
25580Sstevel@tonic-gate /*
25590Sstevel@tonic-gate  * Function wrappers (mostly around the sonode switch) for
25600Sstevel@tonic-gate  * backward compatibility.
25610Sstevel@tonic-gate  */
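
/*
 * These wrappers simply dispatch through the sonode operations vector (the
 * SOP_* macros), so existing kernel callers need not know which sonode
 * implementation is underneath.
 */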
25620Sstevel@tonic-gate 
25630Sstevel@tonic-gate int
25640Sstevel@tonic-gate soaccept(struct sonode *so, int fflag, struct sonode **nsop)
25650Sstevel@tonic-gate {
25660Sstevel@tonic-gate 	return (SOP_ACCEPT(so, fflag, nsop));
25670Sstevel@tonic-gate }
25680Sstevel@tonic-gate 
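/*
 * A non-zero backlog makes sobind() perform the listen as well, for callers
 * that want bind and listen in a single operation.
 */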
25690Sstevel@tonic-gate int
25700Sstevel@tonic-gate sobind(struct sonode *so, struct sockaddr *name, socklen_t namelen,
25710Sstevel@tonic-gate     int backlog, int flags)
25720Sstevel@tonic-gate {
25730Sstevel@tonic-gate 	int	error;
25740Sstevel@tonic-gate 
25750Sstevel@tonic-gate 	error = SOP_BIND(so, name, namelen, flags);
25760Sstevel@tonic-gate 	if (error == 0 && backlog != 0)
25770Sstevel@tonic-gate 		return (SOP_LISTEN(so, backlog));
25780Sstevel@tonic-gate 
25790Sstevel@tonic-gate 	return (error);
25800Sstevel@tonic-gate }
25810Sstevel@tonic-gate 
25820Sstevel@tonic-gate int
25830Sstevel@tonic-gate solisten(struct sonode *so, int backlog)
25840Sstevel@tonic-gate {
25850Sstevel@tonic-gate 	return (SOP_LISTEN(so, backlog));
25860Sstevel@tonic-gate }
25870Sstevel@tonic-gate 
25880Sstevel@tonic-gate int
25890Sstevel@tonic-gate soconnect(struct sonode *so, const struct sockaddr *name, socklen_t namelen,
25900Sstevel@tonic-gate     int fflag, int flags)
25910Sstevel@tonic-gate {
25920Sstevel@tonic-gate 	return (SOP_CONNECT(so, name, namelen, fflag, flags));
25930Sstevel@tonic-gate }
25940Sstevel@tonic-gate 
25950Sstevel@tonic-gate int
25960Sstevel@tonic-gate sorecvmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop)
25970Sstevel@tonic-gate {
25980Sstevel@tonic-gate 	return (SOP_RECVMSG(so, msg, uiop));
25990Sstevel@tonic-gate }
26000Sstevel@tonic-gate 
26010Sstevel@tonic-gate int
26020Sstevel@tonic-gate sosendmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop)
26030Sstevel@tonic-gate {
26040Sstevel@tonic-gate 	return (SOP_SENDMSG(so, msg, uiop));
26050Sstevel@tonic-gate }
26060Sstevel@tonic-gate 
26070Sstevel@tonic-gate int
26080Sstevel@tonic-gate sogetpeername(struct sonode *so)
26090Sstevel@tonic-gate {
26100Sstevel@tonic-gate 	return (SOP_GETPEERNAME(so));
26110Sstevel@tonic-gate }
26120Sstevel@tonic-gate 
26130Sstevel@tonic-gate int
26140Sstevel@tonic-gate sogetsockname(struct sonode *so)
26150Sstevel@tonic-gate {
26160Sstevel@tonic-gate 	return (SOP_GETSOCKNAME(so));
26170Sstevel@tonic-gate }
26180Sstevel@tonic-gate 
26190Sstevel@tonic-gate int
26200Sstevel@tonic-gate soshutdown(struct sonode *so, int how)
26210Sstevel@tonic-gate {
26220Sstevel@tonic-gate 	return (SOP_SHUTDOWN(so, how));
26230Sstevel@tonic-gate }
26240Sstevel@tonic-gate 
26250Sstevel@tonic-gate int
26260Sstevel@tonic-gate sogetsockopt(struct sonode *so, int level, int option_name, void *optval,
26270Sstevel@tonic-gate     socklen_t *optlenp, int flags)
26280Sstevel@tonic-gate {
26290Sstevel@tonic-gate 	return (SOP_GETSOCKOPT(so, level, option_name, optval, optlenp,
26300Sstevel@tonic-gate 	    flags));
26310Sstevel@tonic-gate }
26320Sstevel@tonic-gate 
26330Sstevel@tonic-gate int
26340Sstevel@tonic-gate sosetsockopt(struct sonode *so, int level, int option_name, const void *optval,
26350Sstevel@tonic-gate     t_uscalar_t optlen)
26360Sstevel@tonic-gate {
26370Sstevel@tonic-gate 	return (SOP_SETSOCKOPT(so, level, option_name, optval, optlen));
26380Sstevel@tonic-gate }
26390Sstevel@tonic-gate 
26400Sstevel@tonic-gate /*
26410Sstevel@tonic-gate  * Because this is a backward compatibility interface, it only needs to be
26420Sstevel@tonic-gate  * able to handle the creation of TPI sockfs sockets.
26430Sstevel@tonic-gate  */
26440Sstevel@tonic-gate struct sonode *
26450Sstevel@tonic-gate socreate(vnode_t *accessvp, int domain, int type, int protocol, int version,
26460Sstevel@tonic-gate     struct sonode *tso, int *errorp)
26470Sstevel@tonic-gate {
26480Sstevel@tonic-gate 	return (sotpi_create(accessvp, domain, type, protocol, version, tso,
26490Sstevel@tonic-gate 	    errorp));
26500Sstevel@tonic-gate }
2651