xref: /onnv-gate/usr/src/uts/common/fs/sockfs/socksyscalls.c (revision 6240:f76d3ebaed6b)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51548Srshoaib  * Common Development and Distribution License (the "License").
61548Srshoaib  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
211548Srshoaib 
220Sstevel@tonic-gate /*
23*6240Skrishna  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
240Sstevel@tonic-gate  * Use is subject to license terms.
250Sstevel@tonic-gate  */
260Sstevel@tonic-gate 
270Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
280Sstevel@tonic-gate 
290Sstevel@tonic-gate #include <sys/types.h>
300Sstevel@tonic-gate #include <sys/t_lock.h>
310Sstevel@tonic-gate #include <sys/param.h>
320Sstevel@tonic-gate #include <sys/systm.h>
330Sstevel@tonic-gate #include <sys/buf.h>
340Sstevel@tonic-gate #include <sys/conf.h>
350Sstevel@tonic-gate #include <sys/cred.h>
360Sstevel@tonic-gate #include <sys/kmem.h>
370Sstevel@tonic-gate #include <sys/sysmacros.h>
380Sstevel@tonic-gate #include <sys/vfs.h>
390Sstevel@tonic-gate #include <sys/vnode.h>
400Sstevel@tonic-gate #include <sys/debug.h>
410Sstevel@tonic-gate #include <sys/errno.h>
420Sstevel@tonic-gate #include <sys/time.h>
430Sstevel@tonic-gate #include <sys/file.h>
440Sstevel@tonic-gate #include <sys/user.h>
450Sstevel@tonic-gate #include <sys/stream.h>
460Sstevel@tonic-gate #include <sys/strsubr.h>
470Sstevel@tonic-gate #include <sys/strsun.h>
484173Spr14459 #include <sys/sunddi.h>
490Sstevel@tonic-gate #include <sys/esunddi.h>
500Sstevel@tonic-gate #include <sys/flock.h>
510Sstevel@tonic-gate #include <sys/modctl.h>
520Sstevel@tonic-gate #include <sys/cmn_err.h>
530Sstevel@tonic-gate #include <sys/vmsystm.h>
540Sstevel@tonic-gate #include <sys/policy.h>
550Sstevel@tonic-gate 
560Sstevel@tonic-gate #include <sys/socket.h>
570Sstevel@tonic-gate #include <sys/socketvar.h>
580Sstevel@tonic-gate 
590Sstevel@tonic-gate #include <sys/isa_defs.h>
600Sstevel@tonic-gate #include <sys/inttypes.h>
610Sstevel@tonic-gate #include <sys/systm.h>
620Sstevel@tonic-gate #include <sys/cpuvar.h>
630Sstevel@tonic-gate #include <sys/filio.h>
640Sstevel@tonic-gate #include <sys/sendfile.h>
650Sstevel@tonic-gate #include <sys/ddi.h>
660Sstevel@tonic-gate #include <vm/seg.h>
670Sstevel@tonic-gate #include <vm/seg_map.h>
680Sstevel@tonic-gate #include <vm/seg_kpm.h>
690Sstevel@tonic-gate #include <fs/sockfs/nl7c.h>
700Sstevel@tonic-gate 
710Sstevel@tonic-gate #ifdef SOCK_TEST
720Sstevel@tonic-gate int do_useracc = 1;		/* Controlled by setting SO_DEBUG to 4 */
730Sstevel@tonic-gate #else
740Sstevel@tonic-gate #define	do_useracc	1
750Sstevel@tonic-gate #endif /* SOCK_TEST */
760Sstevel@tonic-gate 
770Sstevel@tonic-gate extern int xnet_truncate_print;
780Sstevel@tonic-gate 
790Sstevel@tonic-gate /*
800Sstevel@tonic-gate  * Note: MSG_MAXIOVLEN is defined here, mirroring DEF_IOV_MAX in
810Sstevel@tonic-gate  *	 "fs/vncalls.c", because there is no formal definition of IOV_MAX.
820Sstevel@tonic-gate  */
830Sstevel@tonic-gate #define	MSG_MAXIOVLEN	16
840Sstevel@tonic-gate 
850Sstevel@tonic-gate /*
860Sstevel@tonic-gate  * Kernel component of socket creation.
870Sstevel@tonic-gate  *
880Sstevel@tonic-gate  * The socket library determines which version number to use.
890Sstevel@tonic-gate  * First the library calls this with a NULL devpath. If this fails
900Sstevel@tonic-gate  * to find a transport (using solookup) the library will look in /etc/netconfig
910Sstevel@tonic-gate  * for the appropriate transport. If one is found it will pass in the
920Sstevel@tonic-gate  * devpath for the kernel to use.
930Sstevel@tonic-gate  */
940Sstevel@tonic-gate int
950Sstevel@tonic-gate so_socket(int domain, int type, int protocol, char *devpath, int version)
960Sstevel@tonic-gate {
970Sstevel@tonic-gate 	vnode_t *accessvp;
980Sstevel@tonic-gate 	struct sonode *so;
990Sstevel@tonic-gate 	vnode_t *vp;
1000Sstevel@tonic-gate 	struct file *fp;
1010Sstevel@tonic-gate 	int fd;
1020Sstevel@tonic-gate 	int error;
1030Sstevel@tonic-gate 	boolean_t wildcard = B_FALSE;
1040Sstevel@tonic-gate 	int saved_error = 0;
1050Sstevel@tonic-gate 	int sdomain = domain;
1060Sstevel@tonic-gate 
1070Sstevel@tonic-gate 	dprint(1, ("so_socket(%d,%d,%d,%p,%d)\n",
1085227Stz204579 	    domain, type, protocol, devpath, version));
1090Sstevel@tonic-gate 
1100Sstevel@tonic-gate 	if (domain == AF_NCA) {
1110Sstevel@tonic-gate 		/*
1120Sstevel@tonic-gate 		 * The request is for an NCA socket so for NL7C use the
1130Sstevel@tonic-gate 		 * INET domain instead and mark NL7C_AF_NCA below.
1140Sstevel@tonic-gate 		 */
1150Sstevel@tonic-gate 		domain = AF_INET;
1160Sstevel@tonic-gate 		/*
1170Sstevel@tonic-gate 		 * NL7C is not supported in non-global zones;
1180Sstevel@tonic-gate 		 * we enforce this restriction here.
1190Sstevel@tonic-gate 		 */
1200Sstevel@tonic-gate 		if (getzoneid() != GLOBAL_ZONEID) {
1210Sstevel@tonic-gate 			return (set_errno(ENOTSUP));
1220Sstevel@tonic-gate 		}
1230Sstevel@tonic-gate 	}
1240Sstevel@tonic-gate 
1250Sstevel@tonic-gate 	accessvp = solookup(domain, type, protocol, devpath, &error);
1260Sstevel@tonic-gate 	if (accessvp == NULL) {
1270Sstevel@tonic-gate 		/*
1280Sstevel@tonic-gate 		 * If there is either an EPROTONOSUPPORT or EPROTOTYPE error
1290Sstevel@tonic-gate 		 * it makes sense doing the wildcard lookup since the
1300Sstevel@tonic-gate 		 * protocol might not be in the table.
1310Sstevel@tonic-gate 		 */
1320Sstevel@tonic-gate 		if (devpath != NULL || protocol == 0 ||
1330Sstevel@tonic-gate 		    !(error == EPROTONOSUPPORT || error == EPROTOTYPE))
1340Sstevel@tonic-gate 			return (set_errno(error));
1350Sstevel@tonic-gate 
1360Sstevel@tonic-gate 		saved_error = error;
1370Sstevel@tonic-gate 
1380Sstevel@tonic-gate 		/*
1390Sstevel@tonic-gate 		 * Try wildcard lookup. Never use devpath for wildcards.
1400Sstevel@tonic-gate 		 */
1410Sstevel@tonic-gate 		accessvp = solookup(domain, type, 0, NULL, &error);
1420Sstevel@tonic-gate 		if (accessvp == NULL) {
1430Sstevel@tonic-gate 			/*
1440Sstevel@tonic-gate 			 * Can't find in kernel table - have library
1450Sstevel@tonic-gate 			 * fall back to /etc/netconfig and tell us
1460Sstevel@tonic-gate 			 * the devpath (The library will do this if it didn't
1470Sstevel@tonic-gate 			 * already pass in a devpath).
1480Sstevel@tonic-gate 			 */
1490Sstevel@tonic-gate 			if (saved_error != 0)
1500Sstevel@tonic-gate 				error = saved_error;
1510Sstevel@tonic-gate 			return (set_errno(error));
1520Sstevel@tonic-gate 		}
1530Sstevel@tonic-gate 		wildcard = B_TRUE;
1540Sstevel@tonic-gate 	}
1550Sstevel@tonic-gate 
1560Sstevel@tonic-gate 	/* Check the device policy */
1570Sstevel@tonic-gate 	if ((error = secpolicy_spec_open(CRED(),
1580Sstevel@tonic-gate 	    accessvp, FREAD|FWRITE)) != 0) {
1590Sstevel@tonic-gate 		return (set_errno(error));
1600Sstevel@tonic-gate 	}
1610Sstevel@tonic-gate 
1621548Srshoaib 	if (protocol == IPPROTO_SCTP) {
1630Sstevel@tonic-gate 		so = sosctp_create(accessvp, domain, type, protocol, version,
1640Sstevel@tonic-gate 		    NULL, &error);
1653422Snh145002 	} else if (protocol == PROTO_SDP) {
1663422Snh145002 		so = sosdp_create(accessvp, domain, type, protocol, version,
1673422Snh145002 		    NULL, &error);
1680Sstevel@tonic-gate 	} else {
1690Sstevel@tonic-gate 		so = sotpi_create(accessvp, domain, type, protocol, version,
1700Sstevel@tonic-gate 		    NULL, &error);
1710Sstevel@tonic-gate 	}
1720Sstevel@tonic-gate 	if (so == NULL) {
1730Sstevel@tonic-gate 		return (set_errno(error));
1740Sstevel@tonic-gate 	}
1750Sstevel@tonic-gate 	if (sdomain == AF_NCA && domain == AF_INET) {
1760Sstevel@tonic-gate 		so->so_nl7c_flags = NL7C_AF_NCA;
1770Sstevel@tonic-gate 	}
1780Sstevel@tonic-gate 	vp = SOTOV(so);
1790Sstevel@tonic-gate 
1800Sstevel@tonic-gate 	if (wildcard) {
1810Sstevel@tonic-gate 		/*
1820Sstevel@tonic-gate 		 * Issue SO_PROTOTYPE setsockopt.
1830Sstevel@tonic-gate 		 */
1840Sstevel@tonic-gate 		error = SOP_SETSOCKOPT(so, SOL_SOCKET, SO_PROTOTYPE,
1855227Stz204579 		    &protocol,
1865227Stz204579 		    (t_uscalar_t)sizeof (protocol));
1870Sstevel@tonic-gate 		if (error) {
1885331Samw 			(void) VOP_CLOSE(vp, 0, 1, 0, CRED(), NULL);
1890Sstevel@tonic-gate 			VN_RELE(vp);
1900Sstevel@tonic-gate 			/*
1910Sstevel@tonic-gate 			 * Setsockopt often fails with ENOPROTOOPT but socket()
1920Sstevel@tonic-gate 			 * should fail with EPROTONOSUPPORT/EPROTOTYPE.
1930Sstevel@tonic-gate 			 */
1940Sstevel@tonic-gate 			if (saved_error != 0 && error == ENOPROTOOPT)
1950Sstevel@tonic-gate 				error = saved_error;
1960Sstevel@tonic-gate 			else
1970Sstevel@tonic-gate 				error = EPROTONOSUPPORT;
1980Sstevel@tonic-gate 			return (set_errno(error));
1990Sstevel@tonic-gate 		}
2000Sstevel@tonic-gate 	}
2010Sstevel@tonic-gate 	if (error = falloc(vp, FWRITE|FREAD, &fp, &fd)) {
2025331Samw 		(void) VOP_CLOSE(vp, 0, 1, 0, CRED(), NULL);
2030Sstevel@tonic-gate 		VN_RELE(vp);
2040Sstevel@tonic-gate 		return (set_errno(error));
2050Sstevel@tonic-gate 	}
2060Sstevel@tonic-gate 
2070Sstevel@tonic-gate 	/*
2080Sstevel@tonic-gate 	 * Now fill in the entries that falloc reserved
2090Sstevel@tonic-gate 	 */
2100Sstevel@tonic-gate 	mutex_exit(&fp->f_tlock);
2110Sstevel@tonic-gate 	setf(fd, fp);
2120Sstevel@tonic-gate 
2130Sstevel@tonic-gate 	return (fd);
2140Sstevel@tonic-gate }
2150Sstevel@tonic-gate 
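/*
 * Illustrative sketch (not part of the original source): the library-side
 * fallback described in the block comment above so_socket().  The names
 * create_socket_with_fallback(), _so_socket() and
 * lookup_devpath_in_netconfig() are hypothetical stand-ins for the private
 * libsocket plumbing; only the two-step flow is the point.
 */
#if 0
static int
create_socket_with_fallback(int domain, int type, int protocol, int version)
{
	/* First attempt: let the kernel find the transport via solookup(). */
	int fd = _so_socket(domain, type, protocol, NULL, version);

	if (fd == -1) {
		/*
		 * Kernel table lookup failed (e.g. EPROTONOSUPPORT);
		 * consult /etc/netconfig for a device path and retry,
		 * this time passing the devpath down to the kernel.
		 */
		char *devpath = lookup_devpath_in_netconfig(domain, type,
		    protocol);

		if (devpath != NULL)
			fd = _so_socket(domain, type, protocol, devpath,
			    version);
	}
	return (fd);
}
#endif
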
2160Sstevel@tonic-gate /*
2170Sstevel@tonic-gate  * Map from a file descriptor to a socket node.
2180Sstevel@tonic-gate  * Returns with the file descriptor held i.e. the caller has to
2190Sstevel@tonic-gate  * use releasef when done with the file descriptor.
2200Sstevel@tonic-gate  */
2215227Stz204579 struct sonode *
2220Sstevel@tonic-gate getsonode(int sock, int *errorp, file_t **fpp)
2230Sstevel@tonic-gate {
2240Sstevel@tonic-gate 	file_t *fp;
2250Sstevel@tonic-gate 	vnode_t *vp;
2260Sstevel@tonic-gate 	struct sonode *so;
2270Sstevel@tonic-gate 
2280Sstevel@tonic-gate 	if ((fp = getf(sock)) == NULL) {
2290Sstevel@tonic-gate 		*errorp = EBADF;
2300Sstevel@tonic-gate 		eprintline(*errorp);
2310Sstevel@tonic-gate 		return (NULL);
2320Sstevel@tonic-gate 	}
2330Sstevel@tonic-gate 	vp = fp->f_vnode;
2340Sstevel@tonic-gate 	/* Check if it is a socket */
2350Sstevel@tonic-gate 	if (vp->v_type != VSOCK) {
2360Sstevel@tonic-gate 		releasef(sock);
2370Sstevel@tonic-gate 		*errorp = ENOTSOCK;
2380Sstevel@tonic-gate 		eprintline(*errorp);
2390Sstevel@tonic-gate 		return (NULL);
2400Sstevel@tonic-gate 	}
2410Sstevel@tonic-gate 	/*
2420Sstevel@tonic-gate 	 * Use the stream head to find the real socket vnode.
2430Sstevel@tonic-gate 	 * This is needed when namefs sits above sockfs.
2440Sstevel@tonic-gate 	 */
2450Sstevel@tonic-gate 	if (vp->v_stream) {
2460Sstevel@tonic-gate 		ASSERT(vp->v_stream->sd_vnode);
2470Sstevel@tonic-gate 		vp = vp->v_stream->sd_vnode;
2480Sstevel@tonic-gate 
2490Sstevel@tonic-gate 		so = VTOSO(vp);
2500Sstevel@tonic-gate 		if (so->so_version == SOV_STREAM) {
2510Sstevel@tonic-gate 			releasef(sock);
2520Sstevel@tonic-gate 			*errorp = ENOTSOCK;
2530Sstevel@tonic-gate 			eprintsoline(so, *errorp);
2540Sstevel@tonic-gate 			return (NULL);
2550Sstevel@tonic-gate 		}
2560Sstevel@tonic-gate 	} else {
2570Sstevel@tonic-gate 		so = VTOSO(vp);
2580Sstevel@tonic-gate 	}
2590Sstevel@tonic-gate 	if (fpp)
2600Sstevel@tonic-gate 		*fpp = fp;
2610Sstevel@tonic-gate 	return (so);
2620Sstevel@tonic-gate }
2630Sstevel@tonic-gate 
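/*
 * Illustrative sketch (not part of the original source): the ENOTSOCK path
 * in getsonode() as seen from user land, when a socket call is issued on a
 * descriptor whose vnode is not VSOCK.
 */
#if 0
static void
show_enotsock(void)
{
	int fd = open("/etc/motd", O_RDONLY);

	if (listen(fd, 5) == -1 && errno == ENOTSOCK) {
		/* getsonode() rejected the non-socket descriptor */
	}
	(void) close(fd);
}
#endif
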
2640Sstevel@tonic-gate /*
2650Sstevel@tonic-gate  * Allocate and copyin a sockaddr.
2660Sstevel@tonic-gate  * Ensures NULL termination for AF_UNIX addresses by extending them
2670Sstevel@tonic-gate  * with one NULL byte if need be. Verifies that the length is not
2680Sstevel@tonic-gate  * excessive to prevent an application from consuming all of kernel
2690Sstevel@tonic-gate  * memory. Returns NULL when an error occurs.
2700Sstevel@tonic-gate  */
2710Sstevel@tonic-gate static struct sockaddr *
2720Sstevel@tonic-gate copyin_name(struct sonode *so, struct sockaddr *name, socklen_t *namelenp,
2730Sstevel@tonic-gate 	    int *errorp)
2740Sstevel@tonic-gate {
2750Sstevel@tonic-gate 	char	*faddr;
2760Sstevel@tonic-gate 	size_t	namelen = (size_t)*namelenp;
2770Sstevel@tonic-gate 
2780Sstevel@tonic-gate 	ASSERT(namelen != 0);
2790Sstevel@tonic-gate 	if (namelen > SO_MAXARGSIZE) {
2800Sstevel@tonic-gate 		*errorp = EINVAL;
2810Sstevel@tonic-gate 		eprintsoline(so, *errorp);
2820Sstevel@tonic-gate 		return (NULL);
2830Sstevel@tonic-gate 	}
2840Sstevel@tonic-gate 
2850Sstevel@tonic-gate 	faddr = (char *)kmem_alloc(namelen, KM_SLEEP);
2860Sstevel@tonic-gate 	if (copyin(name, faddr, namelen)) {
2870Sstevel@tonic-gate 		kmem_free(faddr, namelen);
2880Sstevel@tonic-gate 		*errorp = EFAULT;
2890Sstevel@tonic-gate 		eprintsoline(so, *errorp);
2900Sstevel@tonic-gate 		return (NULL);
2910Sstevel@tonic-gate 	}
2920Sstevel@tonic-gate 
2930Sstevel@tonic-gate 	/*
2940Sstevel@tonic-gate 	 * Add space for NULL termination if needed.
2950Sstevel@tonic-gate 	 * Do a quick check if the last byte is NUL.
2960Sstevel@tonic-gate 	 */
2970Sstevel@tonic-gate 	if (so->so_family == AF_UNIX && faddr[namelen - 1] != '\0') {
2980Sstevel@tonic-gate 		/* Check if there is any NULL termination */
2990Sstevel@tonic-gate 		size_t	i;
3000Sstevel@tonic-gate 		int foundnull = 0;
3010Sstevel@tonic-gate 
3020Sstevel@tonic-gate 		for (i = sizeof (name->sa_family); i < namelen; i++) {
3030Sstevel@tonic-gate 			if (faddr[i] == '\0') {
3040Sstevel@tonic-gate 				foundnull = 1;
3050Sstevel@tonic-gate 				break;
3060Sstevel@tonic-gate 			}
3070Sstevel@tonic-gate 		}
3080Sstevel@tonic-gate 		if (!foundnull) {
3090Sstevel@tonic-gate 			/* Add extra byte for NUL padding */
3100Sstevel@tonic-gate 			char *nfaddr;
3110Sstevel@tonic-gate 
3120Sstevel@tonic-gate 			nfaddr = (char *)kmem_alloc(namelen + 1, KM_SLEEP);
3130Sstevel@tonic-gate 			bcopy(faddr, nfaddr, namelen);
3140Sstevel@tonic-gate 			kmem_free(faddr, namelen);
3150Sstevel@tonic-gate 
3160Sstevel@tonic-gate 			/* NUL terminate */
3170Sstevel@tonic-gate 			nfaddr[namelen] = '\0';
3180Sstevel@tonic-gate 			namelen++;
3190Sstevel@tonic-gate 			ASSERT((socklen_t)namelen == namelen);
3200Sstevel@tonic-gate 			*namelenp = (socklen_t)namelen;
3210Sstevel@tonic-gate 			faddr = nfaddr;
3220Sstevel@tonic-gate 		}
3230Sstevel@tonic-gate 	}
3240Sstevel@tonic-gate 	return ((struct sockaddr *)faddr);
3250Sstevel@tonic-gate }
3260Sstevel@tonic-gate 
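/*
 * Illustrative sketch (not part of the original source): an AF_UNIX bind()
 * whose sun_path is not NUL terminated within the length passed in.
 * copyin_name() above extends such an address with one NUL byte, so the
 * rest of the kernel always sees a terminated path.
 */
#if 0
static void
bind_unterminated_path(void)
{
	struct sockaddr_un sun;
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);

	(void) memset(&sun, 0, sizeof (sun));
	sun.sun_family = AF_UNIX;
	(void) memcpy(sun.sun_path, "/tmp/demo", 9);

	/*
	 * The passed length covers the family plus nine path bytes and no
	 * terminating NUL, so copyin_name() appends one before binding.
	 */
	(void) bind(fd, (struct sockaddr *)&sun,
	    (socklen_t)(sizeof (sun.sun_family) + 9));
	(void) close(fd);
}
#endif
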
3270Sstevel@tonic-gate /*
3280Sstevel@tonic-gate  * Copy from kaddr/klen to uaddr/ulen. Updates ulenp if non-NULL.
3290Sstevel@tonic-gate  */
3300Sstevel@tonic-gate static int
3310Sstevel@tonic-gate copyout_arg(void *uaddr, socklen_t ulen, void *ulenp,
3320Sstevel@tonic-gate 		void *kaddr, socklen_t klen)
3330Sstevel@tonic-gate {
3340Sstevel@tonic-gate 	if (uaddr != NULL) {
3350Sstevel@tonic-gate 		if (ulen > klen)
3360Sstevel@tonic-gate 			ulen = klen;
3370Sstevel@tonic-gate 
3380Sstevel@tonic-gate 		if (ulen != 0) {
3390Sstevel@tonic-gate 			if (copyout(kaddr, uaddr, ulen))
3400Sstevel@tonic-gate 				return (EFAULT);
3410Sstevel@tonic-gate 		}
3420Sstevel@tonic-gate 	} else
3430Sstevel@tonic-gate 		ulen = 0;
3440Sstevel@tonic-gate 
3450Sstevel@tonic-gate 	if (ulenp != NULL) {
3460Sstevel@tonic-gate 		if (copyout(&ulen, ulenp, sizeof (ulen)))
3470Sstevel@tonic-gate 			return (EFAULT);
3480Sstevel@tonic-gate 	}
3490Sstevel@tonic-gate 	return (0);
3500Sstevel@tonic-gate }
3510Sstevel@tonic-gate 
3520Sstevel@tonic-gate /*
3530Sstevel@tonic-gate  * Copy from kaddr/klen to uaddr/ulen. Updates ulenp if non-NULL.
3540Sstevel@tonic-gate  * If klen is greater than ulen it still uses the non-truncated
3550Sstevel@tonic-gate  * klen to update ulenp.
3560Sstevel@tonic-gate  */
3570Sstevel@tonic-gate static int
3580Sstevel@tonic-gate copyout_name(void *uaddr, socklen_t ulen, void *ulenp,
3590Sstevel@tonic-gate 		void *kaddr, socklen_t klen)
3600Sstevel@tonic-gate {
3610Sstevel@tonic-gate 	if (uaddr != NULL) {
3620Sstevel@tonic-gate 		if (ulen >= klen)
3630Sstevel@tonic-gate 			ulen = klen;
3640Sstevel@tonic-gate 		else if (ulen != 0 && xnet_truncate_print) {
3650Sstevel@tonic-gate 			printf("sockfs: truncating copyout of address using "
3660Sstevel@tonic-gate 			    "XNET semantics for pid = %d. Lengths %d, %d\n",
3670Sstevel@tonic-gate 			    curproc->p_pid, klen, ulen);
3680Sstevel@tonic-gate 		}
3690Sstevel@tonic-gate 
3700Sstevel@tonic-gate 		if (ulen != 0) {
3710Sstevel@tonic-gate 			if (copyout(kaddr, uaddr, ulen))
3720Sstevel@tonic-gate 				return (EFAULT);
3730Sstevel@tonic-gate 		} else
3740Sstevel@tonic-gate 			klen = 0;
3750Sstevel@tonic-gate 	} else
3760Sstevel@tonic-gate 		klen = 0;
3770Sstevel@tonic-gate 
3780Sstevel@tonic-gate 	if (ulenp != NULL) {
3790Sstevel@tonic-gate 		if (copyout(&klen, ulenp, sizeof (klen)))
3800Sstevel@tonic-gate 			return (EFAULT);
3810Sstevel@tonic-gate 	}
3820Sstevel@tonic-gate 	return (0);
3830Sstevel@tonic-gate }
3840Sstevel@tonic-gate 
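/*
 * Illustrative sketch (not part of the original source): the difference a
 * caller sees from copyout_name() above.  Unlike copyout_arg(), it reports
 * the full, untruncated address length (XNET semantics), so an application
 * can detect that its buffer was too small.  ufd is assumed to be a bound
 * AF_UNIX SOCK_DGRAM socket.
 */
#if 0
static void
show_xnet_truncation(int ufd)
{
	struct sockaddr_in small;	/* too small for a long AF_UNIX path */
	socklen_t alen = sizeof (small);
	char buf[64];

	if (recvfrom(ufd, buf, sizeof (buf), 0,
	    (struct sockaddr *)&small, &alen) == -1)
		return;

	if (alen > sizeof (small)) {
		/*
		 * copyout_name() reported the real address length even
		 * though only sizeof (small) bytes were copied out.
		 */
	}
}
#endif
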
3850Sstevel@tonic-gate /*
3860Sstevel@tonic-gate  * The socketpair() code in libsocket creates two sockets (using
3870Sstevel@tonic-gate  * the /etc/netconfig fallback if needed) before calling this routine
3880Sstevel@tonic-gate  * to connect the two sockets together.
3890Sstevel@tonic-gate  *
3900Sstevel@tonic-gate  * For a SOCK_STREAM socketpair a listener is needed - in that case this
3910Sstevel@tonic-gate  * routine will create a new file descriptor as part of accepting the
3920Sstevel@tonic-gate  * connection. The library socketpair() will check whether sv[0] has
3930Sstevel@tonic-gate  * changed, in which case it will close the original fd.
3940Sstevel@tonic-gate  *
3950Sstevel@tonic-gate  * Note that this code could use the TPI feature of accepting the connection
3960Sstevel@tonic-gate  * on the listening endpoint. However, that would require significant changes
3970Sstevel@tonic-gate  * to soaccept.
3980Sstevel@tonic-gate  */
3990Sstevel@tonic-gate int
4000Sstevel@tonic-gate so_socketpair(int sv[2])
4010Sstevel@tonic-gate {
4020Sstevel@tonic-gate 	int svs[2];
4030Sstevel@tonic-gate 	struct sonode *so1, *so2;
4040Sstevel@tonic-gate 	int error;
4050Sstevel@tonic-gate 	struct sockaddr_ux *name;
4060Sstevel@tonic-gate 	size_t namelen;
4070Sstevel@tonic-gate 
4080Sstevel@tonic-gate 	dprint(1, ("so_socketpair(%p)\n", sv));
4090Sstevel@tonic-gate 
4100Sstevel@tonic-gate 	error = useracc(sv, sizeof (svs), B_WRITE);
4110Sstevel@tonic-gate 	if (error && do_useracc)
4120Sstevel@tonic-gate 		return (set_errno(EFAULT));
4130Sstevel@tonic-gate 
4140Sstevel@tonic-gate 	if (copyin(sv, svs, sizeof (svs)))
4150Sstevel@tonic-gate 		return (set_errno(EFAULT));
4160Sstevel@tonic-gate 
4170Sstevel@tonic-gate 	if ((so1 = getsonode(svs[0], &error, NULL)) == NULL)
4180Sstevel@tonic-gate 		return (set_errno(error));
4190Sstevel@tonic-gate 
4200Sstevel@tonic-gate 	if ((so2 = getsonode(svs[1], &error, NULL)) == NULL) {
4210Sstevel@tonic-gate 		releasef(svs[0]);
4220Sstevel@tonic-gate 		return (set_errno(error));
4230Sstevel@tonic-gate 	}
4240Sstevel@tonic-gate 
4250Sstevel@tonic-gate 	if (so1->so_family != AF_UNIX || so2->so_family != AF_UNIX) {
4260Sstevel@tonic-gate 		error = EOPNOTSUPP;
4270Sstevel@tonic-gate 		goto done;
4280Sstevel@tonic-gate 	}
4290Sstevel@tonic-gate 
4300Sstevel@tonic-gate 	/*
4310Sstevel@tonic-gate 	 * The code below makes assumptions about the "sockfs" implementation.
4320Sstevel@tonic-gate 	 * So make sure that the correct implementation is really used.
4330Sstevel@tonic-gate 	 */
4340Sstevel@tonic-gate 	ASSERT(so1->so_ops == &sotpi_sonodeops);
4350Sstevel@tonic-gate 	ASSERT(so2->so_ops == &sotpi_sonodeops);
4360Sstevel@tonic-gate 
4370Sstevel@tonic-gate 	if (so1->so_type == SOCK_DGRAM) {
4380Sstevel@tonic-gate 		/*
4390Sstevel@tonic-gate 		 * Bind both sockets and connect them with each other.
4400Sstevel@tonic-gate 		 * Need to allocate name/namelen for soconnect.
4410Sstevel@tonic-gate 		 */
4420Sstevel@tonic-gate 		error = SOP_BIND(so1, NULL, 0, _SOBIND_UNSPEC);
4430Sstevel@tonic-gate 		if (error) {
4440Sstevel@tonic-gate 			eprintsoline(so1, error);
4450Sstevel@tonic-gate 			goto done;
4460Sstevel@tonic-gate 		}
4470Sstevel@tonic-gate 		error = SOP_BIND(so2, NULL, 0, _SOBIND_UNSPEC);
4480Sstevel@tonic-gate 		if (error) {
4490Sstevel@tonic-gate 			eprintsoline(so2, error);
4500Sstevel@tonic-gate 			goto done;
4510Sstevel@tonic-gate 		}
4520Sstevel@tonic-gate 		namelen = sizeof (struct sockaddr_ux);
4530Sstevel@tonic-gate 		name = kmem_alloc(namelen, KM_SLEEP);
4540Sstevel@tonic-gate 		name->sou_family = AF_UNIX;
4550Sstevel@tonic-gate 		name->sou_addr = so2->so_ux_laddr;
4560Sstevel@tonic-gate 		error = SOP_CONNECT(so1,
4575227Stz204579 		    (struct sockaddr *)name,
4585227Stz204579 		    (socklen_t)namelen,
4595227Stz204579 		    0, _SOCONNECT_NOXLATE);
4600Sstevel@tonic-gate 		if (error) {
4610Sstevel@tonic-gate 			kmem_free(name, namelen);
4620Sstevel@tonic-gate 			eprintsoline(so1, error);
4630Sstevel@tonic-gate 			goto done;
4640Sstevel@tonic-gate 		}
4650Sstevel@tonic-gate 		name->sou_addr = so1->so_ux_laddr;
4660Sstevel@tonic-gate 		error = SOP_CONNECT(so2,
4675227Stz204579 		    (struct sockaddr *)name,
4685227Stz204579 		    (socklen_t)namelen,
4695227Stz204579 		    0, _SOCONNECT_NOXLATE);
4700Sstevel@tonic-gate 		kmem_free(name, namelen);
4710Sstevel@tonic-gate 		if (error) {
4720Sstevel@tonic-gate 			eprintsoline(so2, error);
4730Sstevel@tonic-gate 			goto done;
4740Sstevel@tonic-gate 		}
4750Sstevel@tonic-gate 		releasef(svs[0]);
4760Sstevel@tonic-gate 		releasef(svs[1]);
4770Sstevel@tonic-gate 	} else {
4780Sstevel@tonic-gate 		/*
4790Sstevel@tonic-gate 		 * Bind both sockets, with so1 being a listener.
4800Sstevel@tonic-gate 		 * Connect so2 to so1 - nonblocking to avoid waiting for
4810Sstevel@tonic-gate 		 * soaccept to complete.
4820Sstevel@tonic-gate 		 * Accept a connection on so1. Pass out the new fd as sv[0].
4830Sstevel@tonic-gate 		 * The library will detect the changed fd and close
4840Sstevel@tonic-gate 		 * the original one.
4850Sstevel@tonic-gate 		 */
4860Sstevel@tonic-gate 		struct sonode *nso;
4870Sstevel@tonic-gate 		struct vnode *nvp;
4880Sstevel@tonic-gate 		struct file *nfp;
4890Sstevel@tonic-gate 		int nfd;
4900Sstevel@tonic-gate 
4910Sstevel@tonic-gate 		/*
4920Sstevel@tonic-gate 		 * We could simply call SOP_LISTEN() here (which would do the
4930Sstevel@tonic-gate 		 * binding automatically) if the code didn't rely on passing
4940Sstevel@tonic-gate 		 * _SOBIND_NOXLATE to the TPI implementation of SOP_BIND().
4950Sstevel@tonic-gate 		 */
4960Sstevel@tonic-gate 		error = SOP_BIND(so1, NULL, 0, _SOBIND_UNSPEC|_SOBIND_NOXLATE|
4970Sstevel@tonic-gate 		    _SOBIND_LISTEN|_SOBIND_SOCKETPAIR);
4980Sstevel@tonic-gate 		if (error) {
4990Sstevel@tonic-gate 			eprintsoline(so1, error);
5000Sstevel@tonic-gate 			goto done;
5010Sstevel@tonic-gate 		}
5020Sstevel@tonic-gate 		error = SOP_BIND(so2, NULL, 0, _SOBIND_UNSPEC);
5030Sstevel@tonic-gate 		if (error) {
5040Sstevel@tonic-gate 			eprintsoline(so2, error);
5050Sstevel@tonic-gate 			goto done;
5060Sstevel@tonic-gate 		}
5070Sstevel@tonic-gate 
5080Sstevel@tonic-gate 		namelen = sizeof (struct sockaddr_ux);
5090Sstevel@tonic-gate 		name = kmem_alloc(namelen, KM_SLEEP);
5100Sstevel@tonic-gate 		name->sou_family = AF_UNIX;
5110Sstevel@tonic-gate 		name->sou_addr = so1->so_ux_laddr;
5120Sstevel@tonic-gate 		error = SOP_CONNECT(so2,
5135227Stz204579 		    (struct sockaddr *)name,
5145227Stz204579 		    (socklen_t)namelen,
5155227Stz204579 		    FNONBLOCK, _SOCONNECT_NOXLATE);
5160Sstevel@tonic-gate 		kmem_free(name, namelen);
5170Sstevel@tonic-gate 		if (error) {
5180Sstevel@tonic-gate 			if (error != EINPROGRESS) {
5190Sstevel@tonic-gate 				eprintsoline(so2, error);
5200Sstevel@tonic-gate 				goto done;
5210Sstevel@tonic-gate 			}
5220Sstevel@tonic-gate 		}
5230Sstevel@tonic-gate 
5240Sstevel@tonic-gate 		error = SOP_ACCEPT(so1, 0, &nso);
5250Sstevel@tonic-gate 		if (error) {
5260Sstevel@tonic-gate 			eprintsoline(so1, error);
5270Sstevel@tonic-gate 			goto done;
5280Sstevel@tonic-gate 		}
5290Sstevel@tonic-gate 
5300Sstevel@tonic-gate 		/* wait for so2 to become SS_CONNECTED, ignoring signals */
5310Sstevel@tonic-gate 		mutex_enter(&so2->so_lock);
5320Sstevel@tonic-gate 		error = sowaitconnected(so2, 0, 1);
5330Sstevel@tonic-gate 		mutex_exit(&so2->so_lock);
5340Sstevel@tonic-gate 		nvp = SOTOV(nso);
5350Sstevel@tonic-gate 		if (error != 0) {
5365331Samw 			(void) VOP_CLOSE(nvp, 0, 1, 0, CRED(), NULL);
5370Sstevel@tonic-gate 			VN_RELE(nvp);
5380Sstevel@tonic-gate 			eprintsoline(so2, error);
5390Sstevel@tonic-gate 			goto done;
5400Sstevel@tonic-gate 		}
5410Sstevel@tonic-gate 
5420Sstevel@tonic-gate 		if (error = falloc(nvp, FWRITE|FREAD, &nfp, &nfd)) {
5435331Samw 			(void) VOP_CLOSE(nvp, 0, 1, 0, CRED(), NULL);
5440Sstevel@tonic-gate 			VN_RELE(nvp);
5450Sstevel@tonic-gate 			eprintsoline(nso, error);
5460Sstevel@tonic-gate 			goto done;
5470Sstevel@tonic-gate 		}
5480Sstevel@tonic-gate 		/*
5490Sstevel@tonic-gate 		 * fill in the entries that falloc reserved
5500Sstevel@tonic-gate 		 */
5510Sstevel@tonic-gate 		mutex_exit(&nfp->f_tlock);
5520Sstevel@tonic-gate 		setf(nfd, nfp);
5530Sstevel@tonic-gate 
5540Sstevel@tonic-gate 		releasef(svs[0]);
5550Sstevel@tonic-gate 		releasef(svs[1]);
5560Sstevel@tonic-gate 		svs[0] = nfd;
5570Sstevel@tonic-gate 
5580Sstevel@tonic-gate 		/*
5590Sstevel@tonic-gate 		 * The socketpair library routine will close the original
5600Sstevel@tonic-gate 		 * svs[0] when this code passes out a different file
5610Sstevel@tonic-gate 		 * descriptor.
5620Sstevel@tonic-gate 		 */
5630Sstevel@tonic-gate 		if (copyout(svs, sv, sizeof (svs))) {
5640Sstevel@tonic-gate 			(void) closeandsetf(nfd, NULL);
5650Sstevel@tonic-gate 			eprintline(EFAULT);
5660Sstevel@tonic-gate 			return (set_errno(EFAULT));
5670Sstevel@tonic-gate 		}
5680Sstevel@tonic-gate 	}
5690Sstevel@tonic-gate 	return (0);
5700Sstevel@tonic-gate 
5710Sstevel@tonic-gate done:
5720Sstevel@tonic-gate 	releasef(svs[0]);
5730Sstevel@tonic-gate 	releasef(svs[1]);
5740Sstevel@tonic-gate 	return (set_errno(error));
5750Sstevel@tonic-gate }
5760Sstevel@tonic-gate 
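/*
 * Illustrative sketch (not part of the original source): the user-level view
 * of the machinery above.  For SOCK_STREAM pairs the kernel accepts a new
 * connection and passes a new descriptor out in sv[0]; libsocket hides that
 * by closing the original, so the caller simply sees two connected fds.
 */
#if 0
static void
stream_pair(void)
{
	int sv[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
		(void) write(sv[0], "ping", 4);
		/* a read on sv[1] would now return "ping" */
		(void) close(sv[0]);
		(void) close(sv[1]);
	}
}
#endif
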
5770Sstevel@tonic-gate int
5780Sstevel@tonic-gate bind(int sock, struct sockaddr *name, socklen_t namelen, int version)
5790Sstevel@tonic-gate {
5800Sstevel@tonic-gate 	struct sonode *so;
5810Sstevel@tonic-gate 	int error;
5820Sstevel@tonic-gate 
5830Sstevel@tonic-gate 	dprint(1, ("bind(%d, %p, %d)\n",
5845227Stz204579 	    sock, name, namelen));
5850Sstevel@tonic-gate 
5860Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
5870Sstevel@tonic-gate 		return (set_errno(error));
5880Sstevel@tonic-gate 
5890Sstevel@tonic-gate 	/* Allocate and copyin name */
5900Sstevel@tonic-gate 	/*
5910Sstevel@tonic-gate 	 * X/Open test does not expect EFAULT with NULL name and non-zero
5920Sstevel@tonic-gate 	 * namelen.
5930Sstevel@tonic-gate 	 */
5940Sstevel@tonic-gate 	if (name != NULL && namelen != 0) {
5950Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
5960Sstevel@tonic-gate 		name = copyin_name(so, name, &namelen, &error);
5970Sstevel@tonic-gate 		if (name == NULL) {
5980Sstevel@tonic-gate 			releasef(sock);
5990Sstevel@tonic-gate 			return (set_errno(error));
6000Sstevel@tonic-gate 		}
6010Sstevel@tonic-gate 	} else {
6020Sstevel@tonic-gate 		name = NULL;
6030Sstevel@tonic-gate 		namelen = 0;
6040Sstevel@tonic-gate 	}
6050Sstevel@tonic-gate 
6060Sstevel@tonic-gate 	switch (version) {
6070Sstevel@tonic-gate 	default:
6080Sstevel@tonic-gate 		error = SOP_BIND(so, name, namelen, 0);
6090Sstevel@tonic-gate 		break;
6100Sstevel@tonic-gate 	case SOV_XPG4_2:
6110Sstevel@tonic-gate 		error = SOP_BIND(so, name, namelen, _SOBIND_XPG4_2);
6120Sstevel@tonic-gate 		break;
6130Sstevel@tonic-gate 	case SOV_SOCKBSD:
6140Sstevel@tonic-gate 		error = SOP_BIND(so, name, namelen, _SOBIND_SOCKBSD);
6150Sstevel@tonic-gate 		break;
6160Sstevel@tonic-gate 	}
6170Sstevel@tonic-gate done:
6180Sstevel@tonic-gate 	releasef(sock);
6190Sstevel@tonic-gate 	if (name != NULL)
6200Sstevel@tonic-gate 		kmem_free(name, (size_t)namelen);
6210Sstevel@tonic-gate 
6220Sstevel@tonic-gate 	if (error)
6230Sstevel@tonic-gate 		return (set_errno(error));
6240Sstevel@tonic-gate 	return (0);
6250Sstevel@tonic-gate }
6260Sstevel@tonic-gate 
6270Sstevel@tonic-gate /* ARGSUSED2 */
6280Sstevel@tonic-gate int
6290Sstevel@tonic-gate listen(int sock, int backlog, int version)
6300Sstevel@tonic-gate {
6310Sstevel@tonic-gate 	struct sonode *so;
6320Sstevel@tonic-gate 	int error;
6330Sstevel@tonic-gate 
6340Sstevel@tonic-gate 	dprint(1, ("listen(%d, %d)\n",
6355227Stz204579 	    sock, backlog));
6360Sstevel@tonic-gate 
6370Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
6380Sstevel@tonic-gate 		return (set_errno(error));
6390Sstevel@tonic-gate 
6400Sstevel@tonic-gate 	error = SOP_LISTEN(so, backlog);
6410Sstevel@tonic-gate 
6420Sstevel@tonic-gate 	releasef(sock);
6430Sstevel@tonic-gate 	if (error)
6440Sstevel@tonic-gate 		return (set_errno(error));
6450Sstevel@tonic-gate 	return (0);
6460Sstevel@tonic-gate }
6470Sstevel@tonic-gate 
6480Sstevel@tonic-gate /*ARGSUSED3*/
6490Sstevel@tonic-gate int
6500Sstevel@tonic-gate accept(int sock, struct sockaddr *name, socklen_t *namelenp, int version)
6510Sstevel@tonic-gate {
6520Sstevel@tonic-gate 	struct sonode *so;
6530Sstevel@tonic-gate 	file_t *fp;
6540Sstevel@tonic-gate 	int error;
6550Sstevel@tonic-gate 	socklen_t namelen;
6560Sstevel@tonic-gate 	struct sonode *nso;
6570Sstevel@tonic-gate 	struct vnode *nvp;
6580Sstevel@tonic-gate 	struct file *nfp;
6590Sstevel@tonic-gate 	int nfd;
6600Sstevel@tonic-gate 
6610Sstevel@tonic-gate 	dprint(1, ("accept(%d, %p, %p)\n",
6625227Stz204579 	    sock, name, namelenp));
6630Sstevel@tonic-gate 
6640Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, &fp)) == NULL)
6650Sstevel@tonic-gate 		return (set_errno(error));
6660Sstevel@tonic-gate 
6670Sstevel@tonic-gate 	if (name != NULL) {
6680Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
6690Sstevel@tonic-gate 		if (copyin(namelenp, &namelen, sizeof (namelen))) {
6700Sstevel@tonic-gate 			releasef(sock);
6710Sstevel@tonic-gate 			return (set_errno(EFAULT));
6720Sstevel@tonic-gate 		}
6730Sstevel@tonic-gate 		if (namelen != 0) {
6740Sstevel@tonic-gate 			error = useracc(name, (size_t)namelen, B_WRITE);
6750Sstevel@tonic-gate 			if (error && do_useracc) {
6760Sstevel@tonic-gate 				releasef(sock);
6770Sstevel@tonic-gate 				return (set_errno(EFAULT));
6780Sstevel@tonic-gate 			}
6790Sstevel@tonic-gate 		} else
6800Sstevel@tonic-gate 			name = NULL;
6810Sstevel@tonic-gate 	} else {
6820Sstevel@tonic-gate 		namelen = 0;
6830Sstevel@tonic-gate 	}
6840Sstevel@tonic-gate 
6850Sstevel@tonic-gate 	/*
6860Sstevel@tonic-gate 	 * Allocate the user fd before SOP_ACCEPT() so that EMFILE
6870Sstevel@tonic-gate 	 * is caught before the connection is actually accepted.
6880Sstevel@tonic-gate 	 */
6890Sstevel@tonic-gate 	if ((nfd = ufalloc(0)) == -1) {
6900Sstevel@tonic-gate 		eprintsoline(so, EMFILE);
6910Sstevel@tonic-gate 		releasef(sock);
6920Sstevel@tonic-gate 		return (set_errno(EMFILE));
6930Sstevel@tonic-gate 	}
6940Sstevel@tonic-gate 	error = SOP_ACCEPT(so, fp->f_flag, &nso);
6950Sstevel@tonic-gate 	releasef(sock);
6960Sstevel@tonic-gate 	if (error) {
6970Sstevel@tonic-gate 		setf(nfd, NULL);
6980Sstevel@tonic-gate 		return (set_errno(error));
6990Sstevel@tonic-gate 	}
7000Sstevel@tonic-gate 
7010Sstevel@tonic-gate 	nvp = SOTOV(nso);
7020Sstevel@tonic-gate 
7030Sstevel@tonic-gate 	/*
7040Sstevel@tonic-gate 	 * so_faddr_sa cannot go away even though we are not holding so_lock.
7050Sstevel@tonic-gate 	 * However, in theory its content could change from underneath us.
7060Sstevel@tonic-gate 	 * But this is not possible in practice since it can only
7070Sstevel@tonic-gate 	 * change due to either some socket system call
7080Sstevel@tonic-gate 	 * or a T_CONN_CON being received from the stream head.
7090Sstevel@tonic-gate 	 * Since the falloc/setf have not yet been done, no thread
7100Sstevel@tonic-gate 	 * can do any system call on nso and T_CONN_CON cannot arrive
7110Sstevel@tonic-gate 	 * on a socket that is already connected.
7120Sstevel@tonic-gate 	 * Thus there is no reason to hold so_lock here.
7130Sstevel@tonic-gate 	 *
7140Sstevel@tonic-gate 	 * SOP_ACCEPT() is required to have set the valid bit for the faddr,
7150Sstevel@tonic-gate 	 * but it could be instantly cleared by a disconnect from the transport.
7160Sstevel@tonic-gate 	 * For that reason we ignore it here.
7170Sstevel@tonic-gate 	 */
7180Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&nso->so_lock));
7190Sstevel@tonic-gate 	error = copyout_name(name, namelen, namelenp,
7200Sstevel@tonic-gate 	    nso->so_faddr_sa, (socklen_t)nso->so_faddr_len);
7210Sstevel@tonic-gate 	if (error) {
7220Sstevel@tonic-gate 		setf(nfd, NULL);
7235331Samw 		(void) VOP_CLOSE(nvp, 0, 1, 0, CRED(), NULL);
7240Sstevel@tonic-gate 		VN_RELE(nvp);
7250Sstevel@tonic-gate 		return (set_errno(error));
7260Sstevel@tonic-gate 	}
7270Sstevel@tonic-gate 	if (error = falloc(NULL, FWRITE|FREAD, &nfp, NULL)) {
7280Sstevel@tonic-gate 		setf(nfd, NULL);
7295331Samw 		(void) VOP_CLOSE(nvp, 0, 1, 0, CRED(), NULL);
7300Sstevel@tonic-gate 		VN_RELE(nvp);
7310Sstevel@tonic-gate 		eprintsoline(so, error);
7320Sstevel@tonic-gate 		return (set_errno(error));
7330Sstevel@tonic-gate 	}
7340Sstevel@tonic-gate 	/*
7350Sstevel@tonic-gate 	 * fill in the entries that falloc reserved
7360Sstevel@tonic-gate 	 */
7370Sstevel@tonic-gate 	nfp->f_vnode = nvp;
7380Sstevel@tonic-gate 	mutex_exit(&nfp->f_tlock);
7390Sstevel@tonic-gate 	setf(nfd, nfp);
7400Sstevel@tonic-gate 
7410Sstevel@tonic-gate 	/*
7420Sstevel@tonic-gate 	 * Copy FNDELAY and FNONBLOCK from listener to acceptor
7430Sstevel@tonic-gate 	 */
7440Sstevel@tonic-gate 	if (so->so_state & (SS_NDELAY|SS_NONBLOCK)) {
7450Sstevel@tonic-gate 		uint_t oflag = nfp->f_flag;
7460Sstevel@tonic-gate 		int arg = 0;
7470Sstevel@tonic-gate 
7480Sstevel@tonic-gate 		if (so->so_state & SS_NONBLOCK)
7490Sstevel@tonic-gate 			arg |= FNONBLOCK;
7500Sstevel@tonic-gate 		else if (so->so_state & SS_NDELAY)
7510Sstevel@tonic-gate 			arg |= FNDELAY;
7520Sstevel@tonic-gate 
7530Sstevel@tonic-gate 		/*
7540Sstevel@tonic-gate 		 * This code is a simplification of the F_SETFL code in fcntl().
7550Sstevel@tonic-gate 		 * Ignore any errors from VOP_SETFL.
7560Sstevel@tonic-gate 		 */
7575331Samw 		if ((error = VOP_SETFL(nvp, oflag, arg, nfp->f_cred, NULL))
7585331Samw 		    != 0) {
7590Sstevel@tonic-gate 			eprintsoline(so, error);
7600Sstevel@tonic-gate 			error = 0;
7610Sstevel@tonic-gate 		} else {
7620Sstevel@tonic-gate 			mutex_enter(&nfp->f_tlock);
7630Sstevel@tonic-gate 			nfp->f_flag &= ~FMASK | (FREAD|FWRITE);
7640Sstevel@tonic-gate 			nfp->f_flag |= arg;
7650Sstevel@tonic-gate 			mutex_exit(&nfp->f_tlock);
7660Sstevel@tonic-gate 		}
7670Sstevel@tonic-gate 	}
7680Sstevel@tonic-gate 	return (nfd);
7690Sstevel@tonic-gate }
7700Sstevel@tonic-gate 
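/*
 * Illustrative sketch (not part of the original source): a caller of the
 * accept() implemented above.  The namelen argument is value-result, and,
 * as the code above shows, FNDELAY/FNONBLOCK are propagated from the
 * listener to the accepted descriptor.
 */
#if 0
static int
accept_one(int lfd)
{
	struct sockaddr_storage peer;
	socklen_t peerlen = sizeof (peer);
	int afd;

	/* If lfd is non-blocking, afd comes back non-blocking as well. */
	afd = accept(lfd, (struct sockaddr *)&peer, &peerlen);
	if (afd == -1)
		return (-1);

	/* peerlen now holds the full peer address length (XNET semantics) */
	return (afd);
}
#endif
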
7710Sstevel@tonic-gate int
7720Sstevel@tonic-gate connect(int sock, struct sockaddr *name, socklen_t namelen, int version)
7730Sstevel@tonic-gate {
7740Sstevel@tonic-gate 	struct sonode *so;
7750Sstevel@tonic-gate 	file_t *fp;
7760Sstevel@tonic-gate 	int error;
7770Sstevel@tonic-gate 
7780Sstevel@tonic-gate 	dprint(1, ("connect(%d, %p, %d)\n",
7795227Stz204579 	    sock, name, namelen));
7800Sstevel@tonic-gate 
7810Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, &fp)) == NULL)
7820Sstevel@tonic-gate 		return (set_errno(error));
7830Sstevel@tonic-gate 
7840Sstevel@tonic-gate 	/* Allocate and copyin name */
7850Sstevel@tonic-gate 	if (namelen != 0) {
7860Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
7870Sstevel@tonic-gate 		name = copyin_name(so, name, &namelen, &error);
7880Sstevel@tonic-gate 		if (name == NULL) {
7890Sstevel@tonic-gate 			releasef(sock);
7900Sstevel@tonic-gate 			return (set_errno(error));
7910Sstevel@tonic-gate 		}
7920Sstevel@tonic-gate 	} else
7930Sstevel@tonic-gate 		name = NULL;
7940Sstevel@tonic-gate 
7950Sstevel@tonic-gate 	error = SOP_CONNECT(so, name, namelen, fp->f_flag,
7960Sstevel@tonic-gate 	    (version != SOV_XPG4_2) ? 0 : _SOCONNECT_XPG4_2);
7970Sstevel@tonic-gate 	releasef(sock);
7980Sstevel@tonic-gate 	if (name)
7990Sstevel@tonic-gate 		kmem_free(name, (size_t)namelen);
8000Sstevel@tonic-gate 	if (error)
8010Sstevel@tonic-gate 		return (set_errno(error));
8020Sstevel@tonic-gate 	return (0);
8030Sstevel@tonic-gate }
8040Sstevel@tonic-gate 
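/*
 * Illustrative sketch (not part of the original source): a non-blocking
 * connect() against the SOP_CONNECT() path above; EINPROGRESS is the
 * expected "still connecting" result (compare the FNONBLOCK connect used
 * in so_socketpair()).
 */
#if 0
static int
connect_nonblocking(int fd, const struct sockaddr *sa, socklen_t salen)
{
	(void) fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);

	if (connect(fd, sa, salen) == -1 && errno != EINPROGRESS)
		return (-1);

	/* completion is later observed via poll() or getsockopt(SO_ERROR) */
	return (0);
}
#endif
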
8050Sstevel@tonic-gate /*ARGSUSED2*/
8060Sstevel@tonic-gate int
8070Sstevel@tonic-gate shutdown(int sock, int how, int version)
8080Sstevel@tonic-gate {
8090Sstevel@tonic-gate 	struct sonode *so;
8100Sstevel@tonic-gate 	int error;
8110Sstevel@tonic-gate 
8120Sstevel@tonic-gate 	dprint(1, ("shutdown(%d, %d)\n",
8135227Stz204579 	    sock, how));
8140Sstevel@tonic-gate 
8150Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
8160Sstevel@tonic-gate 		return (set_errno(error));
8170Sstevel@tonic-gate 
8180Sstevel@tonic-gate 	error = SOP_SHUTDOWN(so, how);
8190Sstevel@tonic-gate 
8200Sstevel@tonic-gate 	releasef(sock);
8210Sstevel@tonic-gate 	if (error)
8220Sstevel@tonic-gate 		return (set_errno(error));
8230Sstevel@tonic-gate 	return (0);
8240Sstevel@tonic-gate }
8250Sstevel@tonic-gate 
8260Sstevel@tonic-gate /*
8270Sstevel@tonic-gate  * Common receive routine.
8280Sstevel@tonic-gate  */
8290Sstevel@tonic-gate static ssize_t
8300Sstevel@tonic-gate recvit(int sock,
8310Sstevel@tonic-gate 	struct nmsghdr *msg,
8320Sstevel@tonic-gate 	struct uio *uiop,
8330Sstevel@tonic-gate 	int flags,
8340Sstevel@tonic-gate 	socklen_t *namelenp,
8350Sstevel@tonic-gate 	socklen_t *controllenp,
8360Sstevel@tonic-gate 	int *flagsp)
8370Sstevel@tonic-gate {
8380Sstevel@tonic-gate 	struct sonode *so;
8390Sstevel@tonic-gate 	file_t *fp;
8400Sstevel@tonic-gate 	void *name;
8410Sstevel@tonic-gate 	socklen_t namelen;
8420Sstevel@tonic-gate 	void *control;
8430Sstevel@tonic-gate 	socklen_t controllen;
8440Sstevel@tonic-gate 	ssize_t len;
8450Sstevel@tonic-gate 	int error;
8460Sstevel@tonic-gate 
8470Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, &fp)) == NULL)
8480Sstevel@tonic-gate 		return (set_errno(error));
8490Sstevel@tonic-gate 
8500Sstevel@tonic-gate 	len = uiop->uio_resid;
8510Sstevel@tonic-gate 	uiop->uio_fmode = fp->f_flag;
8520Sstevel@tonic-gate 	uiop->uio_extflg = UIO_COPY_CACHED;
8530Sstevel@tonic-gate 
8540Sstevel@tonic-gate 	name = msg->msg_name;
8550Sstevel@tonic-gate 	namelen = msg->msg_namelen;
8560Sstevel@tonic-gate 	control = msg->msg_control;
8570Sstevel@tonic-gate 	controllen = msg->msg_controllen;
8580Sstevel@tonic-gate 
8590Sstevel@tonic-gate 	msg->msg_flags = flags & (MSG_OOB | MSG_PEEK | MSG_WAITALL |
8600Sstevel@tonic-gate 	    MSG_DONTWAIT | MSG_XPG4_2);
8610Sstevel@tonic-gate 
8620Sstevel@tonic-gate 	error = SOP_RECVMSG(so, msg, uiop);
8630Sstevel@tonic-gate 	if (error) {
8640Sstevel@tonic-gate 		releasef(sock);
8650Sstevel@tonic-gate 		return (set_errno(error));
8660Sstevel@tonic-gate 	}
8670Sstevel@tonic-gate 	lwp_stat_update(LWP_STAT_MSGRCV, 1);
8680Sstevel@tonic-gate 	so_update_attrs(so, SOACC);
8690Sstevel@tonic-gate 	releasef(sock);
8700Sstevel@tonic-gate 
8710Sstevel@tonic-gate 	error = copyout_name(name, namelen, namelenp,
8720Sstevel@tonic-gate 	    msg->msg_name, msg->msg_namelen);
8730Sstevel@tonic-gate 	if (error)
8740Sstevel@tonic-gate 		goto err;
8750Sstevel@tonic-gate 
8760Sstevel@tonic-gate 	if (flagsp != NULL) {
8770Sstevel@tonic-gate 		/*
8780Sstevel@tonic-gate 		 * Clear internal flag.
8790Sstevel@tonic-gate 		 */
8800Sstevel@tonic-gate 		msg->msg_flags &= ~MSG_XPG4_2;
8810Sstevel@tonic-gate 
8820Sstevel@tonic-gate 		/*
8830Sstevel@tonic-gate 		 * Determine MSG_CTRUNC. sorecvmsg sets MSG_CTRUNC only
8840Sstevel@tonic-gate 		 * when controllen is zero and there is control data to
8850Sstevel@tonic-gate 		 * copy out.
8860Sstevel@tonic-gate 		 */
8870Sstevel@tonic-gate 		if (controllen != 0 &&
8880Sstevel@tonic-gate 		    (msg->msg_controllen > controllen || control == NULL)) {
8890Sstevel@tonic-gate 			dprint(1, ("recvit: CTRUNC %d %d %p\n",
8900Sstevel@tonic-gate 			    msg->msg_controllen, controllen, control));
8910Sstevel@tonic-gate 
8920Sstevel@tonic-gate 			msg->msg_flags |= MSG_CTRUNC;
8930Sstevel@tonic-gate 		}
8940Sstevel@tonic-gate 		if (copyout(&msg->msg_flags, flagsp,
8950Sstevel@tonic-gate 		    sizeof (msg->msg_flags))) {
8960Sstevel@tonic-gate 			error = EFAULT;
8970Sstevel@tonic-gate 			goto err;
8980Sstevel@tonic-gate 		}
8990Sstevel@tonic-gate 	}
9000Sstevel@tonic-gate 	/*
9010Sstevel@tonic-gate 	 * Note: This MUST be done last. There can be no "goto err" after this
9020Sstevel@tonic-gate 	 * point since it could make so_closefds run twice on some part
9030Sstevel@tonic-gate 	 * of the file descriptor array.
9040Sstevel@tonic-gate 	 */
9050Sstevel@tonic-gate 	if (controllen != 0) {
9060Sstevel@tonic-gate 		if (!(flags & MSG_XPG4_2)) {
9070Sstevel@tonic-gate 			/*
9080Sstevel@tonic-gate 			 * Good old msg_accrights can only return a multiple
9090Sstevel@tonic-gate 			 * of 4 bytes.
9100Sstevel@tonic-gate 			 */
9110Sstevel@tonic-gate 			controllen &= ~((int)sizeof (uint32_t) - 1);
9120Sstevel@tonic-gate 		}
9130Sstevel@tonic-gate 		error = copyout_arg(control, controllen, controllenp,
9140Sstevel@tonic-gate 		    msg->msg_control, msg->msg_controllen);
9150Sstevel@tonic-gate 		if (error)
9160Sstevel@tonic-gate 			goto err;
9170Sstevel@tonic-gate 
9180Sstevel@tonic-gate 		if (msg->msg_controllen > controllen || control == NULL) {
9190Sstevel@tonic-gate 			if (control == NULL)
9200Sstevel@tonic-gate 				controllen = 0;
9210Sstevel@tonic-gate 			so_closefds(msg->msg_control, msg->msg_controllen,
9220Sstevel@tonic-gate 			    !(flags & MSG_XPG4_2), controllen);
9230Sstevel@tonic-gate 		}
9240Sstevel@tonic-gate 	}
9250Sstevel@tonic-gate 	if (msg->msg_namelen != 0)
9260Sstevel@tonic-gate 		kmem_free(msg->msg_name, (size_t)msg->msg_namelen);
9270Sstevel@tonic-gate 	if (msg->msg_controllen != 0)
9280Sstevel@tonic-gate 		kmem_free(msg->msg_control, (size_t)msg->msg_controllen);
9290Sstevel@tonic-gate 	return (len - uiop->uio_resid);
9300Sstevel@tonic-gate 
9310Sstevel@tonic-gate err:
9320Sstevel@tonic-gate 	/*
9330Sstevel@tonic-gate 	 * If we fail and the control part contains file descriptors
9340Sstevel@tonic-gate 	 * we have to close the fd's.
9350Sstevel@tonic-gate 	 */
9360Sstevel@tonic-gate 	if (msg->msg_controllen != 0)
9370Sstevel@tonic-gate 		so_closefds(msg->msg_control, msg->msg_controllen,
9380Sstevel@tonic-gate 		    !(flags & MSG_XPG4_2), 0);
9390Sstevel@tonic-gate 	if (msg->msg_namelen != 0)
9400Sstevel@tonic-gate 		kmem_free(msg->msg_name, (size_t)msg->msg_namelen);
9410Sstevel@tonic-gate 	if (msg->msg_controllen != 0)
9420Sstevel@tonic-gate 		kmem_free(msg->msg_control, (size_t)msg->msg_controllen);
9430Sstevel@tonic-gate 	return (set_errno(error));
9440Sstevel@tonic-gate }
9450Sstevel@tonic-gate 
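/*
 * Illustrative sketch (not part of the original source): how the MSG_CTRUNC
 * and so_closefds() handling above looks to a user of recvmsg(), assuming an
 * XPG4.2 compilation environment (struct msghdr with msg_control) and the
 * usual CMSG_* macros.  The control buffer has room for exactly one
 * descriptor; any additional SCM_RIGHTS fds are closed by the kernel rather
 * than leaked.
 */
#if 0
static int
recv_one_fd(int sock)
{
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cmp;
	char cbuf[CMSG_SPACE(sizeof (int))];
	char byte;
	int fd = -1;

	(void) memset(&msg, 0, sizeof (msg));
	iov.iov_base = &byte;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof (cbuf);

	if (recvmsg(sock, &msg, 0) == -1)
		return (-1);

	if (msg.msg_flags & MSG_CTRUNC) {
		/* more control data arrived than fit in cbuf */
	}
	for (cmp = CMSG_FIRSTHDR(&msg); cmp != NULL;
	    cmp = CMSG_NXTHDR(&msg, cmp)) {
		if (cmp->cmsg_level == SOL_SOCKET &&
		    cmp->cmsg_type == SCM_RIGHTS)
			(void) memcpy(&fd, CMSG_DATA(cmp), sizeof (int));
	}
	return (fd);
}
#endif
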
9460Sstevel@tonic-gate /*
9470Sstevel@tonic-gate  * Native system call
9480Sstevel@tonic-gate  */
9490Sstevel@tonic-gate ssize_t
9500Sstevel@tonic-gate recv(int sock, void *buffer, size_t len, int flags)
9510Sstevel@tonic-gate {
9520Sstevel@tonic-gate 	struct nmsghdr lmsg;
9530Sstevel@tonic-gate 	struct uio auio;
9540Sstevel@tonic-gate 	struct iovec aiov[1];
9550Sstevel@tonic-gate 
9560Sstevel@tonic-gate 	dprint(1, ("recv(%d, %p, %ld, %d)\n",
9575227Stz204579 	    sock, buffer, len, flags));
9580Sstevel@tonic-gate 
9590Sstevel@tonic-gate 	if ((ssize_t)len < 0) {
9600Sstevel@tonic-gate 		return (set_errno(EINVAL));
9610Sstevel@tonic-gate 	}
9620Sstevel@tonic-gate 
9630Sstevel@tonic-gate 	aiov[0].iov_base = buffer;
9640Sstevel@tonic-gate 	aiov[0].iov_len = len;
9650Sstevel@tonic-gate 	auio.uio_loffset = 0;
9660Sstevel@tonic-gate 	auio.uio_iov = aiov;
9670Sstevel@tonic-gate 	auio.uio_iovcnt = 1;
9680Sstevel@tonic-gate 	auio.uio_resid = len;
9690Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
9700Sstevel@tonic-gate 	auio.uio_limit = 0;
9710Sstevel@tonic-gate 
9720Sstevel@tonic-gate 	lmsg.msg_namelen = 0;
9730Sstevel@tonic-gate 	lmsg.msg_controllen = 0;
9740Sstevel@tonic-gate 	lmsg.msg_flags = 0;
9750Sstevel@tonic-gate 	return (recvit(sock, &lmsg, &auio, flags, NULL, NULL, NULL));
9760Sstevel@tonic-gate }
9770Sstevel@tonic-gate 
9780Sstevel@tonic-gate ssize_t
9790Sstevel@tonic-gate recvfrom(int sock, void *buffer, size_t len, int flags,
9800Sstevel@tonic-gate 	struct sockaddr *name, socklen_t *namelenp)
9810Sstevel@tonic-gate {
9820Sstevel@tonic-gate 	struct nmsghdr lmsg;
9830Sstevel@tonic-gate 	struct uio auio;
9840Sstevel@tonic-gate 	struct iovec aiov[1];
9850Sstevel@tonic-gate 
9860Sstevel@tonic-gate 	dprint(1, ("recvfrom(%d, %p, %ld, %d, %p, %p)\n",
9875227Stz204579 	    sock, buffer, len, flags, name, namelenp));
9880Sstevel@tonic-gate 
9890Sstevel@tonic-gate 	if ((ssize_t)len < 0) {
9900Sstevel@tonic-gate 		return (set_errno(EINVAL));
9910Sstevel@tonic-gate 	}
9920Sstevel@tonic-gate 
9930Sstevel@tonic-gate 	aiov[0].iov_base = buffer;
9940Sstevel@tonic-gate 	aiov[0].iov_len = len;
9950Sstevel@tonic-gate 	auio.uio_loffset = 0;
9960Sstevel@tonic-gate 	auio.uio_iov = aiov;
9970Sstevel@tonic-gate 	auio.uio_iovcnt = 1;
9980Sstevel@tonic-gate 	auio.uio_resid = len;
9990Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
10000Sstevel@tonic-gate 	auio.uio_limit = 0;
10010Sstevel@tonic-gate 
10020Sstevel@tonic-gate 	lmsg.msg_name = (char *)name;
10030Sstevel@tonic-gate 	if (namelenp != NULL) {
10040Sstevel@tonic-gate 		if (copyin(namelenp, &lmsg.msg_namelen,
10050Sstevel@tonic-gate 		    sizeof (lmsg.msg_namelen)))
10060Sstevel@tonic-gate 			return (set_errno(EFAULT));
10070Sstevel@tonic-gate 	} else {
10080Sstevel@tonic-gate 		lmsg.msg_namelen = 0;
10090Sstevel@tonic-gate 	}
10100Sstevel@tonic-gate 	lmsg.msg_controllen = 0;
10110Sstevel@tonic-gate 	lmsg.msg_flags = 0;
10120Sstevel@tonic-gate 
10130Sstevel@tonic-gate 	return (recvit(sock, &lmsg, &auio, flags, namelenp, NULL, NULL));
10140Sstevel@tonic-gate }
10150Sstevel@tonic-gate 
10160Sstevel@tonic-gate /*
10170Sstevel@tonic-gate  * Uses the MSG_XPG4_2 flag to determine if the caller is using
10180Sstevel@tonic-gate  * struct omsghdr or struct nmsghdr.
10190Sstevel@tonic-gate  */
10200Sstevel@tonic-gate ssize_t
10210Sstevel@tonic-gate recvmsg(int sock, struct nmsghdr *msg, int flags)
10220Sstevel@tonic-gate {
10230Sstevel@tonic-gate 	STRUCT_DECL(nmsghdr, u_lmsg);
10240Sstevel@tonic-gate 	STRUCT_HANDLE(nmsghdr, umsgptr);
10250Sstevel@tonic-gate 	struct nmsghdr lmsg;
10260Sstevel@tonic-gate 	struct uio auio;
10270Sstevel@tonic-gate 	struct iovec aiov[MSG_MAXIOVLEN];
10280Sstevel@tonic-gate 	int iovcnt;
10290Sstevel@tonic-gate 	ssize_t len;
10300Sstevel@tonic-gate 	int i;
10310Sstevel@tonic-gate 	int *flagsp;
10320Sstevel@tonic-gate 	model_t	model;
10330Sstevel@tonic-gate 
10340Sstevel@tonic-gate 	dprint(1, ("recvmsg(%d, %p, %d)\n",
10355227Stz204579 	    sock, msg, flags));
10360Sstevel@tonic-gate 
10370Sstevel@tonic-gate 	model = get_udatamodel();
10380Sstevel@tonic-gate 	STRUCT_INIT(u_lmsg, model);
10390Sstevel@tonic-gate 	STRUCT_SET_HANDLE(umsgptr, model, msg);
10400Sstevel@tonic-gate 
10410Sstevel@tonic-gate 	if (flags & MSG_XPG4_2) {
10420Sstevel@tonic-gate 		if (copyin(msg, STRUCT_BUF(u_lmsg), STRUCT_SIZE(u_lmsg)))
10430Sstevel@tonic-gate 			return (set_errno(EFAULT));
10440Sstevel@tonic-gate 		flagsp = STRUCT_FADDR(umsgptr, msg_flags);
10450Sstevel@tonic-gate 	} else {
10460Sstevel@tonic-gate 		/*
10470Sstevel@tonic-gate 		 * Assumes that nmsghdr and omsghdr are identically shaped
10480Sstevel@tonic-gate 		 * except for the added msg_flags field.
10490Sstevel@tonic-gate 		 */
10500Sstevel@tonic-gate 		if (copyin(msg, STRUCT_BUF(u_lmsg),
10510Sstevel@tonic-gate 		    SIZEOF_STRUCT(omsghdr, model)))
10520Sstevel@tonic-gate 			return (set_errno(EFAULT));
10530Sstevel@tonic-gate 		STRUCT_FSET(u_lmsg, msg_flags, 0);
10540Sstevel@tonic-gate 		flagsp = NULL;
10550Sstevel@tonic-gate 	}
10560Sstevel@tonic-gate 
10570Sstevel@tonic-gate 	/*
10580Sstevel@tonic-gate 	 * Code below us will kmem_alloc memory and hang it
10590Sstevel@tonic-gate 	 * off msg_control and msg_name fields. This forces
10600Sstevel@tonic-gate 	 * us to copy the structure to its native form.
10610Sstevel@tonic-gate 	 */
10620Sstevel@tonic-gate 	lmsg.msg_name = STRUCT_FGETP(u_lmsg, msg_name);
10630Sstevel@tonic-gate 	lmsg.msg_namelen = STRUCT_FGET(u_lmsg, msg_namelen);
10640Sstevel@tonic-gate 	lmsg.msg_iov = STRUCT_FGETP(u_lmsg, msg_iov);
10650Sstevel@tonic-gate 	lmsg.msg_iovlen = STRUCT_FGET(u_lmsg, msg_iovlen);
10660Sstevel@tonic-gate 	lmsg.msg_control = STRUCT_FGETP(u_lmsg, msg_control);
10670Sstevel@tonic-gate 	lmsg.msg_controllen = STRUCT_FGET(u_lmsg, msg_controllen);
10680Sstevel@tonic-gate 	lmsg.msg_flags = STRUCT_FGET(u_lmsg, msg_flags);
10690Sstevel@tonic-gate 
10700Sstevel@tonic-gate 	iovcnt = lmsg.msg_iovlen;
10710Sstevel@tonic-gate 
10720Sstevel@tonic-gate 	if (iovcnt <= 0 || iovcnt > MSG_MAXIOVLEN) {
10730Sstevel@tonic-gate 		return (set_errno(EMSGSIZE));
10740Sstevel@tonic-gate 	}
10750Sstevel@tonic-gate 
10760Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL
10770Sstevel@tonic-gate 	/*
10780Sstevel@tonic-gate 	 * 32-bit callers need to have their iovec expanded, while ensuring
10790Sstevel@tonic-gate 	 * that they can't move more than 2Gbytes of data in a single call.
10800Sstevel@tonic-gate 	 */
10810Sstevel@tonic-gate 	if (model == DATAMODEL_ILP32) {
10820Sstevel@tonic-gate 		struct iovec32 aiov32[MSG_MAXIOVLEN];
10830Sstevel@tonic-gate 		ssize32_t count32;
10840Sstevel@tonic-gate 
10850Sstevel@tonic-gate 		if (copyin((struct iovec32 *)lmsg.msg_iov, aiov32,
10860Sstevel@tonic-gate 		    iovcnt * sizeof (struct iovec32)))
10870Sstevel@tonic-gate 			return (set_errno(EFAULT));
10880Sstevel@tonic-gate 
10890Sstevel@tonic-gate 		count32 = 0;
10900Sstevel@tonic-gate 		for (i = 0; i < iovcnt; i++) {
10910Sstevel@tonic-gate 			ssize32_t iovlen32;
10920Sstevel@tonic-gate 
10930Sstevel@tonic-gate 			iovlen32 = aiov32[i].iov_len;
10940Sstevel@tonic-gate 			count32 += iovlen32;
10950Sstevel@tonic-gate 			if (iovlen32 < 0 || count32 < 0)
10960Sstevel@tonic-gate 				return (set_errno(EINVAL));
10970Sstevel@tonic-gate 			aiov[i].iov_len = iovlen32;
10980Sstevel@tonic-gate 			aiov[i].iov_base =
10990Sstevel@tonic-gate 			    (caddr_t)(uintptr_t)aiov32[i].iov_base;
11000Sstevel@tonic-gate 		}
11010Sstevel@tonic-gate 	} else
11020Sstevel@tonic-gate #endif /* _SYSCALL32_IMPL */
11030Sstevel@tonic-gate 	if (copyin(lmsg.msg_iov, aiov, iovcnt * sizeof (struct iovec))) {
11040Sstevel@tonic-gate 		return (set_errno(EFAULT));
11050Sstevel@tonic-gate 	}
11060Sstevel@tonic-gate 	len = 0;
11070Sstevel@tonic-gate 	for (i = 0; i < iovcnt; i++) {
11080Sstevel@tonic-gate 		ssize_t iovlen = aiov[i].iov_len;
11090Sstevel@tonic-gate 		len += iovlen;
11100Sstevel@tonic-gate 		if (iovlen < 0 || len < 0) {
11110Sstevel@tonic-gate 			return (set_errno(EINVAL));
11120Sstevel@tonic-gate 		}
11130Sstevel@tonic-gate 	}
11140Sstevel@tonic-gate 	auio.uio_loffset = 0;
11150Sstevel@tonic-gate 	auio.uio_iov = aiov;
11160Sstevel@tonic-gate 	auio.uio_iovcnt = iovcnt;
11170Sstevel@tonic-gate 	auio.uio_resid = len;
11180Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
11190Sstevel@tonic-gate 	auio.uio_limit = 0;
11200Sstevel@tonic-gate 
11210Sstevel@tonic-gate 	if (lmsg.msg_control != NULL &&
11220Sstevel@tonic-gate 	    (do_useracc == 0 ||
11230Sstevel@tonic-gate 	    useracc(lmsg.msg_control, lmsg.msg_controllen,
11245227Stz204579 	    B_WRITE) != 0)) {
11250Sstevel@tonic-gate 		return (set_errno(EFAULT));
11260Sstevel@tonic-gate 	}
11270Sstevel@tonic-gate 
11280Sstevel@tonic-gate 	return (recvit(sock, &lmsg, &auio, flags,
11295227Stz204579 	    STRUCT_FADDR(umsgptr, msg_namelen),
11305227Stz204579 	    STRUCT_FADDR(umsgptr, msg_controllen), flagsp));
11310Sstevel@tonic-gate }
11320Sstevel@tonic-gate 
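/*
 * Illustrative sketch (not part of the original source): the msg_iovlen
 * limit enforced above.  MSG_MAXIOVLEN is 16, so a recvmsg() with more
 * iovec entries than that fails with EMSGSIZE before any data moves.
 */
#if 0
static void
show_iov_limit(int sock)
{
	struct iovec iov[MSG_MAXIOVLEN + 1];
	char bufs[MSG_MAXIOVLEN + 1][8];
	struct msghdr msg;
	int i;

	for (i = 0; i < MSG_MAXIOVLEN + 1; i++) {
		iov[i].iov_base = bufs[i];
		iov[i].iov_len = sizeof (bufs[i]);
	}
	(void) memset(&msg, 0, sizeof (msg));
	msg.msg_iov = iov;
	msg.msg_iovlen = MSG_MAXIOVLEN + 1;	/* one more than allowed */

	if (recvmsg(sock, &msg, 0) == -1 && errno == EMSGSIZE) {
		/* rejected by the iovcnt check above before any data moved */
	}
}
#endif
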
11330Sstevel@tonic-gate /*
11340Sstevel@tonic-gate  * Common send function.
11350Sstevel@tonic-gate  */
11360Sstevel@tonic-gate static ssize_t
11370Sstevel@tonic-gate sendit(int sock, struct nmsghdr *msg, struct uio *uiop, int flags)
11380Sstevel@tonic-gate {
11390Sstevel@tonic-gate 	struct sonode *so;
11400Sstevel@tonic-gate 	file_t *fp;
11410Sstevel@tonic-gate 	void *name;
11420Sstevel@tonic-gate 	socklen_t namelen;
11430Sstevel@tonic-gate 	void *control;
11440Sstevel@tonic-gate 	socklen_t controllen;
11450Sstevel@tonic-gate 	ssize_t len;
11460Sstevel@tonic-gate 	int error;
11470Sstevel@tonic-gate 
11480Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, &fp)) == NULL)
11490Sstevel@tonic-gate 		return (set_errno(error));
11500Sstevel@tonic-gate 
11510Sstevel@tonic-gate 	uiop->uio_fmode = fp->f_flag;
11520Sstevel@tonic-gate 
11530Sstevel@tonic-gate 	if (so->so_family == AF_UNIX)
11540Sstevel@tonic-gate 		uiop->uio_extflg = UIO_COPY_CACHED;
11550Sstevel@tonic-gate 	else
11560Sstevel@tonic-gate 		uiop->uio_extflg = UIO_COPY_DEFAULT;
11570Sstevel@tonic-gate 
11580Sstevel@tonic-gate 	/* Allocate and copyin name and control */
11590Sstevel@tonic-gate 	name = msg->msg_name;
11600Sstevel@tonic-gate 	namelen = msg->msg_namelen;
11610Sstevel@tonic-gate 	if (name != NULL && namelen != 0) {
11620Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
11630Sstevel@tonic-gate 		name = copyin_name(so,
11645227Stz204579 		    (struct sockaddr *)name,
11655227Stz204579 		    &namelen, &error);
11660Sstevel@tonic-gate 		if (name == NULL)
11670Sstevel@tonic-gate 			goto done3;
11680Sstevel@tonic-gate 		/* copyin_name null terminates addresses for AF_UNIX */
11690Sstevel@tonic-gate 		msg->msg_namelen = namelen;
11700Sstevel@tonic-gate 		msg->msg_name = name;
11710Sstevel@tonic-gate 	} else {
11720Sstevel@tonic-gate 		msg->msg_name = name = NULL;
11730Sstevel@tonic-gate 		msg->msg_namelen = namelen = 0;
11740Sstevel@tonic-gate 	}
11750Sstevel@tonic-gate 
11760Sstevel@tonic-gate 	control = msg->msg_control;
11770Sstevel@tonic-gate 	controllen = msg->msg_controllen;
11780Sstevel@tonic-gate 	if ((control != NULL) && (controllen != 0)) {
11790Sstevel@tonic-gate 		/*
11800Sstevel@tonic-gate 		 * Verify that the length is not excessive to prevent
11810Sstevel@tonic-gate 		 * an application from consuming all of kernel memory.
11820Sstevel@tonic-gate 		 */
11830Sstevel@tonic-gate 		if (controllen > SO_MAXARGSIZE) {
11840Sstevel@tonic-gate 			error = EINVAL;
11850Sstevel@tonic-gate 			goto done2;
11860Sstevel@tonic-gate 		}
11870Sstevel@tonic-gate 		control = kmem_alloc(controllen, KM_SLEEP);
11880Sstevel@tonic-gate 
11890Sstevel@tonic-gate 		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
11900Sstevel@tonic-gate 		if (copyin(msg->msg_control, control, controllen)) {
11910Sstevel@tonic-gate 			error = EFAULT;
11920Sstevel@tonic-gate 			goto done1;
11930Sstevel@tonic-gate 		}
11940Sstevel@tonic-gate 		msg->msg_control = control;
11950Sstevel@tonic-gate 	} else {
11960Sstevel@tonic-gate 		msg->msg_control = control = NULL;
11970Sstevel@tonic-gate 		msg->msg_controllen = controllen = 0;
11980Sstevel@tonic-gate 	}
11990Sstevel@tonic-gate 
12000Sstevel@tonic-gate 	len = uiop->uio_resid;
12010Sstevel@tonic-gate 	msg->msg_flags = flags;
12020Sstevel@tonic-gate 
12030Sstevel@tonic-gate 	error = SOP_SENDMSG(so, msg, uiop);
12040Sstevel@tonic-gate done1:
12050Sstevel@tonic-gate 	if (control != NULL)
12060Sstevel@tonic-gate 		kmem_free(control, controllen);
12070Sstevel@tonic-gate done2:
12080Sstevel@tonic-gate 	if (name != NULL)
12090Sstevel@tonic-gate 		kmem_free(name, namelen);
12100Sstevel@tonic-gate done3:
12110Sstevel@tonic-gate 	if (error != 0) {
12120Sstevel@tonic-gate 		releasef(sock);
12130Sstevel@tonic-gate 		return (set_errno(error));
12140Sstevel@tonic-gate 	}
12150Sstevel@tonic-gate 	lwp_stat_update(LWP_STAT_MSGSND, 1);
12160Sstevel@tonic-gate 	so_update_attrs(so, SOMOD);
12170Sstevel@tonic-gate 	releasef(sock);
12180Sstevel@tonic-gate 	return (len - uiop->uio_resid);
12190Sstevel@tonic-gate }
12200Sstevel@tonic-gate 
12210Sstevel@tonic-gate /*
12220Sstevel@tonic-gate  * Native system call
12230Sstevel@tonic-gate  */
12240Sstevel@tonic-gate ssize_t
12250Sstevel@tonic-gate send(int sock, void *buffer, size_t len, int flags)
12260Sstevel@tonic-gate {
12270Sstevel@tonic-gate 	struct nmsghdr lmsg;
12280Sstevel@tonic-gate 	struct uio auio;
12290Sstevel@tonic-gate 	struct iovec aiov[1];
12300Sstevel@tonic-gate 
12310Sstevel@tonic-gate 	dprint(1, ("send(%d, %p, %ld, %d)\n",
12325227Stz204579 	    sock, buffer, len, flags));
12330Sstevel@tonic-gate 
12340Sstevel@tonic-gate 	if ((ssize_t)len < 0) {
12350Sstevel@tonic-gate 		return (set_errno(EINVAL));
12360Sstevel@tonic-gate 	}
12370Sstevel@tonic-gate 
12380Sstevel@tonic-gate 	aiov[0].iov_base = buffer;
12390Sstevel@tonic-gate 	aiov[0].iov_len = len;
12400Sstevel@tonic-gate 	auio.uio_loffset = 0;
12410Sstevel@tonic-gate 	auio.uio_iov = aiov;
12420Sstevel@tonic-gate 	auio.uio_iovcnt = 1;
12430Sstevel@tonic-gate 	auio.uio_resid = len;
12440Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
12450Sstevel@tonic-gate 	auio.uio_limit = 0;
12460Sstevel@tonic-gate 
12470Sstevel@tonic-gate 	lmsg.msg_name = NULL;
12480Sstevel@tonic-gate 	lmsg.msg_control = NULL;
12490Sstevel@tonic-gate 	if (!(flags & MSG_XPG4_2)) {
12500Sstevel@tonic-gate 		/*
12510Sstevel@tonic-gate 		 * In order to be compatible with the libsocket/sockmod
12520Sstevel@tonic-gate 		 * implementation we set EOR for all send* calls.
12530Sstevel@tonic-gate 		 */
12540Sstevel@tonic-gate 		flags |= MSG_EOR;
12550Sstevel@tonic-gate 	}
12560Sstevel@tonic-gate 	return (sendit(sock, &lmsg, &auio, flags));
12570Sstevel@tonic-gate }
12580Sstevel@tonic-gate 
12590Sstevel@tonic-gate /*
12600Sstevel@tonic-gate  * Uses the MSG_XPG4_2 flag to determine if the caller is using
12610Sstevel@tonic-gate  * struct omsghdr or struct nmsghdr.
12620Sstevel@tonic-gate  */
12630Sstevel@tonic-gate ssize_t
12640Sstevel@tonic-gate sendmsg(int sock, struct nmsghdr *msg, int flags)
12650Sstevel@tonic-gate {
12660Sstevel@tonic-gate 	struct nmsghdr lmsg;
12670Sstevel@tonic-gate 	STRUCT_DECL(nmsghdr, u_lmsg);
12680Sstevel@tonic-gate 	struct uio auio;
12690Sstevel@tonic-gate 	struct iovec aiov[MSG_MAXIOVLEN];
12700Sstevel@tonic-gate 	int iovcnt;
12710Sstevel@tonic-gate 	ssize_t len;
12720Sstevel@tonic-gate 	int i;
12730Sstevel@tonic-gate 	model_t	model;
12740Sstevel@tonic-gate 
12750Sstevel@tonic-gate 	dprint(1, ("sendmsg(%d, %p, %d)\n", sock, msg, flags));
12760Sstevel@tonic-gate 
12770Sstevel@tonic-gate 	model = get_udatamodel();
12780Sstevel@tonic-gate 	STRUCT_INIT(u_lmsg, model);
12790Sstevel@tonic-gate 
12800Sstevel@tonic-gate 	if (flags & MSG_XPG4_2) {
12810Sstevel@tonic-gate 		if (copyin(msg, (char *)STRUCT_BUF(u_lmsg),
12820Sstevel@tonic-gate 		    STRUCT_SIZE(u_lmsg)))
12830Sstevel@tonic-gate 			return (set_errno(EFAULT));
12840Sstevel@tonic-gate 	} else {
12850Sstevel@tonic-gate 		/*
12860Sstevel@tonic-gate 		 * Assumes that nmsghdr and omsghdr are identically shaped
12870Sstevel@tonic-gate 		 * except for the added msg_flags field.
12880Sstevel@tonic-gate 		 */
12890Sstevel@tonic-gate 		if (copyin(msg, (char *)STRUCT_BUF(u_lmsg),
12900Sstevel@tonic-gate 		    SIZEOF_STRUCT(omsghdr, model)))
12910Sstevel@tonic-gate 			return (set_errno(EFAULT));
12920Sstevel@tonic-gate 		/*
12930Sstevel@tonic-gate 		 * In order to be compatible with the libsocket/sockmod
12940Sstevel@tonic-gate 		 * implementation we set EOR for all send* calls.
12950Sstevel@tonic-gate 		 */
12960Sstevel@tonic-gate 		flags |= MSG_EOR;
12970Sstevel@tonic-gate 	}
12980Sstevel@tonic-gate 
12990Sstevel@tonic-gate 	/*
13000Sstevel@tonic-gate 	 * The code below will kmem_alloc memory and hang it
13010Sstevel@tonic-gate 	 * off the msg_control and msg_name fields. This forces
13020Sstevel@tonic-gate 	 * us to copy the structure to its native form.
13030Sstevel@tonic-gate 	 */
13040Sstevel@tonic-gate 	lmsg.msg_name = STRUCT_FGETP(u_lmsg, msg_name);
13050Sstevel@tonic-gate 	lmsg.msg_namelen = STRUCT_FGET(u_lmsg, msg_namelen);
13060Sstevel@tonic-gate 	lmsg.msg_iov = STRUCT_FGETP(u_lmsg, msg_iov);
13070Sstevel@tonic-gate 	lmsg.msg_iovlen = STRUCT_FGET(u_lmsg, msg_iovlen);
13080Sstevel@tonic-gate 	lmsg.msg_control = STRUCT_FGETP(u_lmsg, msg_control);
13090Sstevel@tonic-gate 	lmsg.msg_controllen = STRUCT_FGET(u_lmsg, msg_controllen);
13100Sstevel@tonic-gate 	lmsg.msg_flags = STRUCT_FGET(u_lmsg, msg_flags);
13110Sstevel@tonic-gate 
13120Sstevel@tonic-gate 	iovcnt = lmsg.msg_iovlen;
13130Sstevel@tonic-gate 
13140Sstevel@tonic-gate 	if (iovcnt <= 0 || iovcnt > MSG_MAXIOVLEN) {
13150Sstevel@tonic-gate 		/*
13160Sstevel@tonic-gate 		 * Unless this is XPG 4.2 we allow iovcnt == 0 to
13170Sstevel@tonic-gate 		 * be compatible with SunOS 4.X and 4.4BSD.
13180Sstevel@tonic-gate 		 */
13190Sstevel@tonic-gate 		if (iovcnt != 0 || (flags & MSG_XPG4_2))
13200Sstevel@tonic-gate 			return (set_errno(EMSGSIZE));
13210Sstevel@tonic-gate 	}
13220Sstevel@tonic-gate 
13230Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL
13240Sstevel@tonic-gate 	/*
13250Sstevel@tonic-gate 	 * 32-bit callers need to have their iovec expanded, while ensuring
13260Sstevel@tonic-gate 	 * that they can't move more than 2Gbytes of data in a single call.
13270Sstevel@tonic-gate 	 */
13280Sstevel@tonic-gate 	if (model == DATAMODEL_ILP32) {
13290Sstevel@tonic-gate 		struct iovec32 aiov32[MSG_MAXIOVLEN];
13300Sstevel@tonic-gate 		ssize32_t count32;
13310Sstevel@tonic-gate 
13320Sstevel@tonic-gate 		if (iovcnt != 0 &&
13330Sstevel@tonic-gate 		    copyin((struct iovec32 *)lmsg.msg_iov, aiov32,
13340Sstevel@tonic-gate 		    iovcnt * sizeof (struct iovec32)))
13350Sstevel@tonic-gate 			return (set_errno(EFAULT));
13360Sstevel@tonic-gate 
13370Sstevel@tonic-gate 		count32 = 0;
13380Sstevel@tonic-gate 		for (i = 0; i < iovcnt; i++) {
13390Sstevel@tonic-gate 			ssize32_t iovlen32;
13400Sstevel@tonic-gate 
13410Sstevel@tonic-gate 			iovlen32 = aiov32[i].iov_len;
13420Sstevel@tonic-gate 			count32 += iovlen32;
13430Sstevel@tonic-gate 			if (iovlen32 < 0 || count32 < 0)
13440Sstevel@tonic-gate 				return (set_errno(EINVAL));
13450Sstevel@tonic-gate 			aiov[i].iov_len = iovlen32;
13460Sstevel@tonic-gate 			aiov[i].iov_base =
13470Sstevel@tonic-gate 			    (caddr_t)(uintptr_t)aiov32[i].iov_base;
13480Sstevel@tonic-gate 		}
13490Sstevel@tonic-gate 	} else
13500Sstevel@tonic-gate #endif /* _SYSCALL32_IMPL */
13510Sstevel@tonic-gate 	if (iovcnt != 0 &&
13520Sstevel@tonic-gate 	    copyin(lmsg.msg_iov, aiov,
13530Sstevel@tonic-gate 	    (unsigned)iovcnt * sizeof (struct iovec))) {
13540Sstevel@tonic-gate 		return (set_errno(EFAULT));
13550Sstevel@tonic-gate 	}
13560Sstevel@tonic-gate 	len = 0;
13570Sstevel@tonic-gate 	for (i = 0; i < iovcnt; i++) {
13580Sstevel@tonic-gate 		ssize_t iovlen = aiov[i].iov_len;
13590Sstevel@tonic-gate 		len += iovlen;
13600Sstevel@tonic-gate 		if (iovlen < 0 || len < 0) {
13610Sstevel@tonic-gate 			return (set_errno(EINVAL));
13620Sstevel@tonic-gate 		}
13630Sstevel@tonic-gate 	}
13640Sstevel@tonic-gate 	auio.uio_loffset = 0;
13650Sstevel@tonic-gate 	auio.uio_iov = aiov;
13660Sstevel@tonic-gate 	auio.uio_iovcnt = iovcnt;
13670Sstevel@tonic-gate 	auio.uio_resid = len;
13680Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
13690Sstevel@tonic-gate 	auio.uio_limit = 0;
13700Sstevel@tonic-gate 
13710Sstevel@tonic-gate 	return (sendit(sock, &lmsg, &auio, flags));
13720Sstevel@tonic-gate }
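
/*
 * For reference, a minimal userland sketch (illustrative only, not part of
 * this file's build) of the caller-side structure that the copyin logic
 * above unpacks.  The descriptor "sd", the buffers and the peer address are
 * hypothetical; the msghdr/iovec layout is the standard one from
 * <sys/socket.h>.
 */
#if 0
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static ssize_t
example_sendmsg(int sd, struct sockaddr *to, socklen_t tolen)
{
	struct iovec iov[2];
	struct msghdr mh;
	char hdr[] = "header ";
	char body[] = "body";

	/* Two iovecs; the kernel sums their lengths into uio_resid. */
	iov[0].iov_base = hdr;
	iov[0].iov_len = strlen(hdr);
	iov[1].iov_base = body;
	iov[1].iov_len = strlen(body);

	(void) memset(&mh, 0, sizeof (mh));
	mh.msg_name = (void *)to;	/* copied in via copyin_name() */
	mh.msg_namelen = tolen;
	mh.msg_iov = iov;		/* copied into aiov[] above */
	mh.msg_iovlen = 2;
	mh.msg_control = NULL;		/* no ancillary data */
	mh.msg_controllen = 0;

	return (sendmsg(sd, &mh, 0));
}
#endif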
13730Sstevel@tonic-gate 
13740Sstevel@tonic-gate ssize_t
13750Sstevel@tonic-gate sendto(int sock, void *buffer, size_t len, int flags,
13760Sstevel@tonic-gate     struct sockaddr *name, socklen_t namelen)
13770Sstevel@tonic-gate {
13780Sstevel@tonic-gate 	struct nmsghdr lmsg;
13790Sstevel@tonic-gate 	struct uio auio;
13800Sstevel@tonic-gate 	struct iovec aiov[1];
13810Sstevel@tonic-gate 
13820Sstevel@tonic-gate 	dprint(1, ("sendto(%d, %p, %ld, %d, %p, %d)\n",
13835227Stz204579 	    sock, buffer, len, flags, name, namelen));
13840Sstevel@tonic-gate 
13850Sstevel@tonic-gate 	if ((ssize_t)len < 0) {
13860Sstevel@tonic-gate 		return (set_errno(EINVAL));
13870Sstevel@tonic-gate 	}
13880Sstevel@tonic-gate 
13890Sstevel@tonic-gate 	aiov[0].iov_base = buffer;
13900Sstevel@tonic-gate 	aiov[0].iov_len = len;
13910Sstevel@tonic-gate 	auio.uio_loffset = 0;
13920Sstevel@tonic-gate 	auio.uio_iov = aiov;
13930Sstevel@tonic-gate 	auio.uio_iovcnt = 1;
13940Sstevel@tonic-gate 	auio.uio_resid = len;
13950Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
13960Sstevel@tonic-gate 	auio.uio_limit = 0;
13970Sstevel@tonic-gate 
13980Sstevel@tonic-gate 	lmsg.msg_name = (char *)name;
13990Sstevel@tonic-gate 	lmsg.msg_namelen = namelen;
14000Sstevel@tonic-gate 	lmsg.msg_control = NULL;
14010Sstevel@tonic-gate 	if (!(flags & MSG_XPG4_2)) {
14020Sstevel@tonic-gate 		/*
14030Sstevel@tonic-gate 		 * In order to be compatible with the libsocket/sockmod
14040Sstevel@tonic-gate 		 * implementation we set EOR for all send* calls.
14050Sstevel@tonic-gate 		 */
14060Sstevel@tonic-gate 		flags |= MSG_EOR;
14070Sstevel@tonic-gate 	}
14080Sstevel@tonic-gate 	return (sendit(sock, &lmsg, &auio, flags));
14090Sstevel@tonic-gate }
14100Sstevel@tonic-gate 
14110Sstevel@tonic-gate /*ARGSUSED3*/
14120Sstevel@tonic-gate int
14130Sstevel@tonic-gate getpeername(int sock, struct sockaddr *name, socklen_t *namelenp, int version)
14140Sstevel@tonic-gate {
14150Sstevel@tonic-gate 	struct sonode *so;
14160Sstevel@tonic-gate 	int error;
14170Sstevel@tonic-gate 	socklen_t namelen;
14180Sstevel@tonic-gate 	union {
14190Sstevel@tonic-gate 		struct sockaddr_in sin;
14200Sstevel@tonic-gate 		struct sockaddr_in6 sin6;
14210Sstevel@tonic-gate 	} sin;			/* Temporary buffer, common case */
14220Sstevel@tonic-gate 	void *addr;		/* Temporary buffer, uncommon case */
14230Sstevel@tonic-gate 	socklen_t addrlen, size;
14240Sstevel@tonic-gate 
14250Sstevel@tonic-gate 	dprint(1, ("getpeername(%d, %p, %p)\n",
14265227Stz204579 	    sock, name, namelenp));
14270Sstevel@tonic-gate 
14280Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
14290Sstevel@tonic-gate 		goto bad;
14300Sstevel@tonic-gate 
14310Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
14320Sstevel@tonic-gate 	if (copyin(namelenp, &namelen, sizeof (namelen)) ||
14330Sstevel@tonic-gate 	    (name == NULL && namelen != 0)) {
14340Sstevel@tonic-gate 		error = EFAULT;
14350Sstevel@tonic-gate 		goto rel_out;
14360Sstevel@tonic-gate 	}
14370Sstevel@tonic-gate 	/*
14380Sstevel@tonic-gate 	 * If a connect or accept has been done, unless we're an Xnet socket,
14390Sstevel@tonic-gate 	 * the remote address has already been updated in so_faddr_sa.
14400Sstevel@tonic-gate 	 */
14410Sstevel@tonic-gate 	if ((so->so_version != SOV_SOCKSTREAM && so->so_version != SOV_SOCKBSD) ||
14420Sstevel@tonic-gate 	    !(so->so_state & SS_FADDR_VALID)) {
14430Sstevel@tonic-gate 		if ((error = SOP_GETPEERNAME(so)) != 0)
14440Sstevel@tonic-gate 			goto rel_out;
14450Sstevel@tonic-gate 	}
14460Sstevel@tonic-gate 
14470Sstevel@tonic-gate 	if (so->so_faddr_maxlen <= sizeof (sin)) {
14480Sstevel@tonic-gate 		size = 0;
14490Sstevel@tonic-gate 		addr = &sin;
14500Sstevel@tonic-gate 	} else {
14510Sstevel@tonic-gate 		/*
14520Sstevel@tonic-gate 		 * Allocate temporary to avoid holding so_lock across
14530Sstevel@tonic-gate 		 * copyout
14540Sstevel@tonic-gate 		 */
14550Sstevel@tonic-gate 		size = so->so_faddr_maxlen;
14560Sstevel@tonic-gate 		addr = kmem_alloc(size, KM_SLEEP);
14570Sstevel@tonic-gate 	}
14580Sstevel@tonic-gate 	/* Prevent so_faddr_sa/len from changing while accessed */
14590Sstevel@tonic-gate 	mutex_enter(&so->so_lock);
14600Sstevel@tonic-gate 	if (!(so->so_state & SS_ISCONNECTED)) {
14610Sstevel@tonic-gate 		mutex_exit(&so->so_lock);
14620Sstevel@tonic-gate 		error = ENOTCONN;
14630Sstevel@tonic-gate 		goto free_out;
14640Sstevel@tonic-gate 	}
14650Sstevel@tonic-gate 	addrlen = so->so_faddr_len;
14660Sstevel@tonic-gate 	bcopy(so->so_faddr_sa, addr, addrlen);
14670Sstevel@tonic-gate 	mutex_exit(&so->so_lock);
14680Sstevel@tonic-gate 
14690Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
14700Sstevel@tonic-gate 	error = copyout_name(name, namelen, namelenp, addr,
14710Sstevel@tonic-gate 	    (so->so_state & SS_FADDR_NOXLATE) ? 0 : addrlen);
14720Sstevel@tonic-gate free_out:
14730Sstevel@tonic-gate 	if (size != 0)
14740Sstevel@tonic-gate 		kmem_free(addr, size);
14750Sstevel@tonic-gate rel_out:
14760Sstevel@tonic-gate 	releasef(sock);
14770Sstevel@tonic-gate bad:	return (error != 0 ? set_errno(error) : 0);
14780Sstevel@tonic-gate }
14790Sstevel@tonic-gate 
14800Sstevel@tonic-gate /*ARGSUSED3*/
14810Sstevel@tonic-gate int
14820Sstevel@tonic-gate getsockname(int sock, struct sockaddr *name,
14830Sstevel@tonic-gate 		socklen_t *namelenp, int version)
14840Sstevel@tonic-gate {
14850Sstevel@tonic-gate 	struct sonode *so;
14860Sstevel@tonic-gate 	int error;
14870Sstevel@tonic-gate 	socklen_t namelen;
14880Sstevel@tonic-gate 	union {
14890Sstevel@tonic-gate 		struct sockaddr_in sin;
14900Sstevel@tonic-gate 		struct sockaddr_in6 sin6;
14910Sstevel@tonic-gate 	} sin;			/* Temporary buffer, common case */
14920Sstevel@tonic-gate 	void *addr;		/* Temporary buffer, uncommon case */
14930Sstevel@tonic-gate 	socklen_t addrlen, size;
14940Sstevel@tonic-gate 
14950Sstevel@tonic-gate 	dprint(1, ("getsockname(%d, %p, %p)\n",
14965227Stz204579 	    sock, name, namelenp));
14970Sstevel@tonic-gate 
14980Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
14990Sstevel@tonic-gate 		goto bad;
15000Sstevel@tonic-gate 
15010Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
15020Sstevel@tonic-gate 	if (copyin(namelenp, &namelen, sizeof (namelen)) ||
15030Sstevel@tonic-gate 	    (name == NULL && namelen != 0)) {
15040Sstevel@tonic-gate 		error = EFAULT;
15050Sstevel@tonic-gate 		goto rel_out;
15060Sstevel@tonic-gate 	}
15070Sstevel@tonic-gate 
15080Sstevel@tonic-gate 	/*
15090Sstevel@tonic-gate 	 * If a bind or accept has been done, unless we're an Xnet endpoint,
15100Sstevel@tonic-gate 	 * the local address has already been updated in so_laddr_sa.
15110Sstevel@tonic-gate 	 */
15120Sstevel@tonic-gate 	if ((so->so_version != SOV_SOCKSTREAM &&
15130Sstevel@tonic-gate 	    so->so_version != SOV_SOCKBSD) ||
15140Sstevel@tonic-gate 	    !(so->so_state & SS_LADDR_VALID)) {
15150Sstevel@tonic-gate 		if ((error = SOP_GETSOCKNAME(so)) != 0)
15160Sstevel@tonic-gate 			goto rel_out;
15170Sstevel@tonic-gate 	}
15180Sstevel@tonic-gate 
15190Sstevel@tonic-gate 	if (so->so_laddr_maxlen <= sizeof (sin)) {
15200Sstevel@tonic-gate 		size = 0;
15210Sstevel@tonic-gate 		addr = &sin;
15220Sstevel@tonic-gate 	} else {
15230Sstevel@tonic-gate 		/*
15240Sstevel@tonic-gate 		 * Allocate temporary to avoid holding so_lock across
15250Sstevel@tonic-gate 		 * copyout
15260Sstevel@tonic-gate 		 */
15270Sstevel@tonic-gate 		size = so->so_laddr_maxlen;
15280Sstevel@tonic-gate 		addr = kmem_alloc(size, KM_SLEEP);
15290Sstevel@tonic-gate 	}
15300Sstevel@tonic-gate 	/* Prevent so_laddr_sa/len from changing while accessed */
15310Sstevel@tonic-gate 	mutex_enter(&so->so_lock);
15320Sstevel@tonic-gate 	addrlen = so->so_laddr_len;
15330Sstevel@tonic-gate 	bcopy(so->so_laddr_sa, addr, addrlen);
15340Sstevel@tonic-gate 	mutex_exit(&so->so_lock);
15350Sstevel@tonic-gate 
15360Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
15370Sstevel@tonic-gate 	error = copyout_name(name, namelen, namelenp,
15380Sstevel@tonic-gate 	    addr, addrlen);
15390Sstevel@tonic-gate 	if (size != 0)
15400Sstevel@tonic-gate 		kmem_free(addr, size);
15410Sstevel@tonic-gate rel_out:
15420Sstevel@tonic-gate 	releasef(sock);
15430Sstevel@tonic-gate bad:	return (error != 0 ? set_errno(error) : 0);
15440Sstevel@tonic-gate }
15450Sstevel@tonic-gate 
15460Sstevel@tonic-gate /*ARGSUSED5*/
15470Sstevel@tonic-gate int
15480Sstevel@tonic-gate getsockopt(int sock,
15490Sstevel@tonic-gate 	int level,
15500Sstevel@tonic-gate 	int option_name,
15510Sstevel@tonic-gate 	void *option_value,
15520Sstevel@tonic-gate 	socklen_t *option_lenp,
15530Sstevel@tonic-gate 	int version)
15540Sstevel@tonic-gate {
15550Sstevel@tonic-gate 	struct sonode *so;
15560Sstevel@tonic-gate 	socklen_t optlen, optlen_res;
15570Sstevel@tonic-gate 	void *optval;
15580Sstevel@tonic-gate 	int error;
15590Sstevel@tonic-gate 
15600Sstevel@tonic-gate 	dprint(1, ("getsockopt(%d, %d, %d, %p, %p)\n",
15615227Stz204579 	    sock, level, option_name, option_value, option_lenp));
15620Sstevel@tonic-gate 
15630Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
15640Sstevel@tonic-gate 		return (set_errno(error));
15650Sstevel@tonic-gate 
15660Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
15670Sstevel@tonic-gate 	if (copyin(option_lenp, &optlen, sizeof (optlen))) {
15680Sstevel@tonic-gate 		releasef(sock);
15690Sstevel@tonic-gate 		return (set_errno(EFAULT));
15700Sstevel@tonic-gate 	}
15710Sstevel@tonic-gate 	/*
15720Sstevel@tonic-gate 	 * Verify that the length is not excessive to prevent
15730Sstevel@tonic-gate 	 * an application from consuming all of kernel memory.
15740Sstevel@tonic-gate 	 */
15750Sstevel@tonic-gate 	if (optlen > SO_MAXARGSIZE) {
15760Sstevel@tonic-gate 		error = EINVAL;
15770Sstevel@tonic-gate 		releasef(sock);
15780Sstevel@tonic-gate 		return (set_errno(error));
15790Sstevel@tonic-gate 	}
15800Sstevel@tonic-gate 	optval = kmem_alloc(optlen, KM_SLEEP);
15810Sstevel@tonic-gate 	optlen_res = optlen;
15820Sstevel@tonic-gate 	error = SOP_GETSOCKOPT(so, level, option_name, optval,
15830Sstevel@tonic-gate 	    &optlen_res, (version != SOV_XPG4_2) ? 0 : _SOGETSOCKOPT_XPG4_2);
15840Sstevel@tonic-gate 	releasef(sock);
15850Sstevel@tonic-gate 	if (error) {
15860Sstevel@tonic-gate 		kmem_free(optval, optlen);
15870Sstevel@tonic-gate 		return (set_errno(error));
15880Sstevel@tonic-gate 	}
15890Sstevel@tonic-gate 	error = copyout_arg(option_value, optlen, option_lenp,
15900Sstevel@tonic-gate 	    optval, optlen_res);
15910Sstevel@tonic-gate 	kmem_free(optval, optlen);
15920Sstevel@tonic-gate 	if (error)
15930Sstevel@tonic-gate 		return (set_errno(error));
15940Sstevel@tonic-gate 	return (0);
15950Sstevel@tonic-gate }
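
/*
 * Illustrative userland sketch (not compiled here) of the value-result
 * option_lenp convention that the code above implements: the caller's
 * length is copied in, possibly adjusted by the protocol, and the resulting
 * length (optlen_res) is copied back out.  The descriptor "sd" is
 * hypothetical; SO_RCVBUF is just one example option.
 */
#if 0
#include <sys/socket.h>

static int
example_getsockopt(int sd)
{
	int rcvbuf;
	socklen_t optlen = sizeof (rcvbuf);	/* copied in above */

	if (getsockopt(sd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, &optlen) == -1)
		return (-1);
	/* optlen now holds the length actually returned by the protocol. */
	return (rcvbuf);
}
#endif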
15960Sstevel@tonic-gate 
15970Sstevel@tonic-gate /*ARGSUSED5*/
15980Sstevel@tonic-gate int
15990Sstevel@tonic-gate setsockopt(int sock,
16000Sstevel@tonic-gate 	int level,
16010Sstevel@tonic-gate 	int option_name,
16020Sstevel@tonic-gate 	void *option_value,
16030Sstevel@tonic-gate 	socklen_t option_len,
16040Sstevel@tonic-gate 	int version)
16050Sstevel@tonic-gate {
16060Sstevel@tonic-gate 	struct sonode *so;
16070Sstevel@tonic-gate 	intptr_t buffer[2];
16080Sstevel@tonic-gate 	void *optval = NULL;
16090Sstevel@tonic-gate 	int error;
16100Sstevel@tonic-gate 
16110Sstevel@tonic-gate 	dprint(1, ("setsockopt(%d, %d, %d, %p, %d)\n",
16125227Stz204579 	    sock, level, option_name, option_value, option_len));
16130Sstevel@tonic-gate 
16140Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
16150Sstevel@tonic-gate 		return (set_errno(error));
16160Sstevel@tonic-gate 
16170Sstevel@tonic-gate 	if (option_value != NULL) {
16180Sstevel@tonic-gate 		if (option_len != 0) {
16190Sstevel@tonic-gate 			/*
16200Sstevel@tonic-gate 			 * Verify that the length is not excessive to prevent
16210Sstevel@tonic-gate 			 * an application from consuming all of kernel memory.
16220Sstevel@tonic-gate 			 */
16230Sstevel@tonic-gate 			if (option_len > SO_MAXARGSIZE) {
16240Sstevel@tonic-gate 				error = EINVAL;
16250Sstevel@tonic-gate 				goto done2;
16260Sstevel@tonic-gate 			}
16270Sstevel@tonic-gate 			optval = option_len <= sizeof (buffer) ?
16280Sstevel@tonic-gate 			    &buffer : kmem_alloc((size_t)option_len, KM_SLEEP);
16290Sstevel@tonic-gate 			ASSERT(MUTEX_NOT_HELD(&so->so_lock));
16300Sstevel@tonic-gate 			if (copyin(option_value, optval, (size_t)option_len)) {
16310Sstevel@tonic-gate 				error = EFAULT;
16320Sstevel@tonic-gate 				goto done1;
16330Sstevel@tonic-gate 			}
16340Sstevel@tonic-gate 		}
16350Sstevel@tonic-gate 	} else
16360Sstevel@tonic-gate 		option_len = 0;
16370Sstevel@tonic-gate 
16380Sstevel@tonic-gate 	error = SOP_SETSOCKOPT(so, level, option_name, optval,
16390Sstevel@tonic-gate 	    (t_uscalar_t)option_len);
16400Sstevel@tonic-gate done1:
16410Sstevel@tonic-gate 	if (optval != buffer)
16420Sstevel@tonic-gate 		kmem_free(optval, (size_t)option_len);
16430Sstevel@tonic-gate done2:
16440Sstevel@tonic-gate 	releasef(sock);
16450Sstevel@tonic-gate 	if (error)
16460Sstevel@tonic-gate 		return (set_errno(error));
16470Sstevel@tonic-gate 	return (0);
16480Sstevel@tonic-gate }
16490Sstevel@tonic-gate 
16500Sstevel@tonic-gate /*
16510Sstevel@tonic-gate  * Add config info when devpath is non-NULL; delete info when devpath is NULL.
16520Sstevel@tonic-gate  * devpath is a user address.
16530Sstevel@tonic-gate  */
16540Sstevel@tonic-gate int
16550Sstevel@tonic-gate sockconfig(int domain, int type, int protocol, char *devpath)
16560Sstevel@tonic-gate {
16570Sstevel@tonic-gate 	char *kdevpath;		/* Copied in devpath string */
16580Sstevel@tonic-gate 	size_t kdevpathlen;
16590Sstevel@tonic-gate 	int error = 0;
16600Sstevel@tonic-gate 
16610Sstevel@tonic-gate 	dprint(1, ("sockconfig(%d, %d, %d, %p)\n",
16625227Stz204579 	    domain, type, protocol, devpath));
16630Sstevel@tonic-gate 
16640Sstevel@tonic-gate 	if (secpolicy_net_config(CRED(), B_FALSE) != 0)
16650Sstevel@tonic-gate 		return (set_errno(EPERM));
16660Sstevel@tonic-gate 
16670Sstevel@tonic-gate 	if (devpath == NULL) {
16680Sstevel@tonic-gate 		/* Deleting an entry */
16690Sstevel@tonic-gate 		kdevpath = NULL;
16700Sstevel@tonic-gate 		kdevpathlen = 0;
16710Sstevel@tonic-gate 	} else {
16720Sstevel@tonic-gate 		/*
16730Sstevel@tonic-gate 		 * Adding an entry.
16740Sstevel@tonic-gate 		 * Copyin the devpath.
16750Sstevel@tonic-gate 		 * This also makes it possible to check for pathnames that are too long.
16760Sstevel@tonic-gate 		 * Compress the space needed for the devpath before passing it
16770Sstevel@tonic-gate 		 * to soconfig - soconfig will store the string until
16780Sstevel@tonic-gate 		 * the configuration is removed.
16790Sstevel@tonic-gate 		 */
16800Sstevel@tonic-gate 		char *buf;
16810Sstevel@tonic-gate 
16820Sstevel@tonic-gate 		buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
16830Sstevel@tonic-gate 		if ((error = copyinstr(devpath, buf, MAXPATHLEN,
16840Sstevel@tonic-gate 		    &kdevpathlen)) != 0) {
16850Sstevel@tonic-gate 			kmem_free(buf, MAXPATHLEN);
16860Sstevel@tonic-gate 			goto done;
16870Sstevel@tonic-gate 		}
16880Sstevel@tonic-gate 
16890Sstevel@tonic-gate 		kdevpath = kmem_alloc(kdevpathlen, KM_SLEEP);
16900Sstevel@tonic-gate 		bcopy(buf, kdevpath, kdevpathlen);
16910Sstevel@tonic-gate 		kdevpath[kdevpathlen - 1] = '\0';
16920Sstevel@tonic-gate 
16930Sstevel@tonic-gate 		kmem_free(buf, MAXPATHLEN);
16940Sstevel@tonic-gate 	}
16950Sstevel@tonic-gate 	error = soconfig(domain, type, protocol, kdevpath, (int)kdevpathlen);
16960Sstevel@tonic-gate done:
16970Sstevel@tonic-gate 	if (error) {
16980Sstevel@tonic-gate 		eprintline(error);
16990Sstevel@tonic-gate 		return (set_errno(error));
17000Sstevel@tonic-gate 	}
17010Sstevel@tonic-gate 	return (0);
17020Sstevel@tonic-gate }
17030Sstevel@tonic-gate 
17040Sstevel@tonic-gate 
17050Sstevel@tonic-gate /*
17060Sstevel@tonic-gate  * Sendfile is implemented through two schemes, direct I/O or by
17070Sstevel@tonic-gate  * caching in the filesystem page cache. We cache the input file by
17080Sstevel@tonic-gate  * default and use direct I/O only if sendfile_max_size is set
17090Sstevel@tonic-gate  * appropriately as explained below. Note that this logic is consistent
17100Sstevel@tonic-gate  * with other filesystems where caching is turned on by default
17110Sstevel@tonic-gate  * unless explicitly turned off by using the DIRECTIO ioctl.
17120Sstevel@tonic-gate  *
17130Sstevel@tonic-gate  * We choose a slightly different scheme here. One can turn off
17140Sstevel@tonic-gate  * caching by setting sendfile_max_size to 0. One can also enable
17150Sstevel@tonic-gate  * caching of files <= sendfile_max_size by setting sendfile_max_size
17160Sstevel@tonic-gate  * to an appropriate value. By default sendfile_max_size is set to the
17170Sstevel@tonic-gate  * maximum value so that all files are cached. In the future, we may provide
17180Sstevel@tonic-gate  * better interfaces for caching the file.
17190Sstevel@tonic-gate  *
17200Sstevel@tonic-gate  * Sendfile through Direct I/O (Zero copy)
17210Sstevel@tonic-gate  * --------------------------------------
17220Sstevel@tonic-gate  *
17230Sstevel@tonic-gate  * As disks are normally slower than the network, we can't have a
17240Sstevel@tonic-gate  * single thread that reads the disk and writes to the network. We
17250Sstevel@tonic-gate  * need to have parallelism. This is done by having the sendfile
17260Sstevel@tonic-gate  * thread create another thread that reads from the filesystem
17270Sstevel@tonic-gate  * and queues the data for network processing. In this scheme, the data
17280Sstevel@tonic-gate  * is never copied anywhere, i.e. it is zero-copy, unlike the other
17290Sstevel@tonic-gate  * scheme.
17300Sstevel@tonic-gate  *
17310Sstevel@tonic-gate  * We have a sendfile queue (snfq) where each sendfile
17320Sstevel@tonic-gate  * request (snf_req_t) is queued for processing by a thread. The number
17330Sstevel@tonic-gate  * of threads is adjusted dynamically, and threads exit if they idle
17340Sstevel@tonic-gate  * beyond a specified amount of time. When each request (snf_req_t) is
17350Sstevel@tonic-gate  * processed by a thread, it produces a number of mblk_t structures to
17360Sstevel@tonic-gate  * be consumed by the sendfile thread. snf_deque and snf_enque are
17370Sstevel@tonic-gate  * used for consuming and producing mblks. The size of each filesystem
17385331Samw  * read is determined by the sendfile_read_size tunable. A single
17390Sstevel@tonic-gate  * mblk holds sendfile_read_size worth of data (except the last
17400Sstevel@tonic-gate  * read of the file) which is sent down as a whole to the network.
17410Sstevel@tonic-gate  * sendfile_read_size is set to 1 MB as this seems to be the optimal
17420Sstevel@tonic-gate  * value for the UFS filesystem backed by a striped storage array.
17430Sstevel@tonic-gate  *
17440Sstevel@tonic-gate  * Synchronisation between read (producer) and write (consumer) threads.
17450Sstevel@tonic-gate  * --------------------------------------------------------------------
17460Sstevel@tonic-gate  *
17470Sstevel@tonic-gate  * sr_lock protects sr_ib_head and sr_ib_tail. The lock is held while
17480Sstevel@tonic-gate  * adding and deleting items in this list. An error can happen at any
17490Sstevel@tonic-gate  * time during a read or write. There could be unprocessed mblks in the
17500Sstevel@tonic-gate  * sr_ib_XXX list when a read or write error occurs. Whenever an error
17510Sstevel@tonic-gate  * is encountered, we need two things to happen:
17520Sstevel@tonic-gate  *
17530Sstevel@tonic-gate  * a) One of the threads needs to clean up the mblks.
17540Sstevel@tonic-gate  * b) When one thread encounters an error, the other should stop.
17550Sstevel@tonic-gate  *
17565331Samw  * For (a), we don't want to penalize the reader thread as it could do
17570Sstevel@tonic-gate  * some useful work processing other requests. For (b), the error can
17580Sstevel@tonic-gate  * be detected by examining sr_read_error or sr_write_error.
17590Sstevel@tonic-gate  * sr_lock protects sr_read_error and sr_write_error. If both the reader
17600Sstevel@tonic-gate  * and the writer encounter errors, we need to report the write error back to
17610Sstevel@tonic-gate  * the application as that's what would have happened if the operations
17620Sstevel@tonic-gate  * were done sequentially. With this in mind, the following should work:
17630Sstevel@tonic-gate  *
17640Sstevel@tonic-gate  * 	- Check for errors before read or write.
17650Sstevel@tonic-gate  *	- If the reader encounters an error, set the error in sr_read_error.
17660Sstevel@tonic-gate  *	  Check sr_write_error; if it is set, send cv_signal as the writer is
17670Sstevel@tonic-gate  *	  waiting for the reader to complete. If it is not set, the writer
17680Sstevel@tonic-gate  *	  is either running, sinking data to the network, or blocked
17690Sstevel@tonic-gate  *	  because of flow control. To handle the latter case, we
17700Sstevel@tonic-gate  *	  always send a signal. In any case, it will examine sr_read_error
17710Sstevel@tonic-gate  *	  and return. sr_read_error is marked with SR_READ_DONE to tell
17720Sstevel@tonic-gate  *	  the writer that the reader is done in all cases.
17730Sstevel@tonic-gate  *	- If the writer encounters an error, set the error in sr_write_error.
17740Sstevel@tonic-gate  *	  The reader thread is either blocked because of flow control or
17750Sstevel@tonic-gate  *	  running, reading data from the disk. For the former, we need to
17760Sstevel@tonic-gate  *	  wake up the thread. Again, to keep it simple, we always wake up
17770Sstevel@tonic-gate  *	  the reader thread. Then wait for the read thread to complete
17780Sstevel@tonic-gate  *	  if it is not done yet. Clean up and return.
17790Sstevel@tonic-gate  *
17800Sstevel@tonic-gate  * High and low water marks for the read thread.
17810Sstevel@tonic-gate  * --------------------------------------------
17820Sstevel@tonic-gate  *
17830Sstevel@tonic-gate  * If sendfile() is used to send data over a slow network, we need to
17840Sstevel@tonic-gate  * make sure that the read thread does not produce data at a faster
17850Sstevel@tonic-gate  * rate than the network. This can happen if the disk is faster than
17860Sstevel@tonic-gate  * the network. In such a case, we don't want to build a very large queue.
17870Sstevel@tonic-gate  * But we would still like to get all of the network throughput possible.
17880Sstevel@tonic-gate  * This implies that the network should never block waiting for data.
17890Sstevel@tonic-gate  * As there are a lot of disk throughput/network throughput combinations
17900Sstevel@tonic-gate  * possible, it is difficult to come up with an accurate number.
17910Sstevel@tonic-gate  * A typical 10K RPM disk has a maximum seek latency of 17ms and a
17920Sstevel@tonic-gate  * rotational latency of 3ms for reading a disk block. Thus, the total
17930Sstevel@tonic-gate  * latency to initiate a new read, transfer data from the disk and queue it
17940Sstevel@tonic-gate  * for transmission would take at most about 25ms. Today's maximum network
17950Sstevel@tonic-gate  * transfer rate is 100MB/sec. If the thread is blocked because of flow
17960Sstevel@tonic-gate  * control, it would take 25ms to get new data ready for transmission.
17970Sstevel@tonic-gate  * We have to make sure that the network is not idling while we are
17980Sstevel@tonic-gate  * initiating new transfers. So, at 100MB/sec, to keep the network busy we
17995331Samw  * would need 2.5MB of data. Rounding up, we set the low water mark to 3MB.
18000Sstevel@tonic-gate  * We need to pick a high water mark so that the woken-up thread does
18010Sstevel@tonic-gate  * considerable work before blocking again, to prevent thrashing. Currently,
18020Sstevel@tonic-gate  * we pick this to be 10 times the low water mark.
18030Sstevel@tonic-gate  *
18040Sstevel@tonic-gate  * Sendfile with segmap caching (One copy from page cache to mblks).
18050Sstevel@tonic-gate  * ----------------------------------------------------------------
18060Sstevel@tonic-gate  *
18070Sstevel@tonic-gate  * We use the segmap cache for caching the file if the size of the file
18080Sstevel@tonic-gate  * is <= sendfile_max_size. In this case we don't use threads, as the VM
18090Sstevel@tonic-gate  * system is fast enough to keep up with the network. If the underlying
18100Sstevel@tonic-gate  * transport allows, we call segmap_getmapflt() to map MAXBSIZE (8K) worth
18110Sstevel@tonic-gate  * of data into segmap space, and use the virtual address from segmap
18120Sstevel@tonic-gate  * directly through desballoc() to avoid a copy. Once the transport is done
18130Sstevel@tonic-gate  * with the data, the mapping will be released through segmap_release()
18140Sstevel@tonic-gate  * called by the callback routine.
18150Sstevel@tonic-gate  *
18160Sstevel@tonic-gate  * If zero-copy is not allowed by the transport, we simply call VOP_READ()
18170Sstevel@tonic-gate  * to copy the data from the filesystem into our temporary network buffer.
18180Sstevel@tonic-gate  *
18190Sstevel@tonic-gate  * To disable caching, set sendfile_max_size to 0.
18200Sstevel@tonic-gate  */
18210Sstevel@tonic-gate 
18220Sstevel@tonic-gate uint_t sendfile_read_size = 1024 * 1024;
18230Sstevel@tonic-gate #define	SENDFILE_REQ_LOWAT	(3 * 1024 * 1024)
18240Sstevel@tonic-gate uint_t sendfile_req_lowat = SENDFILE_REQ_LOWAT;
18250Sstevel@tonic-gate uint_t sendfile_req_hiwat = 10 * SENDFILE_REQ_LOWAT;
18260Sstevel@tonic-gate struct sendfile_stats sf_stats;
18270Sstevel@tonic-gate struct sendfile_queue *snfq;
18280Sstevel@tonic-gate clock_t snfq_timeout;
18290Sstevel@tonic-gate off64_t sendfile_max_size;
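
/*
 * Minimal sketch (not compiled; the helper name and argument are
 * hypothetical) of how the sendfile_max_size tunable above selects between
 * the two schemes described in the block comment: setting it to 0 disables
 * caching entirely, and files no larger than the tunable go through the
 * page-cache/segmap path instead of direct I/O.  The actual decision is
 * made in the sendfile code paths later in this file.
 */
#if 0
static boolean_t
snf_use_cache_sketch(u_offset_t fsize)
{
	if (sendfile_max_size == 0)
		return (B_FALSE);	/* caching disabled: direct I/O */
	return (fsize <= sendfile_max_size ? B_TRUE : B_FALSE);
}
#endif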
18300Sstevel@tonic-gate 
18310Sstevel@tonic-gate static void snf_enque(snf_req_t *, mblk_t *);
18320Sstevel@tonic-gate static mblk_t *snf_deque(snf_req_t *);
18330Sstevel@tonic-gate 
18340Sstevel@tonic-gate void
18350Sstevel@tonic-gate sendfile_init(void)
18360Sstevel@tonic-gate {
18370Sstevel@tonic-gate 	snfq = kmem_zalloc(sizeof (struct sendfile_queue), KM_SLEEP);
18380Sstevel@tonic-gate 
18390Sstevel@tonic-gate 	mutex_init(&snfq->snfq_lock, NULL, MUTEX_DEFAULT, NULL);
18400Sstevel@tonic-gate 	cv_init(&snfq->snfq_cv, NULL, CV_DEFAULT, NULL);
18410Sstevel@tonic-gate 	snfq->snfq_max_threads = max_ncpus;
18420Sstevel@tonic-gate 	snfq_timeout = SNFQ_TIMEOUT;
18430Sstevel@tonic-gate 	/* Cache all files by default. */
18440Sstevel@tonic-gate 	sendfile_max_size = MAXOFFSET_T;
18450Sstevel@tonic-gate }
18460Sstevel@tonic-gate 
18470Sstevel@tonic-gate /*
18480Sstevel@tonic-gate  * Queues an mblk_t for network processing.
18490Sstevel@tonic-gate  */
18500Sstevel@tonic-gate static void
18510Sstevel@tonic-gate snf_enque(snf_req_t *sr, mblk_t *mp)
18520Sstevel@tonic-gate {
18530Sstevel@tonic-gate 	mp->b_next = NULL;
18540Sstevel@tonic-gate 	mutex_enter(&sr->sr_lock);
18550Sstevel@tonic-gate 	if (sr->sr_mp_head == NULL) {
18560Sstevel@tonic-gate 		sr->sr_mp_head = sr->sr_mp_tail = mp;
18570Sstevel@tonic-gate 		cv_signal(&sr->sr_cv);
18580Sstevel@tonic-gate 	} else {
18590Sstevel@tonic-gate 		sr->sr_mp_tail->b_next = mp;
18600Sstevel@tonic-gate 		sr->sr_mp_tail = mp;
18610Sstevel@tonic-gate 	}
18620Sstevel@tonic-gate 	sr->sr_qlen += MBLKL(mp);
18630Sstevel@tonic-gate 	while ((sr->sr_qlen > sr->sr_hiwat) &&
18640Sstevel@tonic-gate 	    (sr->sr_write_error == 0)) {
18650Sstevel@tonic-gate 		sf_stats.ss_full_waits++;
18660Sstevel@tonic-gate 		cv_wait(&sr->sr_cv, &sr->sr_lock);
18670Sstevel@tonic-gate 	}
18680Sstevel@tonic-gate 	mutex_exit(&sr->sr_lock);
18690Sstevel@tonic-gate }
18700Sstevel@tonic-gate 
18710Sstevel@tonic-gate /*
18720Sstevel@tonic-gate  * De-queues a mblk_t for network processing.
18730Sstevel@tonic-gate  * Dequeues an mblk_t for network processing.
18740Sstevel@tonic-gate static mblk_t *
18750Sstevel@tonic-gate snf_deque(snf_req_t *sr)
18760Sstevel@tonic-gate {
18770Sstevel@tonic-gate 	mblk_t *mp;
18780Sstevel@tonic-gate 
18790Sstevel@tonic-gate 	mutex_enter(&sr->sr_lock);
18800Sstevel@tonic-gate 	/*
18810Sstevel@tonic-gate 	 * If we have encountered an error on read, or the read is
18820Sstevel@tonic-gate 	 * complete and there are no more mblks, return NULL.
18830Sstevel@tonic-gate 	 * We also need to check for a NULL sr_mp_head, as
18840Sstevel@tonic-gate 	 * the reads could have completed and there is
18850Sstevel@tonic-gate 	 * nothing more to come.
18860Sstevel@tonic-gate 	 */
18870Sstevel@tonic-gate 	if (((sr->sr_read_error & ~SR_READ_DONE) != 0) ||
18880Sstevel@tonic-gate 	    ((sr->sr_read_error & SR_READ_DONE) &&
18890Sstevel@tonic-gate 	    sr->sr_mp_head == NULL)) {
18900Sstevel@tonic-gate 		mutex_exit(&sr->sr_lock);
18910Sstevel@tonic-gate 		return (NULL);
18920Sstevel@tonic-gate 	}
18930Sstevel@tonic-gate 	/*
18940Sstevel@tonic-gate 	 * To start with, neither SR_READ_DONE is marked nor
18950Sstevel@tonic-gate 	 * is the error set. When we wake up from cv_wait,
18960Sstevel@tonic-gate 	 * the following are the possibilities:
18970Sstevel@tonic-gate 	 *
18980Sstevel@tonic-gate 	 *	a) sr_read_error is zero and mblks are queued.
18990Sstevel@tonic-gate 	 *	b) sr_read_error is set to SR_READ_DONE
19000Sstevel@tonic-gate 	 *	   and mblks are queued.
19010Sstevel@tonic-gate 	 *	c) sr_read_error is set to SR_READ_DONE
19020Sstevel@tonic-gate 	 *	   and no mblks.
19030Sstevel@tonic-gate 	 *	d) sr_read_error is set to some error other
19040Sstevel@tonic-gate 	 *	   than SR_READ_DONE.
19050Sstevel@tonic-gate 	 */
19060Sstevel@tonic-gate 
19070Sstevel@tonic-gate 	while ((sr->sr_read_error == 0) && (sr->sr_mp_head == NULL)) {
19080Sstevel@tonic-gate 		sf_stats.ss_empty_waits++;
19090Sstevel@tonic-gate 		cv_wait(&sr->sr_cv, &sr->sr_lock);
19100Sstevel@tonic-gate 	}
19110Sstevel@tonic-gate 	/* Handle (a) and (b) first  - the normal case. */
19120Sstevel@tonic-gate 	if (((sr->sr_read_error & ~SR_READ_DONE) == 0) &&
19130Sstevel@tonic-gate 	    (sr->sr_mp_head != NULL)) {
19140Sstevel@tonic-gate 		mp = sr->sr_mp_head;
19150Sstevel@tonic-gate 		sr->sr_mp_head = mp->b_next;
19160Sstevel@tonic-gate 		sr->sr_qlen -= MBLKL(mp);
19170Sstevel@tonic-gate 		if (sr->sr_qlen < sr->sr_lowat)
19180Sstevel@tonic-gate 			cv_signal(&sr->sr_cv);
19190Sstevel@tonic-gate 		mutex_exit(&sr->sr_lock);
19200Sstevel@tonic-gate 		mp->b_next = NULL;
19210Sstevel@tonic-gate 		return (mp);
19220Sstevel@tonic-gate 	}
19230Sstevel@tonic-gate 	/* Handle (c) and (d). */
19240Sstevel@tonic-gate 	mutex_exit(&sr->sr_lock);
19250Sstevel@tonic-gate 	return (NULL);
19260Sstevel@tonic-gate }
19270Sstevel@tonic-gate 
19280Sstevel@tonic-gate /*
19290Sstevel@tonic-gate  * Reads data from the filesystem and queues it for network processing.
19300Sstevel@tonic-gate  */
19310Sstevel@tonic-gate void
19320Sstevel@tonic-gate snf_async_read(snf_req_t *sr)
19330Sstevel@tonic-gate {
19340Sstevel@tonic-gate 	size_t iosize;
19350Sstevel@tonic-gate 	u_offset_t fileoff;
19360Sstevel@tonic-gate 	u_offset_t size;
19370Sstevel@tonic-gate 	int ret_size;
19380Sstevel@tonic-gate 	int error;
19390Sstevel@tonic-gate 	file_t *fp;
19400Sstevel@tonic-gate 	mblk_t *mp;
1941*6240Skrishna 	struct vnode *vp;
1942*6240Skrishna 	int extra = 0;
19430Sstevel@tonic-gate 
19440Sstevel@tonic-gate 	fp = sr->sr_fp;
19450Sstevel@tonic-gate 	size = sr->sr_file_size;
19460Sstevel@tonic-gate 	fileoff = sr->sr_file_off;
19470Sstevel@tonic-gate 
19480Sstevel@tonic-gate 	/*
19490Sstevel@tonic-gate 	 * Ignore the error for filesystems that don't support DIRECTIO.
19500Sstevel@tonic-gate 	 */
19510Sstevel@tonic-gate 	(void) VOP_IOCTL(fp->f_vnode, _FIODIRECTIO, DIRECTIO_ON, 0,
19525331Samw 	    kcred, NULL, NULL);
19530Sstevel@tonic-gate 
1954*6240Skrishna 	vp = fp->f_vnode;
1955*6240Skrishna 	if (vp->v_type == VSOCK) {
1956*6240Skrishna 		stdata_t *stp;
1957*6240Skrishna 
1958*6240Skrishna 		/*
1959*6240Skrishna 		 * Get the extra space to insert a header and a trailer.
1960*6240Skrishna 		 */
1961*6240Skrishna 		stp = vp->v_stream;
1962*6240Skrishna 		extra = (int)(stp->sd_wroff + stp->sd_tail);
1963*6240Skrishna 	}
1964*6240Skrishna 
19650Sstevel@tonic-gate 	while ((size != 0) && (sr->sr_write_error == 0)) {
19660Sstevel@tonic-gate 
19670Sstevel@tonic-gate 		iosize = (int)MIN(sr->sr_maxpsz, size);
19680Sstevel@tonic-gate 
1969*6240Skrishna 		if ((mp = allocb(iosize + extra, BPRI_MED)) == NULL) {
19700Sstevel@tonic-gate 			error = EAGAIN;
19710Sstevel@tonic-gate 			break;
19720Sstevel@tonic-gate 		}
19730Sstevel@tonic-gate 		ret_size = soreadfile(fp, mp->b_rptr, fileoff, &error, iosize);
19740Sstevel@tonic-gate 
19750Sstevel@tonic-gate 		/* Error or Reached EOF ? */
19760Sstevel@tonic-gate 		if ((error != 0) || (ret_size == 0)) {
19770Sstevel@tonic-gate 			freeb(mp);
19780Sstevel@tonic-gate 			break;
19790Sstevel@tonic-gate 		}
19800Sstevel@tonic-gate 		mp->b_wptr = mp->b_rptr + ret_size;
19810Sstevel@tonic-gate 
19820Sstevel@tonic-gate 		snf_enque(sr, mp);
19830Sstevel@tonic-gate 		size -= ret_size;
19840Sstevel@tonic-gate 		fileoff += ret_size;
19850Sstevel@tonic-gate 	}
19860Sstevel@tonic-gate 	(void) VOP_IOCTL(fp->f_vnode, _FIODIRECTIO, DIRECTIO_OFF, 0,
19875331Samw 	    kcred, NULL, NULL);
19880Sstevel@tonic-gate 	mutex_enter(&sr->sr_lock);
19890Sstevel@tonic-gate 	sr->sr_read_error = error;
19900Sstevel@tonic-gate 	sr->sr_read_error |= SR_READ_DONE;
19910Sstevel@tonic-gate 	cv_signal(&sr->sr_cv);
19920Sstevel@tonic-gate 	mutex_exit(&sr->sr_lock);
19930Sstevel@tonic-gate }
19940Sstevel@tonic-gate 
19950Sstevel@tonic-gate void
19960Sstevel@tonic-gate snf_async_thread(void)
19970Sstevel@tonic-gate {
19980Sstevel@tonic-gate 	snf_req_t *sr;
19990Sstevel@tonic-gate 	callb_cpr_t cprinfo;
20000Sstevel@tonic-gate 	clock_t time_left = 1;
20010Sstevel@tonic-gate 	clock_t now;
20020Sstevel@tonic-gate 
20030Sstevel@tonic-gate 	CALLB_CPR_INIT(&cprinfo, &snfq->snfq_lock, callb_generic_cpr, "snfq");
20040Sstevel@tonic-gate 
20050Sstevel@tonic-gate 	mutex_enter(&snfq->snfq_lock);
20060Sstevel@tonic-gate 	for (;;) {
20070Sstevel@tonic-gate 		/*
20080Sstevel@tonic-gate 		 * If we didn't find an entry, block until woken up
20090Sstevel@tonic-gate 		 * and then look through the queue again.
20100Sstevel@tonic-gate 		 */
20110Sstevel@tonic-gate 		while ((sr = snfq->snfq_req_head) == NULL) {
20120Sstevel@tonic-gate 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
20130Sstevel@tonic-gate 			if (time_left <= 0) {
20140Sstevel@tonic-gate 				snfq->snfq_svc_threads--;
20150Sstevel@tonic-gate 				CALLB_CPR_EXIT(&cprinfo);
20160Sstevel@tonic-gate 				thread_exit();
20170Sstevel@tonic-gate 				/* NOTREACHED */
20180Sstevel@tonic-gate 			}
20190Sstevel@tonic-gate 			snfq->snfq_idle_cnt++;
20200Sstevel@tonic-gate 
20210Sstevel@tonic-gate 			time_to_wait(&now, snfq_timeout);
20220Sstevel@tonic-gate 			time_left = cv_timedwait(&snfq->snfq_cv,
20230Sstevel@tonic-gate 			    &snfq->snfq_lock, now);
20240Sstevel@tonic-gate 			snfq->snfq_idle_cnt--;
20250Sstevel@tonic-gate 
20260Sstevel@tonic-gate 			CALLB_CPR_SAFE_END(&cprinfo, &snfq->snfq_lock);
20270Sstevel@tonic-gate 		}
20280Sstevel@tonic-gate 		snfq->snfq_req_head = sr->sr_next;
20290Sstevel@tonic-gate 		snfq->snfq_req_cnt--;
20300Sstevel@tonic-gate 		mutex_exit(&snfq->snfq_lock);
20310Sstevel@tonic-gate 		snf_async_read(sr);
20320Sstevel@tonic-gate 		mutex_enter(&snfq->snfq_lock);
20330Sstevel@tonic-gate 	}
20340Sstevel@tonic-gate }
20350Sstevel@tonic-gate 
20360Sstevel@tonic-gate 
20370Sstevel@tonic-gate snf_req_t *
20380Sstevel@tonic-gate create_thread(int operation, struct vnode *vp, file_t *fp,
20390Sstevel@tonic-gate     u_offset_t fileoff, u_offset_t size)
20400Sstevel@tonic-gate {
20410Sstevel@tonic-gate 	snf_req_t *sr;
20420Sstevel@tonic-gate 	stdata_t *stp;
20430Sstevel@tonic-gate 
20440Sstevel@tonic-gate 	sr = (snf_req_t *)kmem_zalloc(sizeof (snf_req_t), KM_SLEEP);
20450Sstevel@tonic-gate 
20460Sstevel@tonic-gate 	sr->sr_vp = vp;
20470Sstevel@tonic-gate 	sr->sr_fp = fp;
20480Sstevel@tonic-gate 	stp = vp->v_stream;
20490Sstevel@tonic-gate 
20500Sstevel@tonic-gate 	/*
20510Sstevel@tonic-gate 	 * Store sd_qn_maxpsz into sr_maxpsz while we have the stream head;
20520Sstevel@tonic-gate 	 * the stream might be closed before the thread returns from snf_async_read.
20530Sstevel@tonic-gate 	 */
20540Sstevel@tonic-gate 	if (stp->sd_qn_maxpsz > 0) {
20550Sstevel@tonic-gate 		sr->sr_maxpsz = MIN(MAXBSIZE, stp->sd_qn_maxpsz);
20560Sstevel@tonic-gate 	} else {
20570Sstevel@tonic-gate 		sr->sr_maxpsz = MAXBSIZE;
20580Sstevel@tonic-gate 	}
20590Sstevel@tonic-gate 
20600Sstevel@tonic-gate 	sr->sr_operation = operation;
20610Sstevel@tonic-gate 	sr->sr_file_off = fileoff;
20620Sstevel@tonic-gate 	sr->sr_file_size = size;
20630Sstevel@tonic-gate 	sr->sr_hiwat = sendfile_req_hiwat;
20640Sstevel@tonic-gate 	sr->sr_lowat = sendfile_req_lowat;
20650Sstevel@tonic-gate 	mutex_init(&sr->sr_lock, NULL, MUTEX_DEFAULT, NULL);
20660Sstevel@tonic-gate 	cv_init(&sr->sr_cv, NULL, CV_DEFAULT, NULL);
20670Sstevel@tonic-gate 	/*
20680Sstevel@tonic-gate 	 * See whether we need another thread to service this
20690Sstevel@tonic-gate 	 * request. If there are already enough requests queued
20700Sstevel@tonic-gate 	 * for the existing threads, create a new one provided we
20710Sstevel@tonic-gate 	 * do not exceed snfq_max_threads.
20720Sstevel@tonic-gate 	 */
20730Sstevel@tonic-gate 	mutex_enter(&snfq->snfq_lock);
20740Sstevel@tonic-gate 	if (snfq->snfq_req_cnt >= snfq->snfq_idle_cnt &&
20750Sstevel@tonic-gate 	    snfq->snfq_svc_threads < snfq->snfq_max_threads) {
20760Sstevel@tonic-gate 		(void) thread_create(NULL, 0, &snf_async_thread, 0, 0, &p0,
20770Sstevel@tonic-gate 		    TS_RUN, minclsyspri);
20780Sstevel@tonic-gate 		snfq->snfq_svc_threads++;
20790Sstevel@tonic-gate 	}
20800Sstevel@tonic-gate 	if (snfq->snfq_req_head == NULL) {
20810Sstevel@tonic-gate 		snfq->snfq_req_head = snfq->snfq_req_tail = sr;
20820Sstevel@tonic-gate 		cv_signal(&snfq->snfq_cv);
20830Sstevel@tonic-gate 	} else {
20840Sstevel@tonic-gate 		snfq->snfq_req_tail->sr_next = sr;
20850Sstevel@tonic-gate 		snfq->snfq_req_tail = sr;
20860Sstevel@tonic-gate 	}
20870Sstevel@tonic-gate 	snfq->snfq_req_cnt++;
20880Sstevel@tonic-gate 	mutex_exit(&snfq->snfq_lock);
20890Sstevel@tonic-gate 	return (sr);
20900Sstevel@tonic-gate }
20910Sstevel@tonic-gate 
20920Sstevel@tonic-gate int
20930Sstevel@tonic-gate snf_direct_io(file_t *fp, file_t *rfp, u_offset_t fileoff, u_offset_t size,
20940Sstevel@tonic-gate     ssize_t *count)
20950Sstevel@tonic-gate {
20960Sstevel@tonic-gate 	snf_req_t *sr;
20970Sstevel@tonic-gate 	mblk_t *mp;
20980Sstevel@tonic-gate 	int iosize;
20990Sstevel@tonic-gate 	int error = 0;
21000Sstevel@tonic-gate 	short fflag;
21010Sstevel@tonic-gate 	struct vnode *vp;
21020Sstevel@tonic-gate 	int ksize;
21030Sstevel@tonic-gate 
21040Sstevel@tonic-gate 	ksize = 0;
21050Sstevel@tonic-gate 	*count = 0;
21060Sstevel@tonic-gate 
21070Sstevel@tonic-gate 	vp = fp->f_vnode;
21080Sstevel@tonic-gate 	fflag = fp->f_flag;
21090Sstevel@tonic-gate 	if ((sr = create_thread(READ_OP, vp, rfp, fileoff, size)) == NULL)
21100Sstevel@tonic-gate 		return (EAGAIN);
21110Sstevel@tonic-gate 
21120Sstevel@tonic-gate 	/*
21130Sstevel@tonic-gate 	 * We check for read errors in snf_deque. It has to check
21140Sstevel@tonic-gate 	 * for a successful READ_DONE and return NULL, so we might
21150Sstevel@tonic-gate 	 * as well make the additional check there.
21160Sstevel@tonic-gate 	 */
21170Sstevel@tonic-gate 	while ((mp = snf_deque(sr)) != NULL) {
21180Sstevel@tonic-gate 
21190Sstevel@tonic-gate 		if (ISSIG(curthread, JUSTLOOKING)) {
21200Sstevel@tonic-gate 			freeb(mp);
21210Sstevel@tonic-gate 			error = EINTR;
21220Sstevel@tonic-gate 			break;
21230Sstevel@tonic-gate 		}
21240Sstevel@tonic-gate 		iosize = MBLKL(mp);
21250Sstevel@tonic-gate 
21260Sstevel@tonic-gate 		if ((error = kstrwritemp(vp, mp, fflag)) != 0) {
21270Sstevel@tonic-gate 			freeb(mp);
21280Sstevel@tonic-gate 			break;
21290Sstevel@tonic-gate 		}
21300Sstevel@tonic-gate 		ksize += iosize;
21310Sstevel@tonic-gate 	}
21320Sstevel@tonic-gate 	*count = ksize;
21330Sstevel@tonic-gate 
21340Sstevel@tonic-gate 	mutex_enter(&sr->sr_lock);
21350Sstevel@tonic-gate 	sr->sr_write_error = error;
21360Sstevel@tonic-gate 	/* Look at the big comments on why we cv_signal here. */
21370Sstevel@tonic-gate 	cv_signal(&sr->sr_cv);
21380Sstevel@tonic-gate 
21390Sstevel@tonic-gate 	/* Always wait for the reader to complete. */
21400Sstevel@tonic-gate 	while (!(sr->sr_read_error & SR_READ_DONE)) {
21410Sstevel@tonic-gate 		cv_wait(&sr->sr_cv, &sr->sr_lock);
21420Sstevel@tonic-gate 	}
21430Sstevel@tonic-gate 	/* If there is no write error, check for read error. */
21440Sstevel@tonic-gate 	if (error == 0)
21450Sstevel@tonic-gate 		error = (sr->sr_read_error & ~SR_READ_DONE);
21460Sstevel@tonic-gate 
21470Sstevel@tonic-gate 	if (error != 0) {
21480Sstevel@tonic-gate 		mblk_t *next_mp;
21490Sstevel@tonic-gate 
21500Sstevel@tonic-gate 		mp = sr->sr_mp_head;
21510Sstevel@tonic-gate 		while (mp != NULL) {
21520Sstevel@tonic-gate 			next_mp = mp->b_next;
21530Sstevel@tonic-gate 			mp->b_next = NULL;
21540Sstevel@tonic-gate 			freeb(mp);
21550Sstevel@tonic-gate 			mp = next_mp;
21560Sstevel@tonic-gate 		}
21570Sstevel@tonic-gate 	}
21580Sstevel@tonic-gate 	mutex_exit(&sr->sr_lock);
21590Sstevel@tonic-gate 	kmem_free(sr, sizeof (snf_req_t));
21600Sstevel@tonic-gate 	return (error);
21610Sstevel@tonic-gate }
21620Sstevel@tonic-gate 
21630Sstevel@tonic-gate typedef struct {
21640Sstevel@tonic-gate 	frtn_t		snfi_frtn;
21650Sstevel@tonic-gate 	caddr_t		snfi_base;
21660Sstevel@tonic-gate 	uint_t		snfi_mapoff;
21670Sstevel@tonic-gate 	size_t		snfi_len;
21680Sstevel@tonic-gate 	vnode_t		*snfi_vp;
21690Sstevel@tonic-gate } snf_smap_desbinfo;
21700Sstevel@tonic-gate 
21710Sstevel@tonic-gate /*
21720Sstevel@tonic-gate  * The callback function invoked when the last reference to the mblk is
21730Sstevel@tonic-gate  * dropped. This normally occurs when TCP receives the ACK, but it can
21740Sstevel@tonic-gate  * also be the driver, due to lazy reclaim.
21750Sstevel@tonic-gate  */
21760Sstevel@tonic-gate void
21770Sstevel@tonic-gate snf_smap_desbfree(snf_smap_desbinfo *snfi)
21780Sstevel@tonic-gate {
21790Sstevel@tonic-gate 	if (!segmap_kpm) {
21800Sstevel@tonic-gate 		/*
21810Sstevel@tonic-gate 		 * We don't need to call segmap_fault(F_SOFTUNLOCK) for
21820Sstevel@tonic-gate 		 * segmap_kpm as long as the latter never falls back to
21830Sstevel@tonic-gate 		 * "use_segmap_range". (See segmap_getmapflt().)
21840Sstevel@tonic-gate 		 *
21850Sstevel@tonic-gate 		 * Using S_OTHER saves a redundant hat_setref() in
21860Sstevel@tonic-gate 		 * segmap_unlock()
21870Sstevel@tonic-gate 		 */
21880Sstevel@tonic-gate 		(void) segmap_fault(kas.a_hat, segkmap,
2189408Skrgopi 		    (caddr_t)(uintptr_t)(((uintptr_t)snfi->snfi_base +
2190408Skrgopi 		    snfi->snfi_mapoff) & PAGEMASK), snfi->snfi_len,
2191408Skrgopi 		    F_SOFTUNLOCK, S_OTHER);
21920Sstevel@tonic-gate 	}
21930Sstevel@tonic-gate 	(void) segmap_release(segkmap, snfi->snfi_base, SM_DONTNEED);
21940Sstevel@tonic-gate 	VN_RELE(snfi->snfi_vp);
21950Sstevel@tonic-gate 	kmem_free(snfi, sizeof (*snfi));
21960Sstevel@tonic-gate }
21970Sstevel@tonic-gate 
21980Sstevel@tonic-gate /*
21990Sstevel@tonic-gate  * Use segmap instead of bcopy to send down a chain of desballoca'ed mblks.
22000Sstevel@tonic-gate  * Each mblk contains a segmap slot of no more than MAXBSIZE. The total
22010Sstevel@tonic-gate  * length of a chain is no more than sd_qn_maxpsz.
22020Sstevel@tonic-gate  *
22030Sstevel@tonic-gate  * At the end of the whole sendfile() operation, we wait till the data from
22040Sstevel@tonic-gate  * the last mblk is ack'ed by the transport before returning so that the
22050Sstevel@tonic-gate  * caller of sendfile() can safely modify the file content.
22060Sstevel@tonic-gate  */
22070Sstevel@tonic-gate int
22080Sstevel@tonic-gate snf_segmap(file_t *fp, vnode_t *fvp, u_offset_t fileoff, u_offset_t size,
22090Sstevel@tonic-gate     uint_t maxpsz, ssize_t *count, boolean_t nowait)
22100Sstevel@tonic-gate {
22110Sstevel@tonic-gate 	caddr_t base;
22120Sstevel@tonic-gate 	int mapoff;
22130Sstevel@tonic-gate 	vnode_t *vp;
22140Sstevel@tonic-gate 	mblk_t *mp, *mp1;
22150Sstevel@tonic-gate 	int iosize, iosize1;
22160Sstevel@tonic-gate 	int error;
22170Sstevel@tonic-gate 	short fflag;
22180Sstevel@tonic-gate 	int ksize;
22190Sstevel@tonic-gate 	snf_smap_desbinfo *snfi;
22200Sstevel@tonic-gate 	struct vattr va;
22210Sstevel@tonic-gate 	boolean_t dowait = B_FALSE;
22220Sstevel@tonic-gate 
22230Sstevel@tonic-gate 	vp = fp->f_vnode;
22240Sstevel@tonic-gate 	fflag = fp->f_flag;
22250Sstevel@tonic-gate 	ksize = 0;
22260Sstevel@tonic-gate 	for (;;) {
22270Sstevel@tonic-gate 		if (ISSIG(curthread, JUSTLOOKING)) {
22280Sstevel@tonic-gate 			error = EINTR;
22290Sstevel@tonic-gate 			break;
22300Sstevel@tonic-gate 		}
22310Sstevel@tonic-gate 		iosize = 0;
22320Sstevel@tonic-gate 		mp = NULL;
22330Sstevel@tonic-gate 		do {
22340Sstevel@tonic-gate 			mapoff = fileoff & MAXBOFFSET;
22350Sstevel@tonic-gate 			iosize1 = MAXBSIZE - mapoff;
22360Sstevel@tonic-gate 			if (iosize1 > size)
22370Sstevel@tonic-gate 				iosize1 = size;
22380Sstevel@tonic-gate 			/*
22390Sstevel@tonic-gate 			 * we don't forcefault because we'll call
22400Sstevel@tonic-gate 			 * segmap_fault(F_SOFTLOCK) next.
22410Sstevel@tonic-gate 			 *
22420Sstevel@tonic-gate 			 * S_READ will get the ref bit set (by either
22430Sstevel@tonic-gate 			 * segmap_getmapflt() or segmap_fault()) and page
22440Sstevel@tonic-gate 			 * shared locked.
22450Sstevel@tonic-gate 			 */
22460Sstevel@tonic-gate 			base = segmap_getmapflt(segkmap, fvp, fileoff, iosize1,
22470Sstevel@tonic-gate 			    segmap_kpm ? SM_FAULT : 0, S_READ);
22480Sstevel@tonic-gate 
22490Sstevel@tonic-gate 			snfi = kmem_alloc(sizeof (*snfi), KM_SLEEP);
22500Sstevel@tonic-gate 			snfi->snfi_len = (size_t)roundup(mapoff + iosize1,
22510Sstevel@tonic-gate 			    PAGESIZE) - (mapoff & PAGEMASK);
22520Sstevel@tonic-gate 			/*
22530Sstevel@tonic-gate 			 * We must call segmap_fault() even for segmap_kpm
22540Sstevel@tonic-gate 			 * because that's how errors get returned.
22550Sstevel@tonic-gate 			 * (segmap_getmapflt() never fails but segmap_fault()
22560Sstevel@tonic-gate 			 * does.)
22570Sstevel@tonic-gate 			 */
22580Sstevel@tonic-gate 			if (segmap_fault(kas.a_hat, segkmap,
2259408Skrgopi 			    (caddr_t)(uintptr_t)(((uintptr_t)base + mapoff) &
2260408Skrgopi 			    PAGEMASK), snfi->snfi_len, F_SOFTLOCK,
2261408Skrgopi 			    S_READ) != 0) {
22620Sstevel@tonic-gate 				(void) segmap_release(segkmap, base, 0);
22630Sstevel@tonic-gate 				kmem_free(snfi, sizeof (*snfi));
22640Sstevel@tonic-gate 				freemsg(mp);
22650Sstevel@tonic-gate 				error = EIO;
22660Sstevel@tonic-gate 				goto out;
22670Sstevel@tonic-gate 			}
22680Sstevel@tonic-gate 			snfi->snfi_frtn.free_func = snf_smap_desbfree;
22690Sstevel@tonic-gate 			snfi->snfi_frtn.free_arg = (caddr_t)snfi;
22700Sstevel@tonic-gate 			snfi->snfi_base = base;
22710Sstevel@tonic-gate 			snfi->snfi_mapoff = mapoff;
22722994Sss146032 			mp1 = esballoca((uchar_t *)base + mapoff,
22730Sstevel@tonic-gate 			    iosize1, BPRI_HI, &snfi->snfi_frtn);
22740Sstevel@tonic-gate 
22750Sstevel@tonic-gate 			if (mp1 == NULL) {
22760Sstevel@tonic-gate 				(void) segmap_fault(kas.a_hat, segkmap,
2277408Skrgopi 				    (caddr_t)(uintptr_t)(((uintptr_t)base +
2278408Skrgopi 				    mapoff) & PAGEMASK), snfi->snfi_len,
22790Sstevel@tonic-gate 				    F_SOFTUNLOCK, S_OTHER);
22800Sstevel@tonic-gate 				(void) segmap_release(segkmap, base, 0);
22810Sstevel@tonic-gate 				kmem_free(snfi, sizeof (*snfi));
22820Sstevel@tonic-gate 				freemsg(mp);
22830Sstevel@tonic-gate 				error = EAGAIN;
22840Sstevel@tonic-gate 				goto out;
22850Sstevel@tonic-gate 			}
22860Sstevel@tonic-gate 			VN_HOLD(fvp);
22870Sstevel@tonic-gate 			snfi->snfi_vp = fvp;
22880Sstevel@tonic-gate 			mp1->b_wptr += iosize1;
22890Sstevel@tonic-gate 
22900Sstevel@tonic-gate 			/* Mark this dblk with the zero-copy flag */
22910Sstevel@tonic-gate 			mp1->b_datap->db_struioflag |= STRUIO_ZC;
22920Sstevel@tonic-gate 			if (mp == NULL)
22930Sstevel@tonic-gate 				mp = mp1;
22940Sstevel@tonic-gate 			else
22950Sstevel@tonic-gate 				linkb(mp, mp1);
22960Sstevel@tonic-gate 			iosize += iosize1;
22970Sstevel@tonic-gate 			fileoff += iosize1;
22980Sstevel@tonic-gate 			size -= iosize1;
22990Sstevel@tonic-gate 		} while (iosize < maxpsz && size != 0);
23000Sstevel@tonic-gate 
23010Sstevel@tonic-gate 		if (size == 0 && !nowait) {
23020Sstevel@tonic-gate 			ASSERT(!dowait);
23030Sstevel@tonic-gate 			dowait = B_TRUE;
23040Sstevel@tonic-gate 			mp1->b_datap->db_struioflag |= STRUIO_ZCNOTIFY;
23050Sstevel@tonic-gate 		}
23060Sstevel@tonic-gate 		VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
23070Sstevel@tonic-gate 		if ((error = kstrwritemp(vp, mp, fflag)) != 0) {
23080Sstevel@tonic-gate 			*count = ksize;
23090Sstevel@tonic-gate 			freemsg(mp);
23100Sstevel@tonic-gate 			return (error);
23110Sstevel@tonic-gate 		}
23120Sstevel@tonic-gate 		ksize += iosize;
23130Sstevel@tonic-gate 		if (size == 0)
23140Sstevel@tonic-gate 			goto done;
23150Sstevel@tonic-gate 
23160Sstevel@tonic-gate 		(void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL);
23170Sstevel@tonic-gate 		va.va_mask = AT_SIZE;
23185331Samw 		error = VOP_GETATTR(fvp, &va, 0, kcred, NULL);
23190Sstevel@tonic-gate 		if (error)
23200Sstevel@tonic-gate 			break;
23210Sstevel@tonic-gate 		/* Read as much as possible. */
23220Sstevel@tonic-gate 		if (fileoff >= va.va_size)
23230Sstevel@tonic-gate 			break;
23240Sstevel@tonic-gate 		if (size + fileoff > va.va_size)
23250Sstevel@tonic-gate 			size = va.va_size - fileoff;
23260Sstevel@tonic-gate 	}
23270Sstevel@tonic-gate out:
23280Sstevel@tonic-gate 	VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
23290Sstevel@tonic-gate done:
23300Sstevel@tonic-gate 	*count = ksize;
23310Sstevel@tonic-gate 	if (dowait) {
23320Sstevel@tonic-gate 		stdata_t *stp;
23330Sstevel@tonic-gate 
23340Sstevel@tonic-gate 		stp = vp->v_stream;
23350Sstevel@tonic-gate 		mutex_enter(&stp->sd_lock);
23360Sstevel@tonic-gate 		while (!(stp->sd_flag & STZCNOTIFY)) {
23373415Samehta 			if (cv_wait_sig(&stp->sd_zcopy_wait,
23385227Stz204579 			    &stp->sd_lock) == 0) {
23393415Samehta 				error = EINTR;
23403415Samehta 				break;
23413415Samehta 			}
23420Sstevel@tonic-gate 		}
23430Sstevel@tonic-gate 		stp->sd_flag &= ~STZCNOTIFY;
23440Sstevel@tonic-gate 		mutex_exit(&stp->sd_lock);
23450Sstevel@tonic-gate 	}
23460Sstevel@tonic-gate 	return (error);
23470Sstevel@tonic-gate }
23480Sstevel@tonic-gate 
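/*
 * snf_cache() implements the non-zero-copy sendfile path: file data is
 * read with VOP_READ() into freshly allocated mblks (plus any wroff/tail
 * space the socket stream asked for) and then written downstream with
 * kstrwritemp().  The number of bytes sent is returned through *count.
 */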
23490Sstevel@tonic-gate int
23500Sstevel@tonic-gate snf_cache(file_t *fp, vnode_t *fvp, u_offset_t fileoff, u_offset_t size,
23510Sstevel@tonic-gate     uint_t maxpsz, ssize_t *count)
23520Sstevel@tonic-gate {
23530Sstevel@tonic-gate 	struct vnode *vp;
23540Sstevel@tonic-gate 	mblk_t *mp;
23550Sstevel@tonic-gate 	int iosize;
2356*6240Skrishna 	int extra = 0;
23570Sstevel@tonic-gate 	int error;
23580Sstevel@tonic-gate 	short fflag;
23590Sstevel@tonic-gate 	int ksize;
23600Sstevel@tonic-gate 	int ioflag;
23610Sstevel@tonic-gate 	struct uio auio;
23620Sstevel@tonic-gate 	struct iovec aiov;
23630Sstevel@tonic-gate 	struct vattr va;
23640Sstevel@tonic-gate 
23650Sstevel@tonic-gate 	vp = fp->f_vnode;
2366*6240Skrishna 	if (vp->v_type == VSOCK) {
2367*6240Skrishna 		stdata_t *stp;
2368*6240Skrishna 
2369*6240Skrishna 		/*
2370*6240Skrishna 		 * Get the extra space to insert a header and a trailer.
2371*6240Skrishna 		 */
2372*6240Skrishna 		stp = vp->v_stream;
2373*6240Skrishna 		extra = (int)(stp->sd_wroff + stp->sd_tail);
2374*6240Skrishna 	}
2375*6240Skrishna 
23760Sstevel@tonic-gate 	fflag = fp->f_flag;
23770Sstevel@tonic-gate 	ksize = 0;
23780Sstevel@tonic-gate 	auio.uio_iov = &aiov;
23790Sstevel@tonic-gate 	auio.uio_iovcnt = 1;
23800Sstevel@tonic-gate 	auio.uio_segflg = UIO_SYSSPACE;
23810Sstevel@tonic-gate 	auio.uio_llimit = MAXOFFSET_T;
23820Sstevel@tonic-gate 	auio.uio_fmode = fflag;
23830Sstevel@tonic-gate 	auio.uio_extflg = UIO_COPY_CACHED;
23840Sstevel@tonic-gate 	ioflag = auio.uio_fmode & (FSYNC|FDSYNC|FRSYNC);
23850Sstevel@tonic-gate 	/* If read sync is not asked for, filter sync flags */
23860Sstevel@tonic-gate 	if ((ioflag & FRSYNC) == 0)
23870Sstevel@tonic-gate 		ioflag &= ~(FSYNC|FDSYNC);
23880Sstevel@tonic-gate 	for (;;) {
23890Sstevel@tonic-gate 		if (ISSIG(curthread, JUSTLOOKING)) {
23900Sstevel@tonic-gate 			error = EINTR;
23910Sstevel@tonic-gate 			break;
23920Sstevel@tonic-gate 		}
23930Sstevel@tonic-gate 		iosize = (int)MIN(maxpsz, size);
2394*6240Skrishna 		if ((mp = allocb(iosize + extra, BPRI_MED)) == NULL) {
23950Sstevel@tonic-gate 			error = EAGAIN;
23960Sstevel@tonic-gate 			break;
23970Sstevel@tonic-gate 		}
23980Sstevel@tonic-gate 		aiov.iov_base = (caddr_t)mp->b_rptr;
23990Sstevel@tonic-gate 		aiov.iov_len = iosize;
24000Sstevel@tonic-gate 		auio.uio_loffset = fileoff;
24010Sstevel@tonic-gate 		auio.uio_resid = iosize;
24020Sstevel@tonic-gate 
24030Sstevel@tonic-gate 		error = VOP_READ(fvp, &auio, ioflag, fp->f_cred, NULL);
24040Sstevel@tonic-gate 		iosize -= auio.uio_resid;
24050Sstevel@tonic-gate 
24060Sstevel@tonic-gate 		if (error == EINTR && iosize != 0)
24070Sstevel@tonic-gate 			error = 0;
24080Sstevel@tonic-gate 
24090Sstevel@tonic-gate 		if (error != 0 || iosize == 0) {
24100Sstevel@tonic-gate 			freeb(mp);
24110Sstevel@tonic-gate 			break;
24120Sstevel@tonic-gate 		}
24130Sstevel@tonic-gate 		mp->b_wptr = mp->b_rptr + iosize;
24140Sstevel@tonic-gate 
24150Sstevel@tonic-gate 		VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
24160Sstevel@tonic-gate 		if ((error = kstrwritemp(vp, mp, fflag)) != 0) {
24170Sstevel@tonic-gate 			*count = ksize;
24180Sstevel@tonic-gate 			freeb(mp);
24190Sstevel@tonic-gate 			return (error);
24200Sstevel@tonic-gate 		}
24210Sstevel@tonic-gate 		ksize += iosize;
24220Sstevel@tonic-gate 		size -= iosize;
24230Sstevel@tonic-gate 		if (size == 0)
24240Sstevel@tonic-gate 			goto done;
24250Sstevel@tonic-gate 
24260Sstevel@tonic-gate 		fileoff += iosize;
24270Sstevel@tonic-gate 		(void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL);
24280Sstevel@tonic-gate 		va.va_mask = AT_SIZE;
24295331Samw 		error = VOP_GETATTR(fvp, &va, 0, kcred, NULL);
24300Sstevel@tonic-gate 		if (error)
24310Sstevel@tonic-gate 			break;
24320Sstevel@tonic-gate 		/* Read as much as possible, but not past the current EOF. */
24330Sstevel@tonic-gate 		if (fileoff >= va.va_size)
24340Sstevel@tonic-gate 			size = 0;
24350Sstevel@tonic-gate 		else if (size + fileoff > va.va_size)
24360Sstevel@tonic-gate 			size = va.va_size - fileoff;
24370Sstevel@tonic-gate 	}
24380Sstevel@tonic-gate 	VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
24390Sstevel@tonic-gate done:
24400Sstevel@tonic-gate 	*count = ksize;
24410Sstevel@tonic-gate 	return (error);
24420Sstevel@tonic-gate }
24430Sstevel@tonic-gate 
24440Sstevel@tonic-gate #if defined(_SYSCALL32_IMPL) || defined(_ILP32)
24450Sstevel@tonic-gate /*
24460Sstevel@tonic-gate  * Largefile support for 32 bit applications only.
24470Sstevel@tonic-gate  */
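/*
 * sosendfile64() handles one ksendfilevec64 entry: it validates sfv_off
 * and sfv_len, clamps the request to MAXOFFSET_T and to the current file
 * size, and then dispatches to snf_direct_io(), snf_segmap() or
 * snf_cache().  The number of bytes actually sent is returned through
 * *count32.
 */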
24480Sstevel@tonic-gate int
24490Sstevel@tonic-gate sosendfile64(file_t *fp, file_t *rfp, const struct ksendfilevec64 *sfv,
24500Sstevel@tonic-gate     ssize32_t *count32)
24510Sstevel@tonic-gate {
24520Sstevel@tonic-gate 	ssize32_t sfv_len;
24530Sstevel@tonic-gate 	u_offset_t sfv_off, va_size;
24540Sstevel@tonic-gate 	struct vnode *vp, *fvp, *realvp;
24550Sstevel@tonic-gate 	struct vattr va;
24560Sstevel@tonic-gate 	stdata_t *stp;
24570Sstevel@tonic-gate 	ssize_t count = 0;
24580Sstevel@tonic-gate 	int error = 0;
24590Sstevel@tonic-gate 	boolean_t dozcopy = B_FALSE;
24600Sstevel@tonic-gate 	uint_t maxpsz;
24610Sstevel@tonic-gate 
24620Sstevel@tonic-gate 	sfv_len = (ssize32_t)sfv->sfv_len;
24630Sstevel@tonic-gate 	if (sfv_len < 0) {
24640Sstevel@tonic-gate 		error = EINVAL;
24650Sstevel@tonic-gate 		goto out;
24660Sstevel@tonic-gate 	}
24670Sstevel@tonic-gate 
24680Sstevel@tonic-gate 	if (sfv_len == 0)
		goto out;
24690Sstevel@tonic-gate 
24700Sstevel@tonic-gate 	sfv_off = (u_offset_t)sfv->sfv_off;
24710Sstevel@tonic-gate 
24720Sstevel@tonic-gate 	/* Same checks as in pread */
24730Sstevel@tonic-gate 	if (sfv_off > MAXOFFSET_T) {
24740Sstevel@tonic-gate 		error = EINVAL;
24750Sstevel@tonic-gate 		goto out;
24760Sstevel@tonic-gate 	}
24770Sstevel@tonic-gate 	if (sfv_off + sfv_len > MAXOFFSET_T)
24780Sstevel@tonic-gate 		sfv_len = (ssize32_t)(MAXOFFSET_T - sfv_off);
24790Sstevel@tonic-gate 
24800Sstevel@tonic-gate 	/*
24810Sstevel@tonic-gate 	 * No further checks are done on sfv_len, so we cast it to
24820Sstevel@tonic-gate 	 * u_offset_t and share the snf_direct_io/snf_cache code between
24830Sstevel@tonic-gate 	 * the 32-bit and 64-bit cases.
24840Sstevel@tonic-gate 	 *
24850Sstevel@tonic-gate 	 * TODO: should do nbl_need_check() like read()?
24860Sstevel@tonic-gate 	 */
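	/*
	 * Requests larger than sendfile_max_size always take the uncached
	 * snf_direct_io() path; smaller requests are considered for the
	 * zero-copy (snf_segmap) or cached (snf_cache) paths below.
	 */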
24870Sstevel@tonic-gate 	if (sfv_len > sendfile_max_size) {
24880Sstevel@tonic-gate 		sf_stats.ss_file_not_cached++;
24890Sstevel@tonic-gate 		error = snf_direct_io(fp, rfp, sfv_off, (u_offset_t)sfv_len,
24900Sstevel@tonic-gate 		    &count);
24910Sstevel@tonic-gate 		goto out;
24920Sstevel@tonic-gate 	}
24930Sstevel@tonic-gate 	fvp = rfp->f_vnode;
24945331Samw 	if (VOP_REALVP(fvp, &realvp, NULL) == 0)
24950Sstevel@tonic-gate 		fvp = realvp;
24960Sstevel@tonic-gate 	/*
24970Sstevel@tonic-gate 	 * Grab the lock as a reader to prevent the file size
24980Sstevel@tonic-gate 	 * from changing underneath.
24990Sstevel@tonic-gate 	 */
25000Sstevel@tonic-gate 	(void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL);
25010Sstevel@tonic-gate 	va.va_mask = AT_SIZE;
25025331Samw 	error = VOP_GETATTR(fvp, &va, 0, kcred, NULL);
25030Sstevel@tonic-gate 	va_size = va.va_size;
25044649Sdm120769 	if ((error != 0) || (va_size == 0) || (sfv_off >= va_size)) {
25050Sstevel@tonic-gate 		VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
25060Sstevel@tonic-gate 		goto out;
25070Sstevel@tonic-gate 	}
25080Sstevel@tonic-gate 	/* Read as much as possible, but not past the current EOF. */
25090Sstevel@tonic-gate 	if (sfv_off + sfv_len > va_size)
25100Sstevel@tonic-gate 		sfv_len = va_size - sfv_off;
25110Sstevel@tonic-gate 
25120Sstevel@tonic-gate 	vp = fp->f_vnode;
25130Sstevel@tonic-gate 	stp = vp->v_stream;
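	/*
	 * maxpsz bounds how much file data each snf_segmap()/snf_cache()
	 * iteration reads and sends.  INFPSZ means the stream imposes no
	 * limit, so fall back to maxphys; otherwise round the stream's
	 * maximum packet size up to a multiple of MAXBSIZE.
	 */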
25140Sstevel@tonic-gate 	if (stp->sd_qn_maxpsz == INFPSZ)
25154173Spr14459 		maxpsz = maxphys;
25160Sstevel@tonic-gate 	else
25170Sstevel@tonic-gate 		maxpsz = roundup(stp->sd_qn_maxpsz, MAXBSIZE);
25180Sstevel@tonic-gate 	/*
25190Sstevel@tonic-gate 	 * When the NOWAIT flag is not set, we enable zero-copy only if the
25200Sstevel@tonic-gate 	 * transfer size is large enough: at least MAXBSIZE, and either at
25210Sstevel@tonic-gate 	 * least half the file or at least 16 MB.  (With SFV_NOWAIT only the
	 * MAXBSIZE check applies.)  This prevents performance loss when the
	 * caller sends the file piece by piece.  Zero-copy is also skipped
	 * for files with advisory locks and for vnodes marked VNOMAP.
25220Sstevel@tonic-gate 	 */
25230Sstevel@tonic-gate 	if (sfv_len >= MAXBSIZE && (sfv_len >= (va_size >> 1) ||
25240Sstevel@tonic-gate 	    (sfv->sfv_flag & SFV_NOWAIT) || sfv_len >= 0x1000000) &&
25254173Spr14459 	    !vn_has_flocks(fvp) && !(fvp->v_flag & VNOMAP)) {
25260Sstevel@tonic-gate 		if ((stp->sd_copyflag & (STZCVMSAFE|STZCVMUNSAFE)) == 0) {
25270Sstevel@tonic-gate 			int on = 1;
25280Sstevel@tonic-gate 
25290Sstevel@tonic-gate 			if (SOP_SETSOCKOPT(VTOSO(vp), SOL_SOCKET,
25300Sstevel@tonic-gate 			    SO_SND_COPYAVOID, &on, sizeof (on)) == 0)
25310Sstevel@tonic-gate 				dozcopy = B_TRUE;
25320Sstevel@tonic-gate 		} else {
25330Sstevel@tonic-gate 			dozcopy = (stp->sd_copyflag & STZCVMSAFE);
25340Sstevel@tonic-gate 		}
25350Sstevel@tonic-gate 	}
25360Sstevel@tonic-gate 	if (dozcopy) {
25370Sstevel@tonic-gate 		sf_stats.ss_file_segmap++;
25380Sstevel@tonic-gate 		error = snf_segmap(fp, fvp, sfv_off, (u_offset_t)sfv_len,
25390Sstevel@tonic-gate 		    maxpsz, &count, ((sfv->sfv_flag & SFV_NOWAIT) != 0));
25400Sstevel@tonic-gate 	} else {
25410Sstevel@tonic-gate 		sf_stats.ss_file_cached++;
25420Sstevel@tonic-gate 		error = snf_cache(fp, fvp, sfv_off, (u_offset_t)sfv_len,
25430Sstevel@tonic-gate 		    maxpsz, &count);
25440Sstevel@tonic-gate 	}
25450Sstevel@tonic-gate out:
25460Sstevel@tonic-gate 	releasef(sfv->sfv_fd);
25470Sstevel@tonic-gate 	*count32 = (ssize32_t)count;
25480Sstevel@tonic-gate 	return (error);
25490Sstevel@tonic-gate }
25500Sstevel@tonic-gate #endif
25510Sstevel@tonic-gate 
25520Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL
25530Sstevel@tonic-gate /*
25540Sstevel@tonic-gate  * recv32(), recvfrom32(), send32(), sendto32(): intentionally return a
25550Sstevel@tonic-gate  * ssize_t rather than ssize32_t; see the comments above read32 for details.
25560Sstevel@tonic-gate  */
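/*
 * Note that the caddr32_t arguments are 32-bit integers rather than
 * pointers, so they are widened through uintptr_t before being cast to
 * the (void *) expected by the native entry points.
 */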
25570Sstevel@tonic-gate 
25580Sstevel@tonic-gate ssize_t
25590Sstevel@tonic-gate recv32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags)
25600Sstevel@tonic-gate {
25610Sstevel@tonic-gate 	return (recv(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags));
25620Sstevel@tonic-gate }
25630Sstevel@tonic-gate 
25640Sstevel@tonic-gate ssize_t
25650Sstevel@tonic-gate recvfrom32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags,
25660Sstevel@tonic-gate 	caddr32_t name, caddr32_t namelenp)
25670Sstevel@tonic-gate {
25680Sstevel@tonic-gate 	return (recvfrom(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags,
25690Sstevel@tonic-gate 	    (void *)(uintptr_t)name, (void *)(uintptr_t)namelenp));
25700Sstevel@tonic-gate }
25710Sstevel@tonic-gate 
25720Sstevel@tonic-gate ssize_t
25730Sstevel@tonic-gate send32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags)
25740Sstevel@tonic-gate {
25750Sstevel@tonic-gate 	return (send(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags));
25760Sstevel@tonic-gate }
25770Sstevel@tonic-gate 
25780Sstevel@tonic-gate ssize_t
25790Sstevel@tonic-gate sendto32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags,
25800Sstevel@tonic-gate 	caddr32_t name, socklen_t namelen)
25810Sstevel@tonic-gate {
25820Sstevel@tonic-gate 	return (sendto(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags,
25830Sstevel@tonic-gate 	    (void *)(uintptr_t)name, namelen));
25840Sstevel@tonic-gate }
25850Sstevel@tonic-gate #endif	/* _SYSCALL32_IMPL */
25860Sstevel@tonic-gate 
25870Sstevel@tonic-gate /*
25885331Samw  * Function wrappers (mostly around the sonode switch) for
25890Sstevel@tonic-gate  * backward compatibility.
25900Sstevel@tonic-gate  */
25910Sstevel@tonic-gate 
25920Sstevel@tonic-gate int
25930Sstevel@tonic-gate soaccept(struct sonode *so, int fflag, struct sonode **nsop)
25940Sstevel@tonic-gate {
25950Sstevel@tonic-gate 	return (SOP_ACCEPT(so, fflag, nsop));
25960Sstevel@tonic-gate }
25970Sstevel@tonic-gate 
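/*
 * For backward compatibility, sobind() also performs the listen when a
 * non-zero backlog is supplied.
 */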
25980Sstevel@tonic-gate int
25990Sstevel@tonic-gate sobind(struct sonode *so, struct sockaddr *name, socklen_t namelen,
26000Sstevel@tonic-gate     int backlog, int flags)
26010Sstevel@tonic-gate {
26020Sstevel@tonic-gate 	int	error;
26030Sstevel@tonic-gate 
26040Sstevel@tonic-gate 	error = SOP_BIND(so, name, namelen, flags);
26050Sstevel@tonic-gate 	if (error == 0 && backlog != 0)
26060Sstevel@tonic-gate 		return (SOP_LISTEN(so, backlog));
26070Sstevel@tonic-gate 
26080Sstevel@tonic-gate 	return (error);
26090Sstevel@tonic-gate }
26100Sstevel@tonic-gate 
26110Sstevel@tonic-gate int
26120Sstevel@tonic-gate solisten(struct sonode *so, int backlog)
26130Sstevel@tonic-gate {
26140Sstevel@tonic-gate 	return (SOP_LISTEN(so, backlog));
26150Sstevel@tonic-gate }
26160Sstevel@tonic-gate 
26170Sstevel@tonic-gate int
26180Sstevel@tonic-gate soconnect(struct sonode *so, const struct sockaddr *name, socklen_t namelen,
26190Sstevel@tonic-gate     int fflag, int flags)
26200Sstevel@tonic-gate {
26210Sstevel@tonic-gate 	return (SOP_CONNECT(so, name, namelen, fflag, flags));
26220Sstevel@tonic-gate }
26230Sstevel@tonic-gate 
26240Sstevel@tonic-gate int
26250Sstevel@tonic-gate sorecvmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop)
26260Sstevel@tonic-gate {
26270Sstevel@tonic-gate 	return (SOP_RECVMSG(so, msg, uiop));
26280Sstevel@tonic-gate }
26290Sstevel@tonic-gate 
26300Sstevel@tonic-gate int
26310Sstevel@tonic-gate sosendmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop)
26320Sstevel@tonic-gate {
26330Sstevel@tonic-gate 	return (SOP_SENDMSG(so, msg, uiop));
26340Sstevel@tonic-gate }
26350Sstevel@tonic-gate 
26360Sstevel@tonic-gate int
26370Sstevel@tonic-gate sogetpeername(struct sonode *so)
26380Sstevel@tonic-gate {
26390Sstevel@tonic-gate 	return (SOP_GETPEERNAME(so));
26400Sstevel@tonic-gate }
26410Sstevel@tonic-gate 
26420Sstevel@tonic-gate int
26430Sstevel@tonic-gate sogetsockname(struct sonode *so)
26440Sstevel@tonic-gate {
26450Sstevel@tonic-gate 	return (SOP_GETSOCKNAME(so));
26460Sstevel@tonic-gate }
26470Sstevel@tonic-gate 
26480Sstevel@tonic-gate int
26490Sstevel@tonic-gate soshutdown(struct sonode *so, int how)
26500Sstevel@tonic-gate {
26510Sstevel@tonic-gate 	return (SOP_SHUTDOWN(so, how));
26520Sstevel@tonic-gate }
26530Sstevel@tonic-gate 
26540Sstevel@tonic-gate int
26550Sstevel@tonic-gate sogetsockopt(struct sonode *so, int level, int option_name, void *optval,
26560Sstevel@tonic-gate     socklen_t *optlenp, int flags)
26570Sstevel@tonic-gate {
26580Sstevel@tonic-gate 	return (SOP_GETSOCKOPT(so, level, option_name, optval, optlenp,
26590Sstevel@tonic-gate 	    flags));
26600Sstevel@tonic-gate }
26610Sstevel@tonic-gate 
26620Sstevel@tonic-gate int
26630Sstevel@tonic-gate sosetsockopt(struct sonode *so, int level, int option_name, const void *optval,
26640Sstevel@tonic-gate     t_uscalar_t optlen)
26650Sstevel@tonic-gate {
26660Sstevel@tonic-gate 	return (SOP_SETSOCKOPT(so, level, option_name, optval, optlen));
26670Sstevel@tonic-gate }
26680Sstevel@tonic-gate 
26690Sstevel@tonic-gate /*
26700Sstevel@tonic-gate  * Because this is a backward-compatibility interface, it only needs to
26710Sstevel@tonic-gate  * handle the creation of TPI sockfs sockets.
26720Sstevel@tonic-gate  */
26730Sstevel@tonic-gate struct sonode *
26740Sstevel@tonic-gate socreate(vnode_t *accessvp, int domain, int type, int protocol, int version,
26750Sstevel@tonic-gate     struct sonode *tso, int *errorp)
26760Sstevel@tonic-gate {
26770Sstevel@tonic-gate 	return (sotpi_create(accessvp, domain, type, protocol, version, tso,
26780Sstevel@tonic-gate 	    errorp));
26790Sstevel@tonic-gate }
2680