/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/file.h>
#include <sys/user.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/strsun.h>
#include <sys/sunddi.h>
#include <sys/esunddi.h>
#include <sys/flock.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/policy.h>

#include <sys/socket.h>
#include <sys/socketvar.h>

#include <sys/isa_defs.h>
#include <sys/inttypes.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/filio.h>
#include <sys/sendfile.h>
#include <sys/ddi.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kpm.h>
#include <fs/sockfs/nl7c.h>

#ifdef SOCK_TEST
int do_useracc = 1;		/* Controlled by setting SO_DEBUG to 4 */
#else
#define	do_useracc	1
#endif /* SOCK_TEST */

extern int xnet_truncate_print;

/*
 * Note: MSG_MAXIOVLEN is defined locally, just as DEF_IOV_MAX is in
 *	 "fs/vncalls.c", since there is no formal kernel definition of IOV_MAX.
 */
#define	MSG_MAXIOVLEN	16

/*
 * Kernel component of socket creation.
 *
 * The socket library determines which version number to use.
 * First the library calls this with a NULL devpath. If this fails
 * to find a transport (using solookup) the library will look in /etc/netconfig
 * for the appropriate transport. If one is found it will pass in the
 * devpath for the kernel to use.
 */
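/*
 * For illustration only (sketch; SOV_DEFAULT and the "/dev/tcp" device
 * path are assumptions, not defined in this file): a plain
 * socket(AF_INET, SOCK_STREAM, 0) from libsocket is expected to arrive
 * here roughly as
 *
 *	so_socket(AF_INET, SOCK_STREAM, 0, NULL, SOV_DEFAULT);
 *
 * and only if that lookup fails does the library consult /etc/netconfig
 * and retry with an explicit devpath (e.g. "/dev/tcp" for TCP).
 */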
int
so_socket(int domain, int type, int protocol, char *devpath, int version)
{
	vnode_t *accessvp;
	struct sonode *so;
	vnode_t *vp;
	struct file *fp;
	int fd;
	int error;
	boolean_t wildcard = B_FALSE;
	int saved_error = 0;
	int sdomain = domain;

	dprint(1, ("so_socket(%d,%d,%d,%p,%d)\n",
	    domain, type, protocol, devpath, version));

	if (domain == AF_NCA) {
		/*
		 * The request is for an NCA socket, so for NL7C use the
		 * AF_INET domain instead and mark NL7C_AF_NCA below.
		 */
		domain = AF_INET;
		/*
		 * NL7C is not supported in non-global zones; we enforce
		 * this restriction here.
		 */
		if (getzoneid() != GLOBAL_ZONEID) {
			return (set_errno(ENOTSUP));
		}
	}

	accessvp = solookup(domain, type, protocol, devpath, &error);
	if (accessvp == NULL) {
		/*
		 * If there is either an EPROTONOSUPPORT or EPROTOTYPE error
		 * it makes sense doing the wildcard lookup since the
		 * protocol might not be in the table.
		 */
		if (devpath != NULL || protocol == 0 ||
		    !(error == EPROTONOSUPPORT || error == EPROTOTYPE))
			return (set_errno(error));

		saved_error = error;

		/*
		 * Try wildcard lookup. Never use devpath for wildcards.
		 */
		accessvp = solookup(domain, type, 0, NULL, &error);
		if (accessvp == NULL) {
			/*
			 * Can't find in kernel table - have library
			 * fall back to /etc/netconfig and tell us
			 * the devpath (The library will do this if it didn't
			 * already pass in a devpath).
			 */
			if (saved_error != 0)
				error = saved_error;
			return (set_errno(error));
		}
		wildcard = B_TRUE;
	}

	/* Check the device policy */
	if ((error = secpolicy_spec_open(CRED(),
	    accessvp, FREAD|FWRITE)) != 0) {
		return (set_errno(error));
	}

	if (protocol == IPPROTO_SCTP) {
		so = sosctp_create(accessvp, domain, type, protocol, version,
		    NULL, &error);
	} else if (protocol == PROTO_SDP) {
		so = sosdp_create(accessvp, domain, type, protocol, version,
		    NULL, &error);
	} else {
		so = sotpi_create(accessvp, domain, type, protocol, version,
		    NULL, &error);
	}
	if (so == NULL) {
		return (set_errno(error));
	}
	if (sdomain == AF_NCA && domain == AF_INET) {
		so->so_nl7c_flags = NL7C_AF_NCA;
	}
	vp = SOTOV(so);

	if (wildcard) {
		/*
		 * Issue SO_PROTOTYPE setsockopt.
		 */
		error = SOP_SETSOCKOPT(so, SOL_SOCKET, SO_PROTOTYPE,
		    &protocol,
		    (t_uscalar_t)sizeof (protocol));
		if (error) {
			(void) VOP_CLOSE(vp, 0, 1, 0, CRED());
			VN_RELE(vp);
			/*
			 * Setsockopt often fails with ENOPROTOOPT but socket()
			 * should fail with EPROTONOSUPPORT/EPROTOTYPE.
			 */
			if (saved_error != 0 && error == ENOPROTOOPT)
				error = saved_error;
			else
				error = EPROTONOSUPPORT;
			return (set_errno(error));
		}
	}
	if (error = falloc(vp, FWRITE|FREAD, &fp, &fd)) {
		(void) VOP_CLOSE(vp, 0, 1, 0, CRED());
		VN_RELE(vp);
		return (set_errno(error));
	}

	/*
	 * Now fill in the entries that falloc reserved
	 */
	mutex_exit(&fp->f_tlock);
	setf(fd, fp);

	return (fd);
}

/*
 * Map from a file descriptor to a socket node.
 * Returns with the file descriptor held, i.e., the caller has to
 * use releasef() when done with the file descriptor.
 */
struct sonode *
getsonode(int sock, int *errorp, file_t **fpp)
{
	file_t *fp;
	vnode_t *vp;
	struct sonode *so;

	if ((fp = getf(sock)) == NULL) {
		*errorp = EBADF;
		eprintline(*errorp);
		return (NULL);
	}
	vp = fp->f_vnode;
	/* Check if it is a socket */
	if (vp->v_type != VSOCK) {
		releasef(sock);
		*errorp = ENOTSOCK;
		eprintline(*errorp);
		return (NULL);
	}
	/*
	 * Use the stream head to find the real socket vnode.
	 * This is needed when namefs sits above sockfs.
	 */
	if (vp->v_stream) {
		ASSERT(vp->v_stream->sd_vnode);
		vp = vp->v_stream->sd_vnode;

		so = VTOSO(vp);
		if (so->so_version == SOV_STREAM) {
			releasef(sock);
			*errorp = ENOTSOCK;
			eprintsoline(so, *errorp);
			return (NULL);
		}
	} else {
		so = VTOSO(vp);
	}
	if (fpp)
		*fpp = fp;
	return (so);
}
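
/*
 * For illustration (sketch only, mirroring how connect(), accept() and
 * the recvit()/sendit() helpers below use it): callers follow the pattern
 *
 *	if ((so = getsonode(sock, &error, &fp)) == NULL)
 *		return (set_errno(error));
 *	... operate on so (and fp->f_flag) ...
 *	releasef(sock);
 */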

/*
 * Allocate and copyin a sockaddr.
 * Ensures NULL termination for AF_UNIX addresses by extending them
 * with one NULL byte if need be. Verifies that the length is not
 * excessive to prevent an application from consuming all of kernel
 * memory. Returns NULL when an error occurs.
 */
static struct sockaddr *
copyin_name(struct sonode *so, struct sockaddr *name, socklen_t *namelenp,
	    int *errorp)
{
	char	*faddr;
	size_t	namelen = (size_t)*namelenp;

	ASSERT(namelen != 0);
	if (namelen > SO_MAXARGSIZE) {
		*errorp = EINVAL;
		eprintsoline(so, *errorp);
		return (NULL);
	}

	faddr = (char *)kmem_alloc(namelen, KM_SLEEP);
	if (copyin(name, faddr, namelen)) {
		kmem_free(faddr, namelen);
		*errorp = EFAULT;
		eprintsoline(so, *errorp);
		return (NULL);
	}

	/*
	 * Add space for NULL termination if needed.
	 * Do a quick check if the last byte is NUL.
	 */
	if (so->so_family == AF_UNIX && faddr[namelen - 1] != '\0') {
		/* Check if there is any NULL termination */
		size_t	i;
		int foundnull = 0;

		for (i = sizeof (name->sa_family); i < namelen; i++) {
			if (faddr[i] == '\0') {
				foundnull = 1;
				break;
			}
		}
		if (!foundnull) {
			/* Add extra byte for NUL padding */
			char *nfaddr;

			nfaddr = (char *)kmem_alloc(namelen + 1, KM_SLEEP);
			bcopy(faddr, nfaddr, namelen);
			kmem_free(faddr, namelen);

			/* NUL terminate */
			nfaddr[namelen] = '\0';
			namelen++;
			ASSERT((socklen_t)namelen == namelen);
			*namelenp = (socklen_t)namelen;
			faddr = nfaddr;
		}
	}
	return ((struct sockaddr *)faddr);
}
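
/*
 * Illustrative example (not from the original source): an AF_UNIX bind
 * or connect that passes { sun_family = AF_UNIX, sun_path = "/tmp/s" }
 * with namelen == 8 and no terminating NUL is copied into a 9-byte
 * kernel buffer here, with the extra byte set to '\0' and *namelenp
 * updated to 9.
 */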

/*
 * Copy from kaddr/klen to uaddr/ulen. Updates ulenp if non-NULL.
 */
static int
copyout_arg(void *uaddr, socklen_t ulen, void *ulenp,
		void *kaddr, socklen_t klen)
{
	if (uaddr != NULL) {
		if (ulen > klen)
			ulen = klen;

		if (ulen != 0) {
			if (copyout(kaddr, uaddr, ulen))
				return (EFAULT);
		}
	} else
		ulen = 0;

	if (ulenp != NULL) {
		if (copyout(&ulen, ulenp, sizeof (ulen)))
			return (EFAULT);
	}
	return (0);
}

/*
 * Copy from kaddr/klen to uaddr/ulen. Updates ulenp if non-NULL.
 * If klen is greater than ulen it still uses the non-truncated
 * klen to update ulenp.
 */
static int
copyout_name(void *uaddr, socklen_t ulen, void *ulenp,
		void *kaddr, socklen_t klen)
{
	if (uaddr != NULL) {
		if (ulen >= klen)
			ulen = klen;
		else if (ulen != 0 && xnet_truncate_print) {
			printf("sockfs: truncating copyout of address using "
			    "XNET semantics for pid = %d. Lengths %d, %d\n",
			    curproc->p_pid, klen, ulen);
		}

		if (ulen != 0) {
			if (copyout(kaddr, uaddr, ulen))
				return (EFAULT);
		} else
			klen = 0;
	} else
		klen = 0;

	if (ulenp != NULL) {
		if (copyout(&klen, ulenp, sizeof (klen)))
			return (EFAULT);
	}
	return (0);
}
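
/*
 * For illustration (sketch only): with a struct sockaddr_in peer address
 * (klen == 16) and a caller-supplied buffer of ulen == 8, copyout_name()
 * copies only the first 8 bytes but still writes 16 back through ulenp,
 * which is how a caller detects that the address was truncated under
 * XNET semantics.
 */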

/*
 * The socketpair() code in libsocket creates two sockets (using
 * the /etc/netconfig fallback if needed) before calling this routine
 * to connect the two sockets together.
 *
 * For a SOCK_STREAM socketpair a listener is needed - in that case this
 * routine will create a new file descriptor as part of accepting the
 * connection. The library socketpair() will check if svs[2] has changed
 * in which case it will close the changed fd.
 *
 * Note that this code could use the TPI feature of accepting the connection
 * on the listening endpoint. However, that would require significant changes
 * to soaccept.
 */
int
so_socketpair(int sv[2])
{
	int svs[2];
	struct sonode *so1, *so2;
	int error;
	struct sockaddr_ux *name;
	size_t namelen;

	dprint(1, ("so_socketpair(%p)\n", sv));

	error = useracc(sv, sizeof (svs), B_WRITE);
	if (error && do_useracc)
		return (set_errno(EFAULT));

	if (copyin(sv, svs, sizeof (svs)))
		return (set_errno(EFAULT));

	if ((so1 = getsonode(svs[0], &error, NULL)) == NULL)
		return (set_errno(error));

	if ((so2 = getsonode(svs[1], &error, NULL)) == NULL) {
		releasef(svs[0]);
		return (set_errno(error));
	}

	if (so1->so_family != AF_UNIX || so2->so_family != AF_UNIX) {
		error = EOPNOTSUPP;
		goto done;
	}

	/*
	 * The code below makes assumptions about the "sockfs" implementation.
	 * So make sure that the correct implementation is really used.
	 */
	ASSERT(so1->so_ops == &sotpi_sonodeops);
	ASSERT(so2->so_ops == &sotpi_sonodeops);

	if (so1->so_type == SOCK_DGRAM) {
		/*
		 * Bind both sockets and connect them with each other.
		 * Need to allocate name/namelen for soconnect.
		 */
		error = SOP_BIND(so1, NULL, 0, _SOBIND_UNSPEC);
		if (error) {
			eprintsoline(so1, error);
			goto done;
		}
		error = SOP_BIND(so2, NULL, 0, _SOBIND_UNSPEC);
		if (error) {
			eprintsoline(so2, error);
			goto done;
		}
		namelen = sizeof (struct sockaddr_ux);
		name = kmem_alloc(namelen, KM_SLEEP);
		name->sou_family = AF_UNIX;
		name->sou_addr = so2->so_ux_laddr;
		error = SOP_CONNECT(so1,
		    (struct sockaddr *)name,
		    (socklen_t)namelen,
		    0, _SOCONNECT_NOXLATE);
		if (error) {
			kmem_free(name, namelen);
			eprintsoline(so1, error);
			goto done;
		}
		name->sou_addr = so1->so_ux_laddr;
		error = SOP_CONNECT(so2,
		    (struct sockaddr *)name,
		    (socklen_t)namelen,
		    0, _SOCONNECT_NOXLATE);
		kmem_free(name, namelen);
		if (error) {
			eprintsoline(so2, error);
			goto done;
		}
		releasef(svs[0]);
		releasef(svs[1]);
	} else {
		/*
		 * Bind both sockets, with so1 being a listener.
		 * Connect so2 to so1 - nonblocking to avoid waiting for
		 * soaccept to complete.
		 * Accept a connection on so1. Pass out the new fd as sv[0].
		 * The library will detect the changed fd and close
		 * the original one.
		 */
		struct sonode *nso;
		struct vnode *nvp;
		struct file *nfp;
		int nfd;

		/*
		 * We could simply call SOP_LISTEN() here (which would do the
		 * binding automatically) if the code didn't rely on passing
		 * _SOBIND_NOXLATE to the TPI implementation of SOP_BIND().
		 */
		error = SOP_BIND(so1, NULL, 0, _SOBIND_UNSPEC|_SOBIND_NOXLATE|
		    _SOBIND_LISTEN|_SOBIND_SOCKETPAIR);
		if (error) {
			eprintsoline(so1, error);
			goto done;
		}
		error = SOP_BIND(so2, NULL, 0, _SOBIND_UNSPEC);
		if (error) {
			eprintsoline(so2, error);
			goto done;
		}

		namelen = sizeof (struct sockaddr_ux);
		name = kmem_alloc(namelen, KM_SLEEP);
		name->sou_family = AF_UNIX;
		name->sou_addr = so1->so_ux_laddr;
		error = SOP_CONNECT(so2,
		    (struct sockaddr *)name,
		    (socklen_t)namelen,
		    FNONBLOCK, _SOCONNECT_NOXLATE);
		kmem_free(name, namelen);
		if (error) {
			if (error != EINPROGRESS) {
				eprintsoline(so2, error);
				goto done;
			}
		}

		error = SOP_ACCEPT(so1, 0, &nso);
		if (error) {
			eprintsoline(so1, error);
			goto done;
		}

		/* wait for so2 being SS_CONNECTED ignoring signals */
		mutex_enter(&so2->so_lock);
		error = sowaitconnected(so2, 0, 1);
		mutex_exit(&so2->so_lock);
		nvp = SOTOV(nso);
		if (error != 0) {
			(void) VOP_CLOSE(nvp, 0, 1, 0, CRED());
			VN_RELE(nvp);
			eprintsoline(so2, error);
			goto done;
		}

		if (error = falloc(nvp, FWRITE|FREAD, &nfp, &nfd)) {
			(void) VOP_CLOSE(nvp, 0, 1, 0, CRED());
			VN_RELE(nvp);
			eprintsoline(nso, error);
			goto done;
		}
		/*
		 * fill in the entries that falloc reserved
		 */
		mutex_exit(&nfp->f_tlock);
		setf(nfd, nfp);

		releasef(svs[0]);
		releasef(svs[1]);
		svs[0] = nfd;

		/*
		 * The socketpair library routine will close the original
		 * svs[0] when this code passes out a different file
		 * descriptor.
		 */
		if (copyout(svs, sv, sizeof (svs))) {
			(void) closeandsetf(nfd, NULL);
			eprintline(EFAULT);
			return (set_errno(EFAULT));
		}
	}
	return (0);

done:
	releasef(svs[0]);
	releasef(svs[1]);
	return (set_errno(error));
}

int
bind(int sock, struct sockaddr *name, socklen_t namelen, int version)
{
	struct sonode *so;
	int error;

	dprint(1, ("bind(%d, %p, %d)\n",
	    sock, name, namelen));

	if ((so = getsonode(sock, &error, NULL)) == NULL)
		return (set_errno(error));

	/* Allocate and copyin name */
	/*
	 * X/Open test does not expect EFAULT with NULL name and non-zero
	 * namelen.
	 */
	if (name != NULL && namelen != 0) {
		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
		name = copyin_name(so, name, &namelen, &error);
		if (name == NULL) {
			releasef(sock);
			return (set_errno(error));
		}
	} else {
		name = NULL;
		namelen = 0;
	}

	switch (version) {
	default:
		error = SOP_BIND(so, name, namelen, 0);
		break;
	case SOV_XPG4_2:
		error = SOP_BIND(so, name, namelen, _SOBIND_XPG4_2);
		break;
	case SOV_SOCKBSD:
		error = SOP_BIND(so, name, namelen, _SOBIND_SOCKBSD);
		break;
	}
done:
	releasef(sock);
	if (name != NULL)
		kmem_free(name, (size_t)namelen);

	if (error)
		return (set_errno(error));
	return (0);
}

/* ARGSUSED2 */
int
listen(int sock, int backlog, int version)
{
	struct sonode *so;
	int error;

	dprint(1, ("listen(%d, %d)\n",
	    sock, backlog));

	if ((so = getsonode(sock, &error, NULL)) == NULL)
		return (set_errno(error));

	error = SOP_LISTEN(so, backlog);

	releasef(sock);
	if (error)
		return (set_errno(error));
	return (0);
}

/*ARGSUSED3*/
int
accept(int sock, struct sockaddr *name, socklen_t *namelenp, int version)
{
	struct sonode *so;
	file_t *fp;
	int error;
	socklen_t namelen;
	struct sonode *nso;
	struct vnode *nvp;
	struct file *nfp;
	int nfd;

	dprint(1, ("accept(%d, %p, %p)\n",
	    sock, name, namelenp));

	if ((so = getsonode(sock, &error, &fp)) == NULL)
		return (set_errno(error));

	if (name != NULL) {
		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
		if (copyin(namelenp, &namelen, sizeof (namelen))) {
			releasef(sock);
			return (set_errno(EFAULT));
		}
		if (namelen != 0) {
			error = useracc(name, (size_t)namelen, B_WRITE);
			if (error && do_useracc) {
				releasef(sock);
				return (set_errno(EFAULT));
			}
		} else
			name = NULL;
	} else {
		namelen = 0;
	}

	/*
	 * Allocate the user fd before SOP_ACCEPT() in order to
	 * catch EMFILE errors before calling SOP_ACCEPT().
	 */
	if ((nfd = ufalloc(0)) == -1) {
		eprintsoline(so, EMFILE);
		releasef(sock);
		return (set_errno(EMFILE));
	}
	error = SOP_ACCEPT(so, fp->f_flag, &nso);
	releasef(sock);
	if (error) {
		setf(nfd, NULL);
		return (set_errno(error));
	}

	nvp = SOTOV(nso);

	/*
	 * so_faddr_sa cannot go away even though we are not holding so_lock.
	 * However, in theory its content could change from underneath us.
	 * But this is not possible in practice since it can only
	 * change due to either some socket system call
	 * or due to a T_CONN_CON being received from the stream head.
	 * Since the falloc/setf have not yet been done, no thread
	 * can do any system call on nso and a T_CONN_CON cannot arrive
	 * on a socket that is already connected.
	 * Thus there is no reason to hold so_lock here.
	 *
	 * SOP_ACCEPT() is required to have set the valid bit for the faddr,
	 * but it could be instantly cleared by a disconnect from the transport.
	 * For that reason we ignore it here.
	 */
	ASSERT(MUTEX_NOT_HELD(&nso->so_lock));
	error = copyout_name(name, namelen, namelenp,
	    nso->so_faddr_sa, (socklen_t)nso->so_faddr_len);
	if (error) {
		setf(nfd, NULL);
		(void) VOP_CLOSE(nvp, 0, 1, 0, CRED());
		VN_RELE(nvp);
		return (set_errno(error));
	}
	if (error = falloc(NULL, FWRITE|FREAD, &nfp, NULL)) {
		setf(nfd, NULL);
		(void) VOP_CLOSE(nvp, 0, 1, 0, CRED());
		VN_RELE(nvp);
		eprintsoline(so, error);
		return (set_errno(error));
	}
	/*
	 * fill in the entries that falloc reserved
	 */
	nfp->f_vnode = nvp;
	mutex_exit(&nfp->f_tlock);
	setf(nfd, nfp);

	/*
	 * Copy FNDELAY and FNONBLOCK from listener to acceptor
	 */
	if (so->so_state & (SS_NDELAY|SS_NONBLOCK)) {
		uint_t oflag = nfp->f_flag;
		int arg = 0;

		if (so->so_state & SS_NONBLOCK)
			arg |= FNONBLOCK;
		else if (so->so_state & SS_NDELAY)
			arg |= FNDELAY;

		/*
		 * This code is a simplification of the F_SETFL code in fcntl().
		 * Ignore any errors from VOP_SETFL.
		 */
		if ((error = VOP_SETFL(nvp, oflag, arg, nfp->f_cred)) != 0) {
			eprintsoline(so, error);
			error = 0;
		} else {
			mutex_enter(&nfp->f_tlock);
			nfp->f_flag &= ~FMASK | (FREAD|FWRITE);
			nfp->f_flag |= arg;
			mutex_exit(&nfp->f_tlock);
		}
	}
	return (nfd);
}
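
/*
 * Illustrative note (sketch, not part of the original code): if the
 * listening socket is in non-blocking mode (SS_NONBLOCK set, e.g. after
 * an O_NONBLOCK fcntl on the listener), the code above makes the accepted
 * descriptor start out with FNONBLOCK set as well, so it inherits the
 * listener's non-blocking behavior.
 */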

int
connect(int sock, struct sockaddr *name, socklen_t namelen, int version)
{
	struct sonode *so;
	file_t *fp;
	int error;

	dprint(1, ("connect(%d, %p, %d)\n",
	    sock, name, namelen));

	if ((so = getsonode(sock, &error, &fp)) == NULL)
		return (set_errno(error));

	/* Allocate and copyin name */
	if (namelen != 0) {
		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
		name = copyin_name(so, name, &namelen, &error);
		if (name == NULL) {
			releasef(sock);
			return (set_errno(error));
		}
	} else
		name = NULL;

	error = SOP_CONNECT(so, name, namelen, fp->f_flag,
	    (version != SOV_XPG4_2) ? 0 : _SOCONNECT_XPG4_2);
	releasef(sock);
	if (name)
		kmem_free(name, (size_t)namelen);
	if (error)
		return (set_errno(error));
	return (0);
}

/*ARGSUSED2*/
int
shutdown(int sock, int how, int version)
{
	struct sonode *so;
	int error;

	dprint(1, ("shutdown(%d, %d)\n",
	    sock, how));

	if ((so = getsonode(sock, &error, NULL)) == NULL)
		return (set_errno(error));

	error = SOP_SHUTDOWN(so, how);

	releasef(sock);
	if (error)
		return (set_errno(error));
	return (0);
}

/*
 * Common receive routine.
 */
static ssize_t
recvit(int sock,
	struct nmsghdr *msg,
	struct uio *uiop,
	int flags,
	socklen_t *namelenp,
	socklen_t *controllenp,
	int *flagsp)
{
	struct sonode *so;
	file_t *fp;
	void *name;
	socklen_t namelen;
	void *control;
	socklen_t controllen;
	ssize_t len;
	int error;

	if ((so = getsonode(sock, &error, &fp)) == NULL)
		return (set_errno(error));

	len = uiop->uio_resid;
	uiop->uio_fmode = fp->f_flag;
	uiop->uio_extflg = UIO_COPY_CACHED;

	name = msg->msg_name;
	namelen = msg->msg_namelen;
	control = msg->msg_control;
	controllen = msg->msg_controllen;

	msg->msg_flags = flags & (MSG_OOB | MSG_PEEK | MSG_WAITALL |
	    MSG_DONTWAIT | MSG_XPG4_2);

	error = SOP_RECVMSG(so, msg, uiop);
	if (error) {
		releasef(sock);
		return (set_errno(error));
	}
	lwp_stat_update(LWP_STAT_MSGRCV, 1);
	so_update_attrs(so, SOACC);
	releasef(sock);

	error = copyout_name(name, namelen, namelenp,
	    msg->msg_name, msg->msg_namelen);
	if (error)
		goto err;

	if (flagsp != NULL) {
		/*
		 * Clear internal flag.
		 */
		msg->msg_flags &= ~MSG_XPG4_2;

		/*
		 * Determine MSG_CTRUNC. sorecvmsg sets MSG_CTRUNC only
		 * when controllen is zero and there is control data to
		 * copy out.
		 */
		if (controllen != 0 &&
		    (msg->msg_controllen > controllen || control == NULL)) {
			dprint(1, ("recvit: CTRUNC %d %d %p\n",
			    msg->msg_controllen, controllen, control));

			msg->msg_flags |= MSG_CTRUNC;
		}
		if (copyout(&msg->msg_flags, flagsp,
		    sizeof (msg->msg_flags))) {
			error = EFAULT;
			goto err;
		}
	}
	/*
	 * Note: This MUST be done last. There can be no "goto err" after this
	 * point since it could make so_closefds run twice on some part
	 * of the file descriptor array.
	 */
	if (controllen != 0) {
		if (!(flags & MSG_XPG4_2)) {
			/*
			 * Good old msg_accrights can only return a multiple
			 * of 4 bytes.
			 */
			controllen &= ~((int)sizeof (uint32_t) - 1);
		}
		error = copyout_arg(control, controllen, controllenp,
		    msg->msg_control, msg->msg_controllen);
		if (error)
			goto err;

		if (msg->msg_controllen > controllen || control == NULL) {
			if (control == NULL)
				controllen = 0;
			so_closefds(msg->msg_control, msg->msg_controllen,
			    !(flags & MSG_XPG4_2), controllen);
		}
	}
	if (msg->msg_namelen != 0)
		kmem_free(msg->msg_name, (size_t)msg->msg_namelen);
	if (msg->msg_controllen != 0)
		kmem_free(msg->msg_control, (size_t)msg->msg_controllen);
	return (len - uiop->uio_resid);

err:
	/*
	 * If we fail and the control part contains file descriptors
	 * we have to close the fd's.
	 */
	if (msg->msg_controllen != 0)
		so_closefds(msg->msg_control, msg->msg_controllen,
		    !(flags & MSG_XPG4_2), 0);
	if (msg->msg_namelen != 0)
		kmem_free(msg->msg_name, (size_t)msg->msg_namelen);
	if (msg->msg_controllen != 0)
		kmem_free(msg->msg_control, (size_t)msg->msg_controllen);
	return (set_errno(error));
}

/*
 * Native system call
 */
ssize_t
recv(int sock, void *buffer, size_t len, int flags)
{
	struct nmsghdr lmsg;
	struct uio auio;
	struct iovec aiov[1];

	dprint(1, ("recv(%d, %p, %ld, %d)\n",
	    sock, buffer, len, flags));

	if ((ssize_t)len < 0) {
		return (set_errno(EINVAL));
	}

	aiov[0].iov_base = buffer;
	aiov[0].iov_len = len;
	auio.uio_loffset = 0;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_limit = 0;

	lmsg.msg_namelen = 0;
	lmsg.msg_controllen = 0;
	lmsg.msg_flags = 0;
	return (recvit(sock, &lmsg, &auio, flags, NULL, NULL, NULL));
}

ssize_t
recvfrom(int sock, void *buffer, size_t len, int flags,
	struct sockaddr *name, socklen_t *namelenp)
{
	struct nmsghdr lmsg;
	struct uio auio;
	struct iovec aiov[1];

	dprint(1, ("recvfrom(%d, %p, %ld, %d, %p, %p)\n",
	    sock, buffer, len, flags, name, namelenp));

	if ((ssize_t)len < 0) {
		return (set_errno(EINVAL));
	}

	aiov[0].iov_base = buffer;
	aiov[0].iov_len = len;
	auio.uio_loffset = 0;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_limit = 0;

	lmsg.msg_name = (char *)name;
	if (namelenp != NULL) {
		if (copyin(namelenp, &lmsg.msg_namelen,
		    sizeof (lmsg.msg_namelen)))
			return (set_errno(EFAULT));
	} else {
		lmsg.msg_namelen = 0;
	}
	lmsg.msg_controllen = 0;
	lmsg.msg_flags = 0;

	return (recvit(sock, &lmsg, &auio, flags, namelenp, NULL, NULL));
}

/*
 * Uses the MSG_XPG4_2 flag to determine if the caller is using
 * struct omsghdr or struct nmsghdr.
 */
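/*
 * Background sketch (assumed layout, not spelled out in this file): the
 * older struct omsghdr carries msg_accrights/msg_accrightslen in the
 * slots that struct nmsghdr uses for msg_control/msg_controllen, and
 * nmsghdr simply appends msg_flags at the end. That is why the
 * non-XPG4_2 path below copies in only SIZEOF_STRUCT(omsghdr, model)
 * bytes and then zeroes msg_flags.
 */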
ssize_t
recvmsg(int sock, struct nmsghdr *msg, int flags)
{
	STRUCT_DECL(nmsghdr, u_lmsg);
	STRUCT_HANDLE(nmsghdr, umsgptr);
	struct nmsghdr lmsg;
	struct uio auio;
	struct iovec aiov[MSG_MAXIOVLEN];
	int iovcnt;
	ssize_t len;
	int i;
	int *flagsp;
	model_t	model;

	dprint(1, ("recvmsg(%d, %p, %d)\n",
	    sock, msg, flags));

	model = get_udatamodel();
	STRUCT_INIT(u_lmsg, model);
	STRUCT_SET_HANDLE(umsgptr, model, msg);

	if (flags & MSG_XPG4_2) {
		if (copyin(msg, STRUCT_BUF(u_lmsg), STRUCT_SIZE(u_lmsg)))
			return (set_errno(EFAULT));
		flagsp = STRUCT_FADDR(umsgptr, msg_flags);
	} else {
		/*
		 * Assumes that nmsghdr and omsghdr are identically shaped
		 * except for the added msg_flags field.
		 */
		if (copyin(msg, STRUCT_BUF(u_lmsg),
		    SIZEOF_STRUCT(omsghdr, model)))
			return (set_errno(EFAULT));
		STRUCT_FSET(u_lmsg, msg_flags, 0);
		flagsp = NULL;
	}

	/*
	 * Code below us will kmem_alloc memory and hang it
	 * off msg_control and msg_name fields. This forces
	 * us to copy the structure to its native form.
	 */
	lmsg.msg_name = STRUCT_FGETP(u_lmsg, msg_name);
	lmsg.msg_namelen = STRUCT_FGET(u_lmsg, msg_namelen);
	lmsg.msg_iov = STRUCT_FGETP(u_lmsg, msg_iov);
	lmsg.msg_iovlen = STRUCT_FGET(u_lmsg, msg_iovlen);
	lmsg.msg_control = STRUCT_FGETP(u_lmsg, msg_control);
	lmsg.msg_controllen = STRUCT_FGET(u_lmsg, msg_controllen);
	lmsg.msg_flags = STRUCT_FGET(u_lmsg, msg_flags);

	iovcnt = lmsg.msg_iovlen;

	if (iovcnt <= 0 || iovcnt > MSG_MAXIOVLEN) {
		return (set_errno(EMSGSIZE));
	}

#ifdef _SYSCALL32_IMPL
	/*
	 * 32-bit callers need to have their iovec expanded, while ensuring
	 * that they can't move more than 2Gbytes of data in a single call.
	 */
	if (model == DATAMODEL_ILP32) {
		struct iovec32 aiov32[MSG_MAXIOVLEN];
		ssize32_t count32;

		if (copyin((struct iovec32 *)lmsg.msg_iov, aiov32,
		    iovcnt * sizeof (struct iovec32)))
			return (set_errno(EFAULT));

		count32 = 0;
		for (i = 0; i < iovcnt; i++) {
			ssize32_t iovlen32;

			iovlen32 = aiov32[i].iov_len;
			count32 += iovlen32;
			if (iovlen32 < 0 || count32 < 0)
				return (set_errno(EINVAL));
			aiov[i].iov_len = iovlen32;
			aiov[i].iov_base =
			    (caddr_t)(uintptr_t)aiov32[i].iov_base;
		}
	} else
#endif /* _SYSCALL32_IMPL */
	if (copyin(lmsg.msg_iov, aiov, iovcnt * sizeof (struct iovec))) {
		return (set_errno(EFAULT));
	}
	len = 0;
	for (i = 0; i < iovcnt; i++) {
		ssize_t iovlen = aiov[i].iov_len;
		len += iovlen;
		if (iovlen < 0 || len < 0) {
			return (set_errno(EINVAL));
		}
	}
	auio.uio_loffset = 0;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_limit = 0;

	if (lmsg.msg_control != NULL &&
	    (do_useracc == 0 ||
	    useracc(lmsg.msg_control, lmsg.msg_controllen,
	    B_WRITE) != 0)) {
		return (set_errno(EFAULT));
	}

	return (recvit(sock, &lmsg, &auio, flags,
	    STRUCT_FADDR(umsgptr, msg_namelen),
	    STRUCT_FADDR(umsgptr, msg_controllen), flagsp));
}

/*
 * Common send function.
 */
static ssize_t
sendit(int sock, struct nmsghdr *msg, struct uio *uiop, int flags)
{
	struct sonode *so;
	file_t *fp;
	void *name;
	socklen_t namelen;
	void *control;
	socklen_t controllen;
	ssize_t len;
	int error;

	if ((so = getsonode(sock, &error, &fp)) == NULL)
		return (set_errno(error));

	uiop->uio_fmode = fp->f_flag;

	if (so->so_family == AF_UNIX)
		uiop->uio_extflg = UIO_COPY_CACHED;
	else
		uiop->uio_extflg = UIO_COPY_DEFAULT;

	/* Allocate and copyin name and control */
	name = msg->msg_name;
	namelen = msg->msg_namelen;
	if (name != NULL && namelen != 0) {
		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
		name = copyin_name(so,
		    (struct sockaddr *)name,
		    &namelen, &error);
		if (name == NULL)
			goto done3;
		/* copyin_name null terminates addresses for AF_UNIX */
		msg->msg_namelen = namelen;
		msg->msg_name = name;
	} else {
		msg->msg_name = name = NULL;
		msg->msg_namelen = namelen = 0;
	}

	control = msg->msg_control;
	controllen = msg->msg_controllen;
	if ((control != NULL) && (controllen != 0)) {
		/*
		 * Verify that the length is not excessive to prevent
		 * an application from consuming all of kernel memory.
		 */
		if (controllen > SO_MAXARGSIZE) {
			error = EINVAL;
			goto done2;
		}
		control = kmem_alloc(controllen, KM_SLEEP);

		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
		if (copyin(msg->msg_control, control, controllen)) {
			error = EFAULT;
			goto done1;
		}
		msg->msg_control = control;
	} else {
		msg->msg_control = control = NULL;
		msg->msg_controllen = controllen = 0;
	}

	len = uiop->uio_resid;
	msg->msg_flags = flags;

	error = SOP_SENDMSG(so, msg, uiop);
done1:
	if (control != NULL)
		kmem_free(control, controllen);
done2:
	if (name != NULL)
		kmem_free(name, namelen);
done3:
	if (error != 0) {
		releasef(sock);
		return (set_errno(error));
	}
	lwp_stat_update(LWP_STAT_MSGSND, 1);
	so_update_attrs(so, SOMOD);
	releasef(sock);
	return (len - uiop->uio_resid);
}
12190Sstevel@tonic-gate 
12200Sstevel@tonic-gate /*
12210Sstevel@tonic-gate  * Native system call
12220Sstevel@tonic-gate  */
12230Sstevel@tonic-gate ssize_t
12240Sstevel@tonic-gate send(int sock, void *buffer, size_t len, int flags)
12250Sstevel@tonic-gate {
12260Sstevel@tonic-gate 	struct nmsghdr lmsg;
12270Sstevel@tonic-gate 	struct uio auio;
12280Sstevel@tonic-gate 	struct iovec aiov[1];
12290Sstevel@tonic-gate 
12300Sstevel@tonic-gate 	dprint(1, ("send(%d, %p, %ld, %d)\n",
1231*5227Stz204579 	    sock, buffer, len, flags));
12320Sstevel@tonic-gate 
12330Sstevel@tonic-gate 	if ((ssize_t)len < 0) {
12340Sstevel@tonic-gate 		return (set_errno(EINVAL));
12350Sstevel@tonic-gate 	}
12360Sstevel@tonic-gate 
12370Sstevel@tonic-gate 	aiov[0].iov_base = buffer;
12380Sstevel@tonic-gate 	aiov[0].iov_len = len;
12390Sstevel@tonic-gate 	auio.uio_loffset = 0;
12400Sstevel@tonic-gate 	auio.uio_iov = aiov;
12410Sstevel@tonic-gate 	auio.uio_iovcnt = 1;
12420Sstevel@tonic-gate 	auio.uio_resid = len;
12430Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
12440Sstevel@tonic-gate 	auio.uio_limit = 0;
12450Sstevel@tonic-gate 
12460Sstevel@tonic-gate 	lmsg.msg_name = NULL;
12470Sstevel@tonic-gate 	lmsg.msg_control = NULL;
12480Sstevel@tonic-gate 	if (!(flags & MSG_XPG4_2)) {
12490Sstevel@tonic-gate 		/*
12500Sstevel@tonic-gate 		 * In order to be compatible with the libsocket/sockmod
12510Sstevel@tonic-gate 		 * implementation we set EOR for all send* calls.
12520Sstevel@tonic-gate 		 */
12530Sstevel@tonic-gate 		flags |= MSG_EOR;
12540Sstevel@tonic-gate 	}
12550Sstevel@tonic-gate 	return (sendit(sock, &lmsg, &auio, flags));
12560Sstevel@tonic-gate }
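
/*
 * A minimal userland sketch of how the send() entry point above is
 * typically reached: a connected TCP stream socket and a plain send()
 * with no flags.  The 127.0.0.1:8080 endpoint is a hypothetical example
 * and error handling is abbreviated.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fd;
	struct sockaddr_in sin;
	const char *msg = "hello over a connected stream socket\n";

	if ((fd = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
		perror("socket");
		return (1);
	}
	(void) memset(&sin, 0, sizeof (sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(8080);
	(void) inet_pton(AF_INET, "127.0.0.1", &sin.sin_addr);
	if (connect(fd, (struct sockaddr *)&sin, sizeof (sin)) == -1) {
		perror("connect");
		return (1);
	}
	/* send() returns the number of bytes queued, or -1 on error. */
	if (send(fd, msg, strlen(msg), 0) == -1)
		perror("send");
	(void) close(fd);
	return (0);
}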
12570Sstevel@tonic-gate 
12580Sstevel@tonic-gate /*
12590Sstevel@tonic-gate  * Uses the MSG_XPG4_2 flag to determine if the caller is using
12600Sstevel@tonic-gate  * struct omsghdr or struct nmsghdr.
12610Sstevel@tonic-gate  */
12620Sstevel@tonic-gate ssize_t
12630Sstevel@tonic-gate sendmsg(int sock, struct nmsghdr *msg, int flags)
12640Sstevel@tonic-gate {
12650Sstevel@tonic-gate 	struct nmsghdr lmsg;
12660Sstevel@tonic-gate 	STRUCT_DECL(nmsghdr, u_lmsg);
12670Sstevel@tonic-gate 	struct uio auio;
12680Sstevel@tonic-gate 	struct iovec aiov[MSG_MAXIOVLEN];
12690Sstevel@tonic-gate 	int iovcnt;
12700Sstevel@tonic-gate 	ssize_t len;
12710Sstevel@tonic-gate 	int i;
12720Sstevel@tonic-gate 	model_t	model;
12730Sstevel@tonic-gate 
12740Sstevel@tonic-gate 	dprint(1, ("sendmsg(%d, %p, %d)\n", sock, msg, flags));
12750Sstevel@tonic-gate 
12760Sstevel@tonic-gate 	model = get_udatamodel();
12770Sstevel@tonic-gate 	STRUCT_INIT(u_lmsg, model);
12780Sstevel@tonic-gate 
12790Sstevel@tonic-gate 	if (flags & MSG_XPG4_2) {
12800Sstevel@tonic-gate 		if (copyin(msg, (char *)STRUCT_BUF(u_lmsg),
12810Sstevel@tonic-gate 		    STRUCT_SIZE(u_lmsg)))
12820Sstevel@tonic-gate 			return (set_errno(EFAULT));
12830Sstevel@tonic-gate 	} else {
12840Sstevel@tonic-gate 		/*
12850Sstevel@tonic-gate 		 * Assumes that nmsghdr and omsghdr are identically shaped
12860Sstevel@tonic-gate 		 * except for the added msg_flags field.
12870Sstevel@tonic-gate 		 */
12880Sstevel@tonic-gate 		if (copyin(msg, (char *)STRUCT_BUF(u_lmsg),
12890Sstevel@tonic-gate 		    SIZEOF_STRUCT(omsghdr, model)))
12900Sstevel@tonic-gate 			return (set_errno(EFAULT));
12910Sstevel@tonic-gate 		/*
12920Sstevel@tonic-gate 		 * In order to be compatible with the libsocket/sockmod
12930Sstevel@tonic-gate 		 * implementation we set EOR for all send* calls.
12940Sstevel@tonic-gate 		 */
12950Sstevel@tonic-gate 		flags |= MSG_EOR;
12960Sstevel@tonic-gate 	}
12970Sstevel@tonic-gate 
12980Sstevel@tonic-gate 	/*
12990Sstevel@tonic-gate 	 * Code below us will kmem_alloc memory and hang it
13000Sstevel@tonic-gate 	 * off msg_control and msg_name fields. This forces
13010Sstevel@tonic-gate 	 * us to copy the structure to its native form.
13020Sstevel@tonic-gate 	 */
13030Sstevel@tonic-gate 	lmsg.msg_name = STRUCT_FGETP(u_lmsg, msg_name);
13040Sstevel@tonic-gate 	lmsg.msg_namelen = STRUCT_FGET(u_lmsg, msg_namelen);
13050Sstevel@tonic-gate 	lmsg.msg_iov = STRUCT_FGETP(u_lmsg, msg_iov);
13060Sstevel@tonic-gate 	lmsg.msg_iovlen = STRUCT_FGET(u_lmsg, msg_iovlen);
13070Sstevel@tonic-gate 	lmsg.msg_control = STRUCT_FGETP(u_lmsg, msg_control);
13080Sstevel@tonic-gate 	lmsg.msg_controllen = STRUCT_FGET(u_lmsg, msg_controllen);
13090Sstevel@tonic-gate 	lmsg.msg_flags = STRUCT_FGET(u_lmsg, msg_flags);
13100Sstevel@tonic-gate 
13110Sstevel@tonic-gate 	iovcnt = lmsg.msg_iovlen;
13120Sstevel@tonic-gate 
13130Sstevel@tonic-gate 	if (iovcnt <= 0 || iovcnt > MSG_MAXIOVLEN) {
13140Sstevel@tonic-gate 		/*
13150Sstevel@tonic-gate 		 * Unless this is XPG 4.2 we allow iovcnt == 0 to
13160Sstevel@tonic-gate 		 * be compatible with SunOS 4.X and 4.4BSD.
13170Sstevel@tonic-gate 		 */
13180Sstevel@tonic-gate 		if (iovcnt != 0 || (flags & MSG_XPG4_2))
13190Sstevel@tonic-gate 			return (set_errno(EMSGSIZE));
13200Sstevel@tonic-gate 	}
13210Sstevel@tonic-gate 
13220Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL
13230Sstevel@tonic-gate 	/*
13240Sstevel@tonic-gate 	 * 32-bit callers need to have their iovec expanded, while ensuring
13250Sstevel@tonic-gate 	 * that they can't move more than 2Gbytes of data in a single call.
13260Sstevel@tonic-gate 	 */
13270Sstevel@tonic-gate 	if (model == DATAMODEL_ILP32) {
13280Sstevel@tonic-gate 		struct iovec32 aiov32[MSG_MAXIOVLEN];
13290Sstevel@tonic-gate 		ssize32_t count32;
13300Sstevel@tonic-gate 
13310Sstevel@tonic-gate 		if (iovcnt != 0 &&
13320Sstevel@tonic-gate 		    copyin((struct iovec32 *)lmsg.msg_iov, aiov32,
13330Sstevel@tonic-gate 		    iovcnt * sizeof (struct iovec32)))
13340Sstevel@tonic-gate 			return (set_errno(EFAULT));
13350Sstevel@tonic-gate 
13360Sstevel@tonic-gate 		count32 = 0;
13370Sstevel@tonic-gate 		for (i = 0; i < iovcnt; i++) {
13380Sstevel@tonic-gate 			ssize32_t iovlen32;
13390Sstevel@tonic-gate 
13400Sstevel@tonic-gate 			iovlen32 = aiov32[i].iov_len;
13410Sstevel@tonic-gate 			count32 += iovlen32;
13420Sstevel@tonic-gate 			if (iovlen32 < 0 || count32 < 0)
13430Sstevel@tonic-gate 				return (set_errno(EINVAL));
13440Sstevel@tonic-gate 			aiov[i].iov_len = iovlen32;
13450Sstevel@tonic-gate 			aiov[i].iov_base =
13460Sstevel@tonic-gate 			    (caddr_t)(uintptr_t)aiov32[i].iov_base;
13470Sstevel@tonic-gate 		}
13480Sstevel@tonic-gate 	} else
13490Sstevel@tonic-gate #endif /* _SYSCALL32_IMPL */
13500Sstevel@tonic-gate 	if (iovcnt != 0 &&
13510Sstevel@tonic-gate 	    copyin(lmsg.msg_iov, aiov,
13520Sstevel@tonic-gate 	    (unsigned)iovcnt * sizeof (struct iovec))) {
13530Sstevel@tonic-gate 		return (set_errno(EFAULT));
13540Sstevel@tonic-gate 	}
13550Sstevel@tonic-gate 	len = 0;
13560Sstevel@tonic-gate 	for (i = 0; i < iovcnt; i++) {
13570Sstevel@tonic-gate 		ssize_t iovlen = aiov[i].iov_len;
13580Sstevel@tonic-gate 		len += iovlen;
13590Sstevel@tonic-gate 		if (iovlen < 0 || len < 0) {
13600Sstevel@tonic-gate 			return (set_errno(EINVAL));
13610Sstevel@tonic-gate 		}
13620Sstevel@tonic-gate 	}
13630Sstevel@tonic-gate 	auio.uio_loffset = 0;
13640Sstevel@tonic-gate 	auio.uio_iov = aiov;
13650Sstevel@tonic-gate 	auio.uio_iovcnt = iovcnt;
13660Sstevel@tonic-gate 	auio.uio_resid = len;
13670Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
13680Sstevel@tonic-gate 	auio.uio_limit = 0;
13690Sstevel@tonic-gate 
13700Sstevel@tonic-gate 	return (sendit(sock, &lmsg, &auio, flags));
13710Sstevel@tonic-gate }
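
/*
 * A minimal userland sketch of sendmsg() with a two-element iovec,
 * matching the msg_iov/msg_iovlen handling above.  'fd' is assumed to
 * be a connected socket obtained elsewhere; no address or ancillary
 * data is passed.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

ssize_t
send_two_parts(int fd, const char *hdr, const char *body)
{
	struct iovec iov[2];
	struct msghdr msg;

	iov[0].iov_base = (void *)hdr;
	iov[0].iov_len = strlen(hdr);
	iov[1].iov_base = (void *)body;
	iov[1].iov_len = strlen(body);

	(void) memset(&msg, 0, sizeof (msg));
	msg.msg_iov = iov;	/* copied into aiov[] by the kernel */
	msg.msg_iovlen = 2;	/* must not exceed MSG_MAXIOVLEN */
	/* msg_name and msg_control stay NULL: no address, no control data. */

	return (sendmsg(fd, &msg, 0));
}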
13720Sstevel@tonic-gate 
13730Sstevel@tonic-gate ssize_t
13740Sstevel@tonic-gate sendto(int sock, void *buffer, size_t len, int flags,
13750Sstevel@tonic-gate     struct sockaddr *name, socklen_t namelen)
13760Sstevel@tonic-gate {
13770Sstevel@tonic-gate 	struct nmsghdr lmsg;
13780Sstevel@tonic-gate 	struct uio auio;
13790Sstevel@tonic-gate 	struct iovec aiov[1];
13800Sstevel@tonic-gate 
13810Sstevel@tonic-gate 	dprint(1, ("sendto(%d, %p, %ld, %d, %p, %d)\n",
1382*5227Stz204579 	    sock, buffer, len, flags, name, namelen));
13830Sstevel@tonic-gate 
13840Sstevel@tonic-gate 	if ((ssize_t)len < 0) {
13850Sstevel@tonic-gate 		return (set_errno(EINVAL));
13860Sstevel@tonic-gate 	}
13870Sstevel@tonic-gate 
13880Sstevel@tonic-gate 	aiov[0].iov_base = buffer;
13890Sstevel@tonic-gate 	aiov[0].iov_len = len;
13900Sstevel@tonic-gate 	auio.uio_loffset = 0;
13910Sstevel@tonic-gate 	auio.uio_iov = aiov;
13920Sstevel@tonic-gate 	auio.uio_iovcnt = 1;
13930Sstevel@tonic-gate 	auio.uio_resid = len;
13940Sstevel@tonic-gate 	auio.uio_segflg = UIO_USERSPACE;
13950Sstevel@tonic-gate 	auio.uio_limit = 0;
13960Sstevel@tonic-gate 
13970Sstevel@tonic-gate 	lmsg.msg_name = (char *)name;
13980Sstevel@tonic-gate 	lmsg.msg_namelen = namelen;
13990Sstevel@tonic-gate 	lmsg.msg_control = NULL;
14000Sstevel@tonic-gate 	if (!(flags & MSG_XPG4_2)) {
14010Sstevel@tonic-gate 		/*
14020Sstevel@tonic-gate 		 * In order to be compatible with the libsocket/sockmod
14030Sstevel@tonic-gate 		 * implementation we set EOR for all send* calls.
14040Sstevel@tonic-gate 		 */
14050Sstevel@tonic-gate 		flags |= MSG_EOR;
14060Sstevel@tonic-gate 	}
14070Sstevel@tonic-gate 	return (sendit(sock, &lmsg, &auio, flags));
14080Sstevel@tonic-gate }
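
/*
 * A minimal userland sketch of sendto(), which supplies the destination
 * address that is copied in through msg_name/msg_namelen above.  The
 * 192.0.2.1:9999 destination is a hypothetical example.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fd;
	struct sockaddr_in to;
	const char *dgram = "one datagram";

	if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) {
		perror("socket");
		return (1);
	}
	(void) memset(&to, 0, sizeof (to));
	to.sin_family = AF_INET;
	to.sin_port = htons(9999);
	(void) inet_pton(AF_INET, "192.0.2.1", &to.sin_addr);

	if (sendto(fd, dgram, strlen(dgram), 0,
	    (struct sockaddr *)&to, sizeof (to)) == -1)
		perror("sendto");
	(void) close(fd);
	return (0);
}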
14090Sstevel@tonic-gate 
14100Sstevel@tonic-gate /*ARGSUSED3*/
14110Sstevel@tonic-gate int
14120Sstevel@tonic-gate getpeername(int sock, struct sockaddr *name, socklen_t *namelenp, int version)
14130Sstevel@tonic-gate {
14140Sstevel@tonic-gate 	struct sonode *so;
14150Sstevel@tonic-gate 	int error;
14160Sstevel@tonic-gate 	socklen_t namelen;
14170Sstevel@tonic-gate 	union {
14180Sstevel@tonic-gate 		struct sockaddr_in sin;
14190Sstevel@tonic-gate 		struct sockaddr_in6 sin6;
14200Sstevel@tonic-gate 	} sin;			/* Temporary buffer, common case */
14210Sstevel@tonic-gate 	void *addr;		/* Temporary buffer, uncommon case */
14220Sstevel@tonic-gate 	socklen_t addrlen, size;
14230Sstevel@tonic-gate 
14240Sstevel@tonic-gate 	dprint(1, ("getpeername(%d, %p, %p)\n",
1425*5227Stz204579 	    sock, name, namelenp));
14260Sstevel@tonic-gate 
14270Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
14280Sstevel@tonic-gate 		goto bad;
14290Sstevel@tonic-gate 
14300Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
14310Sstevel@tonic-gate 	if (copyin(namelenp, &namelen, sizeof (namelen)) ||
14320Sstevel@tonic-gate 	    (name == NULL && namelen != 0)) {
14330Sstevel@tonic-gate 		error = EFAULT;
14340Sstevel@tonic-gate 		goto rel_out;
14350Sstevel@tonic-gate 	}
14360Sstevel@tonic-gate 	/*
14370Sstevel@tonic-gate 	 * If a connect or accept has been done, unless we're an Xnet socket,
14380Sstevel@tonic-gate 	 * the remote address has already been updated in so_faddr_sa.
14390Sstevel@tonic-gate 	 */
14410Sstevel@tonic-gate 	if ((so->so_version != SOV_SOCKSTREAM && so->so_version != SOV_SOCKBSD) ||
14410Sstevel@tonic-gate 	    !(so->so_state & SS_FADDR_VALID)) {
14420Sstevel@tonic-gate 		if ((error = SOP_GETPEERNAME(so)) != 0)
14430Sstevel@tonic-gate 			goto rel_out;
14440Sstevel@tonic-gate 	}
14450Sstevel@tonic-gate 
14460Sstevel@tonic-gate 	if (so->so_faddr_maxlen <= sizeof (sin)) {
14470Sstevel@tonic-gate 		size = 0;
14480Sstevel@tonic-gate 		addr = &sin;
14490Sstevel@tonic-gate 	} else {
14500Sstevel@tonic-gate 		/*
14510Sstevel@tonic-gate 		 * Allocate temporary to avoid holding so_lock across
14520Sstevel@tonic-gate 		 * copyout
14530Sstevel@tonic-gate 		 */
14540Sstevel@tonic-gate 		size = so->so_faddr_maxlen;
14550Sstevel@tonic-gate 		addr = kmem_alloc(size, KM_SLEEP);
14560Sstevel@tonic-gate 	}
14570Sstevel@tonic-gate 	/* Prevent so_faddr_sa/len from changing while accessed */
14580Sstevel@tonic-gate 	mutex_enter(&so->so_lock);
14590Sstevel@tonic-gate 	if (!(so->so_state & SS_ISCONNECTED)) {
14600Sstevel@tonic-gate 		mutex_exit(&so->so_lock);
14610Sstevel@tonic-gate 		error = ENOTCONN;
14620Sstevel@tonic-gate 		goto free_out;
14630Sstevel@tonic-gate 	}
14640Sstevel@tonic-gate 	addrlen = so->so_faddr_len;
14650Sstevel@tonic-gate 	bcopy(so->so_faddr_sa, addr, addrlen);
14660Sstevel@tonic-gate 	mutex_exit(&so->so_lock);
14670Sstevel@tonic-gate 
14680Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
14690Sstevel@tonic-gate 	error = copyout_name(name, namelen, namelenp, addr,
14700Sstevel@tonic-gate 	    (so->so_state & SS_FADDR_NOXLATE) ? 0 : addrlen);
14710Sstevel@tonic-gate free_out:
14720Sstevel@tonic-gate 	if (size != 0)
14730Sstevel@tonic-gate 		kmem_free(addr, size);
14740Sstevel@tonic-gate rel_out:
14750Sstevel@tonic-gate 	releasef(sock);
14760Sstevel@tonic-gate bad:	return (error != 0 ? set_errno(error) : 0);
14770Sstevel@tonic-gate }
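
/*
 * A minimal userland sketch of getpeername() on a connected IPv4
 * socket.  'fd' is assumed to be connected; on an unconnected socket
 * the call fails with ENOTCONN, mirroring the SS_ISCONNECTED check
 * above.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

int
print_peer(int fd)
{
	struct sockaddr_in peer;
	socklen_t len = sizeof (peer);
	char buf[INET_ADDRSTRLEN];

	if (getpeername(fd, (struct sockaddr *)&peer, &len) == -1)
		return (-1);
	(void) printf("peer %s:%u\n",
	    inet_ntop(AF_INET, &peer.sin_addr, buf, sizeof (buf)),
	    (unsigned int)ntohs(peer.sin_port));
	return (0);
}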
14780Sstevel@tonic-gate 
14790Sstevel@tonic-gate /*ARGSUSED3*/
14800Sstevel@tonic-gate int
14810Sstevel@tonic-gate getsockname(int sock, struct sockaddr *name,
14820Sstevel@tonic-gate 		socklen_t *namelenp, int version)
14830Sstevel@tonic-gate {
14840Sstevel@tonic-gate 	struct sonode *so;
14850Sstevel@tonic-gate 	int error;
14860Sstevel@tonic-gate 	socklen_t namelen;
14870Sstevel@tonic-gate 	union {
14880Sstevel@tonic-gate 		struct sockaddr_in sin;
14890Sstevel@tonic-gate 		struct sockaddr_in6 sin6;
14900Sstevel@tonic-gate 	} sin;			/* Temporary buffer, common case */
14910Sstevel@tonic-gate 	void *addr;		/* Temporary buffer, uncommon case */
14920Sstevel@tonic-gate 	socklen_t addrlen, size;
14930Sstevel@tonic-gate 
14940Sstevel@tonic-gate 	dprint(1, ("getsockname(%d, %p, %p)\n",
1495*5227Stz204579 	    sock, name, namelenp));
14960Sstevel@tonic-gate 
14970Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
14980Sstevel@tonic-gate 		goto bad;
14990Sstevel@tonic-gate 
15000Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
15010Sstevel@tonic-gate 	if (copyin(namelenp, &namelen, sizeof (namelen)) ||
15020Sstevel@tonic-gate 	    (name == NULL && namelen != 0)) {
15030Sstevel@tonic-gate 		error = EFAULT;
15040Sstevel@tonic-gate 		goto rel_out;
15050Sstevel@tonic-gate 	}
15060Sstevel@tonic-gate 
15070Sstevel@tonic-gate 	/*
15080Sstevel@tonic-gate 	 * If a bind or accept has been done, unless we're an Xnet endpoint,
15090Sstevel@tonic-gate 	 * the local address has already been updated in so_laddr_sa.
15100Sstevel@tonic-gate 	 */
15110Sstevel@tonic-gate 	if ((so->so_version != SOV_SOCKSTREAM &&
15120Sstevel@tonic-gate 	    so->so_version != SOV_SOCKBSD) ||
15130Sstevel@tonic-gate 	    !(so->so_state & SS_LADDR_VALID)) {
15140Sstevel@tonic-gate 		if ((error = SOP_GETSOCKNAME(so)) != 0)
15150Sstevel@tonic-gate 			goto rel_out;
15160Sstevel@tonic-gate 	}
15170Sstevel@tonic-gate 
15180Sstevel@tonic-gate 	if (so->so_laddr_maxlen <= sizeof (sin)) {
15190Sstevel@tonic-gate 		size = 0;
15200Sstevel@tonic-gate 		addr = &sin;
15210Sstevel@tonic-gate 	} else {
15220Sstevel@tonic-gate 		/*
15230Sstevel@tonic-gate 		 * Allocate temporary to avoid holding so_lock across
15240Sstevel@tonic-gate 		 * copyout
15250Sstevel@tonic-gate 		 */
15260Sstevel@tonic-gate 		size = so->so_laddr_maxlen;
15270Sstevel@tonic-gate 		addr = kmem_alloc(size, KM_SLEEP);
15280Sstevel@tonic-gate 	}
15290Sstevel@tonic-gate 	/* Prevent so_laddr_sa/len from changing while accessed */
15300Sstevel@tonic-gate 	mutex_enter(&so->so_lock);
15310Sstevel@tonic-gate 	addrlen = so->so_laddr_len;
15320Sstevel@tonic-gate 	bcopy(so->so_laddr_sa, addr, addrlen);
15330Sstevel@tonic-gate 	mutex_exit(&so->so_lock);
15340Sstevel@tonic-gate 
15350Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
15360Sstevel@tonic-gate 	error = copyout_name(name, namelen, namelenp,
15370Sstevel@tonic-gate 	    addr, addrlen);
15380Sstevel@tonic-gate 	if (size != 0)
15390Sstevel@tonic-gate 		kmem_free(addr, size);
15400Sstevel@tonic-gate rel_out:
15410Sstevel@tonic-gate 	releasef(sock);
15420Sstevel@tonic-gate bad:	return (error != 0 ? set_errno(error) : 0);
15430Sstevel@tonic-gate }
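
/*
 * A minimal userland sketch of the most common getsockname() use:
 * discovering which ephemeral port the kernel picked after binding to
 * port 0.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fd;
	struct sockaddr_in sin;
	socklen_t len = sizeof (sin);

	if ((fd = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
		perror("socket");
		return (1);
	}
	(void) memset(&sin, 0, sizeof (sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = 0;		/* let the kernel choose a port */
	if (bind(fd, (struct sockaddr *)&sin, sizeof (sin)) == -1) {
		perror("bind");
		return (1);
	}
	if (getsockname(fd, (struct sockaddr *)&sin, &len) == -1) {
		perror("getsockname");
		return (1);
	}
	(void) printf("bound to port %u\n", (unsigned int)ntohs(sin.sin_port));
	(void) close(fd);
	return (0);
}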
15440Sstevel@tonic-gate 
15450Sstevel@tonic-gate /*ARGSUSED5*/
15460Sstevel@tonic-gate int
15470Sstevel@tonic-gate getsockopt(int sock,
15480Sstevel@tonic-gate 	int level,
15490Sstevel@tonic-gate 	int option_name,
15500Sstevel@tonic-gate 	void *option_value,
15510Sstevel@tonic-gate 	socklen_t *option_lenp,
15520Sstevel@tonic-gate 	int version)
15530Sstevel@tonic-gate {
15540Sstevel@tonic-gate 	struct sonode *so;
15550Sstevel@tonic-gate 	socklen_t optlen, optlen_res;
15560Sstevel@tonic-gate 	void *optval;
15570Sstevel@tonic-gate 	int error;
15580Sstevel@tonic-gate 
15590Sstevel@tonic-gate 	dprint(1, ("getsockopt(%d, %d, %d, %p, %p)\n",
1560*5227Stz204579 	    sock, level, option_name, option_value, option_lenp));
15610Sstevel@tonic-gate 
15620Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
15630Sstevel@tonic-gate 		return (set_errno(error));
15640Sstevel@tonic-gate 
15650Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
15660Sstevel@tonic-gate 	if (copyin(option_lenp, &optlen, sizeof (optlen))) {
15670Sstevel@tonic-gate 		releasef(sock);
15680Sstevel@tonic-gate 		return (set_errno(EFAULT));
15690Sstevel@tonic-gate 	}
15700Sstevel@tonic-gate 	/*
15710Sstevel@tonic-gate 	 * Verify that the length is not excessive to prevent
15720Sstevel@tonic-gate 	 * an application from consuming all of kernel memory.
15730Sstevel@tonic-gate 	 */
15740Sstevel@tonic-gate 	if (optlen > SO_MAXARGSIZE) {
15750Sstevel@tonic-gate 		error = EINVAL;
15760Sstevel@tonic-gate 		releasef(sock);
15770Sstevel@tonic-gate 		return (set_errno(error));
15780Sstevel@tonic-gate 	}
15790Sstevel@tonic-gate 	optval = kmem_alloc(optlen, KM_SLEEP);
15800Sstevel@tonic-gate 	optlen_res = optlen;
15810Sstevel@tonic-gate 	error = SOP_GETSOCKOPT(so, level, option_name, optval,
15820Sstevel@tonic-gate 	    &optlen_res, (version != SOV_XPG4_2) ? 0 : _SOGETSOCKOPT_XPG4_2);
15830Sstevel@tonic-gate 	releasef(sock);
15840Sstevel@tonic-gate 	if (error) {
15850Sstevel@tonic-gate 		kmem_free(optval, optlen);
15860Sstevel@tonic-gate 		return (set_errno(error));
15870Sstevel@tonic-gate 	}
15880Sstevel@tonic-gate 	error = copyout_arg(option_value, optlen, option_lenp,
15890Sstevel@tonic-gate 	    optval, optlen_res);
15900Sstevel@tonic-gate 	kmem_free(optval, optlen);
15910Sstevel@tonic-gate 	if (error)
15920Sstevel@tonic-gate 		return (set_errno(error));
15930Sstevel@tonic-gate 	return (0);
15940Sstevel@tonic-gate }
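
/*
 * A minimal userland sketch of getsockopt(): the caller supplies both a
 * buffer and its length, and the kernel writes the value and the
 * resulting length back, just as optval/optlen_res are copied out above.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd, rcvbuf;
	socklen_t len = sizeof (rcvbuf);

	if ((fd = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
		perror("socket");
		return (1);
	}
	if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, &len) == -1) {
		perror("getsockopt");
		return (1);
	}
	(void) printf("SO_RCVBUF = %d (len %u)\n", rcvbuf, (unsigned int)len);
	(void) close(fd);
	return (0);
}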
15950Sstevel@tonic-gate 
15960Sstevel@tonic-gate /*ARGSUSED5*/
15970Sstevel@tonic-gate int
15980Sstevel@tonic-gate setsockopt(int sock,
15990Sstevel@tonic-gate 	int level,
16000Sstevel@tonic-gate 	int option_name,
16010Sstevel@tonic-gate 	void *option_value,
16020Sstevel@tonic-gate 	socklen_t option_len,
16030Sstevel@tonic-gate 	int version)
16040Sstevel@tonic-gate {
16050Sstevel@tonic-gate 	struct sonode *so;
16060Sstevel@tonic-gate 	intptr_t buffer[2];
16070Sstevel@tonic-gate 	void *optval = NULL;
16080Sstevel@tonic-gate 	int error;
16090Sstevel@tonic-gate 
16100Sstevel@tonic-gate 	dprint(1, ("setsockopt(%d, %d, %d, %p, %d)\n",
1611*5227Stz204579 	    sock, level, option_name, option_value, option_len));
16120Sstevel@tonic-gate 
16130Sstevel@tonic-gate 	if ((so = getsonode(sock, &error, NULL)) == NULL)
16140Sstevel@tonic-gate 		return (set_errno(error));
16150Sstevel@tonic-gate 
16160Sstevel@tonic-gate 	if (option_value != NULL) {
16170Sstevel@tonic-gate 		if (option_len != 0) {
16180Sstevel@tonic-gate 			/*
16190Sstevel@tonic-gate 			 * Verify that the length is not excessive to prevent
16200Sstevel@tonic-gate 			 * an application from consuming all of kernel memory.
16210Sstevel@tonic-gate 			 */
16220Sstevel@tonic-gate 			if (option_len > SO_MAXARGSIZE) {
16230Sstevel@tonic-gate 				error = EINVAL;
16240Sstevel@tonic-gate 				goto done2;
16250Sstevel@tonic-gate 			}
16260Sstevel@tonic-gate 			optval = option_len <= sizeof (buffer) ?
16270Sstevel@tonic-gate 			    &buffer : kmem_alloc((size_t)option_len, KM_SLEEP);
16280Sstevel@tonic-gate 			ASSERT(MUTEX_NOT_HELD(&so->so_lock));
16290Sstevel@tonic-gate 			if (copyin(option_value, optval, (size_t)option_len)) {
16300Sstevel@tonic-gate 				error = EFAULT;
16310Sstevel@tonic-gate 				goto done1;
16320Sstevel@tonic-gate 			}
16330Sstevel@tonic-gate 		}
16340Sstevel@tonic-gate 	} else
16350Sstevel@tonic-gate 		option_len = 0;
16360Sstevel@tonic-gate 
16370Sstevel@tonic-gate 	error = SOP_SETSOCKOPT(so, level, option_name, optval,
16380Sstevel@tonic-gate 	    (t_uscalar_t)option_len);
16390Sstevel@tonic-gate done1:
16400Sstevel@tonic-gate 	if (optval != buffer)
16410Sstevel@tonic-gate 		kmem_free(optval, (size_t)option_len);
16420Sstevel@tonic-gate done2:
16430Sstevel@tonic-gate 	releasef(sock);
16440Sstevel@tonic-gate 	if (error)
16450Sstevel@tonic-gate 		return (set_errno(error));
16460Sstevel@tonic-gate 	return (0);
16470Sstevel@tonic-gate }
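
/*
 * A minimal userland sketch of setsockopt(): a small option value such
 * as this 4-byte int is copied into the on-stack buffer[] fast path
 * above rather than a kmem_alloc'ed buffer.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd, on = 1;

	if ((fd = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
		perror("socket");
		return (1);
	}
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof (on)) == -1)
		perror("setsockopt");
	(void) close(fd);
	return (0);
}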
16480Sstevel@tonic-gate 
16490Sstevel@tonic-gate /*
16500Sstevel@tonic-gate  * Add config info when devpath is non-NULL; delete info when devpath is NULL.
16510Sstevel@tonic-gate  * devpath is a user address.
16520Sstevel@tonic-gate  */
16530Sstevel@tonic-gate int
16540Sstevel@tonic-gate sockconfig(int domain, int type, int protocol, char *devpath)
16550Sstevel@tonic-gate {
16560Sstevel@tonic-gate 	char *kdevpath;		/* Copied in devpath string */
16570Sstevel@tonic-gate 	size_t kdevpathlen;
16580Sstevel@tonic-gate 	int error = 0;
16590Sstevel@tonic-gate 
16600Sstevel@tonic-gate 	dprint(1, ("sockconfig(%d, %d, %d, %p)\n",
1661*5227Stz204579 	    domain, type, protocol, devpath));
16620Sstevel@tonic-gate 
16630Sstevel@tonic-gate 	if (secpolicy_net_config(CRED(), B_FALSE) != 0)
16640Sstevel@tonic-gate 		return (set_errno(EPERM));
16650Sstevel@tonic-gate 
16660Sstevel@tonic-gate 	if (devpath == NULL) {
16670Sstevel@tonic-gate 		/* Deleting an entry */
16680Sstevel@tonic-gate 		kdevpath = NULL;
16690Sstevel@tonic-gate 		kdevpathlen = 0;
16700Sstevel@tonic-gate 	} else {
16710Sstevel@tonic-gate 		/*
16720Sstevel@tonic-gate 		 * Adding an entry.
16730Sstevel@tonic-gate 		 * Copyin the devpath.
16740Sstevel@tonic-gate 		 * This also makes it possible to check for pathnames that are too long.
16750Sstevel@tonic-gate 		 * Compress the space needed for the devpath before passing it
16760Sstevel@tonic-gate 		 * to soconfig - soconfig will store the string until
16770Sstevel@tonic-gate 		 * the configuration is removed.
16780Sstevel@tonic-gate 		 */
16790Sstevel@tonic-gate 		char *buf;
16800Sstevel@tonic-gate 
16810Sstevel@tonic-gate 		buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
16820Sstevel@tonic-gate 		if ((error = copyinstr(devpath, buf, MAXPATHLEN,
16830Sstevel@tonic-gate 		    &kdevpathlen)) != 0) {
16840Sstevel@tonic-gate 			kmem_free(buf, MAXPATHLEN);
16850Sstevel@tonic-gate 			goto done;
16860Sstevel@tonic-gate 		}
16870Sstevel@tonic-gate 
16880Sstevel@tonic-gate 		kdevpath = kmem_alloc(kdevpathlen, KM_SLEEP);
16890Sstevel@tonic-gate 		bcopy(buf, kdevpath, kdevpathlen);
16900Sstevel@tonic-gate 		kdevpath[kdevpathlen - 1] = '\0';
16910Sstevel@tonic-gate 
16920Sstevel@tonic-gate 		kmem_free(buf, MAXPATHLEN);
16930Sstevel@tonic-gate 	}
16940Sstevel@tonic-gate 	error = soconfig(domain, type, protocol, kdevpath, (int)kdevpathlen);
16950Sstevel@tonic-gate done:
16960Sstevel@tonic-gate 	if (error) {
16970Sstevel@tonic-gate 		eprintline(error);
16980Sstevel@tonic-gate 		return (set_errno(error));
16990Sstevel@tonic-gate 	}
17000Sstevel@tonic-gate 	return (0);
17010Sstevel@tonic-gate }
17020Sstevel@tonic-gate 
17030Sstevel@tonic-gate 
17040Sstevel@tonic-gate /*
17050Sstevel@tonic-gate  * Sendfile is implemented through two schemes, direct I/O or by
17060Sstevel@tonic-gate  * caching in the filesystem page cache. We cache the input file by
17070Sstevel@tonic-gate  * default and use direct I/O only if sendfile_max_size is set
17080Sstevel@tonic-gate  * appropriately as explained below. Note that this logic is consistent
17090Sstevel@tonic-gate  * with other filesystems where caching is turned on by default
17100Sstevel@tonic-gate  * unless explicitly turned off by using the DIRECTIO ioctl.
17110Sstevel@tonic-gate  *
17120Sstevel@tonic-gate  * We choose a slightly different scheme here. One can turn off
17130Sstevel@tonic-gate  * caching by setting sendfile_max_size to 0. One can also enable
17140Sstevel@tonic-gate  * caching of files <= sendfile_max_size by setting sendfile_max_size
17150Sstevel@tonic-gate  * to an appropriate value. By default sendfile_max_size is set to the
17160Sstevel@tonic-gate  * maximum value so that all files are cached. In future, we may provide
17170Sstevel@tonic-gate  * better interfaces for caching the file.
17180Sstevel@tonic-gate  *
17190Sstevel@tonic-gate  * Sendfile through Direct I/O (Zero copy)
17200Sstevel@tonic-gate  * --------------------------------------
17210Sstevel@tonic-gate  *
17220Sstevel@tonic-gate  * As disks are normally slower than the network, we can't have a
17230Sstevel@tonic-gate  * single thread that reads the disk and writes to the network. We
17240Sstevel@tonic-gate  * need to have parallelism. This is done by having the sendfile
17250Sstevel@tonic-gate  * thread create another thread that reads from the filesystem
17260Sstevel@tonic-gate  * and queues it for network processing. In this scheme, the data
17270Sstevel@tonic-gate  * is never copied anywhere, i.e., it is zero-copy, unlike the other
17280Sstevel@tonic-gate  * scheme.
17290Sstevel@tonic-gate  *
17300Sstevel@tonic-gate  * We have a sendfile queue (snfq) where each sendfile
17310Sstevel@tonic-gate  * request (snf_req_t) is queued for processing by a thread. The number
17320Sstevel@tonic-gate  * of threads is adjusted dynamically, and a thread exits if it has been idle
17330Sstevel@tonic-gate  * beyond a specified amount of time. When each request (snf_req_t) is
17340Sstevel@tonic-gate  * processed by a thread, it produces a number of mblk_t structures to
17350Sstevel@tonic-gate  * be consumed by the sendfile thread. snf_deque and snf_enque are
17360Sstevel@tonic-gate  * used for consuming and producing mblks. Size of the filesystem
17370Sstevel@tonic-gate  * read is determined by the tuneable (sendfile_read_size). A single
17380Sstevel@tonic-gate  * mblk holds sendfile_read_size worth of data (except the last
17390Sstevel@tonic-gate  * read of the file) which is sent down as a whole to the network.
17400Sstevel@tonic-gate  * sendfile_read_size is set to 1 MB as this seems to be the optimal
17410Sstevel@tonic-gate  * value for the UFS filesystem backed by a striped storage array.
17420Sstevel@tonic-gate  *
17430Sstevel@tonic-gate  * Synchronisation between read (producer) and write (consumer) threads.
17440Sstevel@tonic-gate  * --------------------------------------------------------------------
17450Sstevel@tonic-gate  *
17460Sstevel@tonic-gate  * sr_lock protects sr_mp_head and sr_mp_tail. The lock is held while
17470Sstevel@tonic-gate  * adding and deleting items in this list. An error can happen at any time
17480Sstevel@tonic-gate  * during a read or a write. There could be unprocessed mblks in the
17490Sstevel@tonic-gate  * sr_mp_XXX list when a read or write error occurs. Whenever an error
17500Sstevel@tonic-gate  * is encountered, we need two things to happen:
17510Sstevel@tonic-gate  *
17520Sstevel@tonic-gate  * a) One of the threads needs to clean up the mblks.
17530Sstevel@tonic-gate  * b) When one thread encounters an error, the other should stop.
17540Sstevel@tonic-gate  *
17550Sstevel@tonic-gate  * For (a), we don't want to penalise the reader thread as it could do
17560Sstevel@tonic-gate  * some useful work processing other requests. For (b), the error can
17570Sstevel@tonic-gate  * be detected by examining sr_read_error or sr_write_error.
17580Sstevel@tonic-gate  * sr_lock protects sr_read_error and sr_write_error. If both the reader and
17590Sstevel@tonic-gate  * the writer encounter errors, we need to report the write error back to
17600Sstevel@tonic-gate  * the application, as that is what would have happened if the operations
17610Sstevel@tonic-gate  * had been done sequentially. With this in mind, the following should work:
17620Sstevel@tonic-gate  *
17630Sstevel@tonic-gate  * 	- Check for errors before read or write.
17640Sstevel@tonic-gate  *	- If the reader encounters an error, set the error in sr_read_error.
17650Sstevel@tonic-gate  *	  Check sr_write_error; if it is set, send cv_signal, as the writer is
17660Sstevel@tonic-gate  *	  waiting for the reader to complete. If it is not set, the writer
17670Sstevel@tonic-gate  *	  is either busy sinking data to the network or blocked
17680Sstevel@tonic-gate  *	  because of flow control. To handle the latter case, we
17690Sstevel@tonic-gate  *	  always send a signal. In either case, it will examine sr_read_error
17700Sstevel@tonic-gate  *	  and return. sr_read_error is marked with SR_READ_DONE to tell
17710Sstevel@tonic-gate  *	  the writer that the reader is done in all cases.
17720Sstevel@tonic-gate  *	- If the writer encounters an error, set the error in sr_write_error.
17730Sstevel@tonic-gate  *	  The reader thread is either blocked because of flow control or
17740Sstevel@tonic-gate  *	  busy reading data from the disk. For the former, we need to
17750Sstevel@tonic-gate  *	  wake up the thread. Again, to keep it simple, we always wake up
17760Sstevel@tonic-gate  *	  the reader thread. Then, wait for the reader to complete
17770Sstevel@tonic-gate  *	  if it is not done yet. Clean up and return.
17780Sstevel@tonic-gate  *
17790Sstevel@tonic-gate  * High and low water marks for the read thread.
17800Sstevel@tonic-gate  * --------------------------------------------
17810Sstevel@tonic-gate  *
17820Sstevel@tonic-gate  * If sendfile() is used to send data over a slow network, we need to
17830Sstevel@tonic-gate  * make sure that the read thread does not produce data at a faster
17840Sstevel@tonic-gate  * rate than the network. This can happen if the disk is faster than
17850Sstevel@tonic-gate  * the network. In such a case, we don't want to build a very large queue.
17860Sstevel@tonic-gate  * But we would still like to get all of the network throughput possible.
17870Sstevel@tonic-gate  * This implies that network should never block waiting for data.
17880Sstevel@tonic-gate  * As there are many disk throughput/network throughput combinations
17890Sstevel@tonic-gate  * possible, it is difficult to come up with an accurate number.
17900Sstevel@tonic-gate  * A typical 10K RPM disk has a maximum seek latency of 17ms and a rotational
17910Sstevel@tonic-gate  * latency of 3ms for reading a disk block. Thus, the total latency to
17920Sstevel@tonic-gate  * initiate a new read, transfer data from the disk and queue it for
17930Sstevel@tonic-gate  * transmission would be at most about 25ms. Today's maximum transfer rate
17940Sstevel@tonic-gate  * for a network is 100MB/sec. If the thread is blocked because of flow
17950Sstevel@tonic-gate  * control, it would take 25ms to get new data ready for transmission.
17960Sstevel@tonic-gate  * We have to make sure that the network is not idling while we are
17970Sstevel@tonic-gate  * initiating new transfers. So, at 100MB/sec, to keep the network busy we
17980Sstevel@tonic-gate  * would need 2.5MB of data. Rounding up, we set the low water mark to 3MB.
17990Sstevel@tonic-gate  * We need to pick a high water mark so that the woken-up thread will
18000Sstevel@tonic-gate  * do considerable work before blocking again, to prevent thrashing.
18010Sstevel@tonic-gate  * Currently, we pick this to be 10 times the low water mark.
18020Sstevel@tonic-gate  *
18030Sstevel@tonic-gate  * Sendfile with segmap caching (One copy from page cache to mblks).
18040Sstevel@tonic-gate  * ----------------------------------------------------------------
18050Sstevel@tonic-gate  *
18060Sstevel@tonic-gate  * We use the segmap cache for caching the file, if the size of the file
18070Sstevel@tonic-gate  * is <= sendfile_max_size. In this case we don't use threads, as the VM
18080Sstevel@tonic-gate  * system is fast enough to keep up with the network. If the underlying
18090Sstevel@tonic-gate  * transport allows, we call segmap_getmapflt() to map MAXBSIZE (8K) worth
18100Sstevel@tonic-gate  * of data into segmap space, and use the virtual address from segmap
18110Sstevel@tonic-gate  * directly through desballoc() to avoid copy. Once the transport is done
18120Sstevel@tonic-gate  * with the data, the mapping will be released through segmap_release()
18130Sstevel@tonic-gate  * called by the call-back routine.
18140Sstevel@tonic-gate  *
18150Sstevel@tonic-gate  * If zero-copy is not allowed by the transport, we simply call VOP_READ()
18160Sstevel@tonic-gate  * to copy the data from the filesystem into our temporary network buffer.
18170Sstevel@tonic-gate  *
18180Sstevel@tonic-gate  * To disable caching, set sendfile_max_size to 0.
18190Sstevel@tonic-gate  */
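
/*
 * A minimal userland sketch of the sendfile(3EXT) call that the code
 * below services, assuming the Solaris <sys/sendfile.h> interface
 * (typically linked with -lsendfile).  'sock_fd' is assumed to be a
 * connected stream socket and 'path' names an ordinary file.
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/sendfile.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
send_whole_file(int sock_fd, const char *path)
{
	int file_fd;
	struct stat st;
	off_t off = 0;
	ssize_t sent;

	if ((file_fd = open(path, O_RDONLY)) == -1)
		return (-1);
	if (fstat(file_fd, &st) == -1) {
		(void) close(file_fd);
		return (-1);
	}
	/* The kernel chooses direct I/O or segmap caching as described above. */
	sent = sendfile(sock_fd, file_fd, &off, (size_t)st.st_size);
	if (sent == -1)
		perror("sendfile");
	(void) close(file_fd);
	return (sent == -1 ? -1 : 0);
}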
18200Sstevel@tonic-gate 
18210Sstevel@tonic-gate uint_t sendfile_read_size = 1024 * 1024;
18220Sstevel@tonic-gate #define	SENDFILE_REQ_LOWAT	(3 * 1024 * 1024)
18230Sstevel@tonic-gate uint_t sendfile_req_lowat = SENDFILE_REQ_LOWAT;
18240Sstevel@tonic-gate uint_t sendfile_req_hiwat = 10 * SENDFILE_REQ_LOWAT;
18250Sstevel@tonic-gate struct sendfile_stats sf_stats;
18260Sstevel@tonic-gate struct sendfile_queue *snfq;
18270Sstevel@tonic-gate clock_t snfq_timeout;
18280Sstevel@tonic-gate off64_t sendfile_max_size;
18290Sstevel@tonic-gate 
18300Sstevel@tonic-gate static void snf_enque(snf_req_t *, mblk_t *);
18310Sstevel@tonic-gate static mblk_t *snf_deque(snf_req_t *);
18320Sstevel@tonic-gate 
18330Sstevel@tonic-gate void
18340Sstevel@tonic-gate sendfile_init(void)
18350Sstevel@tonic-gate {
18360Sstevel@tonic-gate 	snfq = kmem_zalloc(sizeof (struct sendfile_queue), KM_SLEEP);
18370Sstevel@tonic-gate 
18380Sstevel@tonic-gate 	mutex_init(&snfq->snfq_lock, NULL, MUTEX_DEFAULT, NULL);
18390Sstevel@tonic-gate 	cv_init(&snfq->snfq_cv, NULL, CV_DEFAULT, NULL);
18400Sstevel@tonic-gate 	snfq->snfq_max_threads = max_ncpus;
18410Sstevel@tonic-gate 	snfq_timeout = SNFQ_TIMEOUT;
18420Sstevel@tonic-gate 	/* Cache all files by default. */
18430Sstevel@tonic-gate 	sendfile_max_size = MAXOFFSET_T;
18440Sstevel@tonic-gate }
18450Sstevel@tonic-gate 
18460Sstevel@tonic-gate /*
18470Sstevel@tonic-gate  * Queues a mblk_t for network processing.
18480Sstevel@tonic-gate  */
18490Sstevel@tonic-gate static void
18500Sstevel@tonic-gate snf_enque(snf_req_t *sr, mblk_t *mp)
18510Sstevel@tonic-gate {
18520Sstevel@tonic-gate 	mp->b_next = NULL;
18530Sstevel@tonic-gate 	mutex_enter(&sr->sr_lock);
18540Sstevel@tonic-gate 	if (sr->sr_mp_head == NULL) {
18550Sstevel@tonic-gate 		sr->sr_mp_head = sr->sr_mp_tail = mp;
18560Sstevel@tonic-gate 		cv_signal(&sr->sr_cv);
18570Sstevel@tonic-gate 	} else {
18580Sstevel@tonic-gate 		sr->sr_mp_tail->b_next = mp;
18590Sstevel@tonic-gate 		sr->sr_mp_tail = mp;
18600Sstevel@tonic-gate 	}
18610Sstevel@tonic-gate 	sr->sr_qlen += MBLKL(mp);
18620Sstevel@tonic-gate 	while ((sr->sr_qlen > sr->sr_hiwat) &&
18630Sstevel@tonic-gate 	    (sr->sr_write_error == 0)) {
18640Sstevel@tonic-gate 		sf_stats.ss_full_waits++;
18650Sstevel@tonic-gate 		cv_wait(&sr->sr_cv, &sr->sr_lock);
18660Sstevel@tonic-gate 	}
18670Sstevel@tonic-gate 	mutex_exit(&sr->sr_lock);
18680Sstevel@tonic-gate }
18690Sstevel@tonic-gate 
18700Sstevel@tonic-gate /*
18710Sstevel@tonic-gate  * De-queues a mblk_t for network processing.
18720Sstevel@tonic-gate  */
18730Sstevel@tonic-gate static mblk_t *
18740Sstevel@tonic-gate snf_deque(snf_req_t *sr)
18750Sstevel@tonic-gate {
18760Sstevel@tonic-gate 	mblk_t *mp;
18770Sstevel@tonic-gate 
18780Sstevel@tonic-gate 	mutex_enter(&sr->sr_lock);
18790Sstevel@tonic-gate 	/*
18800Sstevel@tonic-gate 	 * If we have encountered an error on read, or the read is
18810Sstevel@tonic-gate 	 * complete and there are no more mblks, return NULL.
18820Sstevel@tonic-gate 	 * We also need to check for a NULL sr_mp_head, as
18830Sstevel@tonic-gate 	 * the reads could have completed and there is
18840Sstevel@tonic-gate 	 * nothing more to come.
18850Sstevel@tonic-gate 	 */
18860Sstevel@tonic-gate 	if (((sr->sr_read_error & ~SR_READ_DONE) != 0) ||
18870Sstevel@tonic-gate 	    ((sr->sr_read_error & SR_READ_DONE) &&
18880Sstevel@tonic-gate 	    sr->sr_mp_head == NULL)) {
18890Sstevel@tonic-gate 		mutex_exit(&sr->sr_lock);
18900Sstevel@tonic-gate 		return (NULL);
18910Sstevel@tonic-gate 	}
18920Sstevel@tonic-gate 	/*
18930Sstevel@tonic-gate 	 * To start with, neither SR_READ_DONE is marked nor is
18940Sstevel@tonic-gate 	 * the error set. When we wake up from cv_wait,
18950Sstevel@tonic-gate 	 * the following are the possibilities:
18960Sstevel@tonic-gate 	 *
18970Sstevel@tonic-gate 	 *	a) sr_read_error is zero and mblks are queued.
18980Sstevel@tonic-gate 	 *	b) sr_read_error is set to SR_READ_DONE
18990Sstevel@tonic-gate 	 *	   and mblks are queued.
19000Sstevel@tonic-gate 	 *	c) sr_read_error is set to SR_READ_DONE
19010Sstevel@tonic-gate 	 *	   and no mblks.
19020Sstevel@tonic-gate 	 *	d) sr_read_error is set to some error other
19030Sstevel@tonic-gate 	 *	   than SR_READ_DONE.
19040Sstevel@tonic-gate 	 */
19050Sstevel@tonic-gate 
19060Sstevel@tonic-gate 	while ((sr->sr_read_error == 0) && (sr->sr_mp_head == NULL)) {
19070Sstevel@tonic-gate 		sf_stats.ss_empty_waits++;
19080Sstevel@tonic-gate 		cv_wait(&sr->sr_cv, &sr->sr_lock);
19090Sstevel@tonic-gate 	}
19100Sstevel@tonic-gate 	/* Handle (a) and (b) first  - the normal case. */
19110Sstevel@tonic-gate 	if (((sr->sr_read_error & ~SR_READ_DONE) == 0) &&
19120Sstevel@tonic-gate 	    (sr->sr_mp_head != NULL)) {
19130Sstevel@tonic-gate 		mp = sr->sr_mp_head;
19140Sstevel@tonic-gate 		sr->sr_mp_head = mp->b_next;
19150Sstevel@tonic-gate 		sr->sr_qlen -= MBLKL(mp);
19160Sstevel@tonic-gate 		if (sr->sr_qlen < sr->sr_lowat)
19170Sstevel@tonic-gate 			cv_signal(&sr->sr_cv);
19180Sstevel@tonic-gate 		mutex_exit(&sr->sr_lock);
19190Sstevel@tonic-gate 		mp->b_next = NULL;
19200Sstevel@tonic-gate 		return (mp);
19210Sstevel@tonic-gate 	}
19220Sstevel@tonic-gate 	/* Handle (c) and (d). */
19230Sstevel@tonic-gate 	mutex_exit(&sr->sr_lock);
19240Sstevel@tonic-gate 	return (NULL);
19250Sstevel@tonic-gate }
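
/*
 * The snf_enque()/snf_deque() pair above implements a bounded
 * producer/consumer queue with high and low water marks on the number
 * of queued bytes.  The following is a simplified userland analogue of
 * that pattern using pthreads; the bq_* names are invented for
 * illustration and are not part of the kernel interface.
 */
#include <pthread.h>
#include <stddef.h>

typedef struct bq_item {
	struct bq_item	*next;
	size_t		len;
} bq_item_t;

typedef struct bq {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	bq_item_t	*head;
	bq_item_t	*tail;
	size_t		qlen;	/* bytes currently queued */
	size_t		lowat;	/* wake the producer below this */
	size_t		hiwat;	/* block the producer above this */
	int		done;	/* producer finished (cf. SR_READ_DONE) */
} bq_t;

void
bq_put(bq_t *q, bq_item_t *it)
{
	(void) pthread_mutex_lock(&q->lock);
	it->next = NULL;
	if (q->head == NULL) {
		q->head = q->tail = it;
		(void) pthread_cond_signal(&q->cv);	/* wake an empty consumer */
	} else {
		q->tail->next = it;
		q->tail = it;
	}
	q->qlen += it->len;
	while (q->qlen > q->hiwat)		/* throttle the producer */
		(void) pthread_cond_wait(&q->cv, &q->lock);
	(void) pthread_mutex_unlock(&q->lock);
}

bq_item_t *
bq_get(bq_t *q)
{
	bq_item_t *it;

	(void) pthread_mutex_lock(&q->lock);
	while (q->head == NULL && !q->done)
		(void) pthread_cond_wait(&q->cv, &q->lock);
	if ((it = q->head) != NULL) {
		q->head = it->next;
		q->qlen -= it->len;
		if (q->qlen < q->lowat)		/* let the producer refill */
			(void) pthread_cond_signal(&q->cv);
	}
	(void) pthread_mutex_unlock(&q->lock);
	return (it);		/* NULL means drained and the producer is done */
}

void
bq_done(bq_t *q)		/* producer signals completion */
{
	(void) pthread_mutex_lock(&q->lock);
	q->done = 1;
	(void) pthread_cond_signal(&q->cv);
	(void) pthread_mutex_unlock(&q->lock);
}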
19260Sstevel@tonic-gate 
19270Sstevel@tonic-gate /*
19280Sstevel@tonic-gate  * Reads data from the filesystem and queues it for network processing.
19290Sstevel@tonic-gate  */
19300Sstevel@tonic-gate void
19310Sstevel@tonic-gate snf_async_read(snf_req_t *sr)
19320Sstevel@tonic-gate {
19330Sstevel@tonic-gate 	size_t iosize;
19340Sstevel@tonic-gate 	u_offset_t fileoff;
19350Sstevel@tonic-gate 	u_offset_t size;
19360Sstevel@tonic-gate 	int ret_size;
19370Sstevel@tonic-gate 	int error;
19380Sstevel@tonic-gate 	file_t *fp;
19390Sstevel@tonic-gate 	mblk_t *mp;
19400Sstevel@tonic-gate 
19410Sstevel@tonic-gate 	fp = sr->sr_fp;
19420Sstevel@tonic-gate 	size = sr->sr_file_size;
19430Sstevel@tonic-gate 	fileoff = sr->sr_file_off;
19440Sstevel@tonic-gate 
19450Sstevel@tonic-gate 	/*
19460Sstevel@tonic-gate 	 * Ignore the error for filesystems that don't support DIRECTIO.
19470Sstevel@tonic-gate 	 */
19480Sstevel@tonic-gate 	(void) VOP_IOCTL(fp->f_vnode, _FIODIRECTIO, DIRECTIO_ON, 0,
19490Sstevel@tonic-gate 	    kcred, NULL);
19500Sstevel@tonic-gate 
19510Sstevel@tonic-gate 	while ((size != 0) && (sr->sr_write_error == 0)) {
19520Sstevel@tonic-gate 
19530Sstevel@tonic-gate 		iosize = (int)MIN(sr->sr_maxpsz, size);
19540Sstevel@tonic-gate 
19550Sstevel@tonic-gate 		if ((mp = allocb(iosize, BPRI_MED)) == NULL) {
19560Sstevel@tonic-gate 			error = EAGAIN;
19570Sstevel@tonic-gate 			break;
19580Sstevel@tonic-gate 		}
19590Sstevel@tonic-gate 		ret_size = soreadfile(fp, mp->b_rptr, fileoff, &error, iosize);
19600Sstevel@tonic-gate 
19610Sstevel@tonic-gate 		/* Error, or reached EOF? */
19620Sstevel@tonic-gate 		if ((error != 0) || (ret_size == 0)) {
19630Sstevel@tonic-gate 			freeb(mp);
19640Sstevel@tonic-gate 			break;
19650Sstevel@tonic-gate 		}
19660Sstevel@tonic-gate 		mp->b_wptr = mp->b_rptr + ret_size;
19670Sstevel@tonic-gate 
19680Sstevel@tonic-gate 		snf_enque(sr, mp);
19690Sstevel@tonic-gate 		size -= ret_size;
19700Sstevel@tonic-gate 		fileoff += ret_size;
19710Sstevel@tonic-gate 	}
19720Sstevel@tonic-gate 	(void) VOP_IOCTL(fp->f_vnode, _FIODIRECTIO, DIRECTIO_OFF, 0,
19730Sstevel@tonic-gate 	    kcred, NULL);
19740Sstevel@tonic-gate 	mutex_enter(&sr->sr_lock);
19750Sstevel@tonic-gate 	sr->sr_read_error = error;
19760Sstevel@tonic-gate 	sr->sr_read_error |= SR_READ_DONE;
19770Sstevel@tonic-gate 	cv_signal(&sr->sr_cv);
19780Sstevel@tonic-gate 	mutex_exit(&sr->sr_lock);
19790Sstevel@tonic-gate }
19800Sstevel@tonic-gate 
19810Sstevel@tonic-gate void
19820Sstevel@tonic-gate snf_async_thread(void)
19830Sstevel@tonic-gate {
19840Sstevel@tonic-gate 	snf_req_t *sr;
19850Sstevel@tonic-gate 	callb_cpr_t cprinfo;
19860Sstevel@tonic-gate 	clock_t time_left = 1;
19870Sstevel@tonic-gate 	clock_t now;
19880Sstevel@tonic-gate 
19890Sstevel@tonic-gate 	CALLB_CPR_INIT(&cprinfo, &snfq->snfq_lock, callb_generic_cpr, "snfq");
19900Sstevel@tonic-gate 
19910Sstevel@tonic-gate 	mutex_enter(&snfq->snfq_lock);
19920Sstevel@tonic-gate 	for (;;) {
19930Sstevel@tonic-gate 		/*
19940Sstevel@tonic-gate 		 * If we didn't find an entry, block until woken up
19950Sstevel@tonic-gate 		 * and then look through the queue again.
19960Sstevel@tonic-gate 		 */
19970Sstevel@tonic-gate 		while ((sr = snfq->snfq_req_head) == NULL) {
19980Sstevel@tonic-gate 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
19990Sstevel@tonic-gate 			if (time_left <= 0) {
20000Sstevel@tonic-gate 				snfq->snfq_svc_threads--;
20010Sstevel@tonic-gate 				CALLB_CPR_EXIT(&cprinfo);
20020Sstevel@tonic-gate 				thread_exit();
20030Sstevel@tonic-gate 				/* NOTREACHED */
20040Sstevel@tonic-gate 			}
20050Sstevel@tonic-gate 			snfq->snfq_idle_cnt++;
20060Sstevel@tonic-gate 
20070Sstevel@tonic-gate 			time_to_wait(&now, snfq_timeout);
20080Sstevel@tonic-gate 			time_left = cv_timedwait(&snfq->snfq_cv,
20090Sstevel@tonic-gate 			    &snfq->snfq_lock, now);
20100Sstevel@tonic-gate 			snfq->snfq_idle_cnt--;
20110Sstevel@tonic-gate 
20120Sstevel@tonic-gate 			CALLB_CPR_SAFE_END(&cprinfo, &snfq->snfq_lock);
20130Sstevel@tonic-gate 		}
20140Sstevel@tonic-gate 		snfq->snfq_req_head = sr->sr_next;
20150Sstevel@tonic-gate 		snfq->snfq_req_cnt--;
20160Sstevel@tonic-gate 		mutex_exit(&snfq->snfq_lock);
20170Sstevel@tonic-gate 		snf_async_read(sr);
20180Sstevel@tonic-gate 		mutex_enter(&snfq->snfq_lock);
20190Sstevel@tonic-gate 	}
20200Sstevel@tonic-gate }
20210Sstevel@tonic-gate 
20220Sstevel@tonic-gate 
20230Sstevel@tonic-gate snf_req_t *
20240Sstevel@tonic-gate create_thread(int operation, struct vnode *vp, file_t *fp,
20250Sstevel@tonic-gate     u_offset_t fileoff, u_offset_t size)
20260Sstevel@tonic-gate {
20270Sstevel@tonic-gate 	snf_req_t *sr;
20280Sstevel@tonic-gate 	stdata_t *stp;
20290Sstevel@tonic-gate 
20300Sstevel@tonic-gate 	sr = (snf_req_t *)kmem_zalloc(sizeof (snf_req_t), KM_SLEEP);
20310Sstevel@tonic-gate 
20320Sstevel@tonic-gate 	sr->sr_vp = vp;
20330Sstevel@tonic-gate 	sr->sr_fp = fp;
20340Sstevel@tonic-gate 	stp = vp->v_stream;
20350Sstevel@tonic-gate 
20360Sstevel@tonic-gate 	/*
20370Sstevel@tonic-gate 	 * Store sd_qn_maxpsz into sr_maxpsz while we have the stream head;
20380Sstevel@tonic-gate 	 * the stream might be closed before the thread returns from snf_async_read.
20390Sstevel@tonic-gate 	 */
20400Sstevel@tonic-gate 	if (stp->sd_qn_maxpsz > 0) {
20410Sstevel@tonic-gate 		sr->sr_maxpsz = MIN(MAXBSIZE, stp->sd_qn_maxpsz);
20420Sstevel@tonic-gate 	} else {
20430Sstevel@tonic-gate 		sr->sr_maxpsz = MAXBSIZE;
20440Sstevel@tonic-gate 	}
20450Sstevel@tonic-gate 
20460Sstevel@tonic-gate 	sr->sr_operation = operation;
20470Sstevel@tonic-gate 	sr->sr_file_off = fileoff;
20480Sstevel@tonic-gate 	sr->sr_file_size = size;
20490Sstevel@tonic-gate 	sr->sr_hiwat = sendfile_req_hiwat;
20500Sstevel@tonic-gate 	sr->sr_lowat = sendfile_req_lowat;
20510Sstevel@tonic-gate 	mutex_init(&sr->sr_lock, NULL, MUTEX_DEFAULT, NULL);
20520Sstevel@tonic-gate 	cv_init(&sr->sr_cv, NULL, CV_DEFAULT, NULL);
20530Sstevel@tonic-gate 	/*
20540Sstevel@tonic-gate 	 * See whether we need another thread for servicing this
20550Sstevel@tonic-gate 	 * request. If there are already enough requests queued for the
20560Sstevel@tonic-gate 	 * existing threads, create another thread, provided we don't
20570Sstevel@tonic-gate 	 * exceed snfq_max_threads.
20580Sstevel@tonic-gate 	 */
20590Sstevel@tonic-gate 	mutex_enter(&snfq->snfq_lock);
20600Sstevel@tonic-gate 	if (snfq->snfq_req_cnt >= snfq->snfq_idle_cnt &&
20610Sstevel@tonic-gate 	    snfq->snfq_svc_threads < snfq->snfq_max_threads) {
20620Sstevel@tonic-gate 		(void) thread_create(NULL, 0, &snf_async_thread, 0, 0, &p0,
20630Sstevel@tonic-gate 		    TS_RUN, minclsyspri);
20640Sstevel@tonic-gate 		snfq->snfq_svc_threads++;
20650Sstevel@tonic-gate 	}
20660Sstevel@tonic-gate 	if (snfq->snfq_req_head == NULL) {
20670Sstevel@tonic-gate 		snfq->snfq_req_head = snfq->snfq_req_tail = sr;
20680Sstevel@tonic-gate 		cv_signal(&snfq->snfq_cv);
20690Sstevel@tonic-gate 	} else {
20700Sstevel@tonic-gate 		snfq->snfq_req_tail->sr_next = sr;
20710Sstevel@tonic-gate 		snfq->snfq_req_tail = sr;
20720Sstevel@tonic-gate 	}
20730Sstevel@tonic-gate 	snfq->snfq_req_cnt++;
20740Sstevel@tonic-gate 	mutex_exit(&snfq->snfq_lock);
20750Sstevel@tonic-gate 	return (sr);
20760Sstevel@tonic-gate }
20770Sstevel@tonic-gate 
20780Sstevel@tonic-gate int
20790Sstevel@tonic-gate snf_direct_io(file_t *fp, file_t *rfp, u_offset_t fileoff, u_offset_t size,
20800Sstevel@tonic-gate     ssize_t *count)
20810Sstevel@tonic-gate {
20820Sstevel@tonic-gate 	snf_req_t *sr;
20830Sstevel@tonic-gate 	mblk_t *mp;
20840Sstevel@tonic-gate 	int iosize;
20850Sstevel@tonic-gate 	int error = 0;
20860Sstevel@tonic-gate 	short fflag;
20870Sstevel@tonic-gate 	struct vnode *vp;
20880Sstevel@tonic-gate 	int ksize;
20890Sstevel@tonic-gate 
20900Sstevel@tonic-gate 	ksize = 0;
20910Sstevel@tonic-gate 	*count = 0;
20920Sstevel@tonic-gate 
20930Sstevel@tonic-gate 	vp = fp->f_vnode;
20940Sstevel@tonic-gate 	fflag = fp->f_flag;
20950Sstevel@tonic-gate 	if ((sr = create_thread(READ_OP, vp, rfp, fileoff, size)) == NULL)
20960Sstevel@tonic-gate 		return (EAGAIN);
20970Sstevel@tonic-gate 
20980Sstevel@tonic-gate 	/*
20990Sstevel@tonic-gate 	 * We check for the read error in snf_deque. It already has to check
21000Sstevel@tonic-gate 	 * for a successful SR_READ_DONE and return NULL, so we might
21010Sstevel@tonic-gate 	 * as well make the additional check there.
21020Sstevel@tonic-gate 	 */
21030Sstevel@tonic-gate 	while ((mp = snf_deque(sr)) != NULL) {
21040Sstevel@tonic-gate 
21050Sstevel@tonic-gate 		if (ISSIG(curthread, JUSTLOOKING)) {
21060Sstevel@tonic-gate 			freeb(mp);
21070Sstevel@tonic-gate 			error = EINTR;
21080Sstevel@tonic-gate 			break;
21090Sstevel@tonic-gate 		}
21100Sstevel@tonic-gate 		iosize = MBLKL(mp);
21110Sstevel@tonic-gate 
21120Sstevel@tonic-gate 		if ((error = kstrwritemp(vp, mp, fflag)) != 0) {
21130Sstevel@tonic-gate 			freeb(mp);
21140Sstevel@tonic-gate 			break;
21150Sstevel@tonic-gate 		}
21160Sstevel@tonic-gate 		ksize += iosize;
21170Sstevel@tonic-gate 	}
21180Sstevel@tonic-gate 	*count = ksize;
21190Sstevel@tonic-gate 
21200Sstevel@tonic-gate 	mutex_enter(&sr->sr_lock);
21210Sstevel@tonic-gate 	sr->sr_write_error = error;
21220Sstevel@tonic-gate 	/* See the big comment above for why we cv_signal here. */
21230Sstevel@tonic-gate 	cv_signal(&sr->sr_cv);
21240Sstevel@tonic-gate 
21250Sstevel@tonic-gate 	/* Wait for the reader to complete always. */
21260Sstevel@tonic-gate 	while (!(sr->sr_read_error & SR_READ_DONE)) {
21270Sstevel@tonic-gate 		cv_wait(&sr->sr_cv, &sr->sr_lock);
21280Sstevel@tonic-gate 	}
21290Sstevel@tonic-gate 	/* If there is no write error, check for read error. */
21300Sstevel@tonic-gate 	if (error == 0)
21310Sstevel@tonic-gate 		error = (sr->sr_read_error & ~SR_READ_DONE);
21320Sstevel@tonic-gate 
21330Sstevel@tonic-gate 	if (error != 0) {
21340Sstevel@tonic-gate 		mblk_t *next_mp;
21350Sstevel@tonic-gate 
21360Sstevel@tonic-gate 		mp = sr->sr_mp_head;
21370Sstevel@tonic-gate 		while (mp != NULL) {
21380Sstevel@tonic-gate 			next_mp = mp->b_next;
21390Sstevel@tonic-gate 			mp->b_next = NULL;
21400Sstevel@tonic-gate 			freeb(mp);
21410Sstevel@tonic-gate 			mp = next_mp;
21420Sstevel@tonic-gate 		}
21430Sstevel@tonic-gate 	}
21440Sstevel@tonic-gate 	mutex_exit(&sr->sr_lock);
21450Sstevel@tonic-gate 	kmem_free(sr, sizeof (snf_req_t));
21460Sstevel@tonic-gate 	return (error);
21470Sstevel@tonic-gate }
21480Sstevel@tonic-gate 
21490Sstevel@tonic-gate typedef struct {
21500Sstevel@tonic-gate 	frtn_t		snfi_frtn;
21510Sstevel@tonic-gate 	caddr_t		snfi_base;
21520Sstevel@tonic-gate 	uint_t		snfi_mapoff;
21530Sstevel@tonic-gate 	size_t		snfi_len;
21540Sstevel@tonic-gate 	vnode_t		*snfi_vp;
21550Sstevel@tonic-gate } snf_smap_desbinfo;
21560Sstevel@tonic-gate 
21570Sstevel@tonic-gate /*
21580Sstevel@tonic-gate  * The callback function invoked when the last reference to the mblk is
21590Sstevel@tonic-gate  * dropped; this normally occurs when TCP receives the ack, but it can also
21600Sstevel@tonic-gate  * be done by the driver due to lazy reclaim.
21610Sstevel@tonic-gate  */
21620Sstevel@tonic-gate void
21630Sstevel@tonic-gate snf_smap_desbfree(snf_smap_desbinfo *snfi)
21640Sstevel@tonic-gate {
21650Sstevel@tonic-gate 	if (!segmap_kpm) {
21660Sstevel@tonic-gate 		/*
21670Sstevel@tonic-gate 		 * We don't need to call segmap_fault(F_SOFTUNLOCK) for
21680Sstevel@tonic-gate 		 * segmap_kpm as long as the latter never falls back to
21690Sstevel@tonic-gate 		 * "use_segmap_range". (See segmap_getmapflt().)
21700Sstevel@tonic-gate 		 *
21710Sstevel@tonic-gate 		 * Using S_OTHER saves a redundant hat_setref() in
21720Sstevel@tonic-gate 		 * segmap_unlock().
21730Sstevel@tonic-gate 		 */
21740Sstevel@tonic-gate 		(void) segmap_fault(kas.a_hat, segkmap,
2175408Skrgopi 		    (caddr_t)(uintptr_t)(((uintptr_t)snfi->snfi_base +
2176408Skrgopi 		    snfi->snfi_mapoff) & PAGEMASK), snfi->snfi_len,
2177408Skrgopi 		    F_SOFTUNLOCK, S_OTHER);
21780Sstevel@tonic-gate 	}
21790Sstevel@tonic-gate 	(void) segmap_release(segkmap, snfi->snfi_base, SM_DONTNEED);
21800Sstevel@tonic-gate 	VN_RELE(snfi->snfi_vp);
21810Sstevel@tonic-gate 	kmem_free(snfi, sizeof (*snfi));
21820Sstevel@tonic-gate }
21830Sstevel@tonic-gate 
21840Sstevel@tonic-gate /*
21850Sstevel@tonic-gate  * Use segmap instead of bcopy to send down a chain of desballoca'ed mblks.
21860Sstevel@tonic-gate  * Each mblk contains a segmap slot of no more than MAXBSIZE. The total
21870Sstevel@tonic-gate  * length of a chain is no more than sd_qn_maxpsz.
21880Sstevel@tonic-gate  *
21890Sstevel@tonic-gate  * At the end of the whole sendfile() operation, we wait till the data from
21900Sstevel@tonic-gate  * the last mblk is ack'ed by the transport before returning so that the
21910Sstevel@tonic-gate  * caller of sendfile() can safely modify the file content.
21920Sstevel@tonic-gate  */
21930Sstevel@tonic-gate int
21940Sstevel@tonic-gate snf_segmap(file_t *fp, vnode_t *fvp, u_offset_t fileoff, u_offset_t size,
21950Sstevel@tonic-gate     uint_t maxpsz, ssize_t *count, boolean_t nowait)
21960Sstevel@tonic-gate {
21970Sstevel@tonic-gate 	caddr_t base;
21980Sstevel@tonic-gate 	int mapoff;
21990Sstevel@tonic-gate 	vnode_t *vp;
22000Sstevel@tonic-gate 	mblk_t *mp, *mp1;
22010Sstevel@tonic-gate 	int iosize, iosize1;
22020Sstevel@tonic-gate 	int error;
22030Sstevel@tonic-gate 	short fflag;
22040Sstevel@tonic-gate 	int ksize;
22050Sstevel@tonic-gate 	snf_smap_desbinfo *snfi;
22060Sstevel@tonic-gate 	struct vattr va;
22070Sstevel@tonic-gate 	boolean_t dowait = B_FALSE;
22080Sstevel@tonic-gate 
22090Sstevel@tonic-gate 	vp = fp->f_vnode;
22100Sstevel@tonic-gate 	fflag = fp->f_flag;
22110Sstevel@tonic-gate 	ksize = 0;
22120Sstevel@tonic-gate 	for (;;) {
22130Sstevel@tonic-gate 		if (ISSIG(curthread, JUSTLOOKING)) {
22140Sstevel@tonic-gate 			error = EINTR;
22150Sstevel@tonic-gate 			break;
22160Sstevel@tonic-gate 		}
22170Sstevel@tonic-gate 		iosize = 0;
22180Sstevel@tonic-gate 		mp = NULL;
22190Sstevel@tonic-gate 		do {
22200Sstevel@tonic-gate 			mapoff = fileoff & MAXBOFFSET;
22210Sstevel@tonic-gate 			iosize1 = MAXBSIZE - mapoff;
22220Sstevel@tonic-gate 			if (iosize1 > size)
22230Sstevel@tonic-gate 				iosize1 = size;
22240Sstevel@tonic-gate 			/*
22250Sstevel@tonic-gate 			 * we don't forcefault because we'll call
22260Sstevel@tonic-gate 			 * segmap_fault(F_SOFTLOCK) next.
22270Sstevel@tonic-gate 			 *
22280Sstevel@tonic-gate 			 * S_READ will get the ref bit set (by either
22290Sstevel@tonic-gate 			 * segmap_getmapflt() or segmap_fault()) and page
22300Sstevel@tonic-gate 			 * shared locked.
22310Sstevel@tonic-gate 			 */
22320Sstevel@tonic-gate 			base = segmap_getmapflt(segkmap, fvp, fileoff, iosize1,
22330Sstevel@tonic-gate 			    segmap_kpm ? SM_FAULT : 0, S_READ);
22340Sstevel@tonic-gate 
22350Sstevel@tonic-gate 			snfi = kmem_alloc(sizeof (*snfi), KM_SLEEP);
22360Sstevel@tonic-gate 			snfi->snfi_len = (size_t)roundup(mapoff + iosize1,
22370Sstevel@tonic-gate 			    PAGESIZE) - (mapoff & PAGEMASK);
22380Sstevel@tonic-gate 			/*
22390Sstevel@tonic-gate 			 * We must call segmap_fault() even for segmap_kpm
22400Sstevel@tonic-gate 			 * because that's how error gets returned.
22410Sstevel@tonic-gate 			 * (segmap_getmapflt() never fails but segmap_fault()
22420Sstevel@tonic-gate 			 * does.)
22430Sstevel@tonic-gate 			 */
22440Sstevel@tonic-gate 			if (segmap_fault(kas.a_hat, segkmap,
2245408Skrgopi 			    (caddr_t)(uintptr_t)(((uintptr_t)base + mapoff) &
2246408Skrgopi 			    PAGEMASK), snfi->snfi_len, F_SOFTLOCK,
2247408Skrgopi 			    S_READ) != 0) {
22480Sstevel@tonic-gate 				(void) segmap_release(segkmap, base, 0);
22490Sstevel@tonic-gate 				kmem_free(snfi, sizeof (*snfi));
22500Sstevel@tonic-gate 				freemsg(mp);
22510Sstevel@tonic-gate 				error = EIO;
22520Sstevel@tonic-gate 				goto out;
22530Sstevel@tonic-gate 			}
22540Sstevel@tonic-gate 			snfi->snfi_frtn.free_func = snf_smap_desbfree;
22550Sstevel@tonic-gate 			snfi->snfi_frtn.free_arg = (caddr_t)snfi;
22560Sstevel@tonic-gate 			snfi->snfi_base = base;
22570Sstevel@tonic-gate 			snfi->snfi_mapoff = mapoff;
22582994Sss146032 			mp1 = esballoca((uchar_t *)base + mapoff,
22590Sstevel@tonic-gate 			    iosize1, BPRI_HI, &snfi->snfi_frtn);
22600Sstevel@tonic-gate 
22610Sstevel@tonic-gate 			if (mp1 == NULL) {
22620Sstevel@tonic-gate 				(void) segmap_fault(kas.a_hat, segkmap,
2263408Skrgopi 				    (caddr_t)(uintptr_t)(((uintptr_t)base +
2264408Skrgopi 				    mapoff) & PAGEMASK), snfi->snfi_len,
22650Sstevel@tonic-gate 				    F_SOFTUNLOCK, S_OTHER);
22660Sstevel@tonic-gate 				(void) segmap_release(segkmap, base, 0);
22670Sstevel@tonic-gate 				kmem_free(snfi, sizeof (*snfi));
22680Sstevel@tonic-gate 				freemsg(mp);
22690Sstevel@tonic-gate 				error = EAGAIN;
22700Sstevel@tonic-gate 				goto out;
22710Sstevel@tonic-gate 			}
22720Sstevel@tonic-gate 			VN_HOLD(fvp);
22730Sstevel@tonic-gate 			snfi->snfi_vp = fvp;
22740Sstevel@tonic-gate 			mp1->b_wptr += iosize1;
22750Sstevel@tonic-gate 
22760Sstevel@tonic-gate 			/* Mark this dblk with the zero-copy flag */
22770Sstevel@tonic-gate 			mp1->b_datap->db_struioflag |= STRUIO_ZC;
22780Sstevel@tonic-gate 			if (mp == NULL)
22790Sstevel@tonic-gate 				mp = mp1;
22800Sstevel@tonic-gate 			else
22810Sstevel@tonic-gate 				linkb(mp, mp1);
22820Sstevel@tonic-gate 			iosize += iosize1;
22830Sstevel@tonic-gate 			fileoff += iosize1;
22840Sstevel@tonic-gate 			size -= iosize1;
22850Sstevel@tonic-gate 		} while (iosize < maxpsz && size != 0);
22860Sstevel@tonic-gate 
22870Sstevel@tonic-gate 		if (size == 0 && !nowait) {
22880Sstevel@tonic-gate 			ASSERT(!dowait);
22890Sstevel@tonic-gate 			dowait = B_TRUE;
22900Sstevel@tonic-gate 			mp1->b_datap->db_struioflag |= STRUIO_ZCNOTIFY;
22910Sstevel@tonic-gate 		}
22920Sstevel@tonic-gate 		VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
22930Sstevel@tonic-gate 		if ((error = kstrwritemp(vp, mp, fflag)) != 0) {
22940Sstevel@tonic-gate 			*count = ksize;
22950Sstevel@tonic-gate 			freemsg(mp);
22960Sstevel@tonic-gate 			return (error);
22970Sstevel@tonic-gate 		}
22980Sstevel@tonic-gate 		ksize += iosize;
22990Sstevel@tonic-gate 		if (size == 0)
23000Sstevel@tonic-gate 			goto done;
23010Sstevel@tonic-gate 
23020Sstevel@tonic-gate 		(void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL);
23030Sstevel@tonic-gate 		va.va_mask = AT_SIZE;
23040Sstevel@tonic-gate 		error = VOP_GETATTR(fvp, &va, 0, kcred);
23050Sstevel@tonic-gate 		if (error)
23060Sstevel@tonic-gate 			break;
23070Sstevel@tonic-gate 		/* Read as much as possible. */
23080Sstevel@tonic-gate 		if (fileoff >= va.va_size)
23090Sstevel@tonic-gate 			break;
23100Sstevel@tonic-gate 		if (size + fileoff > va.va_size)
23110Sstevel@tonic-gate 			size = va.va_size - fileoff;
23120Sstevel@tonic-gate 	}
23130Sstevel@tonic-gate out:
23140Sstevel@tonic-gate 	VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
23150Sstevel@tonic-gate done:
23160Sstevel@tonic-gate 	*count = ksize;
23170Sstevel@tonic-gate 	if (dowait) {
23180Sstevel@tonic-gate 		stdata_t *stp;
23190Sstevel@tonic-gate 
23200Sstevel@tonic-gate 		stp = vp->v_stream;
23210Sstevel@tonic-gate 		mutex_enter(&stp->sd_lock);
23220Sstevel@tonic-gate 		while (!(stp->sd_flag & STZCNOTIFY)) {
23233415Samehta 			if (cv_wait_sig(&stp->sd_zcopy_wait,
2324*5227Stz204579 			    &stp->sd_lock) == 0) {
23253415Samehta 				error = EINTR;
23263415Samehta 				break;
23273415Samehta 			}
23280Sstevel@tonic-gate 		}
23290Sstevel@tonic-gate 		stp->sd_flag &= ~STZCNOTIFY;
23300Sstevel@tonic-gate 		mutex_exit(&stp->sd_lock);
23310Sstevel@tonic-gate 	}
23320Sstevel@tonic-gate 	return (error);
23330Sstevel@tonic-gate }
23340Sstevel@tonic-gate 
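/*
 * Copying sendfile path: read the requested range from fvp through the
 * page cache in chunks of at most maxpsz bytes, each into a freshly
 * allocated mblk, and push each mblk down the socket stream with
 * kstrwritemp().  Called with fvp's rwlock held as a reader; the lock is
 * dropped around each socket write and is released before returning.
 * The number of bytes queued is returned through *count.
 */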
23350Sstevel@tonic-gate int
23360Sstevel@tonic-gate snf_cache(file_t *fp, vnode_t *fvp, u_offset_t fileoff, u_offset_t size,
23370Sstevel@tonic-gate     uint_t maxpsz, ssize_t *count)
23380Sstevel@tonic-gate {
23390Sstevel@tonic-gate 	struct vnode *vp;
23400Sstevel@tonic-gate 	mblk_t *mp;
23410Sstevel@tonic-gate 	int iosize;
23420Sstevel@tonic-gate 	int error;
23430Sstevel@tonic-gate 	short fflag;
23440Sstevel@tonic-gate 	int ksize;
23450Sstevel@tonic-gate 	int ioflag;
23460Sstevel@tonic-gate 	struct uio auio;
23470Sstevel@tonic-gate 	struct iovec aiov;
23480Sstevel@tonic-gate 	struct vattr va;
23490Sstevel@tonic-gate 
23500Sstevel@tonic-gate 	vp = fp->f_vnode;
23510Sstevel@tonic-gate 	fflag = fp->f_flag;
23520Sstevel@tonic-gate 	ksize = 0;
23530Sstevel@tonic-gate 	auio.uio_iov = &aiov;
23540Sstevel@tonic-gate 	auio.uio_iovcnt = 1;
23550Sstevel@tonic-gate 	auio.uio_segflg = UIO_SYSSPACE;
23560Sstevel@tonic-gate 	auio.uio_llimit = MAXOFFSET_T;
23570Sstevel@tonic-gate 	auio.uio_fmode = fflag;
23580Sstevel@tonic-gate 	auio.uio_extflg = UIO_COPY_CACHED;
23590Sstevel@tonic-gate 	ioflag = auio.uio_fmode & (FSYNC|FDSYNC|FRSYNC);
23600Sstevel@tonic-gate 	/* If read sync is not asked for, filter sync flags */
23610Sstevel@tonic-gate 	if ((ioflag & FRSYNC) == 0)
23620Sstevel@tonic-gate 		ioflag &= ~(FSYNC|FDSYNC);
23630Sstevel@tonic-gate 	for (;;) {
23640Sstevel@tonic-gate 		if (ISSIG(curthread, JUSTLOOKING)) {
23650Sstevel@tonic-gate 			error = EINTR;
23660Sstevel@tonic-gate 			break;
23670Sstevel@tonic-gate 		}
23680Sstevel@tonic-gate 		iosize = (int)MIN(maxpsz, size);
23690Sstevel@tonic-gate 		if ((mp = allocb(iosize, BPRI_MED)) == NULL) {
23700Sstevel@tonic-gate 			error = EAGAIN;
23710Sstevel@tonic-gate 			break;
23720Sstevel@tonic-gate 		}
23730Sstevel@tonic-gate 		aiov.iov_base = (caddr_t)mp->b_rptr;
23740Sstevel@tonic-gate 		aiov.iov_len = iosize;
23750Sstevel@tonic-gate 		auio.uio_loffset = fileoff;
23760Sstevel@tonic-gate 		auio.uio_resid = iosize;
23770Sstevel@tonic-gate 
23780Sstevel@tonic-gate 		error = VOP_READ(fvp, &auio, ioflag, fp->f_cred, NULL);
23790Sstevel@tonic-gate 		iosize -= auio.uio_resid;
23800Sstevel@tonic-gate 
23810Sstevel@tonic-gate 		if (error == EINTR && iosize != 0)
23820Sstevel@tonic-gate 			error = 0;
23830Sstevel@tonic-gate 
23840Sstevel@tonic-gate 		if (error != 0 || iosize == 0) {
23850Sstevel@tonic-gate 			freeb(mp);
23860Sstevel@tonic-gate 			break;
23870Sstevel@tonic-gate 		}
23880Sstevel@tonic-gate 		mp->b_wptr = mp->b_rptr + iosize;
23890Sstevel@tonic-gate 
23900Sstevel@tonic-gate 		VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
23910Sstevel@tonic-gate 		if ((error = kstrwritemp(vp, mp, fflag)) != 0) {
23920Sstevel@tonic-gate 			*count = ksize;
23930Sstevel@tonic-gate 			freeb(mp);
23940Sstevel@tonic-gate 			return (error);
23950Sstevel@tonic-gate 		}
23960Sstevel@tonic-gate 		ksize += iosize;
23970Sstevel@tonic-gate 		size -= iosize;
23980Sstevel@tonic-gate 		if (size == 0)
23990Sstevel@tonic-gate 			goto done;
24000Sstevel@tonic-gate 
24010Sstevel@tonic-gate 		fileoff += iosize;
24020Sstevel@tonic-gate 		(void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL);
24030Sstevel@tonic-gate 		va.va_mask = AT_SIZE;
24040Sstevel@tonic-gate 		error = VOP_GETATTR(fvp, &va, 0, kcred);
24050Sstevel@tonic-gate 		if (error)
24060Sstevel@tonic-gate 			break;
24070Sstevel@tonic-gate 		/* Read as much as possible. */
24080Sstevel@tonic-gate 		if (fileoff >= va.va_size)
24090Sstevel@tonic-gate 			size = 0;
24100Sstevel@tonic-gate 		else if (size + fileoff > va.va_size)
24110Sstevel@tonic-gate 			size = va.va_size - fileoff;
24120Sstevel@tonic-gate 	}
24130Sstevel@tonic-gate 	VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
24140Sstevel@tonic-gate done:
24150Sstevel@tonic-gate 	*count = ksize;
24160Sstevel@tonic-gate 	return (error);
24170Sstevel@tonic-gate }
24180Sstevel@tonic-gate 
24190Sstevel@tonic-gate #if defined(_SYSCALL32_IMPL) || defined(_ILP32)
24200Sstevel@tonic-gate /*
24210Sstevel@tonic-gate  * Largefile support for 32 bit applications only.
24220Sstevel@tonic-gate  */
24230Sstevel@tonic-gate int
24240Sstevel@tonic-gate sosendfile64(file_t *fp, file_t *rfp, const struct ksendfilevec64 *sfv,
24250Sstevel@tonic-gate     ssize32_t *count32)
24260Sstevel@tonic-gate {
24270Sstevel@tonic-gate 	ssize32_t sfv_len;
24280Sstevel@tonic-gate 	u_offset_t sfv_off, va_size;
24290Sstevel@tonic-gate 	struct vnode *vp, *fvp, *realvp;
24300Sstevel@tonic-gate 	struct vattr va;
24310Sstevel@tonic-gate 	stdata_t *stp;
24320Sstevel@tonic-gate 	ssize_t count = 0;
24330Sstevel@tonic-gate 	int error = 0;
24340Sstevel@tonic-gate 	boolean_t dozcopy = B_FALSE;
24350Sstevel@tonic-gate 	uint_t maxpsz;
24360Sstevel@tonic-gate 
24370Sstevel@tonic-gate 	sfv_len = (ssize32_t)sfv->sfv_len;
24380Sstevel@tonic-gate 	if (sfv_len < 0) {
24390Sstevel@tonic-gate 		error = EINVAL;
24400Sstevel@tonic-gate 		goto out;
24410Sstevel@tonic-gate 	}
24420Sstevel@tonic-gate 
24430Sstevel@tonic-gate 	if (sfv_len == 0) goto out;
24440Sstevel@tonic-gate 
24450Sstevel@tonic-gate 	sfv_off = (u_offset_t)sfv->sfv_off;
24460Sstevel@tonic-gate 
24470Sstevel@tonic-gate 	/* Same checks as in pread */
24480Sstevel@tonic-gate 	if (sfv_off > MAXOFFSET_T) {
24490Sstevel@tonic-gate 		error = EINVAL;
24500Sstevel@tonic-gate 		goto out;
24510Sstevel@tonic-gate 	}
24520Sstevel@tonic-gate 	if (sfv_off + sfv_len > MAXOFFSET_T)
24530Sstevel@tonic-gate 		sfv_len = (ssize32_t)(MAXOFFSET_T - sfv_off);
24540Sstevel@tonic-gate 
24550Sstevel@tonic-gate 	/*
24560Sstevel@tonic-gate 	 * No further checks are done on sfv_len, so we cast it to
24570Sstevel@tonic-gate 	 * u_offset_t and share the snf_direct_io/snf_cache code between
24580Sstevel@tonic-gate 	 * the 32-bit and 64-bit paths.
24590Sstevel@tonic-gate 	 *
24600Sstevel@tonic-gate 	 * TODO: should do nbl_need_check() like read()?
24610Sstevel@tonic-gate 	 */
24620Sstevel@tonic-gate 	if (sfv_len > sendfile_max_size) {
24630Sstevel@tonic-gate 		sf_stats.ss_file_not_cached++;
24640Sstevel@tonic-gate 		error = snf_direct_io(fp, rfp, sfv_off, (u_offset_t)sfv_len,
24650Sstevel@tonic-gate 		    &count);
24660Sstevel@tonic-gate 		goto out;
24670Sstevel@tonic-gate 	}
24680Sstevel@tonic-gate 	fvp = rfp->f_vnode;
24690Sstevel@tonic-gate 	if (VOP_REALVP(fvp, &realvp) == 0)
24700Sstevel@tonic-gate 		fvp = realvp;
24710Sstevel@tonic-gate 	/*
24720Sstevel@tonic-gate 	 * Grab the lock as a reader to prevent the file size
24730Sstevel@tonic-gate 	 * from changing underneath.
24740Sstevel@tonic-gate 	 */
24750Sstevel@tonic-gate 	(void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL);
24760Sstevel@tonic-gate 	va.va_mask = AT_SIZE;
24770Sstevel@tonic-gate 	error = VOP_GETATTR(fvp, &va, 0, kcred);
24780Sstevel@tonic-gate 	va_size = va.va_size;
24794649Sdm120769 	if ((error != 0) || (va_size == 0) || (sfv_off >= va_size)) {
24800Sstevel@tonic-gate 		VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
24810Sstevel@tonic-gate 		goto out;
24820Sstevel@tonic-gate 	}
24830Sstevel@tonic-gate 	/* Read as much as possible. */
24840Sstevel@tonic-gate 	if (sfv_off + sfv_len > va_size)
24850Sstevel@tonic-gate 		sfv_len = va_size - sfv_off;
24860Sstevel@tonic-gate 
24870Sstevel@tonic-gate 	vp = fp->f_vnode;
24880Sstevel@tonic-gate 	stp = vp->v_stream;
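	/*
	 * Bound each transfer chunk by the stream's maximum packet size,
	 * rounded up to MAXBSIZE; a stream advertising INFPSZ is capped at
	 * maxphys instead.
	 */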
24890Sstevel@tonic-gate 	if (stp->sd_qn_maxpsz == INFPSZ)
24904173Spr14459 		maxpsz = maxphys;
24910Sstevel@tonic-gate 	else
24920Sstevel@tonic-gate 		maxpsz = roundup(stp->sd_qn_maxpsz, MAXBSIZE);
24930Sstevel@tonic-gate 	/*
24940Sstevel@tonic-gate 	 * When the NOWAIT flag is not set, we enable zero-copy only if the
24950Sstevel@tonic-gate 	 * transfer size is large enough. This prevents performance loss
24960Sstevel@tonic-gate 	 * when the caller sends the file piece by piece.
24970Sstevel@tonic-gate 	 */
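	/*
	 * Concretely, the zero-copy (segmap) path is considered only when
	 * the chunk is at least MAXBSIZE and either covers at least half
	 * the file, was submitted with SFV_NOWAIT, or is 16MB or larger;
	 * the file must also have no file locks and must be mappable
	 * (no VNOMAP).  In addition the stream head must be zero-copy safe
	 * (STZCVMSAFE), probed on first use by enabling SO_SND_COPYAVOID.
	 */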
24980Sstevel@tonic-gate 	if (sfv_len >= MAXBSIZE && (sfv_len >= (va_size >> 1) ||
24990Sstevel@tonic-gate 	    (sfv->sfv_flag & SFV_NOWAIT) || sfv_len >= 0x1000000) &&
25004173Spr14459 	    !vn_has_flocks(fvp) && !(fvp->v_flag & VNOMAP)) {
25010Sstevel@tonic-gate 		if ((stp->sd_copyflag & (STZCVMSAFE|STZCVMUNSAFE)) == 0) {
25020Sstevel@tonic-gate 			int on = 1;
25030Sstevel@tonic-gate 
25040Sstevel@tonic-gate 			if (SOP_SETSOCKOPT(VTOSO(vp), SOL_SOCKET,
25050Sstevel@tonic-gate 			    SO_SND_COPYAVOID, &on, sizeof (on)) == 0)
25060Sstevel@tonic-gate 				dozcopy = B_TRUE;
25070Sstevel@tonic-gate 		} else {
25080Sstevel@tonic-gate 			dozcopy = (stp->sd_copyflag & STZCVMSAFE);
25090Sstevel@tonic-gate 		}
25100Sstevel@tonic-gate 	}
25110Sstevel@tonic-gate 	if (dozcopy) {
25120Sstevel@tonic-gate 		sf_stats.ss_file_segmap++;
25130Sstevel@tonic-gate 		error = snf_segmap(fp, fvp, sfv_off, (u_offset_t)sfv_len,
25140Sstevel@tonic-gate 		    maxpsz, &count, ((sfv->sfv_flag & SFV_NOWAIT) != 0));
25150Sstevel@tonic-gate 	} else {
25160Sstevel@tonic-gate 		sf_stats.ss_file_cached++;
25170Sstevel@tonic-gate 		error = snf_cache(fp, fvp, sfv_off, (u_offset_t)sfv_len,
25180Sstevel@tonic-gate 		    maxpsz, &count);
25190Sstevel@tonic-gate 	}
25200Sstevel@tonic-gate out:
25210Sstevel@tonic-gate 	releasef(sfv->sfv_fd);
25220Sstevel@tonic-gate 	*count32 = (ssize32_t)count;
25230Sstevel@tonic-gate 	return (error);
25240Sstevel@tonic-gate }
25250Sstevel@tonic-gate #endif
25260Sstevel@tonic-gate 
25270Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL
25280Sstevel@tonic-gate /*
25290Sstevel@tonic-gate  * recv32(), recvfrom32(), send32(), sendto32(): intentionally return a
25300Sstevel@tonic-gate  * ssize_t rather than ssize32_t; see the comments above read32 for details.
25310Sstevel@tonic-gate  */
25320Sstevel@tonic-gate 
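/*
 * Each of these thunks simply widens the ILP32 arguments (caddr32_t
 * cookies become native pointers via uintptr_t) and hands off to the
 * corresponding native handler.
 */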
25330Sstevel@tonic-gate ssize_t
25340Sstevel@tonic-gate recv32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags)
25350Sstevel@tonic-gate {
25360Sstevel@tonic-gate 	return (recv(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags));
25370Sstevel@tonic-gate }
25380Sstevel@tonic-gate 
25390Sstevel@tonic-gate ssize_t
25400Sstevel@tonic-gate recvfrom32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags,
25410Sstevel@tonic-gate 	caddr32_t name, caddr32_t namelenp)
25420Sstevel@tonic-gate {
25430Sstevel@tonic-gate 	return (recvfrom(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags,
25440Sstevel@tonic-gate 	    (void *)(uintptr_t)name, (void *)(uintptr_t)namelenp));
25450Sstevel@tonic-gate }
25460Sstevel@tonic-gate 
25470Sstevel@tonic-gate ssize_t
25480Sstevel@tonic-gate send32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags)
25490Sstevel@tonic-gate {
25500Sstevel@tonic-gate 	return (send(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags));
25510Sstevel@tonic-gate }
25520Sstevel@tonic-gate 
25530Sstevel@tonic-gate ssize_t
25540Sstevel@tonic-gate sendto32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags,
25550Sstevel@tonic-gate 	caddr32_t name, socklen_t namelen)
25560Sstevel@tonic-gate {
25570Sstevel@tonic-gate 	return (sendto(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags,
25580Sstevel@tonic-gate 	    (void *)(uintptr_t)name, namelen));
25590Sstevel@tonic-gate }
25600Sstevel@tonic-gate #endif	/* _SYSCALL32_IMPL */
25610Sstevel@tonic-gate 
25620Sstevel@tonic-gate /*
25630Sstevel@tonic-gate  * Function wrappers (mostly around the sonode switch) for
25640Sstevel@tonic-gate  * backward compatibility.
25650Sstevel@tonic-gate  */
25660Sstevel@tonic-gate 
25670Sstevel@tonic-gate int
25680Sstevel@tonic-gate soaccept(struct sonode *so, int fflag, struct sonode **nsop)
25690Sstevel@tonic-gate {
25700Sstevel@tonic-gate 	return (SOP_ACCEPT(so, fflag, nsop));
25710Sstevel@tonic-gate }
25720Sstevel@tonic-gate 
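/*
 * A non-zero backlog makes this wrapper also perform the listen after a
 * successful bind.
 */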
25730Sstevel@tonic-gate int
25740Sstevel@tonic-gate sobind(struct sonode *so, struct sockaddr *name, socklen_t namelen,
25750Sstevel@tonic-gate     int backlog, int flags)
25760Sstevel@tonic-gate {
25770Sstevel@tonic-gate 	int	error;
25780Sstevel@tonic-gate 
25790Sstevel@tonic-gate 	error = SOP_BIND(so, name, namelen, flags);
25800Sstevel@tonic-gate 	if (error == 0 && backlog != 0)
25810Sstevel@tonic-gate 		return (SOP_LISTEN(so, backlog));
25820Sstevel@tonic-gate 
25830Sstevel@tonic-gate 	return (error);
25840Sstevel@tonic-gate }
25850Sstevel@tonic-gate 
25860Sstevel@tonic-gate int
25870Sstevel@tonic-gate solisten(struct sonode *so, int backlog)
25880Sstevel@tonic-gate {
25890Sstevel@tonic-gate 	return (SOP_LISTEN(so, backlog));
25900Sstevel@tonic-gate }
25910Sstevel@tonic-gate 
25920Sstevel@tonic-gate int
25930Sstevel@tonic-gate soconnect(struct sonode *so, const struct sockaddr *name, socklen_t namelen,
25940Sstevel@tonic-gate     int fflag, int flags)
25950Sstevel@tonic-gate {
25960Sstevel@tonic-gate 	return (SOP_CONNECT(so, name, namelen, fflag, flags));
25970Sstevel@tonic-gate }
25980Sstevel@tonic-gate 
25990Sstevel@tonic-gate int
26000Sstevel@tonic-gate sorecvmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop)
26010Sstevel@tonic-gate {
26020Sstevel@tonic-gate 	return (SOP_RECVMSG(so, msg, uiop));
26030Sstevel@tonic-gate }
26040Sstevel@tonic-gate 
26050Sstevel@tonic-gate int
26060Sstevel@tonic-gate sosendmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop)
26070Sstevel@tonic-gate {
26080Sstevel@tonic-gate 	return (SOP_SENDMSG(so, msg, uiop));
26090Sstevel@tonic-gate }
26100Sstevel@tonic-gate 
26110Sstevel@tonic-gate int
26120Sstevel@tonic-gate sogetpeername(struct sonode *so)
26130Sstevel@tonic-gate {
26140Sstevel@tonic-gate 	return (SOP_GETPEERNAME(so));
26150Sstevel@tonic-gate }
26160Sstevel@tonic-gate 
26170Sstevel@tonic-gate int
26180Sstevel@tonic-gate sogetsockname(struct sonode *so)
26190Sstevel@tonic-gate {
26200Sstevel@tonic-gate 	return (SOP_GETSOCKNAME(so));
26210Sstevel@tonic-gate }
26220Sstevel@tonic-gate 
26230Sstevel@tonic-gate int
26240Sstevel@tonic-gate soshutdown(struct sonode *so, int how)
26250Sstevel@tonic-gate {
26260Sstevel@tonic-gate 	return (SOP_SHUTDOWN(so, how));
26270Sstevel@tonic-gate }
26280Sstevel@tonic-gate 
26290Sstevel@tonic-gate int
26300Sstevel@tonic-gate sogetsockopt(struct sonode *so, int level, int option_name, void *optval,
26310Sstevel@tonic-gate     socklen_t *optlenp, int flags)
26320Sstevel@tonic-gate {
26330Sstevel@tonic-gate 	return (SOP_GETSOCKOPT(so, level, option_name, optval, optlenp,
26340Sstevel@tonic-gate 	    flags));
26350Sstevel@tonic-gate }
26360Sstevel@tonic-gate 
26370Sstevel@tonic-gate int
26380Sstevel@tonic-gate sosetsockopt(struct sonode *so, int level, int option_name, const void *optval,
26390Sstevel@tonic-gate     t_uscalar_t optlen)
26400Sstevel@tonic-gate {
26410Sstevel@tonic-gate 	return (SOP_SETSOCKOPT(so, level, option_name, optval, optlen));
26420Sstevel@tonic-gate }
26430Sstevel@tonic-gate 
26440Sstevel@tonic-gate /*
26450Sstevel@tonic-gate  * Because this is backward compatibility interface it only needs to be
26460Sstevel@tonic-gate  * Because this is a backward compatibility interface it only needs to be
26470Sstevel@tonic-gate  */
26480Sstevel@tonic-gate struct sonode *
26490Sstevel@tonic-gate socreate(vnode_t *accessvp, int domain, int type, int protocol, int version,
26500Sstevel@tonic-gate     struct sonode *tso, int *errorp)
26510Sstevel@tonic-gate {
26520Sstevel@tonic-gate 	return (sotpi_create(accessvp, domain, type, protocol, version, tso,
26530Sstevel@tonic-gate 	    errorp));
26540Sstevel@tonic-gate }
2655