xref: /onnv-gate/usr/src/uts/common/os/move.c (revision 6707:c3bc7e4da11b)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/errno.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <vm/as.h>
#include <vm/page.h>

#include <sys/dcopy.h>

int64_t uioa_maxpoll = -1;	/* <0 = noblock, 0 = block, >0 = block after */
#define	UIO_DCOPY_CHANNEL	0	/* uioa_hwst[] index: dcopy channel */
#define	UIO_DCOPY_CMD		1	/* uioa_hwst[] index: last dcopy cmd */

/*
 * Move "n" bytes at byte address "p"; "rw" indicates the direction
 * of the move, and the I/O parameters are provided in "uio", which is
 * updated to reflect the data which was moved.  Returns 0 on success
 * or a non-zero errno on failure.
 */
int
uiomove(void *p, size_t n, enum uio_rw rw, struct uio *uio)
{
	struct iovec *iov;
	ulong_t cnt;
	int error;

	while (n && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = MIN(iov->iov_len, n);
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (rw == UIO_READ) {
				error = xcopyout_nta(p, iov->iov_base, cnt,
				    (uio->uio_extflg & UIO_COPY_CACHED));
			} else {
				error = xcopyin_nta(iov->iov_base, p, cnt,
				    (uio->uio_extflg & UIO_COPY_CACHED));
			}

			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (rw == UIO_READ)
				error = kcopy_nta(p, iov->iov_base, cnt,
				    (uio->uio_extflg & UIO_COPY_CACHED));
			else
				error = kcopy_nta(iov->iov_base, p, cnt,
				    (uio->uio_extflg & UIO_COPY_CACHED));
			if (error)
				return (error);
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_loffset += cnt;
		p = (caddr_t)p + cnt;
		n -= cnt;
	}
	return (0);
}
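
/*
 * Illustrative use of uiomove() (a sketch, not part of this file): a
 * character driver's read(9E) entry point typically satisfies a request
 * by copying from a kernel buffer out to the caller's buffers.  The
 * xx_softc structure, xx_getsoftc(), xx_buf, and xx_buflen below are
 * hypothetical.
 *
 *	static int
 *	xx_read(dev_t dev, struct uio *uiop, cred_t *credp)
 *	{
 *		struct xx_softc *sc = xx_getsoftc(dev);
 *		size_t cnt = MIN(uiop->uio_resid, sc->xx_buflen);
 *
 *		return (uiomove(sc->xx_buf, cnt, UIO_READ, uiop));
 *	}
 *
 * UIO_READ moves data from the kernel buffer to the caller's address
 * space; UIO_WRITE would move it in the opposite direction.
 */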

/*
 * Transfer a character value into the address space delineated by a
 * uio and update fields within the uio for the next character.
 * Return 0 for success, EFAULT for error.
 */
int
ureadc(int val, struct uio *uiop)
{
	struct iovec *iovp;
	unsigned char c;

	/*
	 * First determine if the uio is valid: uiop should be
	 * non-NULL and the resid count > 0.
	 */
	if (!(uiop && uiop->uio_resid > 0))
		return (EFAULT);

	/*
	 * Scan through the iovecs until one is found that is non-empty.
	 * Return EFAULT if none is found.
	 */
	while (uiop->uio_iovcnt > 0) {
		iovp = uiop->uio_iov;
		if (iovp->iov_len <= 0) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else
			break;
	}

	if (uiop->uio_iovcnt <= 0)
		return (EFAULT);

	/*
	 * Transfer the character to uio space.
	 */

	c = (unsigned char)(val & 0xFF);

	switch (uiop->uio_segflg) {

	case UIO_USERISPACE:
	case UIO_USERSPACE:
		if (copyout(&c, iovp->iov_base, sizeof (unsigned char)))
			return (EFAULT);
		break;

	case UIO_SYSSPACE: /* can do direct copy since kernel-kernel */
		*iovp->iov_base = c;
		break;

	default:
		return (EFAULT); /* invalid segflg value */
	}

	/*
	 * Bump up/down the iovec and uio members to reflect the transfer.
	 */
	iovp->iov_base++;
	iovp->iov_len--;
	uiop->uio_resid--;
	uiop->uio_loffset++;
	return (0); /* success */
}

/*
 * Return a character value from the address space delineated by a
 * uio and update fields within the uio for the next character.
 * Return the character for success, -1 for error.
 */
int
uwritec(struct uio *uiop)
{
	struct iovec *iovp;
	unsigned char c;

	/*
	 * Verify we were passed a valid uio structure:
	 * (1) non-NULL uiop, (2) positive resid count,
	 * (3) there is an iovec with positive length.
	 */

	if (!(uiop && uiop->uio_resid > 0))
		return (-1);

	while (uiop->uio_iovcnt > 0) {
		iovp = uiop->uio_iov;
		if (iovp->iov_len <= 0) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else
			break;
	}

	if (uiop->uio_iovcnt <= 0)
		return (-1);

	/*
	 * Get the character from the uio address space.
	 */
	switch (uiop->uio_segflg) {

	case UIO_USERISPACE:
	case UIO_USERSPACE:
		if (copyin(iovp->iov_base, &c, sizeof (unsigned char)))
			return (-1);
		break;

	case UIO_SYSSPACE:
		c = *iovp->iov_base;
		break;

	default:
		return (-1); /* invalid segflg */
	}

	/*
	 * Adjust fields of the iovec and uio appropriately.
	 */
	iovp->iov_base++;
	iovp->iov_len--;
	uiop->uio_resid--;
	uiop->uio_loffset++;
	return ((int)c & 0xFF); /* success */
}
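
/*
 * Illustrative use of uwritec() (a sketch, not part of this file): a
 * byte-at-a-time write(9E) routine can drain the caller's data with
 * uwritec() until the uio is exhausted.  xx_putbyte() below is a
 * hypothetical per-byte device output routine.
 *
 *	static int
 *	xx_write(dev_t dev, struct uio *uiop, cred_t *credp)
 *	{
 *		int c;
 *
 *		while (uiop->uio_resid > 0) {
 *			if ((c = uwritec(uiop)) == -1)
 *				return (EFAULT);
 *			if (xx_putbyte(dev, (unsigned char)c) != 0)
 *				return (EIO);
 *		}
 *		return (0);
 *	}
 *
 * ureadc() is the mirror image: it pushes one byte into the uio and is
 * checked against 0/EFAULT rather than -1.
 */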

/*
 * Drop the next n chars out of *uiop.
 */
void
uioskip(uio_t *uiop, size_t n)
{
	if (n > uiop->uio_resid)
		return;
	while (n != 0) {
		register iovec_t	*iovp = uiop->uio_iov;
		register size_t		niovb = MIN(iovp->iov_len, n);

		if (niovb == 0) {
			uiop->uio_iov++;
			uiop->uio_iovcnt--;
			continue;
		}
		iovp->iov_base += niovb;
		uiop->uio_loffset += niovb;
		iovp->iov_len -= niovb;
		uiop->uio_resid -= niovb;
		n -= niovb;
	}
}
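
/*
 * Illustrative use of uioskip() (a sketch, not part of this file):
 * advance the uio past bytes that were already satisfied by some other
 * means, for example a fast path that delivered "done" bytes directly,
 * so that a following uiomove() starts at the right offset.  The
 * variables below are hypothetical.
 *
 *	if (done > 0)
 *		uioskip(uiop, done);
 *	error = uiomove(kbuf + done, len - done, UIO_READ, uiop);
 */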

/*
 * Dup the suio into the duio and the diov iovec array of size diov_cnt.
 * If diov is too small to dup suio then an error is returned, else 0.
 */
int
uiodup(uio_t *suio, uio_t *duio, iovec_t *diov, int diov_cnt)
{
	int ix;
	iovec_t *siov = suio->uio_iov;

	*duio = *suio;
	for (ix = 0; ix < suio->uio_iovcnt; ix++) {
		/* Check the bound before the copy to avoid overrunning diov */
		if (ix >= diov_cnt)
			return (1);
		diov[ix] = siov[ix];
	}
	duio->uio_iov = diov;
	return (0);
}
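
/*
 * Illustrative use of uiodup() (a sketch, not part of this file):
 * duplicate a uio before a transfer that may fail partway, so the
 * original is untouched and the operation can be retried.  The
 * DEF_IOV_MAX sizing and xx_transfer() below are assumptions; on
 * success the original uio is advanced by the bytes the dup consumed.
 *
 *	uio_t dup;
 *	iovec_t diov[DEF_IOV_MAX];
 *	int error;
 *
 *	if (uiodup(uiop, &dup, diov, DEF_IOV_MAX) != 0)
 *		return (EINVAL);
 *	error = xx_transfer(&dup);
 *	if (error == 0)
 *		uioskip(uiop, uiop->uio_resid - dup.uio_resid);
 */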

/*
 * Shadow state for checking if a platform has hardware asynchronous
 * copy capability and a minimum copy size, e.g. Intel's I/OAT DMA engine.
 *
 * Dcopy does a call-back to uioa_dcopy_enable() when a DMA device calls
 * into dcopy to register, and uioa_dcopy_disable() when the device calls
 * into dcopy to unregister.
 */
uioasync_t uioasync = {B_FALSE, 1024};

void
uioa_dcopy_enable()
{
	uioasync.enabled = B_TRUE;
}

void
uioa_dcopy_disable()
{
	uioasync.enabled = B_FALSE;
}

/*
 * Schedule an asynchronous move of "n" bytes at byte address "p";
 * "rw" indicates the direction of the move, and the I/O parameters
 * and async state are provided in "uioa", which is updated to reflect
 * the data which is to be moved.
 *
 * Returns 0 on success or a non-zero errno on failure.
 *
 * Note, while the uioasync APIs are general purpose in design
 * the current implementation is Intel I/OAT specific.
 */
int
uioamove(void *p, size_t n, enum uio_rw rw, uioa_t *uioa)
{
	int		soff, doff;
	uint64_t	pa;
	int		cnt;
	iovec_t		*iov;
	dcopy_handle_t	channel;
	dcopy_cmd_t	cmd;
	int		ret = 0;
	int		dcopy_flags;

	if (!(uioa->uioa_state & UIOA_ENABLED)) {
		/* The uioa_t isn't enabled */
		return (ENXIO);
	}

	if (uioa->uio_segflg != UIO_USERSPACE || rw != UIO_READ) {
		/* Only moves from kernel to user-land are supported */
		return (ENOTSUP);
	}

	channel = uioa->uioa_hwst[UIO_DCOPY_CHANNEL];
	cmd = uioa->uioa_hwst[UIO_DCOPY_CMD];
	dcopy_flags = DCOPY_NOSLEEP;

	/*
	 * While there are source bytes and destination bytes.
	 */
	while (n > 0 && uioa->uio_resid > 0) {
		iov = uioa->uio_iov;
		if (iov->iov_len == 0) {
			uioa->uio_iov++;
			uioa->uio_iovcnt--;
			uioa->uioa_lcur++;
			uioa->uioa_lppp = uioa->uioa_lcur->uioa_ppp;
			continue;
		}
		/*
		 * While there are source bytes, schedule an async
		 * DMA for the destination, page by page.
		 */
		while (n > 0) {
			/* Addr offset in page src/dst */
			soff = (uintptr_t)p & PAGEOFFSET;
			doff = (uintptr_t)iov->iov_base & PAGEOFFSET;
			/* Min copy count src and dst and page sized */
			cnt = MIN(n, iov->iov_len);
			cnt = MIN(cnt, PAGESIZE - soff);
			cnt = MIN(cnt, PAGESIZE - doff);
			/* XXX if next page(s) contiguous could use multipage */

			/*
			 * If we have an old command, we want to link all
			 * other commands to the next command we allocate so
			 * we only need to track the last command but can
			 * still free them all.
			 */
			if (cmd != NULL) {
				dcopy_flags |= DCOPY_ALLOC_LINK;
			}
			ret = dcopy_cmd_alloc(channel, dcopy_flags, &cmd);
			if (ret != DCOPY_SUCCESS) {
				/* Error of some sort */
				return (EIO);
			}
			uioa->uioa_hwst[UIO_DCOPY_CMD] = cmd;

			ASSERT(cmd->dp_version == DCOPY_CMD_V0);
			if (uioa_maxpoll >= 0) {
				/* uioafini() may block, so ask for an intr */
				cmd->dp_flags = DCOPY_CMD_INTR;
			} else {
				/* Non-blocking uioafini(), so no intr */
				cmd->dp_flags = DCOPY_CMD_NOFLAGS;
			}
			cmd->dp_cmd = DCOPY_CMD_COPY;
			pa = ptob((uint64_t)hat_getpfnum(kas.a_hat, p));
			cmd->dp.copy.cc_source = pa + soff;
			if (uioa->uioa_lcur->uioa_pfncnt == 0) {
				/* Have a (page_t **) */
				pa = ptob((uint64_t)(
				    *(page_t **)uioa->uioa_lppp)->p_pagenum);
			} else {
				/* Have a (pfn_t *) */
				pa = ptob((uint64_t)(
				    *(pfn_t *)uioa->uioa_lppp));
			}
			cmd->dp.copy.cc_dest = pa + doff;
			cmd->dp.copy.cc_size = cnt;
			ret = dcopy_cmd_post(cmd);
			if (ret != DCOPY_SUCCESS) {
				/* Error of some sort */
				return (EIO);
			}
			ret = 0;

			/* If UIOA_POLL not set, set it */
			if (!(uioa->uioa_state & UIOA_POLL))
				uioa->uioa_state |= UIOA_POLL;

			/* Update iov, uio, and local pointers/counters */
			iov->iov_base += cnt;
			iov->iov_len -= cnt;
			uioa->uio_resid -= cnt;
			uioa->uio_loffset += cnt;
			p = (caddr_t)p + cnt;
			n -= cnt;

			/* End of iovec? */
			if (iov->iov_len == 0) {
				/* Yup, next iovec */
				break;
			}

			/* Next dst addr page? */
			if (doff + cnt == PAGESIZE) {
				/* Yup, next page_t */
				uioa->uioa_lppp++;
			}
		}
	}

	return (ret);
}

/*
 * Initialize a uioa_t for a given uio_t for the current user context:
 * copy the common uio_t into the uioa_t, walk the shared iovec_t and
 * lock down the user-land page(s) containing the iovec_t data, then
 * map in the user-land pages using segkpm.
 */
int
uioainit(uio_t *uiop, uioa_t *uioap)
{
	caddr_t		addr;
	page_t		**pages;
	int		off;
	int		len;
	proc_t		*procp = ttoproc(curthread);
	struct as	*as = procp->p_as;
	iovec_t		*iov = uiop->uio_iov;
	int32_t		iovcnt = uiop->uio_iovcnt;
	uioa_page_t	*locked = uioap->uioa_locked;
	dcopy_handle_t	channel;
	int		error;

	if (!(uioap->uioa_state & UIOA_ALLOC)) {
		/* Can only init() a freshly allocated uioa_t */
		return (EINVAL);
	}

	if (iovcnt > UIOA_IOV_MAX) {
		/* Too many iovec_ts to handle; check before any alloc */
		return (E2BIG);
	}

	error = dcopy_alloc(DCOPY_NOSLEEP, &channel);
	if (error == DCOPY_NORESOURCES) {
		/* Turn off uioa */
		uioasync.enabled = B_FALSE;
		return (ENODEV);
	}
	if (error != DCOPY_SUCCESS) {
		/* Alloc failed */
		return (EIO);
	}

	uioap->uioa_hwst[UIO_DCOPY_CHANNEL] = channel;
	uioap->uioa_hwst[UIO_DCOPY_CMD] = NULL;

	/* Indicate uioa_t (will be) initialized */
	uioap->uioa_state = UIOA_INIT;

	/* uio_t/uioa_t common struct copy */
	*((uio_t *)uioap) = *uiop;

	/* Initialize the uioa_t's iovec_t state */
	uioap->uio_iov = iov;
	uioap->uio_iovcnt = iovcnt;

	/* Mark the uioap as async */
	uioap->uio_extflg |= UIO_ASYNC;

	/*
	 * For each iovec_t, lock-down the page(s) backing the iovec_t
	 * and save the page_t list for phys addr use in uioamove().
	 */
	iov = uiop->uio_iov;
	iovcnt = uiop->uio_iovcnt;
	while (iovcnt > 0) {
		addr = iov->iov_base;
		off = (uintptr_t)addr & PAGEOFFSET;
		addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
		len = iov->iov_len + off;

		/* Lock down page(s) for the iov span */
		if ((error = as_pagelock(as, &pages,
		    iov->iov_base, iov->iov_len, S_WRITE)) != 0) {
			/* Error */
			goto cleanup;
		}

		if (pages == NULL) {
			/*
			 * Need a page_t list but really only need
			 * a pfn list, so build one.
			 */
			pfn_t   *pfnp;
			int	pcnt = len >> PAGESHIFT;

			if (off)
				pcnt++;
			if ((pfnp = kmem_alloc(pcnt * sizeof (pfn_t),
			    KM_NOSLEEP)) == NULL) {
				error = ENOMEM;
				goto cleanup;
			}
			locked->uioa_ppp = (void **)pfnp;
			locked->uioa_pfncnt = pcnt;
			AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
			while (pcnt-- > 0) {
				*pfnp++ = hat_getpfnum(as->a_hat, addr);
				addr += PAGESIZE;
			}
			AS_LOCK_EXIT(as, &as->a_lock);
		} else {
			/* Have a page_t list, save it */
			locked->uioa_ppp = (void **)pages;
			locked->uioa_pfncnt = 0;
		}
		/* Save for as_pageunlock() in uioafini() */
		locked->uioa_base = iov->iov_base;
		locked->uioa_len = iov->iov_len;
		locked++;

		/* Next iovec_t */
		iov++;
		iovcnt--;
	}
	/* Initialize current pointer into uioa_locked[] and its uioa_ppp */
	uioap->uioa_lcur = uioap->uioa_locked;
	uioap->uioa_lppp = uioap->uioa_lcur->uioa_ppp;
	return (0);

cleanup:
	/* Unlock any previously locked page(s), freeing any pfn lists */
	while (locked > uioap->uioa_locked) {
		page_t **pages;

		locked--;
		if (locked->uioa_pfncnt == 0) {
			pages = (page_t **)locked->uioa_ppp;
		} else {
			pages = NULL;
			kmem_free(locked->uioa_ppp,
			    locked->uioa_pfncnt * sizeof (pfn_t));
		}
		as_pageunlock(as, pages, locked->uioa_base,
		    locked->uioa_len, S_WRITE);
	}

	/* Last, indicate the uioa_t is still in the alloc state */
	uioap->uioa_state = UIOA_ALLOC;

	return (error);
}

/*
 * Finish processing of a uioa_t by cleaning up any pending "uioap" actions.
 */
int
uioafini(uio_t *uiop, uioa_t *uioap)
{
	int32_t		iovcnt = uiop->uio_iovcnt;
	uioa_page_t	*locked = uioap->uioa_locked;
	struct as	*as = ttoproc(curthread)->p_as;
	dcopy_handle_t	channel;
	dcopy_cmd_t	cmd;
	int		ret = 0;

	ASSERT(uioap->uio_extflg & UIO_ASYNC);

	if (!(uioap->uioa_state & (UIOA_ENABLED|UIOA_FINI))) {
		/* Must be an active uioa_t */
		return (EINVAL);
	}

	channel = uioap->uioa_hwst[UIO_DCOPY_CHANNEL];
	cmd = uioap->uioa_hwst[UIO_DCOPY_CMD];

	/* XXX - why do we get cmd == NULL sometimes? */
	if (cmd != NULL) {
		if (uioap->uioa_state & UIOA_POLL) {
			/* Wait for the last dcopy() to finish */
			int64_t poll = 1;
			int poll_flag = DCOPY_POLL_NOFLAGS;

			do {
				if (uioa_maxpoll == 0 ||
				    (uioa_maxpoll > 0 &&
				    poll >= uioa_maxpoll)) {
					/* Always block, or block after maxpoll */
					poll_flag = DCOPY_POLL_BLOCK;
				} else {
					/* No block, poll */
					poll++;
				}
				ret = dcopy_cmd_poll(cmd, poll_flag);
			} while (ret == DCOPY_PENDING);

			if (ret == DCOPY_COMPLETED) {
				/* Poll/block succeeded */
				ret = 0;
			} else {
				/* Poll/block failed */
				ret = EIO;
			}
		}
		dcopy_cmd_free(&cmd);
	}

	dcopy_free(&channel);

	/* Unlock all page(s), iovec_t by iovec_t */
	while (iovcnt-- > 0) {
		page_t **pages;

		if (locked->uioa_pfncnt == 0) {
			/* as_pagelock() returned a (page_t **) list */
			pages = (page_t **)locked->uioa_ppp;
		} else {
			/* Our pfn_t array */
			pages = NULL;
			kmem_free(locked->uioa_ppp, locked->uioa_pfncnt *
			    sizeof (pfn_t));
		}
		as_pageunlock(as, pages, locked->uioa_base, locked->uioa_len,
		    S_WRITE);

		locked++;
	}
	/* uioa_t->uio_t common struct copy */
	*uiop = *((uio_t *)uioap);

	/*
	 * Last, reset the uioa state to alloc.
	 *
	 * Note, we only initialize the state here; all other members
	 * will be initialized in a subsequent uioainit().
	 */
	uioap->uioa_state = UIOA_ALLOC;

	uioap->uioa_hwst[UIO_DCOPY_CMD] = NULL;
	uioap->uioa_hwst[UIO_DCOPY_CHANNEL] = NULL;

	return (ret);
}
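
/*
 * Illustrative end-to-end flow (a sketch, not part of this file): a
 * caller wanting hardware-assisted copyout might drive a uioa_t through
 * its lifecycle as below.  The uioa_t is assumed to start in the
 * UIOA_ALLOC state, kbuf/len are hypothetical, "mincnt" is the assumed
 * name of the size threshold initialized in uioasync above, and real
 * callers manage the uioa_state bits with more care.
 *
 *	if (uioasync.enabled && uiop->uio_resid >= uioasync.mincnt &&
 *	    uioainit(uiop, uioap) == 0) {
 *		uioap->uioa_state = UIOA_ENABLED;
 *		error = uioamove(kbuf, len, UIO_READ, uioap);
 *		if (error == 0)
 *			error = uioafini(uiop, uioap);
 *	} else {
 *		error = uiomove(kbuf, len, UIO_READ, uiop);
 *	}
 */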