xref: /onnv-gate/usr/src/uts/common/os/aio.c (revision 0:68f95e015346)
1*0Sstevel@tonic-gate /*
2*0Sstevel@tonic-gate  * CDDL HEADER START
3*0Sstevel@tonic-gate  *
4*0Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*0Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*0Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*0Sstevel@tonic-gate  * with the License.
8*0Sstevel@tonic-gate  *
9*0Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*0Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*0Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*0Sstevel@tonic-gate  * and limitations under the License.
13*0Sstevel@tonic-gate  *
14*0Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*0Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*0Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*0Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*0Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*0Sstevel@tonic-gate  *
20*0Sstevel@tonic-gate  * CDDL HEADER END
21*0Sstevel@tonic-gate  */
22*0Sstevel@tonic-gate /*
23*0Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24*0Sstevel@tonic-gate  * Use is subject to license terms.
25*0Sstevel@tonic-gate  */
26*0Sstevel@tonic-gate 
27*0Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*0Sstevel@tonic-gate 
29*0Sstevel@tonic-gate /*
30*0Sstevel@tonic-gate  * Kernel asynchronous I/O.
31*0Sstevel@tonic-gate  * This is only for raw devices now (as of Nov. 1993).
32*0Sstevel@tonic-gate  */
33*0Sstevel@tonic-gate 
34*0Sstevel@tonic-gate #include <sys/types.h>
35*0Sstevel@tonic-gate #include <sys/errno.h>
36*0Sstevel@tonic-gate #include <sys/conf.h>
37*0Sstevel@tonic-gate #include <sys/file.h>
38*0Sstevel@tonic-gate #include <sys/fs/snode.h>
39*0Sstevel@tonic-gate #include <sys/unistd.h>
40*0Sstevel@tonic-gate #include <sys/cmn_err.h>
41*0Sstevel@tonic-gate #include <vm/as.h>
42*0Sstevel@tonic-gate #include <vm/faultcode.h>
43*0Sstevel@tonic-gate #include <sys/sysmacros.h>
44*0Sstevel@tonic-gate #include <sys/procfs.h>
45*0Sstevel@tonic-gate #include <sys/kmem.h>
46*0Sstevel@tonic-gate #include <sys/autoconf.h>
47*0Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
48*0Sstevel@tonic-gate #include <sys/sunddi.h>
49*0Sstevel@tonic-gate #include <sys/aio_impl.h>
50*0Sstevel@tonic-gate #include <sys/debug.h>
51*0Sstevel@tonic-gate #include <sys/param.h>
52*0Sstevel@tonic-gate #include <sys/systm.h>
53*0Sstevel@tonic-gate #include <sys/vmsystm.h>
54*0Sstevel@tonic-gate #include <sys/fs/pxfs_ki.h>
55*0Sstevel@tonic-gate #include <sys/contract/process_impl.h>
56*0Sstevel@tonic-gate 
57*0Sstevel@tonic-gate /*
58*0Sstevel@tonic-gate  * external entry point.
59*0Sstevel@tonic-gate  */
60*0Sstevel@tonic-gate #ifdef _LP64
61*0Sstevel@tonic-gate static int64_t kaioc(long, long, long, long, long, long);
62*0Sstevel@tonic-gate #endif
63*0Sstevel@tonic-gate static int kaio(ulong_t *, rval_t *);
64*0Sstevel@tonic-gate 
65*0Sstevel@tonic-gate 
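/*
 * run_mode values passed to the common helpers below; they select which
 * user-level aiocb layout a request carries: the native 64-bit aiocb
 * (AIO_64), the ILP32 aiocb (AIO_32), or the ILP32 largefile aiocb
 * (AIO_LARGEFILE, aiocb64_32_t).
 */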
66*0Sstevel@tonic-gate #define	AIO_64	0
67*0Sstevel@tonic-gate #define	AIO_32	1
68*0Sstevel@tonic-gate #define	AIO_LARGEFILE	2
69*0Sstevel@tonic-gate 
70*0Sstevel@tonic-gate /*
71*0Sstevel@tonic-gate  * implementation specific functions (private)
72*0Sstevel@tonic-gate  */
73*0Sstevel@tonic-gate #ifdef _LP64
74*0Sstevel@tonic-gate static int alio(int, int, aiocb_t **, int, struct sigevent *);
75*0Sstevel@tonic-gate #endif
76*0Sstevel@tonic-gate static int aionotify(void);
77*0Sstevel@tonic-gate static int aioinit(void);
78*0Sstevel@tonic-gate static int aiostart(void);
79*0Sstevel@tonic-gate static void alio_cleanup(aio_t *, aiocb_t **, int, int);
80*0Sstevel@tonic-gate static int (*check_vp(struct vnode *, int))(vnode_t *, struct aio_req *,
81*0Sstevel@tonic-gate     cred_t *);
82*0Sstevel@tonic-gate static void lio_set_error(aio_req_t *);
83*0Sstevel@tonic-gate static aio_t *aio_aiop_alloc();
84*0Sstevel@tonic-gate static int aio_req_alloc(aio_req_t **, aio_result_t *);
85*0Sstevel@tonic-gate static int aio_lio_alloc(aio_lio_t **);
86*0Sstevel@tonic-gate static aio_req_t *aio_req_done(void *);
87*0Sstevel@tonic-gate static aio_req_t *aio_req_remove(aio_req_t *);
88*0Sstevel@tonic-gate static int aio_req_find(aio_result_t *, aio_req_t **);
89*0Sstevel@tonic-gate static int aio_hash_insert(struct aio_req_t *, aio_t *);
90*0Sstevel@tonic-gate static int aio_req_setup(aio_req_t **, aio_t *, aiocb_t *,
91*0Sstevel@tonic-gate     aio_result_t *, int, vnode_t *);
92*0Sstevel@tonic-gate static int aio_cleanup_thread(aio_t *);
93*0Sstevel@tonic-gate static aio_lio_t *aio_list_get(aio_result_t *);
94*0Sstevel@tonic-gate static void lio_set_uerror(void *, int);
95*0Sstevel@tonic-gate extern void aio_zerolen(aio_req_t *);
96*0Sstevel@tonic-gate static int aiowait(struct timeval *, int, long	*);
97*0Sstevel@tonic-gate static int aiowaitn(void *, uint_t, uint_t *, timespec_t *);
98*0Sstevel@tonic-gate static int aio_unlock_requests(caddr_t iocblist, int iocb_index,
99*0Sstevel@tonic-gate     aio_req_t *reqlist, aio_t *aiop, model_t model);
100*0Sstevel@tonic-gate static int aio_reqlist_concat(aio_t *aiop, aio_req_t **reqlist, int max);
101*0Sstevel@tonic-gate static int aiosuspend(void *, int, struct timespec *, int,
102*0Sstevel@tonic-gate     long	*, int);
103*0Sstevel@tonic-gate static int aliowait(int, void *, int, void *, int);
104*0Sstevel@tonic-gate static int aioerror(void *, int);
105*0Sstevel@tonic-gate static int aio_cancel(int, void *, long	*, int);
106*0Sstevel@tonic-gate static int arw(int, int, char *, int, offset_t, aio_result_t *, int);
107*0Sstevel@tonic-gate static int aiorw(int, void *, int, int);
108*0Sstevel@tonic-gate 
109*0Sstevel@tonic-gate static int alioLF(int, void *, int, void *);
110*0Sstevel@tonic-gate static int aio_req_setupLF(aio_req_t **, aio_t *,
111*0Sstevel@tonic-gate     aiocb64_32_t *, aio_result_t *, int, vnode_t *);
112*0Sstevel@tonic-gate static int alio32(int, void *, int, void *);
113*0Sstevel@tonic-gate static int driver_aio_write(vnode_t *vp, struct aio_req *aio, cred_t *cred_p);
114*0Sstevel@tonic-gate static int driver_aio_read(vnode_t *vp, struct aio_req *aio, cred_t *cred_p);
115*0Sstevel@tonic-gate 
116*0Sstevel@tonic-gate #ifdef  _SYSCALL32_IMPL
117*0Sstevel@tonic-gate static void aiocb_LFton(aiocb64_32_t *, aiocb_t *);
118*0Sstevel@tonic-gate void	aiocb_32ton(aiocb32_t *, aiocb_t *);
119*0Sstevel@tonic-gate #endif /* _SYSCALL32_IMPL */
120*0Sstevel@tonic-gate 
121*0Sstevel@tonic-gate /*
122*0Sstevel@tonic-gate  * implementation specific functions (external)
123*0Sstevel@tonic-gate  */
124*0Sstevel@tonic-gate void aio_req_free(aio_t *, aio_req_t *);
125*0Sstevel@tonic-gate 
126*0Sstevel@tonic-gate /*
127*0Sstevel@tonic-gate  * Event Port framework
128*0Sstevel@tonic-gate  */
129*0Sstevel@tonic-gate 
130*0Sstevel@tonic-gate void aio_req_free_port(aio_t *, aio_req_t *);
131*0Sstevel@tonic-gate static int aio_port_callback(void *, int *, pid_t, int, void *);
132*0Sstevel@tonic-gate 
133*0Sstevel@tonic-gate /*
134*0Sstevel@tonic-gate  * This is the loadable module wrapper.
135*0Sstevel@tonic-gate  */
136*0Sstevel@tonic-gate #include <sys/modctl.h>
137*0Sstevel@tonic-gate #include <sys/syscall.h>
138*0Sstevel@tonic-gate 
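/*
 * System call entry definitions.  On an LP64 kernel the native entry
 * point is kaioc(), which takes its six arguments directly (SE_ARGC);
 * 32-bit callers (and all callers on a 32-bit kernel) go through kaio(),
 * which receives its arguments as a seven-slot uap[] array.
 */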
139*0Sstevel@tonic-gate #ifdef _LP64
140*0Sstevel@tonic-gate 
141*0Sstevel@tonic-gate static struct sysent kaio_sysent = {
142*0Sstevel@tonic-gate 	6,
143*0Sstevel@tonic-gate 	SE_NOUNLOAD | SE_64RVAL | SE_ARGC,
144*0Sstevel@tonic-gate 	(int (*)())kaioc
145*0Sstevel@tonic-gate };
146*0Sstevel@tonic-gate 
147*0Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL
148*0Sstevel@tonic-gate static struct sysent kaio_sysent32 = {
149*0Sstevel@tonic-gate 	7,
150*0Sstevel@tonic-gate 	SE_NOUNLOAD | SE_64RVAL,
151*0Sstevel@tonic-gate 	kaio
152*0Sstevel@tonic-gate };
153*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
154*0Sstevel@tonic-gate 
155*0Sstevel@tonic-gate #else   /* _LP64 */
156*0Sstevel@tonic-gate 
157*0Sstevel@tonic-gate static struct sysent kaio_sysent = {
158*0Sstevel@tonic-gate 	7,
159*0Sstevel@tonic-gate 	SE_NOUNLOAD | SE_32RVAL1,
160*0Sstevel@tonic-gate 	kaio
161*0Sstevel@tonic-gate };
162*0Sstevel@tonic-gate 
163*0Sstevel@tonic-gate #endif  /* _LP64 */
164*0Sstevel@tonic-gate 
165*0Sstevel@tonic-gate /*
166*0Sstevel@tonic-gate  * Module linkage information for the kernel.
167*0Sstevel@tonic-gate  */
168*0Sstevel@tonic-gate 
169*0Sstevel@tonic-gate static struct modlsys modlsys = {
170*0Sstevel@tonic-gate 	&mod_syscallops,
171*0Sstevel@tonic-gate 	"kernel Async I/O",
172*0Sstevel@tonic-gate 	&kaio_sysent
173*0Sstevel@tonic-gate };
174*0Sstevel@tonic-gate 
175*0Sstevel@tonic-gate #ifdef  _SYSCALL32_IMPL
176*0Sstevel@tonic-gate static struct modlsys modlsys32 = {
177*0Sstevel@tonic-gate 	&mod_syscallops32,
178*0Sstevel@tonic-gate 	"kernel Async I/O for 32 bit compatibility",
179*0Sstevel@tonic-gate 	&kaio_sysent32
180*0Sstevel@tonic-gate };
181*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
182*0Sstevel@tonic-gate 
183*0Sstevel@tonic-gate 
184*0Sstevel@tonic-gate static struct modlinkage modlinkage = {
185*0Sstevel@tonic-gate 	MODREV_1,
186*0Sstevel@tonic-gate 	&modlsys,
187*0Sstevel@tonic-gate #ifdef  _SYSCALL32_IMPL
188*0Sstevel@tonic-gate 	&modlsys32,
189*0Sstevel@tonic-gate #endif
190*0Sstevel@tonic-gate 	NULL
191*0Sstevel@tonic-gate };
192*0Sstevel@tonic-gate 
193*0Sstevel@tonic-gate int
194*0Sstevel@tonic-gate _init(void)
195*0Sstevel@tonic-gate {
196*0Sstevel@tonic-gate 	int retval;
197*0Sstevel@tonic-gate 
198*0Sstevel@tonic-gate 	if ((retval = mod_install(&modlinkage)) != 0)
199*0Sstevel@tonic-gate 		return (retval);
200*0Sstevel@tonic-gate 
201*0Sstevel@tonic-gate 	return (0);
202*0Sstevel@tonic-gate }
203*0Sstevel@tonic-gate 
204*0Sstevel@tonic-gate int
205*0Sstevel@tonic-gate _fini(void)
206*0Sstevel@tonic-gate {
207*0Sstevel@tonic-gate 	int retval;
208*0Sstevel@tonic-gate 
209*0Sstevel@tonic-gate 	retval = mod_remove(&modlinkage);
210*0Sstevel@tonic-gate 
211*0Sstevel@tonic-gate 	return (retval);
212*0Sstevel@tonic-gate }
213*0Sstevel@tonic-gate 
214*0Sstevel@tonic-gate int
215*0Sstevel@tonic-gate _info(struct modinfo *modinfop)
216*0Sstevel@tonic-gate {
217*0Sstevel@tonic-gate 	return (mod_info(&modlinkage, modinfop));
218*0Sstevel@tonic-gate }
219*0Sstevel@tonic-gate 
220*0Sstevel@tonic-gate #ifdef	_LP64
221*0Sstevel@tonic-gate static int64_t
222*0Sstevel@tonic-gate kaioc(
223*0Sstevel@tonic-gate 	long	a0,
224*0Sstevel@tonic-gate 	long	a1,
225*0Sstevel@tonic-gate 	long	a2,
226*0Sstevel@tonic-gate 	long	a3,
227*0Sstevel@tonic-gate 	long	a4,
228*0Sstevel@tonic-gate 	long	a5)
229*0Sstevel@tonic-gate {
230*0Sstevel@tonic-gate 	int	error;
231*0Sstevel@tonic-gate 	long	rval = 0;
232*0Sstevel@tonic-gate 
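	/*
	 * The AIO_POLL_BIT modifier is masked off for dispatch so that both
	 * variants of each command select the same case; the unmasked
	 * command word (a0) is still passed down to handlers such as arw().
	 */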
233*0Sstevel@tonic-gate 	switch ((int)a0 & ~AIO_POLL_BIT) {
234*0Sstevel@tonic-gate 	case AIOREAD:
235*0Sstevel@tonic-gate 		error = arw((int)a0, (int)a1, (char *)a2, (int)a3,
236*0Sstevel@tonic-gate 		    (offset_t)a4, (aio_result_t *)a5, FREAD);
237*0Sstevel@tonic-gate 		break;
238*0Sstevel@tonic-gate 	case AIOWRITE:
239*0Sstevel@tonic-gate 		error = arw((int)a0, (int)a1, (char *)a2, (int)a3,
240*0Sstevel@tonic-gate 		    (offset_t)a4, (aio_result_t *)a5, FWRITE);
241*0Sstevel@tonic-gate 		break;
242*0Sstevel@tonic-gate 	case AIOWAIT:
243*0Sstevel@tonic-gate 		error = aiowait((struct timeval *)a1, (int)a2, &rval);
244*0Sstevel@tonic-gate 		break;
245*0Sstevel@tonic-gate 	case AIOWAITN:
246*0Sstevel@tonic-gate 		error = aiowaitn((void *)a1, (uint_t)a2, (uint_t *)a3,
247*0Sstevel@tonic-gate 		    (timespec_t *)a4);
248*0Sstevel@tonic-gate 		break;
249*0Sstevel@tonic-gate 	case AIONOTIFY:
250*0Sstevel@tonic-gate 		error = aionotify();
251*0Sstevel@tonic-gate 		break;
252*0Sstevel@tonic-gate 	case AIOINIT:
253*0Sstevel@tonic-gate 		error = aioinit();
254*0Sstevel@tonic-gate 		break;
255*0Sstevel@tonic-gate 	case AIOSTART:
256*0Sstevel@tonic-gate 		error = aiostart();
257*0Sstevel@tonic-gate 		break;
258*0Sstevel@tonic-gate 	case AIOLIO:
259*0Sstevel@tonic-gate 		error = alio((int)a0, (int)a1, (aiocb_t **)a2, (int)a3,
260*0Sstevel@tonic-gate 		    (struct sigevent *)a4);
261*0Sstevel@tonic-gate 		break;
262*0Sstevel@tonic-gate 	case AIOLIOWAIT:
263*0Sstevel@tonic-gate 		error = aliowait((int)a1, (void *)a2, (int)a3,
264*0Sstevel@tonic-gate 		    (struct sigevent *)a4, AIO_64);
265*0Sstevel@tonic-gate 		break;
266*0Sstevel@tonic-gate 	case AIOSUSPEND:
267*0Sstevel@tonic-gate 		error = aiosuspend((void *)a1, (int)a2, (timespec_t *)a3,
268*0Sstevel@tonic-gate 		    (int)a4, &rval, AIO_64);
269*0Sstevel@tonic-gate 		break;
270*0Sstevel@tonic-gate 	case AIOERROR:
271*0Sstevel@tonic-gate 		error = aioerror((void *)a1, AIO_64);
272*0Sstevel@tonic-gate 		break;
273*0Sstevel@tonic-gate 	case AIOAREAD:
274*0Sstevel@tonic-gate 		error = aiorw((int)a0, (void *)a1, FREAD, AIO_64);
275*0Sstevel@tonic-gate 		break;
276*0Sstevel@tonic-gate 	case AIOAWRITE:
277*0Sstevel@tonic-gate 		error = aiorw((int)a0, (void *)a1, FWRITE, AIO_64);
278*0Sstevel@tonic-gate 		break;
279*0Sstevel@tonic-gate 	case AIOCANCEL:
280*0Sstevel@tonic-gate 		error = aio_cancel((int)a1, (void *)a2, &rval, AIO_64);
281*0Sstevel@tonic-gate 		break;
282*0Sstevel@tonic-gate 
283*0Sstevel@tonic-gate 	/*
284*0Sstevel@tonic-gate 	 * The large file related calls are valid only for the
285*0Sstevel@tonic-gate 	 * 32-bit kernel and not for the 64-bit kernel.
286*0Sstevel@tonic-gate 	 * On a 64-bit kernel we convert large file calls
287*0Sstevel@tonic-gate 	 * to regular 64-bit calls.
288*0Sstevel@tonic-gate 	 */
289*0Sstevel@tonic-gate 
290*0Sstevel@tonic-gate 	default:
291*0Sstevel@tonic-gate 		error = EINVAL;
292*0Sstevel@tonic-gate 	}
293*0Sstevel@tonic-gate 	if (error)
294*0Sstevel@tonic-gate 		return ((int64_t)set_errno(error));
295*0Sstevel@tonic-gate 	return (rval);
296*0Sstevel@tonic-gate }
297*0Sstevel@tonic-gate #endif
298*0Sstevel@tonic-gate 
299*0Sstevel@tonic-gate static int
300*0Sstevel@tonic-gate kaio(
301*0Sstevel@tonic-gate 	ulong_t *uap,
302*0Sstevel@tonic-gate 	rval_t *rvp)
303*0Sstevel@tonic-gate {
304*0Sstevel@tonic-gate 	long rval = 0;
305*0Sstevel@tonic-gate 	int	error = 0;
306*0Sstevel@tonic-gate 	offset_t	off;
307*0Sstevel@tonic-gate 
308*0Sstevel@tonic-gate 
309*0Sstevel@tonic-gate 	rvp->r_vals = 0;
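	/*
	 * A 32-bit caller passes the 64-bit file offset in two consecutive
	 * 32-bit argument slots (uap[4] and uap[5]); reassemble it here,
	 * with the word order depending on the endianness of the machine.
	 */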
310*0Sstevel@tonic-gate #if defined(_LITTLE_ENDIAN)
311*0Sstevel@tonic-gate 	off = ((u_offset_t)uap[5] << 32) | (u_offset_t)uap[4];
312*0Sstevel@tonic-gate #else
313*0Sstevel@tonic-gate 	off = ((u_offset_t)uap[4] << 32) | (u_offset_t)uap[5];
314*0Sstevel@tonic-gate #endif
315*0Sstevel@tonic-gate 
316*0Sstevel@tonic-gate 	switch (uap[0] & ~AIO_POLL_BIT) {
317*0Sstevel@tonic-gate 	/*
318*0Sstevel@tonic-gate 	 * It must be the 32-bit system call on a 64-bit kernel.
319*0Sstevel@tonic-gate 	 */
320*0Sstevel@tonic-gate 	case AIOREAD:
321*0Sstevel@tonic-gate 		return (arw((int)uap[0], (int)uap[1], (char *)uap[2],
322*0Sstevel@tonic-gate 		    (int)uap[3], off, (aio_result_t *)uap[6], FREAD));
323*0Sstevel@tonic-gate 	case AIOWRITE:
324*0Sstevel@tonic-gate 		return (arw((int)uap[0], (int)uap[1], (char *)uap[2],
325*0Sstevel@tonic-gate 		    (int)uap[3], off, (aio_result_t *)uap[6], FWRITE));
326*0Sstevel@tonic-gate 	case AIOWAIT:
327*0Sstevel@tonic-gate 		error = aiowait((struct	timeval *)uap[1], (int)uap[2],
328*0Sstevel@tonic-gate 		    &rval);
329*0Sstevel@tonic-gate 		break;
330*0Sstevel@tonic-gate 	case AIOWAITN:
331*0Sstevel@tonic-gate 		error = aiowaitn((void *)uap[1], (uint_t)uap[2],
332*0Sstevel@tonic-gate 		    (uint_t *)uap[3], (timespec_t *)uap[4]);
333*0Sstevel@tonic-gate 		break;
334*0Sstevel@tonic-gate 	case AIONOTIFY:
335*0Sstevel@tonic-gate 		return (aionotify());
336*0Sstevel@tonic-gate 	case AIOINIT:
337*0Sstevel@tonic-gate 		return (aioinit());
338*0Sstevel@tonic-gate 	case AIOSTART:
339*0Sstevel@tonic-gate 		return (aiostart());
340*0Sstevel@tonic-gate 	case AIOLIO:
341*0Sstevel@tonic-gate 		return (alio32((int)uap[1], (void *)uap[2], (int)uap[3],
342*0Sstevel@tonic-gate 		    (void *)uap[4]));
343*0Sstevel@tonic-gate 	case AIOLIOWAIT:
344*0Sstevel@tonic-gate 		return (aliowait((int)uap[1], (void *)uap[2],
345*0Sstevel@tonic-gate 		    (int)uap[3], (struct sigevent *)uap[4], AIO_32));
346*0Sstevel@tonic-gate 	case AIOSUSPEND:
347*0Sstevel@tonic-gate 		error = aiosuspend((void *)uap[1], (int)uap[2],
348*0Sstevel@tonic-gate 		    (timespec_t *)uap[3], (int)uap[4],
349*0Sstevel@tonic-gate 		    &rval, AIO_32);
350*0Sstevel@tonic-gate 		break;
351*0Sstevel@tonic-gate 	case AIOERROR:
352*0Sstevel@tonic-gate 		return (aioerror((void *)uap[1], AIO_32));
353*0Sstevel@tonic-gate 	case AIOAREAD:
354*0Sstevel@tonic-gate 		return (aiorw((int)uap[0], (void *)uap[1],
355*0Sstevel@tonic-gate 		    FREAD, AIO_32));
356*0Sstevel@tonic-gate 	case AIOAWRITE:
357*0Sstevel@tonic-gate 		return (aiorw((int)uap[0], (void *)uap[1],
358*0Sstevel@tonic-gate 		    FWRITE, AIO_32));
359*0Sstevel@tonic-gate 	case AIOCANCEL:
360*0Sstevel@tonic-gate 		error = (aio_cancel((int)uap[1], (void *)uap[2], &rval,
361*0Sstevel@tonic-gate 		    AIO_32));
362*0Sstevel@tonic-gate 		break;
363*0Sstevel@tonic-gate 	case AIOLIO64:
364*0Sstevel@tonic-gate 		return (alioLF((int)uap[1], (void *)uap[2],
365*0Sstevel@tonic-gate 		    (int)uap[3], (void *)uap[4]));
366*0Sstevel@tonic-gate 	case AIOLIOWAIT64:
367*0Sstevel@tonic-gate 		return (aliowait((int)uap[1], (void *)uap[2],
368*0Sstevel@tonic-gate 		    (int)uap[3], (void *)uap[4], AIO_LARGEFILE));
369*0Sstevel@tonic-gate 	case AIOSUSPEND64:
370*0Sstevel@tonic-gate 		error = aiosuspend((void *)uap[1], (int)uap[2],
371*0Sstevel@tonic-gate 		    (timespec_t *)uap[3], (int)uap[4], &rval,
372*0Sstevel@tonic-gate 		    AIO_LARGEFILE);
373*0Sstevel@tonic-gate 		break;
374*0Sstevel@tonic-gate 	case AIOERROR64:
375*0Sstevel@tonic-gate 		return (aioerror((void *)uap[1], AIO_LARGEFILE));
376*0Sstevel@tonic-gate 	case AIOAREAD64:
377*0Sstevel@tonic-gate 		return (aiorw((int)uap[0], (void *)uap[1], FREAD,
378*0Sstevel@tonic-gate 		    AIO_LARGEFILE));
379*0Sstevel@tonic-gate 	case AIOAWRITE64:
380*0Sstevel@tonic-gate 		return (aiorw((int)uap[0], (void *)uap[1], FWRITE,
381*0Sstevel@tonic-gate 		    AIO_LARGEFILE));
382*0Sstevel@tonic-gate 	case AIOCANCEL64:
383*0Sstevel@tonic-gate 		error = (aio_cancel((int)uap[1], (void *)uap[2],
384*0Sstevel@tonic-gate 		    &rval, AIO_LARGEFILE));
385*0Sstevel@tonic-gate 		break;
386*0Sstevel@tonic-gate 	default:
387*0Sstevel@tonic-gate 		return (EINVAL);
388*0Sstevel@tonic-gate 	}
389*0Sstevel@tonic-gate 
390*0Sstevel@tonic-gate 	rvp->r_val1 = rval;
391*0Sstevel@tonic-gate 	return (error);
392*0Sstevel@tonic-gate }
393*0Sstevel@tonic-gate 
394*0Sstevel@tonic-gate /*
395*0Sstevel@tonic-gate  * wake up LWPs in this process that are sleeping in
396*0Sstevel@tonic-gate  * aiowait().
397*0Sstevel@tonic-gate  */
398*0Sstevel@tonic-gate static int
399*0Sstevel@tonic-gate aionotify(void)
400*0Sstevel@tonic-gate {
401*0Sstevel@tonic-gate 	aio_t	*aiop;
402*0Sstevel@tonic-gate 
403*0Sstevel@tonic-gate 	aiop = curproc->p_aio;
404*0Sstevel@tonic-gate 	if (aiop == NULL)
405*0Sstevel@tonic-gate 		return (0);
406*0Sstevel@tonic-gate 
407*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
408*0Sstevel@tonic-gate 	aiop->aio_notifycnt++;
409*0Sstevel@tonic-gate 	cv_broadcast(&aiop->aio_waitcv);
410*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_mutex);
411*0Sstevel@tonic-gate 
412*0Sstevel@tonic-gate 	return (0);
413*0Sstevel@tonic-gate }
414*0Sstevel@tonic-gate 
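/*
 * Convert a user-supplied struct timeval into a relative timestruc_t.
 * On success *blocking and *rqtp describe the caller's intent:
 * a NULL timout means wait indefinitely (*rqtp == NULL, *blocking == 1),
 * a timout of (struct timeval *)-1 or a zero time value means don't
 * block at all, and any other valid time value yields a bounded wait
 * with *rqtp pointing at the converted rqtime.
 */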
415*0Sstevel@tonic-gate static int
416*0Sstevel@tonic-gate timeval2reltime(struct timeval *timout, timestruc_t *rqtime,
417*0Sstevel@tonic-gate 	timestruc_t **rqtp, int *blocking)
418*0Sstevel@tonic-gate {
419*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
420*0Sstevel@tonic-gate 	struct timeval32 wait_time_32;
421*0Sstevel@tonic-gate #endif
422*0Sstevel@tonic-gate 	struct timeval wait_time;
423*0Sstevel@tonic-gate 	model_t	model = get_udatamodel();
424*0Sstevel@tonic-gate 
425*0Sstevel@tonic-gate 	*rqtp = NULL;
426*0Sstevel@tonic-gate 	if (timout == NULL) {		/* wait indefinitely */
427*0Sstevel@tonic-gate 		*blocking = 1;
428*0Sstevel@tonic-gate 		return (0);
429*0Sstevel@tonic-gate 	}
430*0Sstevel@tonic-gate 
431*0Sstevel@tonic-gate 	/*
432*0Sstevel@tonic-gate 	 * We need to compare correctly against the -1 passed in as a
433*0Sstevel@tonic-gate 	 * user address pointer, for both 32-bit and 64-bit apps.
434*0Sstevel@tonic-gate 	 */
435*0Sstevel@tonic-gate 	if (model == DATAMODEL_NATIVE) {
436*0Sstevel@tonic-gate 		if ((intptr_t)timout == (intptr_t)-1) {	/* don't wait */
437*0Sstevel@tonic-gate 			*blocking = 0;
438*0Sstevel@tonic-gate 			return (0);
439*0Sstevel@tonic-gate 		}
440*0Sstevel@tonic-gate 
441*0Sstevel@tonic-gate 		if (copyin(timout, &wait_time, sizeof (wait_time)))
442*0Sstevel@tonic-gate 			return (EFAULT);
443*0Sstevel@tonic-gate 	}
444*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
445*0Sstevel@tonic-gate 	else {
446*0Sstevel@tonic-gate 		/*
447*0Sstevel@tonic-gate 		 * A -1 from a 32-bit app will not get sign-extended.
448*0Sstevel@tonic-gate 		 * Don't wait if it is -1.
449*0Sstevel@tonic-gate 		 */
450*0Sstevel@tonic-gate 		if ((intptr_t)timout == (intptr_t)((uint32_t)-1)) {
451*0Sstevel@tonic-gate 			*blocking = 0;
452*0Sstevel@tonic-gate 			return (0);
453*0Sstevel@tonic-gate 		}
454*0Sstevel@tonic-gate 
455*0Sstevel@tonic-gate 		if (copyin(timout, &wait_time_32, sizeof (wait_time_32)))
456*0Sstevel@tonic-gate 			return (EFAULT);
457*0Sstevel@tonic-gate 		TIMEVAL32_TO_TIMEVAL(&wait_time, &wait_time_32);
458*0Sstevel@tonic-gate 	}
459*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
460*0Sstevel@tonic-gate 
461*0Sstevel@tonic-gate 	if (wait_time.tv_sec == 0 && wait_time.tv_usec == 0) {	/* don't wait */
462*0Sstevel@tonic-gate 		*blocking = 0;
463*0Sstevel@tonic-gate 		return (0);
464*0Sstevel@tonic-gate 	}
465*0Sstevel@tonic-gate 
466*0Sstevel@tonic-gate 	if (wait_time.tv_sec < 0 ||
467*0Sstevel@tonic-gate 	    wait_time.tv_usec < 0 || wait_time.tv_usec >= MICROSEC)
468*0Sstevel@tonic-gate 		return (EINVAL);
469*0Sstevel@tonic-gate 
470*0Sstevel@tonic-gate 	rqtime->tv_sec = wait_time.tv_sec;
471*0Sstevel@tonic-gate 	rqtime->tv_nsec = wait_time.tv_usec * 1000;
472*0Sstevel@tonic-gate 	*rqtp = rqtime;
473*0Sstevel@tonic-gate 	*blocking = 1;
474*0Sstevel@tonic-gate 
475*0Sstevel@tonic-gate 	return (0);
476*0Sstevel@tonic-gate }
477*0Sstevel@tonic-gate 
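/*
 * Same conversion as timeval2reltime() but for a struct timespec;
 * note there is no -1 pointer convention here, only NULL (wait forever)
 * and a zero time value (don't block).
 */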
478*0Sstevel@tonic-gate static int
479*0Sstevel@tonic-gate timespec2reltime(timespec_t *timout, timestruc_t *rqtime,
480*0Sstevel@tonic-gate 	timestruc_t **rqtp, int *blocking)
481*0Sstevel@tonic-gate {
482*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
483*0Sstevel@tonic-gate 	timespec32_t wait_time_32;
484*0Sstevel@tonic-gate #endif
485*0Sstevel@tonic-gate 	model_t	model = get_udatamodel();
486*0Sstevel@tonic-gate 
487*0Sstevel@tonic-gate 	*rqtp = NULL;
488*0Sstevel@tonic-gate 	if (timout == NULL) {
489*0Sstevel@tonic-gate 		*blocking = 1;
490*0Sstevel@tonic-gate 		return (0);
491*0Sstevel@tonic-gate 	}
492*0Sstevel@tonic-gate 
493*0Sstevel@tonic-gate 	if (model == DATAMODEL_NATIVE) {
494*0Sstevel@tonic-gate 		if (copyin(timout, rqtime, sizeof (*rqtime)))
495*0Sstevel@tonic-gate 			return (EFAULT);
496*0Sstevel@tonic-gate 	}
497*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
498*0Sstevel@tonic-gate 	else {
499*0Sstevel@tonic-gate 		if (copyin(timout, &wait_time_32, sizeof (wait_time_32)))
500*0Sstevel@tonic-gate 			return (EFAULT);
501*0Sstevel@tonic-gate 		TIMESPEC32_TO_TIMESPEC(rqtime, &wait_time_32);
502*0Sstevel@tonic-gate 	}
503*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
504*0Sstevel@tonic-gate 
505*0Sstevel@tonic-gate 	if (rqtime->tv_sec == 0 && rqtime->tv_nsec == 0) {
506*0Sstevel@tonic-gate 		*blocking = 0;
507*0Sstevel@tonic-gate 		return (0);
508*0Sstevel@tonic-gate 	}
509*0Sstevel@tonic-gate 
510*0Sstevel@tonic-gate 	if (rqtime->tv_sec < 0 ||
511*0Sstevel@tonic-gate 	    rqtime->tv_nsec < 0 || rqtime->tv_nsec >= NANOSEC)
512*0Sstevel@tonic-gate 		return (EINVAL);
513*0Sstevel@tonic-gate 
514*0Sstevel@tonic-gate 	*rqtp = rqtime;
515*0Sstevel@tonic-gate 	*blocking = 1;
516*0Sstevel@tonic-gate 
517*0Sstevel@tonic-gate 	return (0);
518*0Sstevel@tonic-gate }
519*0Sstevel@tonic-gate 
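/*
 * aiowait() backs the old-style aiowait() library call: it reaps one
 * completed request, returning the user's aio_result_t pointer through
 * *rval, or returns 1 via *rval when a user-level notification posted by
 * aionotify() is consumed instead.
 */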
520*0Sstevel@tonic-gate /*ARGSUSED*/
521*0Sstevel@tonic-gate static int
522*0Sstevel@tonic-gate aiowait(
523*0Sstevel@tonic-gate 	struct timeval	*timout,
524*0Sstevel@tonic-gate 	int	dontblockflg,
525*0Sstevel@tonic-gate 	long	*rval)
526*0Sstevel@tonic-gate {
527*0Sstevel@tonic-gate 	int 		error;
528*0Sstevel@tonic-gate 	aio_t		*aiop;
529*0Sstevel@tonic-gate 	aio_req_t	*reqp;
530*0Sstevel@tonic-gate 	clock_t		status;
531*0Sstevel@tonic-gate 	int		blocking;
532*0Sstevel@tonic-gate 	int		timecheck;
533*0Sstevel@tonic-gate 	timestruc_t	rqtime;
534*0Sstevel@tonic-gate 	timestruc_t	*rqtp;
535*0Sstevel@tonic-gate 
536*0Sstevel@tonic-gate 	aiop = curproc->p_aio;
537*0Sstevel@tonic-gate 	if (aiop == NULL)
538*0Sstevel@tonic-gate 		return (EINVAL);
539*0Sstevel@tonic-gate 
540*0Sstevel@tonic-gate 	/*
541*0Sstevel@tonic-gate 	 * Establish the absolute future time for the timeout.
542*0Sstevel@tonic-gate 	 */
543*0Sstevel@tonic-gate 	error = timeval2reltime(timout, &rqtime, &rqtp, &blocking);
544*0Sstevel@tonic-gate 	if (error)
545*0Sstevel@tonic-gate 		return (error);
546*0Sstevel@tonic-gate 	if (rqtp) {
547*0Sstevel@tonic-gate 		timestruc_t now;
548*0Sstevel@tonic-gate 		timecheck = timechanged;
549*0Sstevel@tonic-gate 		gethrestime(&now);
550*0Sstevel@tonic-gate 		timespecadd(rqtp, &now);
551*0Sstevel@tonic-gate 	}
552*0Sstevel@tonic-gate 
553*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
554*0Sstevel@tonic-gate 	for (;;) {
555*0Sstevel@tonic-gate 		/* process requests on poll queue */
556*0Sstevel@tonic-gate 		if (aiop->aio_pollq) {
557*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_mutex);
558*0Sstevel@tonic-gate 			aio_cleanup(0);
559*0Sstevel@tonic-gate 			mutex_enter(&aiop->aio_mutex);
560*0Sstevel@tonic-gate 		}
561*0Sstevel@tonic-gate 		if ((reqp = aio_req_remove(NULL)) != NULL) {
562*0Sstevel@tonic-gate 			*rval = (long)reqp->aio_req_resultp;
563*0Sstevel@tonic-gate 			break;
564*0Sstevel@tonic-gate 		}
565*0Sstevel@tonic-gate 		/* user-level done queue might not be empty */
566*0Sstevel@tonic-gate 		if (aiop->aio_notifycnt > 0) {
567*0Sstevel@tonic-gate 			aiop->aio_notifycnt--;
568*0Sstevel@tonic-gate 			*rval = 1;
569*0Sstevel@tonic-gate 			break;
570*0Sstevel@tonic-gate 		}
571*0Sstevel@tonic-gate 		/* don't block if no outstanding aio */
572*0Sstevel@tonic-gate 		if (aiop->aio_outstanding == 0 && dontblockflg) {
573*0Sstevel@tonic-gate 			error = EINVAL;
574*0Sstevel@tonic-gate 			break;
575*0Sstevel@tonic-gate 		}
576*0Sstevel@tonic-gate 		if (blocking) {
577*0Sstevel@tonic-gate 			status = cv_waituntil_sig(&aiop->aio_waitcv,
578*0Sstevel@tonic-gate 			    &aiop->aio_mutex, rqtp, timecheck);
579*0Sstevel@tonic-gate 
580*0Sstevel@tonic-gate 			if (status > 0)		/* check done queue again */
581*0Sstevel@tonic-gate 				continue;
582*0Sstevel@tonic-gate 			if (status == 0) {	/* interrupted by a signal */
583*0Sstevel@tonic-gate 				error = EINTR;
584*0Sstevel@tonic-gate 				*rval = -1;
585*0Sstevel@tonic-gate 			} else {		/* timer expired */
586*0Sstevel@tonic-gate 				error = ETIME;
587*0Sstevel@tonic-gate 			}
588*0Sstevel@tonic-gate 		}
589*0Sstevel@tonic-gate 		break;
590*0Sstevel@tonic-gate 	}
591*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_mutex);
592*0Sstevel@tonic-gate 	if (reqp) {
593*0Sstevel@tonic-gate 		aphysio_unlock(reqp);
594*0Sstevel@tonic-gate 		aio_copyout_result(reqp);
595*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
596*0Sstevel@tonic-gate 		aio_req_free(aiop, reqp);
597*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
598*0Sstevel@tonic-gate 	}
599*0Sstevel@tonic-gate 	return (error);
600*0Sstevel@tonic-gate }
601*0Sstevel@tonic-gate 
602*0Sstevel@tonic-gate /*
603*0Sstevel@tonic-gate  * aiowaitn can be used to reap completed asynchronous requests submitted with
604*0Sstevel@tonic-gate  * lio_listio, aio_read or aio_write.
605*0Sstevel@tonic-gate  * This function only reaps asynchronous raw I/Os.
606*0Sstevel@tonic-gate  */
607*0Sstevel@tonic-gate 
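/*
 * On entry *nwait holds the number of completed requests the caller wants
 * to wait for; a value of zero turns the call into a non-blocking poll
 * that collects up to nent completions.  On return *nwait is updated with
 * the number of iocb pointers actually copied out to uiocb.
 */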
608*0Sstevel@tonic-gate /*ARGSUSED*/
609*0Sstevel@tonic-gate static int
610*0Sstevel@tonic-gate aiowaitn(void *uiocb, uint_t nent, uint_t *nwait, timespec_t *timout)
611*0Sstevel@tonic-gate {
612*0Sstevel@tonic-gate 	int 		error = 0;
613*0Sstevel@tonic-gate 	aio_t		*aiop;
614*0Sstevel@tonic-gate 	aio_req_t	*reqlist = NULL;
615*0Sstevel@tonic-gate 	caddr_t		iocblist = NULL;	/* array of iocb ptr's */
616*0Sstevel@tonic-gate 	uint_t		waitcnt, cnt = 0;	/* iocb cnt */
617*0Sstevel@tonic-gate 	size_t		iocbsz;			/* users iocb size */
618*0Sstevel@tonic-gate 	size_t		riocbsz;		/* returned iocb size */
619*0Sstevel@tonic-gate 	int		iocb_index = 0;
620*0Sstevel@tonic-gate 	model_t		model = get_udatamodel();
621*0Sstevel@tonic-gate 	int		blocking = 1;
622*0Sstevel@tonic-gate 	int		timecheck;
623*0Sstevel@tonic-gate 	timestruc_t	rqtime;
624*0Sstevel@tonic-gate 	timestruc_t	*rqtp;
625*0Sstevel@tonic-gate 
626*0Sstevel@tonic-gate 	aiop = curproc->p_aio;
627*0Sstevel@tonic-gate 	if (aiop == NULL)
628*0Sstevel@tonic-gate 		return (EINVAL);
629*0Sstevel@tonic-gate 
630*0Sstevel@tonic-gate 	if (aiop->aio_outstanding == 0)
631*0Sstevel@tonic-gate 		return (EAGAIN);
632*0Sstevel@tonic-gate 
633*0Sstevel@tonic-gate 	if (copyin(nwait, &waitcnt, sizeof (uint_t)))
634*0Sstevel@tonic-gate 		return (EFAULT);
635*0Sstevel@tonic-gate 
636*0Sstevel@tonic-gate 	/* set *nwait to zero in case we must return prematurely */
637*0Sstevel@tonic-gate 	if (copyout(&cnt, nwait, sizeof (uint_t)))
638*0Sstevel@tonic-gate 		return (EFAULT);
639*0Sstevel@tonic-gate 
640*0Sstevel@tonic-gate 	if (waitcnt == 0) {
641*0Sstevel@tonic-gate 		blocking = 0;
642*0Sstevel@tonic-gate 		rqtp = NULL;
643*0Sstevel@tonic-gate 		waitcnt = nent;
644*0Sstevel@tonic-gate 	} else {
645*0Sstevel@tonic-gate 		error = timespec2reltime(timout, &rqtime, &rqtp, &blocking);
646*0Sstevel@tonic-gate 		if (error)
647*0Sstevel@tonic-gate 			return (error);
648*0Sstevel@tonic-gate 	}
649*0Sstevel@tonic-gate 
650*0Sstevel@tonic-gate 	if (model == DATAMODEL_NATIVE)
651*0Sstevel@tonic-gate 		iocbsz = (sizeof (aiocb_t *) * nent);
652*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
653*0Sstevel@tonic-gate 	else
654*0Sstevel@tonic-gate 		iocbsz = (sizeof (caddr32_t) * nent);
655*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
656*0Sstevel@tonic-gate 
657*0Sstevel@tonic-gate 	/*
658*0Sstevel@tonic-gate 	 * Only one aio_waitn call is allowed at a time.
659*0Sstevel@tonic-gate 	 * The active aio_waitn will collect all requests
660*0Sstevel@tonic-gate 	 * out of the "done" list and if necessary it will wait
661*0Sstevel@tonic-gate 	 * for some/all pending requests to fulfill the nwait
662*0Sstevel@tonic-gate 	 * parameter.
663*0Sstevel@tonic-gate 	 * Any further aio_waitn calls will sleep here until the
664*0Sstevel@tonic-gate 	 * active aio_waitn finishes and leaves the kernel.
665*0Sstevel@tonic-gate 	 * If such a call does not block (poll), it returns
666*0Sstevel@tonic-gate 	 * immediately with the error code EAGAIN.
667*0Sstevel@tonic-gate 	 * If the call should block, it sleeps here, but does not
668*0Sstevel@tonic-gate 	 * touch the timeout; the timeout starts when this
669*0Sstevel@tonic-gate 	 * aio_waitn call becomes active.
670*0Sstevel@tonic-gate 	 */
671*0Sstevel@tonic-gate 
672*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
673*0Sstevel@tonic-gate 
674*0Sstevel@tonic-gate 	while (aiop->aio_flags & AIO_WAITN) {
675*0Sstevel@tonic-gate 		if (blocking == 0) {
676*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_mutex);
677*0Sstevel@tonic-gate 			return (EAGAIN);
678*0Sstevel@tonic-gate 		}
679*0Sstevel@tonic-gate 
680*0Sstevel@tonic-gate 		/* block, no timeout */
681*0Sstevel@tonic-gate 		aiop->aio_flags |= AIO_WAITN_PENDING;
682*0Sstevel@tonic-gate 		if (!cv_wait_sig(&aiop->aio_waitncv, &aiop->aio_mutex)) {
683*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_mutex);
684*0Sstevel@tonic-gate 			return (EINTR);
685*0Sstevel@tonic-gate 		}
686*0Sstevel@tonic-gate 	}
687*0Sstevel@tonic-gate 
688*0Sstevel@tonic-gate 	/*
689*0Sstevel@tonic-gate 	 * Establish the absolute future time for the timeout.
690*0Sstevel@tonic-gate 	 */
691*0Sstevel@tonic-gate 	if (rqtp) {
692*0Sstevel@tonic-gate 		timestruc_t now;
693*0Sstevel@tonic-gate 		timecheck = timechanged;
694*0Sstevel@tonic-gate 		gethrestime(&now);
695*0Sstevel@tonic-gate 		timespecadd(rqtp, &now);
696*0Sstevel@tonic-gate 	}
697*0Sstevel@tonic-gate 
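	/*
	 * Reuse the per-process buffer (aiop->aio_iocb) that caches the
	 * array of user iocb pointers to be copied out, reallocating it
	 * when the current request needs more space; it is freed again
	 * further below once it exceeds AIO_IOCB_MAX.
	 */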
698*0Sstevel@tonic-gate 	if (iocbsz > aiop->aio_iocbsz && aiop->aio_iocb != NULL) {
699*0Sstevel@tonic-gate 		kmem_free(aiop->aio_iocb, aiop->aio_iocbsz);
700*0Sstevel@tonic-gate 		aiop->aio_iocb = NULL;
701*0Sstevel@tonic-gate 	}
702*0Sstevel@tonic-gate 
703*0Sstevel@tonic-gate 	if (aiop->aio_iocb == NULL) {
704*0Sstevel@tonic-gate 		iocblist = kmem_zalloc(iocbsz, KM_NOSLEEP);
705*0Sstevel@tonic-gate 		if (iocblist == NULL) {
706*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_mutex);
707*0Sstevel@tonic-gate 			return (ENOMEM);
708*0Sstevel@tonic-gate 		}
709*0Sstevel@tonic-gate 		aiop->aio_iocb = (aiocb_t **)iocblist;
710*0Sstevel@tonic-gate 		aiop->aio_iocbsz = iocbsz;
711*0Sstevel@tonic-gate 	} else {
712*0Sstevel@tonic-gate 		iocblist = (char *)aiop->aio_iocb;
713*0Sstevel@tonic-gate 	}
714*0Sstevel@tonic-gate 
715*0Sstevel@tonic-gate 	aiop->aio_waitncnt = waitcnt;
716*0Sstevel@tonic-gate 	aiop->aio_flags |= AIO_WAITN;
717*0Sstevel@tonic-gate 
718*0Sstevel@tonic-gate 	for (;;) {
719*0Sstevel@tonic-gate 		/* push requests on poll queue to done queue */
720*0Sstevel@tonic-gate 		if (aiop->aio_pollq) {
721*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_mutex);
722*0Sstevel@tonic-gate 			aio_cleanup(0);
723*0Sstevel@tonic-gate 			mutex_enter(&aiop->aio_mutex);
724*0Sstevel@tonic-gate 		}
725*0Sstevel@tonic-gate 
726*0Sstevel@tonic-gate 		/* check for requests on done queue */
727*0Sstevel@tonic-gate 		if (aiop->aio_doneq) {
728*0Sstevel@tonic-gate 			cnt += aio_reqlist_concat(aiop, &reqlist, nent - cnt);
729*0Sstevel@tonic-gate 			aiop->aio_waitncnt = waitcnt - cnt;
730*0Sstevel@tonic-gate 		}
731*0Sstevel@tonic-gate 
732*0Sstevel@tonic-gate 		/* user-level done queue might not be empty */
733*0Sstevel@tonic-gate 		if (aiop->aio_notifycnt > 0) {
734*0Sstevel@tonic-gate 			aiop->aio_notifycnt--;
735*0Sstevel@tonic-gate 			error = 0;
736*0Sstevel@tonic-gate 			break;
737*0Sstevel@tonic-gate 		}
738*0Sstevel@tonic-gate 
739*0Sstevel@tonic-gate 		/*
740*0Sstevel@tonic-gate 		 * If we are here a second time as a result of timer
741*0Sstevel@tonic-gate 		 * expiration, we reset the error if there are enough
742*0Sstevel@tonic-gate 		 * aiocb's to satisfy the request.
743*0Sstevel@tonic-gate 		 * We also return if all requests are already done
744*0Sstevel@tonic-gate 		 * and we picked up the whole done queue.
745*0Sstevel@tonic-gate 		 */
746*0Sstevel@tonic-gate 
747*0Sstevel@tonic-gate 		if ((cnt >= waitcnt) || (cnt > 0 && aiop->aio_pending == 0 &&
748*0Sstevel@tonic-gate 		    aiop->aio_doneq == NULL)) {
749*0Sstevel@tonic-gate 			error = 0;
750*0Sstevel@tonic-gate 			break;
751*0Sstevel@tonic-gate 		}
752*0Sstevel@tonic-gate 
753*0Sstevel@tonic-gate 		if ((cnt < waitcnt) && blocking) {
754*0Sstevel@tonic-gate 			int rval = cv_waituntil_sig(&aiop->aio_waitcv,
755*0Sstevel@tonic-gate 				&aiop->aio_mutex, rqtp, timecheck);
756*0Sstevel@tonic-gate 			if (rval > 0)
757*0Sstevel@tonic-gate 				continue;
758*0Sstevel@tonic-gate 			if (rval < 0) {
759*0Sstevel@tonic-gate 				error = ETIME;
760*0Sstevel@tonic-gate 				blocking = 0;
761*0Sstevel@tonic-gate 				continue;
762*0Sstevel@tonic-gate 			}
763*0Sstevel@tonic-gate 			error = EINTR;
764*0Sstevel@tonic-gate 		}
765*0Sstevel@tonic-gate 		break;
766*0Sstevel@tonic-gate 	}
767*0Sstevel@tonic-gate 
768*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_mutex);
769*0Sstevel@tonic-gate 
770*0Sstevel@tonic-gate 	if (cnt > 0) {
771*0Sstevel@tonic-gate 
772*0Sstevel@tonic-gate 		iocb_index = aio_unlock_requests(iocblist, iocb_index, reqlist,
773*0Sstevel@tonic-gate 		    aiop, model);
774*0Sstevel@tonic-gate 
775*0Sstevel@tonic-gate 		if (model == DATAMODEL_NATIVE)
776*0Sstevel@tonic-gate 			riocbsz = (sizeof (aiocb_t *) * cnt);
777*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
778*0Sstevel@tonic-gate 		else
779*0Sstevel@tonic-gate 			riocbsz = (sizeof (caddr32_t) * cnt);
780*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
781*0Sstevel@tonic-gate 
782*0Sstevel@tonic-gate 		if (copyout(iocblist, uiocb, riocbsz) ||
783*0Sstevel@tonic-gate 		    copyout(&cnt, nwait, sizeof (uint_t)))
784*0Sstevel@tonic-gate 			error = EFAULT;
785*0Sstevel@tonic-gate 	}
786*0Sstevel@tonic-gate 
787*0Sstevel@tonic-gate 	if (aiop->aio_iocbsz > AIO_IOCB_MAX) {
788*0Sstevel@tonic-gate 		kmem_free(iocblist, aiop->aio_iocbsz);
789*0Sstevel@tonic-gate 		aiop->aio_iocb = NULL;
790*0Sstevel@tonic-gate 	}
791*0Sstevel@tonic-gate 
792*0Sstevel@tonic-gate 	/* check if there is another thread waiting for execution */
793*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
794*0Sstevel@tonic-gate 	aiop->aio_flags &= ~AIO_WAITN;
795*0Sstevel@tonic-gate 	if (aiop->aio_flags & AIO_WAITN_PENDING) {
796*0Sstevel@tonic-gate 		aiop->aio_flags &= ~AIO_WAITN_PENDING;
797*0Sstevel@tonic-gate 		cv_signal(&aiop->aio_waitncv);
798*0Sstevel@tonic-gate 	}
799*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_mutex);
800*0Sstevel@tonic-gate 
801*0Sstevel@tonic-gate 	return (error);
802*0Sstevel@tonic-gate }
803*0Sstevel@tonic-gate 
804*0Sstevel@tonic-gate /*
805*0Sstevel@tonic-gate  * aio_unlock_requests
806*0Sstevel@tonic-gate  * copies out the result of each request as well as its return value.
807*0Sstevel@tonic-gate  * It builds the list of completed asynchronous requests,
808*0Sstevel@tonic-gate  * unlocks the allocated memory ranges and
809*0Sstevel@tonic-gate  * puts the aio request structures back onto the free list.
810*0Sstevel@tonic-gate  */
811*0Sstevel@tonic-gate 
812*0Sstevel@tonic-gate static int
813*0Sstevel@tonic-gate aio_unlock_requests(
814*0Sstevel@tonic-gate 	caddr_t	iocblist,
815*0Sstevel@tonic-gate 	int	iocb_index,
816*0Sstevel@tonic-gate 	aio_req_t *reqlist,
817*0Sstevel@tonic-gate 	aio_t	*aiop,
818*0Sstevel@tonic-gate 	model_t	model)
819*0Sstevel@tonic-gate {
820*0Sstevel@tonic-gate 	aio_req_t	*reqp, *nreqp;
821*0Sstevel@tonic-gate 
822*0Sstevel@tonic-gate 	if (model == DATAMODEL_NATIVE) {
823*0Sstevel@tonic-gate 		for (reqp = reqlist; reqp != NULL;  reqp = nreqp) {
824*0Sstevel@tonic-gate 			(((caddr_t *)iocblist)[iocb_index++]) =
825*0Sstevel@tonic-gate 			    reqp->aio_req_iocb.iocb;
826*0Sstevel@tonic-gate 			nreqp = reqp->aio_req_next;
827*0Sstevel@tonic-gate 			aphysio_unlock(reqp);
828*0Sstevel@tonic-gate 			aio_copyout_result(reqp);
829*0Sstevel@tonic-gate 			mutex_enter(&aiop->aio_mutex);
830*0Sstevel@tonic-gate 			aio_req_free(aiop, reqp);
831*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_mutex);
832*0Sstevel@tonic-gate 		}
833*0Sstevel@tonic-gate 	}
834*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
835*0Sstevel@tonic-gate 	else {
836*0Sstevel@tonic-gate 		for (reqp = reqlist; reqp != NULL;  reqp = nreqp) {
837*0Sstevel@tonic-gate 			((caddr32_t *)iocblist)[iocb_index++] =
838*0Sstevel@tonic-gate 			    reqp->aio_req_iocb.iocb32;
839*0Sstevel@tonic-gate 			nreqp = reqp->aio_req_next;
840*0Sstevel@tonic-gate 			aphysio_unlock(reqp);
841*0Sstevel@tonic-gate 			aio_copyout_result(reqp);
842*0Sstevel@tonic-gate 			mutex_enter(&aiop->aio_mutex);
843*0Sstevel@tonic-gate 			aio_req_free(aiop, reqp);
844*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_mutex);
845*0Sstevel@tonic-gate 		}
846*0Sstevel@tonic-gate 	}
847*0Sstevel@tonic-gate #endif	/* _SYSCALL32_IMPL */
848*0Sstevel@tonic-gate 	return (iocb_index);
849*0Sstevel@tonic-gate }
850*0Sstevel@tonic-gate 
851*0Sstevel@tonic-gate /*
852*0Sstevel@tonic-gate  * aio_reqlist_concat
853*0Sstevel@tonic-gate  * moves "max" elements from the done queue to the reqlist queue and removes
854*0Sstevel@tonic-gate  * the AIO_DONEQ flag.
855*0Sstevel@tonic-gate  * - reqlist queue is a simple linked list
856*0Sstevel@tonic-gate  * - done queue is a doubly linked list
857*0Sstevel@tonic-gate  */
858*0Sstevel@tonic-gate 
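/*
 * The done queue is circular, so the walk below ends either after "max"
 * elements or when it wraps back to the head; in the wrap-around case the
 * entire done queue is handed over to reqlist and aio_doneq is cleared.
 */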
859*0Sstevel@tonic-gate static int
860*0Sstevel@tonic-gate aio_reqlist_concat(aio_t *aiop, aio_req_t **reqlist, int max)
861*0Sstevel@tonic-gate {
862*0Sstevel@tonic-gate 	aio_req_t *q2, *q2work, *list;
863*0Sstevel@tonic-gate 	int count = 0;
864*0Sstevel@tonic-gate 
865*0Sstevel@tonic-gate 	list = *reqlist;
866*0Sstevel@tonic-gate 	q2 = aiop->aio_doneq;
867*0Sstevel@tonic-gate 	q2work = q2;
868*0Sstevel@tonic-gate 	while (max-- > 0) {
869*0Sstevel@tonic-gate 		q2work->aio_req_flags &= ~AIO_DONEQ;
870*0Sstevel@tonic-gate 		q2work = q2work->aio_req_next;
871*0Sstevel@tonic-gate 		count++;
872*0Sstevel@tonic-gate 		if (q2work == q2)
873*0Sstevel@tonic-gate 			break;
874*0Sstevel@tonic-gate 	}
875*0Sstevel@tonic-gate 
876*0Sstevel@tonic-gate 	if (q2work == q2) {
877*0Sstevel@tonic-gate 		/* all doneq elements were taken */
878*0Sstevel@tonic-gate 		q2->aio_req_prev->aio_req_next = list;
879*0Sstevel@tonic-gate 		list = q2;
880*0Sstevel@tonic-gate 		aiop->aio_doneq = NULL;
881*0Sstevel@tonic-gate 	} else {
882*0Sstevel@tonic-gate 		/*
883*0Sstevel@tonic-gate 		 * max < number of elements in the doneq;
884*0Sstevel@tonic-gate 		 * detach only the required number of elements
885*0Sstevel@tonic-gate 		 * from the doneq
886*0Sstevel@tonic-gate 		 */
887*0Sstevel@tonic-gate 		q2work->aio_req_prev->aio_req_next = list;
888*0Sstevel@tonic-gate 		list = q2;
889*0Sstevel@tonic-gate 
890*0Sstevel@tonic-gate 		aiop->aio_doneq = q2work;
891*0Sstevel@tonic-gate 		q2work->aio_req_prev = q2->aio_req_prev;
892*0Sstevel@tonic-gate 		q2->aio_req_prev->aio_req_next = q2work;
893*0Sstevel@tonic-gate 	}
894*0Sstevel@tonic-gate 	*reqlist = list;
895*0Sstevel@tonic-gate 	return (count);
896*0Sstevel@tonic-gate }
897*0Sstevel@tonic-gate 
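/*
 * aiosuspend() waits for at least one of the requests named in the user's
 * aiocb list to complete.  Completed requests found on the done queue are
 * collected and their results copied out; if none are found the caller
 * blocks (subject to the timeout) unless nothing is outstanding, in which
 * case EAGAIN is returned.
 */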
898*0Sstevel@tonic-gate /*ARGSUSED*/
899*0Sstevel@tonic-gate static int
900*0Sstevel@tonic-gate aiosuspend(
901*0Sstevel@tonic-gate 	void	*aiocb,
902*0Sstevel@tonic-gate 	int	nent,
903*0Sstevel@tonic-gate 	struct	timespec	*timout,
904*0Sstevel@tonic-gate 	int	flag,
905*0Sstevel@tonic-gate 	long	*rval,
906*0Sstevel@tonic-gate 	int	run_mode)
907*0Sstevel@tonic-gate {
908*0Sstevel@tonic-gate 	int 		error;
909*0Sstevel@tonic-gate 	aio_t		*aiop;
910*0Sstevel@tonic-gate 	aio_req_t	*reqp, *found, *next;
911*0Sstevel@tonic-gate 	caddr_t		cbplist = NULL;
912*0Sstevel@tonic-gate 	aiocb_t		*cbp, **ucbp;
913*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
914*0Sstevel@tonic-gate 	aiocb32_t	*cbp32;
915*0Sstevel@tonic-gate 	caddr32_t	*ucbp32;
916*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
917*0Sstevel@tonic-gate 	aiocb64_32_t	*cbp64;
918*0Sstevel@tonic-gate 	int		rv;
919*0Sstevel@tonic-gate 	int		i;
920*0Sstevel@tonic-gate 	size_t		ssize;
921*0Sstevel@tonic-gate 	model_t		model = get_udatamodel();
922*0Sstevel@tonic-gate 	int		blocking;
923*0Sstevel@tonic-gate 	int		timecheck;
924*0Sstevel@tonic-gate 	timestruc_t	rqtime;
925*0Sstevel@tonic-gate 	timestruc_t	*rqtp;
926*0Sstevel@tonic-gate 
927*0Sstevel@tonic-gate 	aiop = curproc->p_aio;
928*0Sstevel@tonic-gate 	if (aiop == NULL || nent <= 0)
929*0Sstevel@tonic-gate 		return (EINVAL);
930*0Sstevel@tonic-gate 
931*0Sstevel@tonic-gate 	/*
932*0Sstevel@tonic-gate 	 * Establish the absolute future time for the timeout.
933*0Sstevel@tonic-gate 	 */
934*0Sstevel@tonic-gate 	error = timespec2reltime(timout, &rqtime, &rqtp, &blocking);
935*0Sstevel@tonic-gate 	if (error)
936*0Sstevel@tonic-gate 		return (error);
937*0Sstevel@tonic-gate 	if (rqtp) {
938*0Sstevel@tonic-gate 		timestruc_t now;
939*0Sstevel@tonic-gate 		timecheck = timechanged;
940*0Sstevel@tonic-gate 		gethrestime(&now);
941*0Sstevel@tonic-gate 		timespecadd(rqtp, &now);
942*0Sstevel@tonic-gate 	}
943*0Sstevel@tonic-gate 
944*0Sstevel@tonic-gate 	/*
945*0Sstevel@tonic-gate 	 * If we are not blocking and there's no I/O completed yet,
946*0Sstevel@tonic-gate 	 * skip the aiocb copyin.
947*0Sstevel@tonic-gate 	 */
948*0Sstevel@tonic-gate 	if (!blocking && (aiop->aio_pollq == NULL) &&
949*0Sstevel@tonic-gate 	    (aiop->aio_doneq == NULL)) {
950*0Sstevel@tonic-gate 		return (EAGAIN);
951*0Sstevel@tonic-gate 	}
952*0Sstevel@tonic-gate 
953*0Sstevel@tonic-gate 	if (model == DATAMODEL_NATIVE)
954*0Sstevel@tonic-gate 		ssize = (sizeof (aiocb_t *) * nent);
955*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
956*0Sstevel@tonic-gate 	else
957*0Sstevel@tonic-gate 		ssize = (sizeof (caddr32_t) * nent);
958*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
959*0Sstevel@tonic-gate 
960*0Sstevel@tonic-gate 	cbplist = kmem_alloc(ssize, KM_NOSLEEP);
961*0Sstevel@tonic-gate 	if (cbplist == NULL)
962*0Sstevel@tonic-gate 		return (ENOMEM);
963*0Sstevel@tonic-gate 
964*0Sstevel@tonic-gate 	if (copyin(aiocb, cbplist, ssize)) {
965*0Sstevel@tonic-gate 		error = EFAULT;
966*0Sstevel@tonic-gate 		goto done;
967*0Sstevel@tonic-gate 	}
968*0Sstevel@tonic-gate 
969*0Sstevel@tonic-gate 	found = NULL;
970*0Sstevel@tonic-gate 	/*
971*0Sstevel@tonic-gate 	 * we need to get the aio_cleanupq_mutex since we call
972*0Sstevel@tonic-gate 	 * aio_req_done().
973*0Sstevel@tonic-gate 	 */
974*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_cleanupq_mutex);
975*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
976*0Sstevel@tonic-gate 	for (;;) {
977*0Sstevel@tonic-gate 		/* push requests on poll queue to done queue */
978*0Sstevel@tonic-gate 		if (aiop->aio_pollq) {
979*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_mutex);
980*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_cleanupq_mutex);
981*0Sstevel@tonic-gate 			aio_cleanup(0);
982*0Sstevel@tonic-gate 			mutex_enter(&aiop->aio_cleanupq_mutex);
983*0Sstevel@tonic-gate 			mutex_enter(&aiop->aio_mutex);
984*0Sstevel@tonic-gate 		}
985*0Sstevel@tonic-gate 		/* check for requests on done queue */
986*0Sstevel@tonic-gate 		if (aiop->aio_doneq) {
987*0Sstevel@tonic-gate 			if (model == DATAMODEL_NATIVE)
988*0Sstevel@tonic-gate 				ucbp = (aiocb_t **)cbplist;
989*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
990*0Sstevel@tonic-gate 			else
991*0Sstevel@tonic-gate 				ucbp32 = (caddr32_t *)cbplist;
992*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
993*0Sstevel@tonic-gate 			for (i = 0; i < nent; i++) {
994*0Sstevel@tonic-gate 				if (model == DATAMODEL_NATIVE) {
995*0Sstevel@tonic-gate 					if ((cbp = *ucbp++) == NULL)
996*0Sstevel@tonic-gate 						continue;
997*0Sstevel@tonic-gate 					if (run_mode != AIO_LARGEFILE)
998*0Sstevel@tonic-gate 						reqp = aio_req_done(
999*0Sstevel@tonic-gate 						    &cbp->aio_resultp);
1000*0Sstevel@tonic-gate 					else {
1001*0Sstevel@tonic-gate 						cbp64 = (aiocb64_32_t *)cbp;
1002*0Sstevel@tonic-gate 						reqp = aio_req_done(
1003*0Sstevel@tonic-gate 						    &cbp64->aio_resultp);
1004*0Sstevel@tonic-gate 					}
1005*0Sstevel@tonic-gate 				}
1006*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
1007*0Sstevel@tonic-gate 				else {
1008*0Sstevel@tonic-gate 					if (run_mode == AIO_32) {
1009*0Sstevel@tonic-gate 						if ((cbp32 =
1010*0Sstevel@tonic-gate 						    (aiocb32_t *)(uintptr_t)
1011*0Sstevel@tonic-gate 						    *ucbp32++) == NULL)
1012*0Sstevel@tonic-gate 							continue;
1013*0Sstevel@tonic-gate 						reqp = aio_req_done(
1014*0Sstevel@tonic-gate 						    &cbp32->aio_resultp);
1015*0Sstevel@tonic-gate 					} else if (run_mode == AIO_LARGEFILE) {
1016*0Sstevel@tonic-gate 						if ((cbp64 =
1017*0Sstevel@tonic-gate 						    (aiocb64_32_t *)(uintptr_t)
1018*0Sstevel@tonic-gate 						    *ucbp32++) == NULL)
1019*0Sstevel@tonic-gate 							continue;
1020*0Sstevel@tonic-gate 						reqp = aio_req_done(
1021*0Sstevel@tonic-gate 						    &cbp64->aio_resultp);
1022*0Sstevel@tonic-gate 					}
1023*0Sstevel@tonic-gate 
1024*0Sstevel@tonic-gate 				}
1025*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
1026*0Sstevel@tonic-gate 				if (reqp) {
1027*0Sstevel@tonic-gate 					reqp->aio_req_next = found;
1028*0Sstevel@tonic-gate 					found = reqp;
1029*0Sstevel@tonic-gate 				}
1030*0Sstevel@tonic-gate 				if (aiop->aio_doneq == NULL)
1031*0Sstevel@tonic-gate 					break;
1032*0Sstevel@tonic-gate 			}
1033*0Sstevel@tonic-gate 			if (found)
1034*0Sstevel@tonic-gate 				break;
1035*0Sstevel@tonic-gate 		}
1036*0Sstevel@tonic-gate 		if (aiop->aio_notifycnt > 0) {
1037*0Sstevel@tonic-gate 			/*
1038*0Sstevel@tonic-gate 			 * nothing on the kernel's queue. the user
1039*0Sstevel@tonic-gate 			 * has notified the kernel that it has items
1040*0Sstevel@tonic-gate 			 * on a user-level queue.
1041*0Sstevel@tonic-gate 			 */
1042*0Sstevel@tonic-gate 			aiop->aio_notifycnt--;
1043*0Sstevel@tonic-gate 			*rval = 1;
1044*0Sstevel@tonic-gate 			error = 0;
1045*0Sstevel@tonic-gate 			break;
1046*0Sstevel@tonic-gate 		}
1047*0Sstevel@tonic-gate 		/* don't block if nothing is outstanding */
1048*0Sstevel@tonic-gate 		if (aiop->aio_outstanding == 0) {
1049*0Sstevel@tonic-gate 			error = EAGAIN;
1050*0Sstevel@tonic-gate 			break;
1051*0Sstevel@tonic-gate 		}
1052*0Sstevel@tonic-gate 		if (blocking) {
1053*0Sstevel@tonic-gate 			/*
1054*0Sstevel@tonic-gate 			 * drop the aio_cleanupq_mutex as we are
1055*0Sstevel@tonic-gate 			 * going to block.
1056*0Sstevel@tonic-gate 			 */
1057*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_cleanupq_mutex);
1058*0Sstevel@tonic-gate 			rv = cv_waituntil_sig(&aiop->aio_waitcv,
1059*0Sstevel@tonic-gate 				&aiop->aio_mutex, rqtp, timecheck);
1060*0Sstevel@tonic-gate 			/*
1061*0Sstevel@tonic-gate 			 * we have to drop aio_mutex and
1062*0Sstevel@tonic-gate 			 * grab it in the right order.
1063*0Sstevel@tonic-gate 			 */
1064*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_mutex);
1065*0Sstevel@tonic-gate 			mutex_enter(&aiop->aio_cleanupq_mutex);
1066*0Sstevel@tonic-gate 			mutex_enter(&aiop->aio_mutex);
1067*0Sstevel@tonic-gate 			if (rv > 0)	/* check done queue again */
1068*0Sstevel@tonic-gate 				continue;
1069*0Sstevel@tonic-gate 			if (rv == 0)	/* interrupted by a signal */
1070*0Sstevel@tonic-gate 				error = EINTR;
1071*0Sstevel@tonic-gate 			else		/* timer expired */
1072*0Sstevel@tonic-gate 				error = ETIME;
1073*0Sstevel@tonic-gate 		} else {
1074*0Sstevel@tonic-gate 			error = EAGAIN;
1075*0Sstevel@tonic-gate 		}
1076*0Sstevel@tonic-gate 		break;
1077*0Sstevel@tonic-gate 	}
1078*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_mutex);
1079*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_cleanupq_mutex);
1080*0Sstevel@tonic-gate 	for (reqp = found; reqp != NULL; reqp = next) {
1081*0Sstevel@tonic-gate 		next = reqp->aio_req_next;
1082*0Sstevel@tonic-gate 		aphysio_unlock(reqp);
1083*0Sstevel@tonic-gate 		aio_copyout_result(reqp);
1084*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
1085*0Sstevel@tonic-gate 		aio_req_free(aiop, reqp);
1086*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
1087*0Sstevel@tonic-gate 	}
1088*0Sstevel@tonic-gate done:
1089*0Sstevel@tonic-gate 	kmem_free(cbplist, ssize);
1090*0Sstevel@tonic-gate 	return (error);
1091*0Sstevel@tonic-gate }
1092*0Sstevel@tonic-gate 
1093*0Sstevel@tonic-gate /*
1094*0Sstevel@tonic-gate  * initialize aio by allocating an aio_t struct for this
1095*0Sstevel@tonic-gate  * process.
1096*0Sstevel@tonic-gate  */
1097*0Sstevel@tonic-gate static int
1098*0Sstevel@tonic-gate aioinit(void)
1099*0Sstevel@tonic-gate {
1100*0Sstevel@tonic-gate 	proc_t *p = curproc;
1101*0Sstevel@tonic-gate 	aio_t *aiop;
1102*0Sstevel@tonic-gate 	mutex_enter(&p->p_lock);
1103*0Sstevel@tonic-gate 	if ((aiop = p->p_aio) == NULL) {
1104*0Sstevel@tonic-gate 		aiop = aio_aiop_alloc();
1105*0Sstevel@tonic-gate 		p->p_aio = aiop;
1106*0Sstevel@tonic-gate 	}
1107*0Sstevel@tonic-gate 	mutex_exit(&p->p_lock);
1108*0Sstevel@tonic-gate 	if (aiop == NULL)
1109*0Sstevel@tonic-gate 		return (ENOMEM);
1110*0Sstevel@tonic-gate 	return (0);
1111*0Sstevel@tonic-gate }
1112*0Sstevel@tonic-gate 
1113*0Sstevel@tonic-gate /*
1114*0Sstevel@tonic-gate  * Start a special thread that will clean up after aio requests
1115*0Sstevel@tonic-gate  * that are preventing a segment from being unmapped.  as_unmap()
1116*0Sstevel@tonic-gate  * blocks until all physio to this segment is completed.  This
1117*0Sstevel@tonic-gate  * doesn't happen until none of the pages in this segment are
1118*0Sstevel@tonic-gate  * SOFTLOCKed.  Some pages will be SOFTLOCKed when there are aio
1119*0Sstevel@tonic-gate  * requests still outstanding.  This special thread will make sure
1120*0Sstevel@tonic-gate  * that these SOFTLOCKed pages will eventually be SOFTUNLOCKed.
1121*0Sstevel@tonic-gate  *
1122*0Sstevel@tonic-gate  * This function will return an error if the process has only
1123*0Sstevel@tonic-gate  * one LWP.  The assumption is that the caller is a separate LWP
1124*0Sstevel@tonic-gate  * that remains blocked in the kernel for the life of this process.
1125*0Sstevel@tonic-gate  */
1126*0Sstevel@tonic-gate static int
1127*0Sstevel@tonic-gate aiostart(void)
1128*0Sstevel@tonic-gate {
1129*0Sstevel@tonic-gate 	proc_t *p = curproc;
1130*0Sstevel@tonic-gate 	aio_t *aiop;
1131*0Sstevel@tonic-gate 	int first, error = 0;
1132*0Sstevel@tonic-gate 
1133*0Sstevel@tonic-gate 	if (p->p_lwpcnt == 1)
1134*0Sstevel@tonic-gate 		return (EDEADLK);
1135*0Sstevel@tonic-gate 	mutex_enter(&p->p_lock);
1136*0Sstevel@tonic-gate 	if ((aiop = p->p_aio) == NULL)
1137*0Sstevel@tonic-gate 		error = EINVAL;
1138*0Sstevel@tonic-gate 	else {
1139*0Sstevel@tonic-gate 		first = aiop->aio_ok;
1140*0Sstevel@tonic-gate 		if (aiop->aio_ok == 0)
1141*0Sstevel@tonic-gate 			aiop->aio_ok = 1;
1142*0Sstevel@tonic-gate 	}
1143*0Sstevel@tonic-gate 	mutex_exit(&p->p_lock);
1144*0Sstevel@tonic-gate 	if (error == 0 && first == 0) {
1145*0Sstevel@tonic-gate 		return (aio_cleanup_thread(aiop));
1146*0Sstevel@tonic-gate 		/* should return only to exit */
1147*0Sstevel@tonic-gate 	}
1148*0Sstevel@tonic-gate 	return (error);
1149*0Sstevel@tonic-gate }
1150*0Sstevel@tonic-gate 
1151*0Sstevel@tonic-gate /*
1152*0Sstevel@tonic-gate  * Associate an aiocb with a port.
1153*0Sstevel@tonic-gate  * This function is used by aiorw() to associate a transaction with a port.
1154*0Sstevel@tonic-gate  * Allocate an event port structure (port_alloc_event()) and store the
1155*0Sstevel@tonic-gate  * delivered user pointer (portnfy_user) in the portkev_user field of the
1156*0Sstevel@tonic-gate  * port_kevent_t structure.
1157*0Sstevel@tonic-gate  * The aio_req_portkev pointer in the aio_req_t structure identifies
1158*0Sstevel@tonic-gate  * the port association.
1159*0Sstevel@tonic-gate  */
1160*0Sstevel@tonic-gate 
1161*0Sstevel@tonic-gate static int
1162*0Sstevel@tonic-gate aio_req_assoc_port_rw(port_notify_t *pntfy, aiocb_t *cbp, aio_req_t *reqp)
1163*0Sstevel@tonic-gate {
1164*0Sstevel@tonic-gate 	port_kevent_t	*pkevp = NULL;
1165*0Sstevel@tonic-gate 	int		error;
1166*0Sstevel@tonic-gate 
1167*0Sstevel@tonic-gate 	error = port_alloc_event(pntfy->portnfy_port, PORT_ALLOC_DEFAULT,
1168*0Sstevel@tonic-gate 	    PORT_SOURCE_AIO, &pkevp);
1169*0Sstevel@tonic-gate 	if (error) {
1170*0Sstevel@tonic-gate 		if ((error == ENOMEM) || (error == EAGAIN))
1171*0Sstevel@tonic-gate 			error = EAGAIN;
1172*0Sstevel@tonic-gate 		else
1173*0Sstevel@tonic-gate 			error = EINVAL;
1174*0Sstevel@tonic-gate 	} else {
1175*0Sstevel@tonic-gate 		port_init_event(pkevp, (uintptr_t)cbp, pntfy->portnfy_user,
1176*0Sstevel@tonic-gate 		    aio_port_callback, reqp);
1177*0Sstevel@tonic-gate 		reqp->aio_req_portkev = pkevp;
1178*0Sstevel@tonic-gate 		reqp->aio_req_port = pntfy->portnfy_port;
1179*0Sstevel@tonic-gate 	}
1180*0Sstevel@tonic-gate 	return (error);
1181*0Sstevel@tonic-gate }
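
/*
 * Illustrative user-level sketch (not part of this file, shown only as a
 * hedged example) of how a caller could request the port notification
 * handled above, using the event-port types referenced in this code.
 * port_create() and aio_read() are the standard library interfaces:
 *
 *	port_notify_t	pn;
 *	aiocb_t		cb;
 *
 *	pn.portnfy_port = port;		(port obtained from port_create())
 *	pn.portnfy_user = &cb;		(returned later as portev_user)
 *	cb.aio_sigevent.sigev_notify = SIGEV_PORT;
 *	cb.aio_sigevent.sigev_value.sival_ptr = &pn;
 *	(void) aio_read(&cb);
 */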
1182*0Sstevel@tonic-gate 
1183*0Sstevel@tonic-gate /*
1184*0Sstevel@tonic-gate  * Associate an aiocb with a port.
1185*0Sstevel@tonic-gate  * This function is used by lio_listio() to associate a transaction with a port.
1186*0Sstevel@tonic-gate  * Allocate an event port structure (port_alloc_event()) and store the
1187*0Sstevel@tonic-gate  * delivered user pointer (portnfy_user) in the portkev_user field of the
1188*0Sstevel@tonic-gate  * port_kevent_t structure.  The aio_req_portkev pointer in the aio_req_t
1189*0Sstevel@tonic-gate  * structure identifies the port association.
1190*0Sstevel@tonic-gate  * The event port notification can be requested by attaching the
1191*0Sstevel@tonic-gate  * port_notify_t structure to the sigevent argument of lio_listio() or by
1192*0Sstevel@tonic-gate  * attaching the port_notify_t structure to the sigevent structure embedded
1193*0Sstevel@tonic-gate  * in the aiocb.
1194*0Sstevel@tonic-gate  * The attachment to the global sigevent structure is valid for all aiocbs
1195*0Sstevel@tonic-gate  * in the list.
1196*0Sstevel@tonic-gate  */
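/*
 * A hedged userland sketch (not part of this file) of the two attachment
 * styles described above; "port", "list", "nent", "list_cookie" and
 * "one_cookie" are illustrative assumptions:
 *
 *	port_notify_t global_pn = { port, list_cookie };
 *	struct sigevent sig;
 *	sig.sigev_notify = SIGEV_PORT;
 *	sig.sigev_value.sival_ptr = &global_pn;
 *
 *	port_notify_t private_pn = { port, one_cookie };
 *	list[0]->aio_sigevent.sigev_notify = SIGEV_PORT;
 *	list[0]->aio_sigevent.sigev_value.sival_ptr = &private_pn;
 *
 *	(void) lio_listio(LIO_NOWAIT, list, nent, &sig);
 *
 * The global_pn attachment applies to every aiocb in the list; the
 * private_pn attachment on list[0] overrides it for that entry only
 * (the SIGEV_PORT branch below versus the port_dup_event() branch).
 */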
1197*0Sstevel@tonic-gate 
1198*0Sstevel@tonic-gate static int
1199*0Sstevel@tonic-gate aio_req_assoc_port(struct sigevent *sigev, void	*user, aiocb_t *cbp,
1200*0Sstevel@tonic-gate     aio_req_t *reqp, port_kevent_t *pkevtp)
1201*0Sstevel@tonic-gate {
1202*0Sstevel@tonic-gate 	port_kevent_t	*pkevp = NULL;
1203*0Sstevel@tonic-gate 	port_notify_t	pntfy;
1204*0Sstevel@tonic-gate 	int		error;
1205*0Sstevel@tonic-gate 
1206*0Sstevel@tonic-gate 	if (sigev->sigev_notify == SIGEV_PORT) {
1207*0Sstevel@tonic-gate 		/* aiocb has its own port notification embedded */
1208*0Sstevel@tonic-gate 		if (copyin((void *)sigev->sigev_value.sival_ptr, &pntfy,
1209*0Sstevel@tonic-gate 		    sizeof (port_notify_t)))
1210*0Sstevel@tonic-gate 			return (EFAULT);
1211*0Sstevel@tonic-gate 
1212*0Sstevel@tonic-gate 		error = port_alloc_event(pntfy.portnfy_port, PORT_ALLOC_DEFAULT,
1213*0Sstevel@tonic-gate 		    PORT_SOURCE_AIO, &pkevp);
1214*0Sstevel@tonic-gate 		if (error) {
1215*0Sstevel@tonic-gate 			if ((error == ENOMEM) || (error == EAGAIN))
1216*0Sstevel@tonic-gate 				return (EAGAIN);
1217*0Sstevel@tonic-gate 			else
1218*0Sstevel@tonic-gate 				return (EINVAL);
1219*0Sstevel@tonic-gate 		}
1220*0Sstevel@tonic-gate 		/* use these values instead of the global values in the port */
1221*0Sstevel@tonic-gate 
1222*0Sstevel@tonic-gate 		port_init_event(pkevp, (uintptr_t)cbp, pntfy.portnfy_user,
1223*0Sstevel@tonic-gate 		    aio_port_callback, reqp);
1224*0Sstevel@tonic-gate 		reqp->aio_req_port = pntfy.portnfy_port;
1225*0Sstevel@tonic-gate 	} else {
1226*0Sstevel@tonic-gate 		/* use global port notification */
1227*0Sstevel@tonic-gate 		error = port_dup_event(pkevtp, &pkevp, PORT_ALLOC_DEFAULT);
1228*0Sstevel@tonic-gate 		if (error)
1229*0Sstevel@tonic-gate 			return (EAGAIN);
1230*0Sstevel@tonic-gate 		port_init_event(pkevp, (uintptr_t)cbp, user, aio_port_callback,
1231*0Sstevel@tonic-gate 		    reqp);
1232*0Sstevel@tonic-gate 	}
1233*0Sstevel@tonic-gate 	reqp->aio_req_portkev = pkevp;
1234*0Sstevel@tonic-gate 	return (0);
1235*0Sstevel@tonic-gate }
1236*0Sstevel@tonic-gate 
1237*0Sstevel@tonic-gate /*
1238*0Sstevel@tonic-gate  * Same comments as in aio_req_assoc_port(), see above.
1239*0Sstevel@tonic-gate  */
1240*0Sstevel@tonic-gate 
1241*0Sstevel@tonic-gate static int
1242*0Sstevel@tonic-gate aio_req_assoc_port32(struct sigevent32 *sigev, void *user, aiocb_t *cbp,
1243*0Sstevel@tonic-gate     aio_req_t *reqp, port_kevent_t *pkevtp)
1244*0Sstevel@tonic-gate {
1245*0Sstevel@tonic-gate 	port_kevent_t	*pkevp = NULL;
1246*0Sstevel@tonic-gate 	port_notify32_t	pntfy;
1247*0Sstevel@tonic-gate 	int		error;
1248*0Sstevel@tonic-gate 
1249*0Sstevel@tonic-gate 	if (sigev->sigev_notify == SIGEV_PORT) {
1250*0Sstevel@tonic-gate 		if (copyin((void *)(uintptr_t)sigev->sigev_value.sival_int,
1251*0Sstevel@tonic-gate 		    &pntfy, sizeof (port_notify32_t)))
1252*0Sstevel@tonic-gate 			return (EFAULT);
1253*0Sstevel@tonic-gate 
1254*0Sstevel@tonic-gate 		error = port_alloc_event(pntfy.portnfy_port,
1255*0Sstevel@tonic-gate 		    PORT_ALLOC_DEFAULT, PORT_SOURCE_AIO, &pkevp);
1256*0Sstevel@tonic-gate 		if (error) {
1257*0Sstevel@tonic-gate 			if ((error == ENOMEM) || (error == EAGAIN))
1258*0Sstevel@tonic-gate 				return (EAGAIN);
1259*0Sstevel@tonic-gate 			else
1260*0Sstevel@tonic-gate 				return (EINVAL);
1261*0Sstevel@tonic-gate 		}
1262*0Sstevel@tonic-gate 		/* use these values instead of the global values in the port */
1263*0Sstevel@tonic-gate 
1264*0Sstevel@tonic-gate 		port_init_event(pkevp, (uintptr_t)cbp,
1265*0Sstevel@tonic-gate 		    (void *)(uintptr_t)pntfy.portnfy_user,
1266*0Sstevel@tonic-gate 		    aio_port_callback, reqp);
1267*0Sstevel@tonic-gate 		reqp->aio_req_port = pntfy.portnfy_port;
1268*0Sstevel@tonic-gate 	} else {
1269*0Sstevel@tonic-gate 		error = port_dup_event(pkevtp, &pkevp, PORT_ALLOC_DEFAULT);
1270*0Sstevel@tonic-gate 		if (error)
1271*0Sstevel@tonic-gate 			return (EAGAIN);
1272*0Sstevel@tonic-gate 		port_init_event(pkevp, (uintptr_t)cbp, user, aio_port_callback,
1273*0Sstevel@tonic-gate 		    reqp);
1274*0Sstevel@tonic-gate 	}
1275*0Sstevel@tonic-gate 	reqp->aio_req_portkev = pkevp;
1276*0Sstevel@tonic-gate 	return (0);
1277*0Sstevel@tonic-gate }
1278*0Sstevel@tonic-gate 
1279*0Sstevel@tonic-gate 
1280*0Sstevel@tonic-gate #ifdef _LP64
1281*0Sstevel@tonic-gate 
1282*0Sstevel@tonic-gate /*
1283*0Sstevel@tonic-gate  * Asynchronous list IO. A chain of aiocb's is copied in
1284*0Sstevel@tonic-gate  * one at a time. If the aiocb is invalid, it is skipped.
1285*0Sstevel@tonic-gate  * For each aiocb, the appropriate driver entry point is
1286*0Sstevel@tonic-gate  * called. Optimize for the common case where the list
1287*0Sstevel@tonic-gate  * of requests is to the same file descriptor.
1288*0Sstevel@tonic-gate  *
1289*0Sstevel@tonic-gate  * One possible optimization is to define a new driver entry
1290*0Sstevel@tonic-gate  * point that supports a list of IO requests. Whether this
1291*0Sstevel@tonic-gate  * improves performance depends somewhat on the driver's
1292*0Sstevel@tonic-gate  * locking strategy. Processing a list could adversely impact
1293*0Sstevel@tonic-gate  * the driver's interrupt latency.
1294*0Sstevel@tonic-gate  */
1295*0Sstevel@tonic-gate /*ARGSUSED*/
1296*0Sstevel@tonic-gate static int
1297*0Sstevel@tonic-gate alio(
1298*0Sstevel@tonic-gate 	int	opcode,
1299*0Sstevel@tonic-gate 	int	mode_arg,
1300*0Sstevel@tonic-gate 	aiocb_t	**aiocb_arg,
1301*0Sstevel@tonic-gate 	int	nent,
1302*0Sstevel@tonic-gate 	struct	sigevent *sigev)
1303*0Sstevel@tonic-gate 
1304*0Sstevel@tonic-gate {
1305*0Sstevel@tonic-gate 	file_t		*fp;
1306*0Sstevel@tonic-gate 	file_t		*prev_fp = NULL;
1307*0Sstevel@tonic-gate 	int		prev_mode = -1;
1308*0Sstevel@tonic-gate 	struct vnode	*vp;
1309*0Sstevel@tonic-gate 	aio_lio_t	*head;
1310*0Sstevel@tonic-gate 	aio_req_t	*reqp;
1311*0Sstevel@tonic-gate 	aio_t		*aiop;
1312*0Sstevel@tonic-gate 	caddr_t		cbplist;
1313*0Sstevel@tonic-gate 	aiocb_t		*cbp, **ucbp;
1314*0Sstevel@tonic-gate 	aiocb_t		cb;
1315*0Sstevel@tonic-gate 	aiocb_t		*aiocb = &cb;
1316*0Sstevel@tonic-gate 	struct sigevent sigevk;
1317*0Sstevel@tonic-gate 	sigqueue_t	*sqp;
1318*0Sstevel@tonic-gate 	int		(*aio_func)();
1319*0Sstevel@tonic-gate 	int		mode;
1320*0Sstevel@tonic-gate 	int		error = 0;
1321*0Sstevel@tonic-gate 	int		aio_errors = 0;
1322*0Sstevel@tonic-gate 	int		i;
1323*0Sstevel@tonic-gate 	size_t		ssize;
1324*0Sstevel@tonic-gate 	int		deadhead = 0;
1325*0Sstevel@tonic-gate 	int		aio_notsupported = 0;
1326*0Sstevel@tonic-gate 	int		aio_use_port = 0;
1327*0Sstevel@tonic-gate 	port_kevent_t	*pkevtp = NULL;
1328*0Sstevel@tonic-gate 	port_notify_t	pnotify;
1329*0Sstevel@tonic-gate 
1330*0Sstevel@tonic-gate 	aiop = curproc->p_aio;
1331*0Sstevel@tonic-gate 	if (aiop == NULL || nent <= 0 || nent > _AIO_LISTIO_MAX)
1332*0Sstevel@tonic-gate 		return (EINVAL);
1333*0Sstevel@tonic-gate 
1334*0Sstevel@tonic-gate 	ssize = (sizeof (aiocb_t *) * nent);
1335*0Sstevel@tonic-gate 	cbplist = kmem_alloc(ssize, KM_SLEEP);
1336*0Sstevel@tonic-gate 	ucbp = (aiocb_t **)cbplist;
1337*0Sstevel@tonic-gate 
1338*0Sstevel@tonic-gate 	if (copyin(aiocb_arg, cbplist, sizeof (aiocb_t *) * nent)) {
1339*0Sstevel@tonic-gate 		kmem_free(cbplist, ssize);
1340*0Sstevel@tonic-gate 		return (EFAULT);
1341*0Sstevel@tonic-gate 	}
1342*0Sstevel@tonic-gate 
1343*0Sstevel@tonic-gate 	if (sigev) {
1344*0Sstevel@tonic-gate 		if (copyin(sigev, &sigevk, sizeof (struct sigevent))) {
1345*0Sstevel@tonic-gate 			kmem_free(cbplist, ssize);
1346*0Sstevel@tonic-gate 			return (EFAULT);
1347*0Sstevel@tonic-gate 		}
1348*0Sstevel@tonic-gate 	}
1349*0Sstevel@tonic-gate 
1350*0Sstevel@tonic-gate 	/*
1351*0Sstevel@tonic-gate 	 * a list head should be allocated if notification is
1352*0Sstevel@tonic-gate 	 * enabled for this list.
1353*0Sstevel@tonic-gate 	 */
1354*0Sstevel@tonic-gate 	head = NULL;
1355*0Sstevel@tonic-gate 
1356*0Sstevel@tonic-gate 	/* Event Ports  */
1357*0Sstevel@tonic-gate 
1358*0Sstevel@tonic-gate 	if (sigev && sigevk.sigev_notify == SIGEV_PORT) {
1359*0Sstevel@tonic-gate 		/* Use port for completion notification */
1360*0Sstevel@tonic-gate 		if (copyin(sigevk.sigev_value.sival_ptr, &pnotify,
1361*0Sstevel@tonic-gate 		    sizeof (port_notify_t))) {
1362*0Sstevel@tonic-gate 			kmem_free(cbplist, ssize);
1363*0Sstevel@tonic-gate 			return (EFAULT);
1364*0Sstevel@tonic-gate 		}
1365*0Sstevel@tonic-gate 		/* use event ports for the list of aiocbs */
1366*0Sstevel@tonic-gate 		aio_use_port = 1;
1367*0Sstevel@tonic-gate 		error = port_alloc_event(pnotify.portnfy_port,
1368*0Sstevel@tonic-gate 		    PORT_ALLOC_PRIVATE, PORT_SOURCE_AIO, &pkevtp);
1369*0Sstevel@tonic-gate 		if (error) {
1370*0Sstevel@tonic-gate 			if ((error == ENOMEM) || (error == EAGAIN))
1371*0Sstevel@tonic-gate 				error = EAGAIN;
1372*0Sstevel@tonic-gate 			else
1373*0Sstevel@tonic-gate 				error = EINVAL;
1374*0Sstevel@tonic-gate 			kmem_free(cbplist, ssize);
1375*0Sstevel@tonic-gate 			return (error);
1376*0Sstevel@tonic-gate 		}
1377*0Sstevel@tonic-gate 	} else if ((mode_arg == LIO_WAIT) || sigev) {
1378*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
1379*0Sstevel@tonic-gate 		error = aio_lio_alloc(&head);
1380*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
1381*0Sstevel@tonic-gate 		if (error)
1382*0Sstevel@tonic-gate 			goto done;
1383*0Sstevel@tonic-gate 		deadhead = 1;
1384*0Sstevel@tonic-gate 		head->lio_nent = nent;
1385*0Sstevel@tonic-gate 		head->lio_refcnt = nent;
1386*0Sstevel@tonic-gate 		if (sigev && (sigevk.sigev_notify == SIGEV_SIGNAL) &&
1387*0Sstevel@tonic-gate 		    (sigevk.sigev_signo > 0 && sigevk.sigev_signo < NSIG)) {
1388*0Sstevel@tonic-gate 			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_NOSLEEP);
1389*0Sstevel@tonic-gate 			if (sqp == NULL) {
1390*0Sstevel@tonic-gate 				error = EAGAIN;
1391*0Sstevel@tonic-gate 				goto done;
1392*0Sstevel@tonic-gate 			}
1393*0Sstevel@tonic-gate 			sqp->sq_func = NULL;
1394*0Sstevel@tonic-gate 			sqp->sq_next = NULL;
1395*0Sstevel@tonic-gate 			sqp->sq_info.si_code = SI_ASYNCIO;
1396*0Sstevel@tonic-gate 			sqp->sq_info.si_pid = curproc->p_pid;
1397*0Sstevel@tonic-gate 			sqp->sq_info.si_ctid = PRCTID(curproc);
1398*0Sstevel@tonic-gate 			sqp->sq_info.si_zoneid = getzoneid();
1399*0Sstevel@tonic-gate 			sqp->sq_info.si_uid = crgetuid(curproc->p_cred);
1400*0Sstevel@tonic-gate 			sqp->sq_info.si_signo = sigevk.sigev_signo;
1401*0Sstevel@tonic-gate 			sqp->sq_info.si_value = sigevk.sigev_value;
1402*0Sstevel@tonic-gate 			head->lio_sigqp = sqp;
1403*0Sstevel@tonic-gate 		} else {
1404*0Sstevel@tonic-gate 			head->lio_sigqp = NULL;
1405*0Sstevel@tonic-gate 		}
1406*0Sstevel@tonic-gate 	}
1407*0Sstevel@tonic-gate 
1408*0Sstevel@tonic-gate 	for (i = 0; i < nent; i++, ucbp++) {
1409*0Sstevel@tonic-gate 
1410*0Sstevel@tonic-gate 		cbp = *ucbp;
1411*0Sstevel@tonic-gate 		/* skip entry if it can't be copied. */
1412*0Sstevel@tonic-gate 		if (cbp == NULL || copyin(cbp, aiocb, sizeof (aiocb_t))) {
1413*0Sstevel@tonic-gate 			if (head) {
1414*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
1415*0Sstevel@tonic-gate 				head->lio_nent--;
1416*0Sstevel@tonic-gate 				head->lio_refcnt--;
1417*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
1418*0Sstevel@tonic-gate 			}
1419*0Sstevel@tonic-gate 			continue;
1420*0Sstevel@tonic-gate 		}
1421*0Sstevel@tonic-gate 
1422*0Sstevel@tonic-gate 		/* skip if opcode for aiocb is LIO_NOP */
1423*0Sstevel@tonic-gate 
1424*0Sstevel@tonic-gate 		mode = aiocb->aio_lio_opcode;
1425*0Sstevel@tonic-gate 		if (mode == LIO_NOP) {
1426*0Sstevel@tonic-gate 			cbp = NULL;
1427*0Sstevel@tonic-gate 			if (head) {
1428*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
1429*0Sstevel@tonic-gate 				head->lio_nent--;
1430*0Sstevel@tonic-gate 				head->lio_refcnt--;
1431*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
1432*0Sstevel@tonic-gate 			}
1433*0Sstevel@tonic-gate 			continue;
1434*0Sstevel@tonic-gate 		}
1435*0Sstevel@tonic-gate 
1436*0Sstevel@tonic-gate 		/* increment file descriptor's ref count. */
1437*0Sstevel@tonic-gate 		if ((fp = getf(aiocb->aio_fildes)) == NULL) {
1438*0Sstevel@tonic-gate 			lio_set_uerror(&cbp->aio_resultp, EBADF);
1439*0Sstevel@tonic-gate 			if (head) {
1440*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
1441*0Sstevel@tonic-gate 				head->lio_nent--;
1442*0Sstevel@tonic-gate 				head->lio_refcnt--;
1443*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
1444*0Sstevel@tonic-gate 			}
1445*0Sstevel@tonic-gate 			aio_errors++;
1446*0Sstevel@tonic-gate 			continue;
1447*0Sstevel@tonic-gate 		}
1448*0Sstevel@tonic-gate 
1449*0Sstevel@tonic-gate 		vp = fp->f_vnode;
1450*0Sstevel@tonic-gate 
1451*0Sstevel@tonic-gate 		/*
1452*0Sstevel@tonic-gate 		 * check the permission of the partition
1453*0Sstevel@tonic-gate 		 */
1454*0Sstevel@tonic-gate 		mode = aiocb->aio_lio_opcode;
1455*0Sstevel@tonic-gate 		if ((fp->f_flag & mode) == 0) {
1456*0Sstevel@tonic-gate 			releasef(aiocb->aio_fildes);
1457*0Sstevel@tonic-gate 			lio_set_uerror(&cbp->aio_resultp, EBADF);
1458*0Sstevel@tonic-gate 			if (head) {
1459*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
1460*0Sstevel@tonic-gate 				head->lio_nent--;
1461*0Sstevel@tonic-gate 				head->lio_refcnt--;
1462*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
1463*0Sstevel@tonic-gate 			}
1464*0Sstevel@tonic-gate 			aio_errors++;
1465*0Sstevel@tonic-gate 			continue;
1466*0Sstevel@tonic-gate 		}
1467*0Sstevel@tonic-gate 
1468*0Sstevel@tonic-gate 		/*
1469*0Sstevel@tonic-gate 		 * common case where requests are to the same fd for the
1470*0Sstevel@tonic-gate 		 * same r/w operation.
1471*0Sstevel@tonic-gate 		 * for UFS, need to set EBADFD
1472*0Sstevel@tonic-gate 		 */
1473*0Sstevel@tonic-gate 		if ((fp != prev_fp) || (mode != prev_mode)) {
1474*0Sstevel@tonic-gate 			aio_func = check_vp(vp, mode);
1475*0Sstevel@tonic-gate 			if (aio_func == NULL) {
1476*0Sstevel@tonic-gate 				prev_fp = NULL;
1477*0Sstevel@tonic-gate 				releasef(aiocb->aio_fildes);
1478*0Sstevel@tonic-gate 				lio_set_uerror(&cbp->aio_resultp, EBADFD);
1479*0Sstevel@tonic-gate 				aio_notsupported++;
1480*0Sstevel@tonic-gate 				if (head) {
1481*0Sstevel@tonic-gate 					mutex_enter(&aiop->aio_mutex);
1482*0Sstevel@tonic-gate 					head->lio_nent--;
1483*0Sstevel@tonic-gate 					head->lio_refcnt--;
1484*0Sstevel@tonic-gate 					mutex_exit(&aiop->aio_mutex);
1485*0Sstevel@tonic-gate 				}
1486*0Sstevel@tonic-gate 				continue;
1487*0Sstevel@tonic-gate 			} else {
1488*0Sstevel@tonic-gate 				prev_fp = fp;
1489*0Sstevel@tonic-gate 				prev_mode = mode;
1490*0Sstevel@tonic-gate 			}
1491*0Sstevel@tonic-gate 		}
1492*0Sstevel@tonic-gate 
1493*0Sstevel@tonic-gate 		if (error = aio_req_setup(&reqp, aiop, aiocb,
1494*0Sstevel@tonic-gate 		    &cbp->aio_resultp, aio_use_port, vp)) {
1495*0Sstevel@tonic-gate 			releasef(aiocb->aio_fildes);
1496*0Sstevel@tonic-gate 			lio_set_uerror(&cbp->aio_resultp, error);
1497*0Sstevel@tonic-gate 			if (head) {
1498*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
1499*0Sstevel@tonic-gate 				head->lio_nent--;
1500*0Sstevel@tonic-gate 				head->lio_refcnt--;
1501*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
1502*0Sstevel@tonic-gate 			}
1503*0Sstevel@tonic-gate 			aio_errors++;
1504*0Sstevel@tonic-gate 			continue;
1505*0Sstevel@tonic-gate 		}
1506*0Sstevel@tonic-gate 
1507*0Sstevel@tonic-gate 		reqp->aio_req_lio = head;
1508*0Sstevel@tonic-gate 		deadhead = 0;
1509*0Sstevel@tonic-gate 
1510*0Sstevel@tonic-gate 		/*
1511*0Sstevel@tonic-gate 		 * Set the errno field now before sending the request to
1512*0Sstevel@tonic-gate 		 * the driver to avoid a race condition
1513*0Sstevel@tonic-gate 		 */
1514*0Sstevel@tonic-gate 		(void) suword32(&cbp->aio_resultp.aio_errno,
1515*0Sstevel@tonic-gate 		    EINPROGRESS);
1516*0Sstevel@tonic-gate 
1517*0Sstevel@tonic-gate 		reqp->aio_req_iocb.iocb = (caddr_t)cbp;
1518*0Sstevel@tonic-gate 
1519*0Sstevel@tonic-gate 		if (aio_use_port) {
1520*0Sstevel@tonic-gate 			reqp->aio_req_port = pnotify.portnfy_port;
1521*0Sstevel@tonic-gate 			error = aio_req_assoc_port(&aiocb->aio_sigevent,
1522*0Sstevel@tonic-gate 			    pnotify.portnfy_user, cbp, reqp, pkevtp);
1523*0Sstevel@tonic-gate 		}
1524*0Sstevel@tonic-gate 
1525*0Sstevel@tonic-gate 		/*
1526*0Sstevel@tonic-gate 		 * send the request to driver.
1527*0Sstevel@tonic-gate 		 * Clustering: If PXFS vnode, call PXFS function.
1528*0Sstevel@tonic-gate 		 */
1529*0Sstevel@tonic-gate 		if (error == 0) {
1530*0Sstevel@tonic-gate 			if (aiocb->aio_nbytes == 0) {
1531*0Sstevel@tonic-gate 				clear_active_fd(aiocb->aio_fildes);
1532*0Sstevel@tonic-gate 				aio_zerolen(reqp);
1533*0Sstevel@tonic-gate 				continue;
1534*0Sstevel@tonic-gate 			}
1535*0Sstevel@tonic-gate 			error = (*aio_func)(vp, (aio_req_t *)&reqp->aio_req,
1536*0Sstevel@tonic-gate 			    CRED());
1537*0Sstevel@tonic-gate 		}
1538*0Sstevel@tonic-gate 		/*
1539*0Sstevel@tonic-gate 		 * the fd's ref count is not decremented until the IO has
1540*0Sstevel@tonic-gate 		 * completed unless there was an error.
1541*0Sstevel@tonic-gate 		 */
1542*0Sstevel@tonic-gate 		if (error) {
1543*0Sstevel@tonic-gate 			releasef(aiocb->aio_fildes);
1544*0Sstevel@tonic-gate 			lio_set_uerror(&cbp->aio_resultp, error);
1545*0Sstevel@tonic-gate 			if (head) {
1546*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
1547*0Sstevel@tonic-gate 				head->lio_nent--;
1548*0Sstevel@tonic-gate 				head->lio_refcnt--;
1549*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
1550*0Sstevel@tonic-gate 			}
1551*0Sstevel@tonic-gate 			if (error == ENOTSUP)
1552*0Sstevel@tonic-gate 				aio_notsupported++;
1553*0Sstevel@tonic-gate 			else
1554*0Sstevel@tonic-gate 				aio_errors++;
1555*0Sstevel@tonic-gate 			lio_set_error(reqp);
1556*0Sstevel@tonic-gate 		} else {
1557*0Sstevel@tonic-gate 			clear_active_fd(aiocb->aio_fildes);
1558*0Sstevel@tonic-gate 		}
1559*0Sstevel@tonic-gate 	}
1560*0Sstevel@tonic-gate 
1561*0Sstevel@tonic-gate 	if (pkevtp)
1562*0Sstevel@tonic-gate 		port_free_event(pkevtp);
1563*0Sstevel@tonic-gate 
1564*0Sstevel@tonic-gate 	if (aio_notsupported) {
1565*0Sstevel@tonic-gate 		error = ENOTSUP;
1566*0Sstevel@tonic-gate 	} else if (aio_errors) {
1567*0Sstevel@tonic-gate 		/*
1568*0Sstevel@tonic-gate 		 * return EIO if any request failed
1569*0Sstevel@tonic-gate 		 */
1570*0Sstevel@tonic-gate 		error = EIO;
1571*0Sstevel@tonic-gate 	}
1572*0Sstevel@tonic-gate 
1573*0Sstevel@tonic-gate 	if (mode_arg == LIO_WAIT) {
1574*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
1575*0Sstevel@tonic-gate 		while (head->lio_refcnt > 0) {
1576*0Sstevel@tonic-gate 			if (!cv_wait_sig(&head->lio_notify, &aiop->aio_mutex)) {
1577*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
1578*0Sstevel@tonic-gate 				error = EINTR;
1579*0Sstevel@tonic-gate 				goto done;
1580*0Sstevel@tonic-gate 			}
1581*0Sstevel@tonic-gate 		}
1582*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
1583*0Sstevel@tonic-gate 		alio_cleanup(aiop, (aiocb_t **)cbplist, nent, AIO_64);
1584*0Sstevel@tonic-gate 	}
1585*0Sstevel@tonic-gate 
1586*0Sstevel@tonic-gate done:
1587*0Sstevel@tonic-gate 	kmem_free(cbplist, ssize);
1588*0Sstevel@tonic-gate 	if (deadhead) {
1589*0Sstevel@tonic-gate 		if (head->lio_sigqp)
1590*0Sstevel@tonic-gate 			kmem_free(head->lio_sigqp, sizeof (sigqueue_t));
1591*0Sstevel@tonic-gate 		kmem_free(head, sizeof (aio_lio_t));
1592*0Sstevel@tonic-gate 	}
1593*0Sstevel@tonic-gate 	return (error);
1594*0Sstevel@tonic-gate }
1595*0Sstevel@tonic-gate 
1596*0Sstevel@tonic-gate #endif /* _LP64 */
1597*0Sstevel@tonic-gate 
1598*0Sstevel@tonic-gate /*
1599*0Sstevel@tonic-gate  * Asynchronous list IO.
1600*0Sstevel@tonic-gate  * If list I/O is called with LIO_WAIT it can still return
1601*0Sstevel@tonic-gate  * before all the I/O's are completed if a signal is caught
1602*0Sstevel@tonic-gate  * or if the list includes UFS I/O requests. If this happens,
1603*0Sstevel@tonic-gate  * libaio will call aliowait() to wait for the I/O's to
1604*0Sstevel@tonic-gate  * complete.
1605*0Sstevel@tonic-gate  */
1606*0Sstevel@tonic-gate /*ARGSUSED*/
1607*0Sstevel@tonic-gate static int
1608*0Sstevel@tonic-gate aliowait(
1609*0Sstevel@tonic-gate 	int	mode,
1610*0Sstevel@tonic-gate 	void	*aiocb,
1611*0Sstevel@tonic-gate 	int	nent,
1612*0Sstevel@tonic-gate 	void	*sigev,
1613*0Sstevel@tonic-gate 	int	run_mode)
1614*0Sstevel@tonic-gate {
1615*0Sstevel@tonic-gate 	aio_lio_t	*head;
1616*0Sstevel@tonic-gate 	aio_t		*aiop;
1617*0Sstevel@tonic-gate 	caddr_t		cbplist;
1618*0Sstevel@tonic-gate 	aiocb_t		*cbp, **ucbp;
1619*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
1620*0Sstevel@tonic-gate 	aiocb32_t	*cbp32;
1621*0Sstevel@tonic-gate 	caddr32_t	*ucbp32;
1622*0Sstevel@tonic-gate 	aiocb64_32_t	*cbp64;
1623*0Sstevel@tonic-gate #endif
1624*0Sstevel@tonic-gate 	int		error = 0;
1625*0Sstevel@tonic-gate 	int		i;
1626*0Sstevel@tonic-gate 	size_t		ssize = 0;
1627*0Sstevel@tonic-gate 	model_t		model = get_udatamodel();
1628*0Sstevel@tonic-gate 
1629*0Sstevel@tonic-gate 	aiop = curproc->p_aio;
1630*0Sstevel@tonic-gate 	if (aiop == NULL || nent <= 0 || nent > _AIO_LISTIO_MAX)
1631*0Sstevel@tonic-gate 		return (EINVAL);
1632*0Sstevel@tonic-gate 
1633*0Sstevel@tonic-gate 	if (model == DATAMODEL_NATIVE)
1634*0Sstevel@tonic-gate 		ssize = (sizeof (aiocb_t *) * nent);
1635*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
1636*0Sstevel@tonic-gate 	else
1637*0Sstevel@tonic-gate 		ssize = (sizeof (caddr32_t) * nent);
1638*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
1639*0Sstevel@tonic-gate 
1640*0Sstevel@tonic-gate 	if (ssize == 0)
1641*0Sstevel@tonic-gate 		return (EINVAL);
1642*0Sstevel@tonic-gate 
1643*0Sstevel@tonic-gate 	cbplist = kmem_alloc(ssize, KM_SLEEP);
1644*0Sstevel@tonic-gate 
1645*0Sstevel@tonic-gate 	if (model == DATAMODEL_NATIVE)
1646*0Sstevel@tonic-gate 		ucbp = (aiocb_t **)cbplist;
1647*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
1648*0Sstevel@tonic-gate 	else
1649*0Sstevel@tonic-gate 		ucbp32 = (caddr32_t *)cbplist;
1650*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
1651*0Sstevel@tonic-gate 
1652*0Sstevel@tonic-gate 	if (copyin(aiocb, cbplist, ssize)) {
1653*0Sstevel@tonic-gate 		error = EFAULT;
1654*0Sstevel@tonic-gate 		goto done;
1655*0Sstevel@tonic-gate 	}
1656*0Sstevel@tonic-gate 
1657*0Sstevel@tonic-gate 	/*
1658*0Sstevel@tonic-gate 	 * To find the list head, we go through the
1659*0Sstevel@tonic-gate 	 * list of aiocb structs, find the request
1660*0Sstevel@tonic-gate 	 * it is for, then get the list head that reqp
1661*0Sstevel@tonic-gate 	 * points to.
1662*0Sstevel@tonic-gate 	 */
1663*0Sstevel@tonic-gate 	head = NULL;
1664*0Sstevel@tonic-gate 
1665*0Sstevel@tonic-gate 	for (i = 0; i < nent; i++) {
1666*0Sstevel@tonic-gate 		if (model == DATAMODEL_NATIVE) {
1667*0Sstevel@tonic-gate 			/*
1668*0Sstevel@tonic-gate 			 * Since we are only checking for a NULL pointer,
1669*0Sstevel@tonic-gate 			 * the following should work for both native data
1670*0Sstevel@tonic-gate 			 * sizes as well as for the largefile aiocb.
1671*0Sstevel@tonic-gate 			 */
1672*0Sstevel@tonic-gate 			if ((cbp = *ucbp++) == NULL)
1673*0Sstevel@tonic-gate 				continue;
1674*0Sstevel@tonic-gate 			if (run_mode != AIO_LARGEFILE) {
1675*0Sstevel@tonic-gate 				if (head = aio_list_get(&cbp->aio_resultp))
1676*0Sstevel@tonic-gate 					break;
1677*0Sstevel@tonic-gate 			} else {
1678*0Sstevel@tonic-gate 				/*
1679*0Sstevel@tonic-gate 				 * This is the case where a largefile call is
1680*0Sstevel@tonic-gate 				 * made on a 32 bit kernel.
1681*0Sstevel@tonic-gate 				 * Treat each pointer as a pointer to an
1682*0Sstevel@tonic-gate 				 * aiocb64_32.
1683*0Sstevel@tonic-gate 				 */
1684*0Sstevel@tonic-gate 				if (head = aio_list_get((aio_result_t *)
1685*0Sstevel@tonic-gate 				    &(((aiocb64_32_t *)cbp)->aio_resultp)))
1686*0Sstevel@tonic-gate 					break;
1687*0Sstevel@tonic-gate 			}
1688*0Sstevel@tonic-gate 		}
1689*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
1690*0Sstevel@tonic-gate 		else {
1691*0Sstevel@tonic-gate 			if (run_mode == AIO_LARGEFILE) {
1692*0Sstevel@tonic-gate 				if ((cbp64 = (aiocb64_32_t *)
1693*0Sstevel@tonic-gate 				    (uintptr_t)*ucbp32++) == NULL)
1694*0Sstevel@tonic-gate 					continue;
1695*0Sstevel@tonic-gate 				if (head = aio_list_get((aio_result_t *)
1696*0Sstevel@tonic-gate 				    &cbp64->aio_resultp))
1697*0Sstevel@tonic-gate 					break;
1698*0Sstevel@tonic-gate 			} else if (run_mode == AIO_32) {
1699*0Sstevel@tonic-gate 				if ((cbp32 = (aiocb32_t *)
1700*0Sstevel@tonic-gate 				    (uintptr_t)*ucbp32++) == NULL)
1701*0Sstevel@tonic-gate 					continue;
1702*0Sstevel@tonic-gate 				if (head = aio_list_get((aio_result_t *)
1703*0Sstevel@tonic-gate 				    &cbp32->aio_resultp))
1704*0Sstevel@tonic-gate 					break;
1705*0Sstevel@tonic-gate 			}
1706*0Sstevel@tonic-gate 		}
1707*0Sstevel@tonic-gate #endif	/* _SYSCALL32_IMPL */
1708*0Sstevel@tonic-gate 	}
1709*0Sstevel@tonic-gate 
1710*0Sstevel@tonic-gate 	if (head == NULL) {
1711*0Sstevel@tonic-gate 		error = EINVAL;
1712*0Sstevel@tonic-gate 		goto done;
1713*0Sstevel@tonic-gate 	}
1714*0Sstevel@tonic-gate 
1715*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
1716*0Sstevel@tonic-gate 	while (head->lio_refcnt > 0) {
1717*0Sstevel@tonic-gate 		if (!cv_wait_sig(&head->lio_notify, &aiop->aio_mutex)) {
1718*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_mutex);
1719*0Sstevel@tonic-gate 			error = EINTR;
1720*0Sstevel@tonic-gate 			goto done;
1721*0Sstevel@tonic-gate 		}
1722*0Sstevel@tonic-gate 	}
1723*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_mutex);
1724*0Sstevel@tonic-gate 	alio_cleanup(aiop, (aiocb_t **)cbplist, nent, run_mode);
1725*0Sstevel@tonic-gate done:
1726*0Sstevel@tonic-gate 	kmem_free(cbplist, ssize);
1727*0Sstevel@tonic-gate 	return (error);
1728*0Sstevel@tonic-gate }
1729*0Sstevel@tonic-gate 
1730*0Sstevel@tonic-gate aio_lio_t *
1731*0Sstevel@tonic-gate aio_list_get(aio_result_t *resultp)
1732*0Sstevel@tonic-gate {
1733*0Sstevel@tonic-gate 	aio_lio_t	*head = NULL;
1734*0Sstevel@tonic-gate 	aio_t		*aiop;
1735*0Sstevel@tonic-gate 	aio_req_t 	**bucket;
1736*0Sstevel@tonic-gate 	aio_req_t 	*reqp;
1737*0Sstevel@tonic-gate 	long		index;
1738*0Sstevel@tonic-gate 
1739*0Sstevel@tonic-gate 	aiop = curproc->p_aio;
1740*0Sstevel@tonic-gate 	if (aiop == NULL)
1741*0Sstevel@tonic-gate 		return (NULL);
1742*0Sstevel@tonic-gate 
1743*0Sstevel@tonic-gate 	if (resultp) {
1744*0Sstevel@tonic-gate 		index = AIO_HASH(resultp);
1745*0Sstevel@tonic-gate 		bucket = &aiop->aio_hash[index];
1746*0Sstevel@tonic-gate 		for (reqp = *bucket; reqp != NULL;
1747*0Sstevel@tonic-gate 		    reqp = reqp->aio_hash_next) {
1748*0Sstevel@tonic-gate 			if (reqp->aio_req_resultp == resultp) {
1749*0Sstevel@tonic-gate 				head = reqp->aio_req_lio;
1750*0Sstevel@tonic-gate 				return (head);
1751*0Sstevel@tonic-gate 			}
1752*0Sstevel@tonic-gate 		}
1753*0Sstevel@tonic-gate 	}
1754*0Sstevel@tonic-gate 	return (NULL);
1755*0Sstevel@tonic-gate }
1756*0Sstevel@tonic-gate 
1757*0Sstevel@tonic-gate 
1758*0Sstevel@tonic-gate static void
1759*0Sstevel@tonic-gate lio_set_uerror(void *resultp, int error)
1760*0Sstevel@tonic-gate {
1761*0Sstevel@tonic-gate 	/*
1762*0Sstevel@tonic-gate 	 * the resultp argument points to the location in the
1763*0Sstevel@tonic-gate 	 * user's aiocb where the error should be written out.
1764*0Sstevel@tonic-gate 	 */
1767*0Sstevel@tonic-gate 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1768*0Sstevel@tonic-gate 		(void) sulword(&((aio_result_t *)resultp)->aio_return,
1769*0Sstevel@tonic-gate 		    (ssize_t)-1);
1770*0Sstevel@tonic-gate 		(void) suword32(&((aio_result_t *)resultp)->aio_errno, error);
1771*0Sstevel@tonic-gate 	}
1772*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
1773*0Sstevel@tonic-gate 	else {
1774*0Sstevel@tonic-gate 		(void) suword32(&((aio_result32_t *)resultp)->aio_return,
1775*0Sstevel@tonic-gate 		    (uint_t)-1);
1776*0Sstevel@tonic-gate 		(void) suword32(&((aio_result32_t *)resultp)->aio_errno, error);
1777*0Sstevel@tonic-gate 	}
1778*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
1779*0Sstevel@tonic-gate }
1780*0Sstevel@tonic-gate 
1781*0Sstevel@tonic-gate /*
1782*0Sstevel@tonic-gate  * do cleanup completion for all requests in the list. The memory
1783*0Sstevel@tonic-gate  * for each request is also freed.
1784*0Sstevel@tonic-gate  */
1785*0Sstevel@tonic-gate static void
1786*0Sstevel@tonic-gate alio_cleanup(aio_t *aiop, aiocb_t **cbp, int nent, int run_mode)
1787*0Sstevel@tonic-gate {
1788*0Sstevel@tonic-gate 	int i;
1789*0Sstevel@tonic-gate 	aio_req_t *reqp;
1790*0Sstevel@tonic-gate 	aio_result_t *resultp;
1791*0Sstevel@tonic-gate 	aiocb64_32_t	*aiocb_64;
1792*0Sstevel@tonic-gate 
1793*0Sstevel@tonic-gate 	for (i = 0; i < nent; i++) {
1794*0Sstevel@tonic-gate 		if (get_udatamodel() == DATAMODEL_NATIVE) {
1795*0Sstevel@tonic-gate 			if (cbp[i] == NULL)
1796*0Sstevel@tonic-gate 				continue;
1797*0Sstevel@tonic-gate 			if (run_mode == AIO_LARGEFILE) {
1798*0Sstevel@tonic-gate 				aiocb_64 = (aiocb64_32_t *)cbp[i];
1799*0Sstevel@tonic-gate 				resultp = (aio_result_t *)&aiocb_64->
1800*0Sstevel@tonic-gate 				    aio_resultp;
1801*0Sstevel@tonic-gate 			} else
1802*0Sstevel@tonic-gate 				resultp = &cbp[i]->aio_resultp;
1803*0Sstevel@tonic-gate 		}
1804*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
1805*0Sstevel@tonic-gate 		else {
1806*0Sstevel@tonic-gate 			aiocb32_t	*aiocb_32;
1807*0Sstevel@tonic-gate 			caddr32_t	*cbp32;
1808*0Sstevel@tonic-gate 
1809*0Sstevel@tonic-gate 			cbp32 = (caddr32_t *)cbp;
1810*0Sstevel@tonic-gate 			if (cbp32[i] == NULL)
1811*0Sstevel@tonic-gate 				continue;
1812*0Sstevel@tonic-gate 			if (run_mode == AIO_32) {
1813*0Sstevel@tonic-gate 				aiocb_32 = (aiocb32_t *)(uintptr_t)cbp32[i];
1814*0Sstevel@tonic-gate 				resultp = (aio_result_t *)&aiocb_32->
1815*0Sstevel@tonic-gate 				    aio_resultp;
1816*0Sstevel@tonic-gate 			} else if (run_mode == AIO_LARGEFILE) {
1817*0Sstevel@tonic-gate 				aiocb_64 = (aiocb64_32_t *)(uintptr_t)cbp32[i];
1818*0Sstevel@tonic-gate 				resultp = (aio_result_t *)&aiocb_64->
1819*0Sstevel@tonic-gate 				    aio_resultp;
1820*0Sstevel@tonic-gate 			}
1821*0Sstevel@tonic-gate 		}
1822*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
1823*0Sstevel@tonic-gate 		/*
1824*0Sstevel@tonic-gate 		 * we need to get the aio_cleanupq_mutex since we call
1825*0Sstevel@tonic-gate 		 * aio_req_done().
1826*0Sstevel@tonic-gate 		 */
1827*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_cleanupq_mutex);
1828*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
1829*0Sstevel@tonic-gate 		reqp = aio_req_done(resultp);
1830*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
1831*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_cleanupq_mutex);
1832*0Sstevel@tonic-gate 		if (reqp != NULL) {
1833*0Sstevel@tonic-gate 			aphysio_unlock(reqp);
1834*0Sstevel@tonic-gate 			aio_copyout_result(reqp);
1835*0Sstevel@tonic-gate 			mutex_enter(&aiop->aio_mutex);
1836*0Sstevel@tonic-gate 			aio_req_free(aiop, reqp);
1837*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_mutex);
1838*0Sstevel@tonic-gate 		}
1839*0Sstevel@tonic-gate 	}
1840*0Sstevel@tonic-gate }
1841*0Sstevel@tonic-gate 
1842*0Sstevel@tonic-gate /*
1843*0Sstevel@tonic-gate  * write out the results for an aio request that is
1844*0Sstevel@tonic-gate  * done.
1845*0Sstevel@tonic-gate  */
1846*0Sstevel@tonic-gate static int
1847*0Sstevel@tonic-gate aioerror(void *cb, int run_mode)
1848*0Sstevel@tonic-gate {
1849*0Sstevel@tonic-gate 	aio_result_t *resultp;
1850*0Sstevel@tonic-gate 	aio_t *aiop;
1851*0Sstevel@tonic-gate 	aio_req_t *reqp;
1852*0Sstevel@tonic-gate 	int retval;
1853*0Sstevel@tonic-gate 
1854*0Sstevel@tonic-gate 	aiop = curproc->p_aio;
1855*0Sstevel@tonic-gate 	if (aiop == NULL || cb == NULL)
1856*0Sstevel@tonic-gate 		return (EINVAL);
1857*0Sstevel@tonic-gate 
1858*0Sstevel@tonic-gate 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1859*0Sstevel@tonic-gate 		if (run_mode == AIO_LARGEFILE)
1860*0Sstevel@tonic-gate 			resultp = (aio_result_t *)&((aiocb64_32_t *)cb)->
1861*0Sstevel@tonic-gate 			    aio_resultp;
1862*0Sstevel@tonic-gate 		else
1863*0Sstevel@tonic-gate 			resultp = &((aiocb_t *)cb)->aio_resultp;
1864*0Sstevel@tonic-gate 	}
1865*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
1866*0Sstevel@tonic-gate 	else {
1867*0Sstevel@tonic-gate 		if (run_mode == AIO_LARGEFILE)
1868*0Sstevel@tonic-gate 			resultp = (aio_result_t *)&((aiocb64_32_t *)cb)->
1869*0Sstevel@tonic-gate 			    aio_resultp;
1870*0Sstevel@tonic-gate 		else if (run_mode == AIO_32)
1871*0Sstevel@tonic-gate 			resultp = (aio_result_t *)&((aiocb32_t *)cb)->
1872*0Sstevel@tonic-gate 			    aio_resultp;
1873*0Sstevel@tonic-gate 	}
1874*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
1875*0Sstevel@tonic-gate 	/*
1876*0Sstevel@tonic-gate 	 * we need to get the aio_cleanupq_mutex since we call
1877*0Sstevel@tonic-gate 	 * aio_req_find().
1878*0Sstevel@tonic-gate 	 */
1879*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_cleanupq_mutex);
1880*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
1881*0Sstevel@tonic-gate 	retval = aio_req_find(resultp, &reqp);
1882*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_mutex);
1883*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_cleanupq_mutex);
1884*0Sstevel@tonic-gate 	if (retval == 0) {
1885*0Sstevel@tonic-gate 		aphysio_unlock(reqp);
1886*0Sstevel@tonic-gate 		aio_copyout_result(reqp);
1887*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
1888*0Sstevel@tonic-gate 		aio_req_free(aiop, reqp);
1889*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
1890*0Sstevel@tonic-gate 		return (0);
1891*0Sstevel@tonic-gate 	} else if (retval == 1)
1892*0Sstevel@tonic-gate 		return (EINPROGRESS);
1893*0Sstevel@tonic-gate 	else if (retval == 2)
1894*0Sstevel@tonic-gate 		return (EINVAL);
1895*0Sstevel@tonic-gate 	return (0);
1896*0Sstevel@tonic-gate }
1897*0Sstevel@tonic-gate 
1898*0Sstevel@tonic-gate /*
1899*0Sstevel@tonic-gate  * 	aio_cancel - if the specified request, or every request on the
1900*0Sstevel@tonic-gate  *			file descriptor when cb is NULL, is no longer pending,
1901*0Sstevel@tonic-gate  *			set *rval to AIO_ALLDONE; otherwise set *rval to
1902*0Sstevel@tonic-gate  *			AIO_NOTCANCELED. No request is actually cancelled.
1903*0Sstevel@tonic-gate  */
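/*
 * A hedged userland sketch (not part of this file) of how the *rval values
 * set below surface through aio_cancel(3RT); "fd" and "cb" are illustrative:
 *
 *	int rv = aio_cancel(fd, &cb);
 *	if (rv == AIO_ALLDONE)
 *		(void) aio_return(&cb);
 *	else if (rv == AIO_NOTCANCELED)
 *		... poll aio_error(&cb) until it is no longer EINPROGRESS ...
 *
 * Note that this implementation only ever reports AIO_ALLDONE or
 * AIO_NOTCANCELED; pending raw device I/O is not actually cancelled.
 */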
1904*0Sstevel@tonic-gate static int
1905*0Sstevel@tonic-gate aio_cancel(
1906*0Sstevel@tonic-gate 	int	fildes,
1907*0Sstevel@tonic-gate 	void 	*cb,
1908*0Sstevel@tonic-gate 	long	*rval,
1909*0Sstevel@tonic-gate 	int	run_mode)
1910*0Sstevel@tonic-gate {
1911*0Sstevel@tonic-gate 	aio_t *aiop;
1912*0Sstevel@tonic-gate 	void *resultp;
1913*0Sstevel@tonic-gate 	int index;
1914*0Sstevel@tonic-gate 	aio_req_t **bucket;
1915*0Sstevel@tonic-gate 	aio_req_t *ent;
1916*0Sstevel@tonic-gate 
1917*0Sstevel@tonic-gate 
1918*0Sstevel@tonic-gate 	/*
1919*0Sstevel@tonic-gate 	 * Verify valid file descriptor
1920*0Sstevel@tonic-gate 	 */
1921*0Sstevel@tonic-gate 	if ((getf(fildes)) == NULL) {
1922*0Sstevel@tonic-gate 		return (EBADF);
1923*0Sstevel@tonic-gate 	}
1924*0Sstevel@tonic-gate 	releasef(fildes);
1925*0Sstevel@tonic-gate 
1926*0Sstevel@tonic-gate 	aiop = curproc->p_aio;
1927*0Sstevel@tonic-gate 	if (aiop == NULL)
1928*0Sstevel@tonic-gate 		return (EINVAL);
1929*0Sstevel@tonic-gate 
1930*0Sstevel@tonic-gate 	if (aiop->aio_outstanding == 0) {
1931*0Sstevel@tonic-gate 		*rval = AIO_ALLDONE;
1932*0Sstevel@tonic-gate 		return (0);
1933*0Sstevel@tonic-gate 	}
1934*0Sstevel@tonic-gate 
1935*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
1936*0Sstevel@tonic-gate 	if (cb != NULL) {
1937*0Sstevel@tonic-gate 		if (get_udatamodel() == DATAMODEL_NATIVE) {
1938*0Sstevel@tonic-gate 			if (run_mode == AIO_LARGEFILE)
1939*0Sstevel@tonic-gate 				resultp = (aio_result_t *)&((aiocb64_32_t *)cb)
1940*0Sstevel@tonic-gate 				    ->aio_resultp;
1941*0Sstevel@tonic-gate 			else
1942*0Sstevel@tonic-gate 				resultp = &((aiocb_t *)cb)->aio_resultp;
1943*0Sstevel@tonic-gate 		}
1944*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
1945*0Sstevel@tonic-gate 		else {
1946*0Sstevel@tonic-gate 			if (run_mode == AIO_LARGEFILE)
1947*0Sstevel@tonic-gate 				resultp = (aio_result_t *)&((aiocb64_32_t *)cb)
1948*0Sstevel@tonic-gate 				    ->aio_resultp;
1949*0Sstevel@tonic-gate 			else if (run_mode == AIO_32)
1950*0Sstevel@tonic-gate 				resultp = (aio_result_t *)&((aiocb32_t *)cb)
1951*0Sstevel@tonic-gate 				    ->aio_resultp;
1952*0Sstevel@tonic-gate 		}
1953*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
1954*0Sstevel@tonic-gate 		index = AIO_HASH(resultp);
1955*0Sstevel@tonic-gate 		bucket = &aiop->aio_hash[index];
1956*0Sstevel@tonic-gate 		for (ent = *bucket; ent != NULL; ent = ent->aio_hash_next) {
1957*0Sstevel@tonic-gate 			if (ent->aio_req_resultp == resultp) {
1958*0Sstevel@tonic-gate 				if ((ent->aio_req_flags & AIO_PENDING) == 0) {
1959*0Sstevel@tonic-gate 					mutex_exit(&aiop->aio_mutex);
1960*0Sstevel@tonic-gate 					*rval = AIO_ALLDONE;
1961*0Sstevel@tonic-gate 					return (0);
1962*0Sstevel@tonic-gate 				}
1963*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
1964*0Sstevel@tonic-gate 				*rval = AIO_NOTCANCELED;
1965*0Sstevel@tonic-gate 				return (0);
1966*0Sstevel@tonic-gate 			}
1967*0Sstevel@tonic-gate 		}
1968*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
1969*0Sstevel@tonic-gate 		*rval = AIO_ALLDONE;
1970*0Sstevel@tonic-gate 		return (0);
1971*0Sstevel@tonic-gate 	}
1972*0Sstevel@tonic-gate 
1973*0Sstevel@tonic-gate 	for (index = 0; index < AIO_HASHSZ; index++) {
1974*0Sstevel@tonic-gate 		bucket = &aiop->aio_hash[index];
1975*0Sstevel@tonic-gate 		for (ent = *bucket; ent != NULL; ent = ent->aio_hash_next) {
1976*0Sstevel@tonic-gate 			if (ent->aio_req_fd == fildes) {
1977*0Sstevel@tonic-gate 				if ((ent->aio_req_flags & AIO_PENDING) != 0) {
1978*0Sstevel@tonic-gate 					mutex_exit(&aiop->aio_mutex);
1979*0Sstevel@tonic-gate 					*rval = AIO_NOTCANCELED;
1980*0Sstevel@tonic-gate 					return (0);
1981*0Sstevel@tonic-gate 				}
1982*0Sstevel@tonic-gate 			}
1983*0Sstevel@tonic-gate 		}
1984*0Sstevel@tonic-gate 	}
1985*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_mutex);
1986*0Sstevel@tonic-gate 	*rval = AIO_ALLDONE;
1987*0Sstevel@tonic-gate 	return (0);
1988*0Sstevel@tonic-gate }
1989*0Sstevel@tonic-gate 
1990*0Sstevel@tonic-gate /*
1991*0Sstevel@tonic-gate  * solaris version of asynchronous read and write
1992*0Sstevel@tonic-gate  */
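/*
 * A hedged userland sketch (not part of this file) of the aioread(3AIO)
 * style interface this entry point backs; "fd", "buf" and "off" are
 * illustrative assumptions:
 *
 *	aio_result_t res;
 *	(void) aioread(fd, buf, sizeof (buf), off, SEEK_SET, &res);
 *	aio_result_t *done = aiowait(NULL);
 *
 * aiowait() blocks until some transaction completes; with only this one
 * transaction outstanding, done == &res, with the aio_return and aio_errno
 * fields filled in by the kernel (see aio_copyout_result()).
 */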
1993*0Sstevel@tonic-gate static int
1994*0Sstevel@tonic-gate arw(
1995*0Sstevel@tonic-gate 	int	opcode,
1996*0Sstevel@tonic-gate 	int	fdes,
1997*0Sstevel@tonic-gate 	char	*bufp,
1998*0Sstevel@tonic-gate 	int	bufsize,
1999*0Sstevel@tonic-gate 	offset_t	offset,
2000*0Sstevel@tonic-gate 	aio_result_t	*resultp,
2001*0Sstevel@tonic-gate 	int		mode)
2002*0Sstevel@tonic-gate {
2003*0Sstevel@tonic-gate 	file_t		*fp;
2004*0Sstevel@tonic-gate 	int		error;
2005*0Sstevel@tonic-gate 	struct vnode	*vp;
2006*0Sstevel@tonic-gate 	aio_req_t	*reqp;
2007*0Sstevel@tonic-gate 	aio_t		*aiop;
2008*0Sstevel@tonic-gate 	int		(*aio_func)();
2009*0Sstevel@tonic-gate #ifdef _LP64
2010*0Sstevel@tonic-gate 	aiocb_t		aiocb;
2011*0Sstevel@tonic-gate #else
2012*0Sstevel@tonic-gate 	aiocb64_32_t	aiocb64;
2013*0Sstevel@tonic-gate #endif
2014*0Sstevel@tonic-gate 
2015*0Sstevel@tonic-gate 	aiop = curproc->p_aio;
2016*0Sstevel@tonic-gate 	if (aiop == NULL)
2017*0Sstevel@tonic-gate 		return (EINVAL);
2018*0Sstevel@tonic-gate 
2019*0Sstevel@tonic-gate 	if ((fp = getf(fdes)) == NULL) {
2020*0Sstevel@tonic-gate 		return (EBADF);
2021*0Sstevel@tonic-gate 	}
2022*0Sstevel@tonic-gate 
2023*0Sstevel@tonic-gate 	/*
2024*0Sstevel@tonic-gate 	 * check the permission of the partition
2025*0Sstevel@tonic-gate 	 */
2026*0Sstevel@tonic-gate 	if ((fp->f_flag & mode) == 0) {
2027*0Sstevel@tonic-gate 		releasef(fdes);
2028*0Sstevel@tonic-gate 		return (EBADF);
2029*0Sstevel@tonic-gate 	}
2030*0Sstevel@tonic-gate 
2031*0Sstevel@tonic-gate 	vp = fp->f_vnode;
2032*0Sstevel@tonic-gate 	aio_func = check_vp(vp, mode);
2033*0Sstevel@tonic-gate 	if (aio_func == NULL) {
2034*0Sstevel@tonic-gate 		releasef(fdes);
2035*0Sstevel@tonic-gate 		return (EBADFD);
2036*0Sstevel@tonic-gate 	}
2037*0Sstevel@tonic-gate #ifdef _LP64
2038*0Sstevel@tonic-gate 	aiocb.aio_fildes = fdes;
2039*0Sstevel@tonic-gate 	aiocb.aio_buf = bufp;
2040*0Sstevel@tonic-gate 	aiocb.aio_nbytes = bufsize;
2041*0Sstevel@tonic-gate 	aiocb.aio_offset = offset;
2042*0Sstevel@tonic-gate 	aiocb.aio_sigevent.sigev_notify = 0;
2043*0Sstevel@tonic-gate 	error = aio_req_setup(&reqp, aiop, &aiocb, resultp, 0, vp);
2044*0Sstevel@tonic-gate #else
2045*0Sstevel@tonic-gate 	aiocb64.aio_fildes = fdes;
2046*0Sstevel@tonic-gate 	aiocb64.aio_buf = (caddr32_t)bufp;
2047*0Sstevel@tonic-gate 	aiocb64.aio_nbytes = bufsize;
2048*0Sstevel@tonic-gate 	aiocb64.aio_offset = offset;
2049*0Sstevel@tonic-gate 	aiocb64.aio_sigevent.sigev_notify = 0;
2050*0Sstevel@tonic-gate 	error = aio_req_setupLF(&reqp, aiop, &aiocb64, resultp, 0, vp);
2051*0Sstevel@tonic-gate #endif
2052*0Sstevel@tonic-gate 	if (error) {
2053*0Sstevel@tonic-gate 		releasef(fdes);
2054*0Sstevel@tonic-gate 		return (error);
2055*0Sstevel@tonic-gate 	}
2056*0Sstevel@tonic-gate 
2057*0Sstevel@tonic-gate 	/*
2058*0Sstevel@tonic-gate 	 * enable polling on this request if the opcode has
2059*0Sstevel@tonic-gate 	 * the AIO poll bit set
2060*0Sstevel@tonic-gate 	 */
2061*0Sstevel@tonic-gate 	if (opcode & AIO_POLL_BIT)
2062*0Sstevel@tonic-gate 		reqp->aio_req_flags |= AIO_POLL;
2063*0Sstevel@tonic-gate 
2064*0Sstevel@tonic-gate 	if (bufsize == 0) {
2065*0Sstevel@tonic-gate 		clear_active_fd(fdes);
2066*0Sstevel@tonic-gate 		aio_zerolen(reqp);
2067*0Sstevel@tonic-gate 		return (0);
2068*0Sstevel@tonic-gate 	}
2069*0Sstevel@tonic-gate 	/*
2070*0Sstevel@tonic-gate 	 * send the request to driver.
2071*0Sstevel@tonic-gate 	 * Clustering: If PXFS vnode, call PXFS function.
2072*0Sstevel@tonic-gate 	 */
2073*0Sstevel@tonic-gate 	error = (*aio_func)(vp, (aio_req_t *)&reqp->aio_req, CRED());
2074*0Sstevel@tonic-gate 	/*
2075*0Sstevel@tonic-gate 	 * the fd is stored in the aio_req_t by aio_req_setup(), and
2076*0Sstevel@tonic-gate 	 * is released by the aio_cleanup_thread() when the IO has
2077*0Sstevel@tonic-gate 	 * completed.
2078*0Sstevel@tonic-gate 	 */
2079*0Sstevel@tonic-gate 	if (error) {
2080*0Sstevel@tonic-gate 		releasef(fdes);
2081*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
2082*0Sstevel@tonic-gate 		aio_req_free(aiop, reqp);
2083*0Sstevel@tonic-gate 		aiop->aio_pending--;
2084*0Sstevel@tonic-gate 		if (aiop->aio_flags & AIO_REQ_BLOCK)
2085*0Sstevel@tonic-gate 			cv_signal(&aiop->aio_cleanupcv);
2086*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
2087*0Sstevel@tonic-gate 		return (error);
2088*0Sstevel@tonic-gate 	}
2089*0Sstevel@tonic-gate 	clear_active_fd(fdes);
2090*0Sstevel@tonic-gate 	return (0);
2091*0Sstevel@tonic-gate }
2092*0Sstevel@tonic-gate 
2093*0Sstevel@tonic-gate /*
2094*0Sstevel@tonic-gate  * Take request out of the port pending queue ...
2095*0Sstevel@tonic-gate  */
2096*0Sstevel@tonic-gate 
2097*0Sstevel@tonic-gate void
2098*0Sstevel@tonic-gate aio_deq_port_pending(aio_t *aiop, aio_req_t *reqp)
2099*0Sstevel@tonic-gate {
2100*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&aiop->aio_mutex));
2101*0Sstevel@tonic-gate 	if (reqp->aio_req_prev == NULL)
2102*0Sstevel@tonic-gate 		/* first request */
2103*0Sstevel@tonic-gate 		aiop->aio_portpending = reqp->aio_req_next;
2104*0Sstevel@tonic-gate 	else
2105*0Sstevel@tonic-gate 		reqp->aio_req_prev->aio_req_next = reqp->aio_req_next;
2106*0Sstevel@tonic-gate 	if (reqp->aio_req_next != NULL)
2107*0Sstevel@tonic-gate 		reqp->aio_req_next->aio_req_prev = reqp->aio_req_prev;
2108*0Sstevel@tonic-gate }
2109*0Sstevel@tonic-gate 
2110*0Sstevel@tonic-gate /*
2111*0Sstevel@tonic-gate  * posix version of asynchronous read and write
2112*0Sstevel@tonic-gate  */
2113*0Sstevel@tonic-gate static	int
2114*0Sstevel@tonic-gate aiorw(
2115*0Sstevel@tonic-gate 	int		opcode,
2116*0Sstevel@tonic-gate 	void		*aiocb_arg,
2117*0Sstevel@tonic-gate 	int		mode,
2118*0Sstevel@tonic-gate 	int		run_mode)
2119*0Sstevel@tonic-gate {
2120*0Sstevel@tonic-gate #ifdef _SYSCALL32_IMPL
2121*0Sstevel@tonic-gate 	aiocb32_t	aiocb32;
2122*0Sstevel@tonic-gate 	struct	sigevent32 *sigev32;
2123*0Sstevel@tonic-gate 	port_notify32_t	pntfy32;
2124*0Sstevel@tonic-gate #endif
2125*0Sstevel@tonic-gate 	aiocb64_32_t	aiocb64;
2126*0Sstevel@tonic-gate 	aiocb_t		aiocb;
2127*0Sstevel@tonic-gate 	file_t		*fp;
2128*0Sstevel@tonic-gate 	int		error, fd;
2129*0Sstevel@tonic-gate 	size_t		bufsize;
2130*0Sstevel@tonic-gate 	struct vnode	*vp;
2131*0Sstevel@tonic-gate 	aio_req_t	*reqp;
2132*0Sstevel@tonic-gate 	aio_t		*aiop;
2133*0Sstevel@tonic-gate 	int		(*aio_func)();
2134*0Sstevel@tonic-gate 	aio_result_t	*resultp;
2135*0Sstevel@tonic-gate 	struct	sigevent *sigev;
2136*0Sstevel@tonic-gate 	model_t		model;
2137*0Sstevel@tonic-gate 	int		aio_use_port = 0;
2138*0Sstevel@tonic-gate 	port_notify_t	pntfy;
2139*0Sstevel@tonic-gate 
2140*0Sstevel@tonic-gate 	model = get_udatamodel();
2141*0Sstevel@tonic-gate 	aiop = curproc->p_aio;
2142*0Sstevel@tonic-gate 	if (aiop == NULL)
2143*0Sstevel@tonic-gate 		return (EINVAL);
2144*0Sstevel@tonic-gate 
2145*0Sstevel@tonic-gate 	if (model == DATAMODEL_NATIVE) {
2146*0Sstevel@tonic-gate 		if (run_mode != AIO_LARGEFILE) {
2147*0Sstevel@tonic-gate 			if (copyin(aiocb_arg, &aiocb, sizeof (aiocb_t)))
2148*0Sstevel@tonic-gate 				return (EFAULT);
2149*0Sstevel@tonic-gate 			bufsize = aiocb.aio_nbytes;
2150*0Sstevel@tonic-gate 			resultp = &(((aiocb_t *)aiocb_arg)->aio_resultp);
2151*0Sstevel@tonic-gate 			if ((fp = getf(fd = aiocb.aio_fildes)) == NULL) {
2152*0Sstevel@tonic-gate 				return (EBADF);
2153*0Sstevel@tonic-gate 			}
2154*0Sstevel@tonic-gate 			sigev = &aiocb.aio_sigevent;
2155*0Sstevel@tonic-gate 		} else {
2156*0Sstevel@tonic-gate 			/*
2157*0Sstevel@tonic-gate 			 * We come here only when a largefile call is
2158*0Sstevel@tonic-gate 			 * made on a 32 bit kernel using the 32 bit library.
2159*0Sstevel@tonic-gate 			 */
2160*0Sstevel@tonic-gate 			if (copyin(aiocb_arg, &aiocb64, sizeof (aiocb64_32_t)))
2161*0Sstevel@tonic-gate 				return (EFAULT);
2162*0Sstevel@tonic-gate 			bufsize = aiocb64.aio_nbytes;
2163*0Sstevel@tonic-gate 			resultp = (aio_result_t *)&(((aiocb64_32_t *)aiocb_arg)
2164*0Sstevel@tonic-gate 			    ->aio_resultp);
2165*0Sstevel@tonic-gate 			if ((fp = getf(fd = aiocb64.aio_fildes)) == NULL) {
2166*0Sstevel@tonic-gate 				return (EBADF);
2167*0Sstevel@tonic-gate 			}
2168*0Sstevel@tonic-gate 			sigev = (struct sigevent *)&aiocb64.aio_sigevent;
2169*0Sstevel@tonic-gate 		}
2170*0Sstevel@tonic-gate 
2171*0Sstevel@tonic-gate 		if (sigev->sigev_notify == SIGEV_PORT) {
2172*0Sstevel@tonic-gate 			if (copyin((void *)sigev->sigev_value.sival_ptr,
2173*0Sstevel@tonic-gate 			    &pntfy, sizeof (port_notify_t))) {
2174*0Sstevel@tonic-gate 				releasef(fd);
2175*0Sstevel@tonic-gate 				return (EFAULT);
2176*0Sstevel@tonic-gate 			}
2177*0Sstevel@tonic-gate 			aio_use_port = 1;
2178*0Sstevel@tonic-gate 		}
2179*0Sstevel@tonic-gate 	}
2180*0Sstevel@tonic-gate #ifdef	_SYSCALL32_IMPL
2181*0Sstevel@tonic-gate 	else {
2182*0Sstevel@tonic-gate 		if (run_mode == AIO_32) {
2183*0Sstevel@tonic-gate 			/* 32 bit system call is being made on 64 bit kernel */
2184*0Sstevel@tonic-gate 			if (copyin(aiocb_arg, &aiocb32, sizeof (aiocb32_t)))
2185*0Sstevel@tonic-gate 				return (EFAULT);
2186*0Sstevel@tonic-gate 
2187*0Sstevel@tonic-gate 			bufsize = aiocb32.aio_nbytes;
2188*0Sstevel@tonic-gate 			aiocb_32ton(&aiocb32, &aiocb);
2189*0Sstevel@tonic-gate 			resultp = (aio_result_t *)&(((aiocb32_t *)aiocb_arg)->
2190*0Sstevel@tonic-gate 			    aio_resultp);
2191*0Sstevel@tonic-gate 			if ((fp = getf(fd = aiocb32.aio_fildes)) == NULL) {
2192*0Sstevel@tonic-gate 				return (EBADF);
2193*0Sstevel@tonic-gate 			}
2194*0Sstevel@tonic-gate 			sigev32 = &aiocb32.aio_sigevent;
2195*0Sstevel@tonic-gate 		} else if (run_mode == AIO_LARGEFILE) {
2196*0Sstevel@tonic-gate 			/*
2197*0Sstevel@tonic-gate 			 * We come here only when a largefile call is
2198*0Sstevel@tonic-gate 			 * made on a 64 bit kernel using the 32 bit library.
2199*0Sstevel@tonic-gate 			 */
2200*0Sstevel@tonic-gate 			if (copyin(aiocb_arg, &aiocb64, sizeof (aiocb64_32_t)))
2201*0Sstevel@tonic-gate 				return (EFAULT);
2202*0Sstevel@tonic-gate 			bufsize = aiocb64.aio_nbytes;
2203*0Sstevel@tonic-gate 			aiocb_LFton(&aiocb64, &aiocb);
2204*0Sstevel@tonic-gate 			resultp = (aio_result_t *)&(((aiocb64_32_t *)aiocb_arg)
2205*0Sstevel@tonic-gate 			    ->aio_resultp);
2206*0Sstevel@tonic-gate 			if ((fp = getf(fd = aiocb64.aio_fildes)) == NULL)
2207*0Sstevel@tonic-gate 				return (EBADF);
2208*0Sstevel@tonic-gate 			sigev32 = &aiocb64.aio_sigevent;
2209*0Sstevel@tonic-gate 		}
2210*0Sstevel@tonic-gate 
2211*0Sstevel@tonic-gate 		if (sigev32->sigev_notify == SIGEV_PORT) {
2212*0Sstevel@tonic-gate 			if (copyin(
2213*0Sstevel@tonic-gate 			    (void *)(uintptr_t)sigev32->sigev_value.sival_ptr,
2214*0Sstevel@tonic-gate 			    &pntfy32, sizeof (port_notify32_t))) {
2215*0Sstevel@tonic-gate 				releasef(fd);
2216*0Sstevel@tonic-gate 				return (EFAULT);
2217*0Sstevel@tonic-gate 			}
2218*0Sstevel@tonic-gate 			pntfy.portnfy_port = pntfy32.portnfy_port;
2219*0Sstevel@tonic-gate 			pntfy.portnfy_user =
2220*0Sstevel@tonic-gate 			    (void *)(uintptr_t)pntfy32.portnfy_user;
2221*0Sstevel@tonic-gate 			aio_use_port = 1;
2222*0Sstevel@tonic-gate 		}
2223*0Sstevel@tonic-gate 	}
2224*0Sstevel@tonic-gate #endif  /* _SYSCALL32_IMPL */
2225*0Sstevel@tonic-gate 
2226*0Sstevel@tonic-gate 	/*
2227*0Sstevel@tonic-gate 	 * check the permission of the partition
2228*0Sstevel@tonic-gate 	 */
2229*0Sstevel@tonic-gate 
2230*0Sstevel@tonic-gate 	if ((fp->f_flag & mode) == 0) {
2231*0Sstevel@tonic-gate 		releasef(fd);
2232*0Sstevel@tonic-gate 		return (EBADF);
2233*0Sstevel@tonic-gate 	}
2234*0Sstevel@tonic-gate 
2235*0Sstevel@tonic-gate 	vp = fp->f_vnode;
2236*0Sstevel@tonic-gate 	aio_func = check_vp(vp, mode);
2237*0Sstevel@tonic-gate 	if (aio_func == NULL) {
2238*0Sstevel@tonic-gate 		releasef(fd);
2239*0Sstevel@tonic-gate 		return (EBADFD);
2240*0Sstevel@tonic-gate 	}
2241*0Sstevel@tonic-gate 	if ((model == DATAMODEL_NATIVE) && (run_mode == AIO_LARGEFILE))
2242*0Sstevel@tonic-gate 		error = aio_req_setupLF(&reqp, aiop, &aiocb64, resultp,
2243*0Sstevel@tonic-gate 		    aio_use_port, vp);
2244*0Sstevel@tonic-gate 	else
2245*0Sstevel@tonic-gate 		error = aio_req_setup(&reqp, aiop, &aiocb, resultp,
2246*0Sstevel@tonic-gate 		    aio_use_port, vp);
2247*0Sstevel@tonic-gate 
2248*0Sstevel@tonic-gate 	if (error) {
2249*0Sstevel@tonic-gate 		releasef(fd);
2250*0Sstevel@tonic-gate 		return (error);
2251*0Sstevel@tonic-gate 	}
2252*0Sstevel@tonic-gate 	/*
2253*0Sstevel@tonic-gate 	 * enable polling on this request if the opcode has
2254*0Sstevel@tonic-gate 	 * the AIO poll bit set
2255*0Sstevel@tonic-gate 	 */
2256*0Sstevel@tonic-gate 	if (opcode & AIO_POLL_BIT)
2257*0Sstevel@tonic-gate 		reqp->aio_req_flags |= AIO_POLL;
2258*0Sstevel@tonic-gate 
2259*0Sstevel@tonic-gate 	if (model == DATAMODEL_NATIVE)
2260*0Sstevel@tonic-gate 		reqp->aio_req_iocb.iocb = aiocb_arg;
2261*0Sstevel@tonic-gate #ifdef  _SYSCALL32_IMPL
2262*0Sstevel@tonic-gate 	else
2263*0Sstevel@tonic-gate 		reqp->aio_req_iocb.iocb32 = (caddr32_t)(uintptr_t)aiocb_arg;
2264*0Sstevel@tonic-gate #endif
2265*0Sstevel@tonic-gate 
2266*0Sstevel@tonic-gate 	if (aio_use_port)
2267*0Sstevel@tonic-gate 		error = aio_req_assoc_port_rw(&pntfy, aiocb_arg, reqp);
2268*0Sstevel@tonic-gate 
2269*0Sstevel@tonic-gate 	/*
2270*0Sstevel@tonic-gate 	 * send the request to driver.
2271*0Sstevel@tonic-gate 	 * Clustering: If PXFS vnode, call PXFS function.
2272*0Sstevel@tonic-gate 	 */
2273*0Sstevel@tonic-gate 	if (error == 0) {
2274*0Sstevel@tonic-gate 		if (bufsize == 0) {
2275*0Sstevel@tonic-gate 			clear_active_fd(fd);
2276*0Sstevel@tonic-gate 			aio_zerolen(reqp);
2277*0Sstevel@tonic-gate 			return (0);
2278*0Sstevel@tonic-gate 		}
2279*0Sstevel@tonic-gate 		error = (*aio_func)(vp, (aio_req_t *)&reqp->aio_req, CRED());
2280*0Sstevel@tonic-gate 	}
2281*0Sstevel@tonic-gate 
2282*0Sstevel@tonic-gate 	/*
2283*0Sstevel@tonic-gate 	 * the fd is stored in the aio_req_t by aio_req_setup(), and
2284*0Sstevel@tonic-gate 	 * is released by the aio_cleanup_thread() when the IO has
2285*0Sstevel@tonic-gate 	 * completed.
2286*0Sstevel@tonic-gate 	 */
2287*0Sstevel@tonic-gate 	if (error) {
2288*0Sstevel@tonic-gate 		releasef(fd);
2289*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
2290*0Sstevel@tonic-gate 		aio_deq_port_pending(aiop, reqp);
2291*0Sstevel@tonic-gate 		aio_req_free(aiop, reqp);
2292*0Sstevel@tonic-gate 		aiop->aio_pending--;
2293*0Sstevel@tonic-gate 		if (aiop->aio_flags & AIO_REQ_BLOCK)
2294*0Sstevel@tonic-gate 			cv_signal(&aiop->aio_cleanupcv);
2295*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
2296*0Sstevel@tonic-gate 		return (error);
2297*0Sstevel@tonic-gate 	}
2298*0Sstevel@tonic-gate 	clear_active_fd(fd);
2299*0Sstevel@tonic-gate 	return (0);
2300*0Sstevel@tonic-gate }
2301*0Sstevel@tonic-gate 
2302*0Sstevel@tonic-gate 
2303*0Sstevel@tonic-gate /*
2304*0Sstevel@tonic-gate  * set error for a list IO entry that failed.
2305*0Sstevel@tonic-gate  */
2306*0Sstevel@tonic-gate static void
2307*0Sstevel@tonic-gate lio_set_error(aio_req_t *reqp)
2308*0Sstevel@tonic-gate {
2309*0Sstevel@tonic-gate 	aio_t *aiop = curproc->p_aio;
2310*0Sstevel@tonic-gate 
2311*0Sstevel@tonic-gate 	if (aiop == NULL)
2312*0Sstevel@tonic-gate 		return;
2313*0Sstevel@tonic-gate 
2314*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
2315*0Sstevel@tonic-gate 	aio_deq_port_pending(aiop, reqp);
2316*0Sstevel@tonic-gate 	aiop->aio_pending--;
2317*0Sstevel@tonic-gate 	/* request failed, AIO_PHYSIODONE set to avoid physio cleanup. */
2318*0Sstevel@tonic-gate 	reqp->aio_req_flags |= AIO_PHYSIODONE;
2319*0Sstevel@tonic-gate 	/*
2320*0Sstevel@tonic-gate 	 * Need to free the request now as it is never
2321*0Sstevel@tonic-gate 	 * going to get on the done queue.
2322*0Sstevel@tonic-gate 	 *
2323*0Sstevel@tonic-gate 	 * Note: aio_outstanding is decremented in
2324*0Sstevel@tonic-gate 	 *	 aio_req_free()
2325*0Sstevel@tonic-gate 	 */
2326*0Sstevel@tonic-gate 	aio_req_free(aiop, reqp);
2327*0Sstevel@tonic-gate 	if (aiop->aio_flags & AIO_REQ_BLOCK)
2328*0Sstevel@tonic-gate 		cv_signal(&aiop->aio_cleanupcv);
2329*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_mutex);
2330*0Sstevel@tonic-gate }
2331*0Sstevel@tonic-gate 
2332*0Sstevel@tonic-gate /*
2333*0Sstevel@tonic-gate  * check if a specified request is done, and remove it from
2334*0Sstevel@tonic-gate  * the done queue. Otherwise, if NULL is specified, remove the
2335*0Sstevel@tonic-gate  * request at the head of the done queue.
2336*0Sstevel@tonic-gate  */
2337*0Sstevel@tonic-gate static aio_req_t *
2338*0Sstevel@tonic-gate aio_req_done(void *resultp)
2339*0Sstevel@tonic-gate {
2340*0Sstevel@tonic-gate 	aio_req_t **bucket;
2341*0Sstevel@tonic-gate 	aio_req_t *ent;
2342*0Sstevel@tonic-gate 	aio_t *aiop = curproc->p_aio;
2343*0Sstevel@tonic-gate 	long index;
2344*0Sstevel@tonic-gate 
2345*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&aiop->aio_cleanupq_mutex));
2346*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&aiop->aio_mutex));
2347*0Sstevel@tonic-gate 
2348*0Sstevel@tonic-gate 	if (resultp) {
2349*0Sstevel@tonic-gate 		index = AIO_HASH(resultp);
2350*0Sstevel@tonic-gate 		bucket = &aiop->aio_hash[index];
2351*0Sstevel@tonic-gate 		for (ent = *bucket; ent != NULL; ent = ent->aio_hash_next) {
2352*0Sstevel@tonic-gate 			if (ent->aio_req_resultp == (aio_result_t *)resultp) {
2353*0Sstevel@tonic-gate 				if (ent->aio_req_flags & AIO_DONEQ) {
2354*0Sstevel@tonic-gate 					return (aio_req_remove(ent));
2355*0Sstevel@tonic-gate 				}
2356*0Sstevel@tonic-gate 				return (NULL);
2357*0Sstevel@tonic-gate 			}
2358*0Sstevel@tonic-gate 		}
2359*0Sstevel@tonic-gate 		/* no match, resultp is invalid */
2360*0Sstevel@tonic-gate 		return (NULL);
2361*0Sstevel@tonic-gate 	}
2362*0Sstevel@tonic-gate 	return (aio_req_remove(NULL));
2363*0Sstevel@tonic-gate }
2364*0Sstevel@tonic-gate 
2365*0Sstevel@tonic-gate /*
2366*0Sstevel@tonic-gate  * determine if a user-level resultp pointer is associated with an
2367*0Sstevel@tonic-gate  * active IO request. Zero is returned when the request is done,
2368*0Sstevel@tonic-gate  * and the request is removed from the done queue. Only when the
2369*0Sstevel@tonic-gate  * return value is zero is the "reqp" pointer valid. One is returned
2370*0Sstevel@tonic-gate  * when the request is in progress. Two is returned when the request
2371*0Sstevel@tonic-gate  * is invalid.
2372*0Sstevel@tonic-gate  */
2373*0Sstevel@tonic-gate static int
2374*0Sstevel@tonic-gate aio_req_find(aio_result_t *resultp, aio_req_t **reqp)
2375*0Sstevel@tonic-gate {
2376*0Sstevel@tonic-gate 	aio_req_t **bucket;
2377*0Sstevel@tonic-gate 	aio_req_t *ent;
2378*0Sstevel@tonic-gate 	aio_t *aiop = curproc->p_aio;
2379*0Sstevel@tonic-gate 	long index;
2380*0Sstevel@tonic-gate 
2381*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&aiop->aio_cleanupq_mutex));
2382*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&aiop->aio_mutex));
2383*0Sstevel@tonic-gate 
2384*0Sstevel@tonic-gate 	index = AIO_HASH(resultp);
2385*0Sstevel@tonic-gate 	bucket = &aiop->aio_hash[index];
2386*0Sstevel@tonic-gate 	for (ent = *bucket; ent != NULL; ent = ent->aio_hash_next) {
2387*0Sstevel@tonic-gate 		if (ent->aio_req_resultp == resultp) {
2388*0Sstevel@tonic-gate 			if (ent->aio_req_flags & AIO_DONEQ) {
2389*0Sstevel@tonic-gate 				*reqp = aio_req_remove(ent);
2390*0Sstevel@tonic-gate 				return (0);
2391*0Sstevel@tonic-gate 			}
2392*0Sstevel@tonic-gate 			return (1);
2393*0Sstevel@tonic-gate 		}
2394*0Sstevel@tonic-gate 	}
2395*0Sstevel@tonic-gate 	/* no match, resultp is invalid */
2396*0Sstevel@tonic-gate 	return (2);
2397*0Sstevel@tonic-gate }
2398*0Sstevel@tonic-gate 
2399*0Sstevel@tonic-gate /*
2400*0Sstevel@tonic-gate  * remove a request from the done queue.
2401*0Sstevel@tonic-gate  */
2402*0Sstevel@tonic-gate static aio_req_t *
2403*0Sstevel@tonic-gate aio_req_remove(aio_req_t *reqp)
2404*0Sstevel@tonic-gate {
2405*0Sstevel@tonic-gate 	aio_t *aiop = curproc->p_aio;
2406*0Sstevel@tonic-gate 	aio_req_t *head;
2407*0Sstevel@tonic-gate 
2408*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&aiop->aio_mutex));
2409*0Sstevel@tonic-gate 
2410*0Sstevel@tonic-gate 	if (reqp) {
2411*0Sstevel@tonic-gate 		ASSERT(reqp->aio_req_flags & AIO_DONEQ);
2412*0Sstevel@tonic-gate 		if (reqp->aio_req_next == reqp) {
2413*0Sstevel@tonic-gate 			/* only one request on queue */
2414*0Sstevel@tonic-gate 			if (reqp ==  aiop->aio_doneq) {
2415*0Sstevel@tonic-gate 				aiop->aio_doneq = NULL;
2416*0Sstevel@tonic-gate 			} else {
2417*0Sstevel@tonic-gate 				ASSERT(reqp == aiop->aio_cleanupq);
2418*0Sstevel@tonic-gate 				aiop->aio_cleanupq = NULL;
2419*0Sstevel@tonic-gate 			}
2420*0Sstevel@tonic-gate 		} else {
2421*0Sstevel@tonic-gate 			reqp->aio_req_next->aio_req_prev = reqp->aio_req_prev;
2422*0Sstevel@tonic-gate 			reqp->aio_req_prev->aio_req_next = reqp->aio_req_next;
2423*0Sstevel@tonic-gate 			/*
2424*0Sstevel@tonic-gate 			 * The request can be either on the aio_doneq or the
2425*0Sstevel@tonic-gate 			 * aio_cleanupq
2426*0Sstevel@tonic-gate 			 */
2427*0Sstevel@tonic-gate 			if (reqp == aiop->aio_doneq)
2428*0Sstevel@tonic-gate 				aiop->aio_doneq = reqp->aio_req_next;
2429*0Sstevel@tonic-gate 
2430*0Sstevel@tonic-gate 			if (reqp == aiop->aio_cleanupq)
2431*0Sstevel@tonic-gate 				aiop->aio_cleanupq = reqp->aio_req_next;
2432*0Sstevel@tonic-gate 		}
2433*0Sstevel@tonic-gate 		reqp->aio_req_flags &= ~AIO_DONEQ;
2434*0Sstevel@tonic-gate 		return (reqp);
2435*0Sstevel@tonic-gate 	}
2436*0Sstevel@tonic-gate 
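	/*
	 * No specific request was given: dequeue the request at the
	 * head of the done queue, if the queue is not empty.
	 */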
2437*0Sstevel@tonic-gate 	if (aiop->aio_doneq) {
2438*0Sstevel@tonic-gate 		head = aiop->aio_doneq;
2439*0Sstevel@tonic-gate 		ASSERT(head->aio_req_flags & AIO_DONEQ);
2440*0Sstevel@tonic-gate 		if (head == head->aio_req_next) {
2441*0Sstevel@tonic-gate 			/* only one request on queue */
2442*0Sstevel@tonic-gate 			aiop->aio_doneq = NULL;
2443*0Sstevel@tonic-gate 		} else {
2444*0Sstevel@tonic-gate 			head->aio_req_prev->aio_req_next = head->aio_req_next;
2445*0Sstevel@tonic-gate 			head->aio_req_next->aio_req_prev = head->aio_req_prev;
2446*0Sstevel@tonic-gate 			aiop->aio_doneq = head->aio_req_next;
2447*0Sstevel@tonic-gate 		}
2448*0Sstevel@tonic-gate 		head->aio_req_flags &= ~AIO_DONEQ;
2449*0Sstevel@tonic-gate 		return (head);
2450*0Sstevel@tonic-gate 	}
2451*0Sstevel@tonic-gate 	return (NULL);
2452*0Sstevel@tonic-gate }
2453*0Sstevel@tonic-gate 
2454*0Sstevel@tonic-gate static int
2455*0Sstevel@tonic-gate aio_req_setup(
2456*0Sstevel@tonic-gate 	aio_req_t	**reqpp,
2457*0Sstevel@tonic-gate 	aio_t 		*aiop,
2458*0Sstevel@tonic-gate 	aiocb_t 	*arg,
2459*0Sstevel@tonic-gate 	aio_result_t 	*resultp,
2460*0Sstevel@tonic-gate 	int		port,
2461*0Sstevel@tonic-gate 	vnode_t		*vp)
2462*0Sstevel@tonic-gate {
2463*0Sstevel@tonic-gate 	aio_req_t 	*reqp;
2464*0Sstevel@tonic-gate 	sigqueue_t	*sqp;
2465*0Sstevel@tonic-gate 	struct uio 	*uio;
2466*0Sstevel@tonic-gate 
2467*0Sstevel@tonic-gate 	struct sigevent *sigev;
2468*0Sstevel@tonic-gate 	int		error;
2469*0Sstevel@tonic-gate 
2470*0Sstevel@tonic-gate 	sigev = &arg->aio_sigevent;
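	/*
	 * If the caller asked for SIGEV_SIGNAL notification, preallocate
	 * the sigqueue entry now.  The allocation is KM_NOSLEEP, so memory
	 * pressure surfaces here as EAGAIN instead of blocking the caller.
	 */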
2471*0Sstevel@tonic-gate 	if ((sigev->sigev_notify == SIGEV_SIGNAL) &&
2472*0Sstevel@tonic-gate 	    (sigev->sigev_signo > 0 && sigev->sigev_signo < NSIG)) {
2473*0Sstevel@tonic-gate 		sqp = kmem_zalloc(sizeof (sigqueue_t), KM_NOSLEEP);
2474*0Sstevel@tonic-gate 		if (sqp == NULL)
2475*0Sstevel@tonic-gate 			return (EAGAIN);
2476*0Sstevel@tonic-gate 		sqp->sq_func = NULL;
2477*0Sstevel@tonic-gate 		sqp->sq_next = NULL;
2478*0Sstevel@tonic-gate 		sqp->sq_info.si_code = SI_ASYNCIO;
2479*0Sstevel@tonic-gate 		sqp->sq_info.si_pid = curproc->p_pid;
2480*0Sstevel@tonic-gate 		sqp->sq_info.si_ctid = PRCTID(curproc);
2481*0Sstevel@tonic-gate 		sqp->sq_info.si_zoneid = getzoneid();
2482*0Sstevel@tonic-gate 		sqp->sq_info.si_uid = crgetuid(curproc->p_cred);
2483*0Sstevel@tonic-gate 		sqp->sq_info.si_signo = sigev->sigev_signo;
2484*0Sstevel@tonic-gate 		sqp->sq_info.si_value = sigev->sigev_value;
2485*0Sstevel@tonic-gate 	} else
2486*0Sstevel@tonic-gate 		sqp = NULL;
2487*0Sstevel@tonic-gate 
2488*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
2489*0Sstevel@tonic-gate 
2490*0Sstevel@tonic-gate 	if (aiop->aio_flags & AIO_REQ_BLOCK) {
2491*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
2492*0Sstevel@tonic-gate 		if (sqp)
2493*0Sstevel@tonic-gate 			kmem_free(sqp, sizeof (sigqueue_t));
2494*0Sstevel@tonic-gate 		return (EIO);
2495*0Sstevel@tonic-gate 	}
2496*0Sstevel@tonic-gate 	/*
2497*0Sstevel@tonic-gate 	 * get an aio_reqp from the free list or allocate one
2498*0Sstevel@tonic-gate 	 * from dynamic memory.
2499*0Sstevel@tonic-gate 	 */
2500*0Sstevel@tonic-gate 	if (error = aio_req_alloc(&reqp, resultp)) {
2501*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
2502*0Sstevel@tonic-gate 		if (sqp)
2503*0Sstevel@tonic-gate 			kmem_free(sqp, sizeof (sigqueue_t));
2504*0Sstevel@tonic-gate 		return (error);
2505*0Sstevel@tonic-gate 	}
2506*0Sstevel@tonic-gate 	aiop->aio_pending++;
2507*0Sstevel@tonic-gate 	aiop->aio_outstanding++;
2508*0Sstevel@tonic-gate 	reqp->aio_req_flags = AIO_PENDING;
2509*0Sstevel@tonic-gate 	if (port)
2510*0Sstevel@tonic-gate 		aio_enq_port_pending(aiop, reqp);
2511*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_mutex);
2512*0Sstevel@tonic-gate 	/*
2513*0Sstevel@tonic-gate 	 * initialize aio request.
2514*0Sstevel@tonic-gate 	 */
2515*0Sstevel@tonic-gate 	reqp->aio_req_fd = arg->aio_fildes;
2516*0Sstevel@tonic-gate 	reqp->aio_req_sigqp = sqp;
2517*0Sstevel@tonic-gate 	reqp->aio_req_iocb.iocb = NULL;
2518*0Sstevel@tonic-gate 	reqp->aio_req_buf.b_file = vp;
2519*0Sstevel@tonic-gate 	uio = reqp->aio_req.aio_uio;
2520*0Sstevel@tonic-gate 	uio->uio_iovcnt = 1;
2521*0Sstevel@tonic-gate 	uio->uio_iov->iov_base = (caddr_t)arg->aio_buf;
2522*0Sstevel@tonic-gate 	uio->uio_iov->iov_len = arg->aio_nbytes;
2523*0Sstevel@tonic-gate 	uio->uio_loffset = arg->aio_offset;
2524*0Sstevel@tonic-gate 	*reqpp = reqp;
2525*0Sstevel@tonic-gate 	return (0);
2526*0Sstevel@tonic-gate }
2527*0Sstevel@tonic-gate 
2528*0Sstevel@tonic-gate /*
2529*0Sstevel@tonic-gate  * Allocate p_aio struct.
2530*0Sstevel@tonic-gate  */
2531*0Sstevel@tonic-gate static aio_t *
2532*0Sstevel@tonic-gate aio_aiop_alloc(void)
2533*0Sstevel@tonic-gate {
2534*0Sstevel@tonic-gate 	aio_t	*aiop;
2535*0Sstevel@tonic-gate 
2536*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&curproc->p_lock));
2537*0Sstevel@tonic-gate 
2538*0Sstevel@tonic-gate 	aiop = kmem_zalloc(sizeof (struct aio), KM_NOSLEEP);
2539*0Sstevel@tonic-gate 	if (aiop) {
2540*0Sstevel@tonic-gate 		mutex_init(&aiop->aio_mutex, NULL, MUTEX_DEFAULT, NULL);
2541*0Sstevel@tonic-gate 		mutex_init(&aiop->aio_cleanupq_mutex, NULL, MUTEX_DEFAULT,
2542*0Sstevel@tonic-gate 									NULL);
2543*0Sstevel@tonic-gate 		mutex_init(&aiop->aio_portq_mutex, NULL, MUTEX_DEFAULT, NULL);
2544*0Sstevel@tonic-gate 	}
2545*0Sstevel@tonic-gate 	return (aiop);
2546*0Sstevel@tonic-gate }
2547*0Sstevel@tonic-gate 
2548*0Sstevel@tonic-gate /*
2549*0Sstevel@tonic-gate  * Allocate an aio_req struct.
2550*0Sstevel@tonic-gate  */
2551*0Sstevel@tonic-gate static int
2552*0Sstevel@tonic-gate aio_req_alloc(aio_req_t **nreqp, aio_result_t *resultp)
2553*0Sstevel@tonic-gate {
2554*0Sstevel@tonic-gate 	aio_req_t *reqp;
2555*0Sstevel@tonic-gate 	aio_t *aiop = curproc->p_aio;
2556*0Sstevel@tonic-gate 
2557*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&aiop->aio_mutex));
2558*0Sstevel@tonic-gate 
2559*0Sstevel@tonic-gate 	if ((reqp = aiop->aio_free) != NULL) {
2560*0Sstevel@tonic-gate 		reqp->aio_req_flags = 0;
2561*0Sstevel@tonic-gate 		aiop->aio_free = reqp->aio_req_next;
2562*0Sstevel@tonic-gate 		/*
2563*0Sstevel@tonic-gate 		 * Clustering: This field has to be specifically
2564*0Sstevel@tonic-gate 		 * set to null so that the right thing can be
2565*0Sstevel@tonic-gate 		 * done in aphysio()
2566*0Sstevel@tonic-gate 		 */
2567*0Sstevel@tonic-gate 		reqp->aio_req_buf.b_iodone = NULL;
2568*0Sstevel@tonic-gate 	} else {
2569*0Sstevel@tonic-gate 		/*
2570*0Sstevel@tonic-gate 		 * Check whether memory is getting tight.
2571*0Sstevel@tonic-gate 		 * This is a temporary mechanism to avoid memory
2572*0Sstevel@tonic-gate 		 * exhaustion by a single process until we come up
2573*0Sstevel@tonic-gate 		 * with a per process solution such as setrlimit().
2574*0Sstevel@tonic-gate 		 */
2575*0Sstevel@tonic-gate 		if (freemem < desfree)
2576*0Sstevel@tonic-gate 			return (EAGAIN);
2577*0Sstevel@tonic-gate 
2578*0Sstevel@tonic-gate 		reqp = kmem_zalloc(sizeof (struct aio_req_t), KM_NOSLEEP);
2579*0Sstevel@tonic-gate 		if (reqp == NULL)
2580*0Sstevel@tonic-gate 			return (EAGAIN);
2581*0Sstevel@tonic-gate 		reqp->aio_req.aio_uio = &(reqp->aio_req_uio);
2582*0Sstevel@tonic-gate 		reqp->aio_req.aio_uio->uio_iov = &(reqp->aio_req_iov);
2583*0Sstevel@tonic-gate 		reqp->aio_req.aio_private = reqp;
2584*0Sstevel@tonic-gate 	}
2585*0Sstevel@tonic-gate 
2586*0Sstevel@tonic-gate 	reqp->aio_req_buf.b_offset = -1;
2587*0Sstevel@tonic-gate 	reqp->aio_req_resultp = resultp;
2588*0Sstevel@tonic-gate 	if (aio_hash_insert(reqp, aiop)) {
2589*0Sstevel@tonic-gate 		reqp->aio_req_next = aiop->aio_free;
2590*0Sstevel@tonic-gate 		aiop->aio_free = reqp;
2591*0Sstevel@tonic-gate 		return (EINVAL);
2592*0Sstevel@tonic-gate 	}
2593*0Sstevel@tonic-gate 	*nreqp = reqp;
2594*0Sstevel@tonic-gate 	return (0);
2595*0Sstevel@tonic-gate }
2596*0Sstevel@tonic-gate 
2597*0Sstevel@tonic-gate /*
2598*0Sstevel@tonic-gate  * Allocate an aio_lio_t struct.
2599*0Sstevel@tonic-gate  */
2600*0Sstevel@tonic-gate static int
2601*0Sstevel@tonic-gate aio_lio_alloc(aio_lio_t **head)
2602*0Sstevel@tonic-gate {
2603*0Sstevel@tonic-gate 	aio_lio_t *liop;
2604*0Sstevel@tonic-gate 	aio_t *aiop = curproc->p_aio;
2605*0Sstevel@tonic-gate 
2606*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&aiop->aio_mutex));
2607*0Sstevel@tonic-gate 
2608*0Sstevel@tonic-gate 	if ((liop = aiop->aio_lio_free) != NULL) {
2609*0Sstevel@tonic-gate 		aiop->aio_lio_free = liop->lio_next;
2610*0Sstevel@tonic-gate 	} else {
2611*0Sstevel@tonic-gate 		/*
2612*0Sstevel@tonic-gate 		 * Check whether memory is getting tight.
2613*0Sstevel@tonic-gate 		 * This is a temporary mechanism to avoid memory
2614*0Sstevel@tonic-gate 		 * exhaustion by a single process until we come up
2615*0Sstevel@tonic-gate 		 * with a per process solution such as setrlimit().
2616*0Sstevel@tonic-gate 		 */
2617*0Sstevel@tonic-gate 		if (freemem < desfree)
2618*0Sstevel@tonic-gate 			return (EAGAIN);
2619*0Sstevel@tonic-gate 
2620*0Sstevel@tonic-gate 		liop = kmem_zalloc(sizeof (aio_lio_t), KM_NOSLEEP);
2621*0Sstevel@tonic-gate 		if (liop == NULL)
2622*0Sstevel@tonic-gate 			return (EAGAIN);
2623*0Sstevel@tonic-gate 	}
2624*0Sstevel@tonic-gate 	*head = liop;
2625*0Sstevel@tonic-gate 	return (0);
2626*0Sstevel@tonic-gate }
2627*0Sstevel@tonic-gate 
2628*0Sstevel@tonic-gate /*
2629*0Sstevel@tonic-gate  * this is a special per-process thread that is only activated if
2630*0Sstevel@tonic-gate  * the process is unmapping a segment with outstanding aio. normally,
2631*0Sstevel@tonic-gate  * the process will have completed the aio before unmapping the
2632*0Sstevel@tonic-gate  * segment. If the process does unmap a segment with outstanding aio,
2633*0Sstevel@tonic-gate  * this special thread will guarantee that the locked pages due to
2634*0Sstevel@tonic-gate  * aphysio() are released, thereby permitting the segment to be
2635*0Sstevel@tonic-gate  * unmapped.
2636*0Sstevel@tonic-gate  */
2637*0Sstevel@tonic-gate 
2638*0Sstevel@tonic-gate static int
2639*0Sstevel@tonic-gate aio_cleanup_thread(aio_t *aiop)
2640*0Sstevel@tonic-gate {
2641*0Sstevel@tonic-gate 	proc_t *p = curproc;
2642*0Sstevel@tonic-gate 	struct as *as = p->p_as;
2643*0Sstevel@tonic-gate 	int poked = 0;
2644*0Sstevel@tonic-gate 	kcondvar_t *cvp;
2645*0Sstevel@tonic-gate 	int exit_flag = 0;
2646*0Sstevel@tonic-gate 
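	/*
	 * Block every signal this thread may legally block; cantmask holds
	 * the signals that can never be blocked.
	 */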
2647*0Sstevel@tonic-gate 	sigfillset(&curthread->t_hold);
2648*0Sstevel@tonic-gate 	sigdiffset(&curthread->t_hold, &cantmask);
2649*0Sstevel@tonic-gate 	for (;;) {
2650*0Sstevel@tonic-gate 		/*
2651*0Sstevel@tonic-gate 		 * if a segment is being unmapped, and the current
2652*0Sstevel@tonic-gate 		 * process's done queue is not empty, then every request
2653*0Sstevel@tonic-gate 		 * on the doneq with locked resources should be forced
2654*0Sstevel@tonic-gate 		 * to release their locks. By moving the doneq request
2655*0Sstevel@tonic-gate 		 * to the cleanupq, aio_cleanup() will process the cleanupq,
2656*0Sstevel@tonic-gate 		 * and place requests back onto the doneq. All requests
2657*0Sstevel@tonic-gate 		 * processed by aio_cleanup() will have their physical
2658*0Sstevel@tonic-gate 		 * resources unlocked.
2659*0Sstevel@tonic-gate 		 */
2660*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
2661*0Sstevel@tonic-gate 		if ((aiop->aio_flags & AIO_CLEANUP) == 0) {
2662*0Sstevel@tonic-gate 			aiop->aio_flags |= AIO_CLEANUP;
2663*0Sstevel@tonic-gate 			mutex_enter(&as->a_contents);
2664*0Sstevel@tonic-gate 			if (AS_ISUNMAPWAIT(as) && aiop->aio_doneq) {
2665*0Sstevel@tonic-gate 				aio_req_t *doneqhead = aiop->aio_doneq;
2666*0Sstevel@tonic-gate 				mutex_exit(&as->a_contents);
2667*0Sstevel@tonic-gate 				aiop->aio_doneq = NULL;
2668*0Sstevel@tonic-gate 				aio_cleanupq_concat(aiop, doneqhead, AIO_DONEQ);
2669*0Sstevel@tonic-gate 			} else {
2670*0Sstevel@tonic-gate 				mutex_exit(&as->a_contents);
2671*0Sstevel@tonic-gate 			}
2672*0Sstevel@tonic-gate 		}
2673*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
2674*0Sstevel@tonic-gate 		aio_cleanup(AIO_CLEANUP_THREAD);
2675*0Sstevel@tonic-gate 		/*
2676*0Sstevel@tonic-gate 		 * thread should block on the cleanupcv while
2677*0Sstevel@tonic-gate 		 * AIO_CLEANUP is set.
2678*0Sstevel@tonic-gate 		 */
2679*0Sstevel@tonic-gate 		cvp = &aiop->aio_cleanupcv;
2680*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
2681*0Sstevel@tonic-gate 
2682*0Sstevel@tonic-gate 		if (aiop->aio_pollq != NULL || aiop->aio_cleanupq != NULL ||
2683*0Sstevel@tonic-gate 		    aiop->aio_notifyq != NULL ||
2684*0Sstevel@tonic-gate 		    aiop->aio_portcleanupq != NULL) {
2685*0Sstevel@tonic-gate 			mutex_exit(&aiop->aio_mutex);
2686*0Sstevel@tonic-gate 			continue;
2687*0Sstevel@tonic-gate 		}
2688*0Sstevel@tonic-gate 		mutex_enter(&as->a_contents);
2689*0Sstevel@tonic-gate 
2690*0Sstevel@tonic-gate 		/*
2691*0Sstevel@tonic-gate 		 * AIO_CLEANUP determines when the cleanup thread
2692*0Sstevel@tonic-gate 		 * should be active. This flag is only set when
2693*0Sstevel@tonic-gate 		 * the cleanup thread is awakened by as_unmap().
2694*0Sstevel@tonic-gate 		 * The flag is cleared when the blocking as_unmap()
2695*0Sstevel@tonic-gate 		 * that originally awakened us is allowed to
2696*0Sstevel@tonic-gate 		 * complete. as_unmap() blocks when trying to
2697*0Sstevel@tonic-gate 		 * unmap a segment that has SOFTLOCKed pages. When
2698*0Sstevel@tonic-gate 		 * the segment's pages are all SOFTUNLOCKed,
2699*0Sstevel@tonic-gate 		 * as->a_flags & AS_UNMAPWAIT should be zero. The flag
2700*0Sstevel@tonic-gate 		 * shouldn't be cleared right away if the cleanup thread
2701*0Sstevel@tonic-gate 		 * was interrupted because the process is doing forkall().
2702*0Sstevel@tonic-gate 		 * This happens when cv_wait_sig() returns zero,
2703*0Sstevel@tonic-gate 		 * because it was awakened by a pokelwps(). If the
2704*0Sstevel@tonic-gate 		 * process is not exiting, it must be doing forkall().
2705*0Sstevel@tonic-gate 		 */
2706*0Sstevel@tonic-gate 		if ((poked == 0) &&
2707*0Sstevel@tonic-gate 		    ((AS_ISUNMAPWAIT(as) == 0) || (aiop->aio_pending == 0))) {
2708*0Sstevel@tonic-gate 			aiop->aio_flags &= ~(AIO_CLEANUP | AIO_CLEANUP_PORT);
2709*0Sstevel@tonic-gate 			cvp = &as->a_cv;
2710*0Sstevel@tonic-gate 		}
2711*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
2712*0Sstevel@tonic-gate 		if (poked) {
2713*0Sstevel@tonic-gate 			/*
2714*0Sstevel@tonic-gate 			 * If the process is exiting/killed, don't return
2715*0Sstevel@tonic-gate 			 * immediately without waiting for pending I/O's
2716*0Sstevel@tonic-gate 			 * and releasing the page locks.
2717*0Sstevel@tonic-gate 			 */
2718*0Sstevel@tonic-gate 			if (p->p_flag & (SEXITLWPS|SKILLED)) {
2719*0Sstevel@tonic-gate 				/*
2720*0Sstevel@tonic-gate 				 * If exit_flag is set, then it is
2721*0Sstevel@tonic-gate 				 * safe to exit because we have released
2722*0Sstevel@tonic-gate 				 * page locks of completed I/O's.
2723*0Sstevel@tonic-gate 				 */
2724*0Sstevel@tonic-gate 				if (exit_flag)
2725*0Sstevel@tonic-gate 					break;
2726*0Sstevel@tonic-gate 
2727*0Sstevel@tonic-gate 				mutex_exit(&as->a_contents);
2728*0Sstevel@tonic-gate 
2729*0Sstevel@tonic-gate 				/*
2730*0Sstevel@tonic-gate 				 * Wait for all the pending aio to complete.
2731*0Sstevel@tonic-gate 				 */
2732*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
2733*0Sstevel@tonic-gate 				aiop->aio_flags |= AIO_REQ_BLOCK;
2734*0Sstevel@tonic-gate 				while (aiop->aio_pending != 0)
2735*0Sstevel@tonic-gate 					cv_wait(&aiop->aio_cleanupcv,
2736*0Sstevel@tonic-gate 						&aiop->aio_mutex);
2737*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
2738*0Sstevel@tonic-gate 				exit_flag = 1;
2739*0Sstevel@tonic-gate 				continue;
2740*0Sstevel@tonic-gate 			} else if (p->p_flag &
2741*0Sstevel@tonic-gate 			    (SHOLDFORK|SHOLDFORK1|SHOLDWATCH)) {
2742*0Sstevel@tonic-gate 				/*
2743*0Sstevel@tonic-gate 				 * hold LWP until it
2744*0Sstevel@tonic-gate 				 * is continued.
2745*0Sstevel@tonic-gate 				 */
2746*0Sstevel@tonic-gate 				mutex_exit(&as->a_contents);
2747*0Sstevel@tonic-gate 				mutex_enter(&p->p_lock);
2748*0Sstevel@tonic-gate 				stop(PR_SUSPENDED, SUSPEND_NORMAL);
2749*0Sstevel@tonic-gate 				mutex_exit(&p->p_lock);
2750*0Sstevel@tonic-gate 				poked = 0;
2751*0Sstevel@tonic-gate 				continue;
2752*0Sstevel@tonic-gate 			}
2753*0Sstevel@tonic-gate 		} else {
2754*0Sstevel@tonic-gate 			/*
2755*0Sstevel@tonic-gate 			 * When started this thread will sleep on as->a_cv.
2756*0Sstevel@tonic-gate 			 * as_unmap will awaken this thread if the
2757*0Sstevel@tonic-gate 			 * segment has SOFTLOCKed pages (poked = 0).
2758*0Sstevel@tonic-gate 			 * 1. pokelwps() awakens this thread =>
2759*0Sstevel@tonic-gate 			 *    break the loop to check SEXITLWPS, SHOLDFORK, etc.
2760*0Sstevel@tonic-gate 			 * 2. as_unmap awakens this thread =>
2761*0Sstevel@tonic-gate 			 *    to break the loop it is necessary that
2762*0Sstevel@tonic-gate 			 *    - AS_UNMAPWAIT is set (as_unmap is waiting for
2763*0Sstevel@tonic-gate 			 *	memory to be unlocked)
2764*0Sstevel@tonic-gate 			 *    - some transactions are still pending
2765*0Sstevel@tonic-gate 			 *    - AIO_CLEANUP is not set
2766*0Sstevel@tonic-gate 			 *	(if AIO_CLEANUP is set we have to wait for
2767*0Sstevel@tonic-gate 			 *	pending requests. aio_done will send a signal
2768*0Sstevel@tonic-gate 			 *	for every request which completes to continue
2769*0Sstevel@tonic-gate 			 *	unmapping the corresponding address range)
2770*0Sstevel@tonic-gate 			 */
2771*0Sstevel@tonic-gate 			while (poked == 0) {
2772*0Sstevel@tonic-gate 				if ((AS_ISUNMAPWAIT(as) != 0) &&
2773*0Sstevel@tonic-gate 				    (aiop->aio_pending != 0) &&
2774*0Sstevel@tonic-gate 				    ((aiop->aio_flags & AIO_CLEANUP) == 0))
2775*0Sstevel@tonic-gate 					break;
2776*0Sstevel@tonic-gate 				poked = !cv_wait_sig(cvp, &as->a_contents);
2777*0Sstevel@tonic-gate 				if (AS_ISUNMAPWAIT(as) == 0)
2778*0Sstevel@tonic-gate 					cv_signal(cvp);
2779*0Sstevel@tonic-gate 				if (aiop->aio_outstanding != 0)
2780*0Sstevel@tonic-gate 					break;
2781*0Sstevel@tonic-gate 			}
2782*0Sstevel@tonic-gate 		}
2783*0Sstevel@tonic-gate 		mutex_exit(&as->a_contents);
2784*0Sstevel@tonic-gate 	}
2785*0Sstevel@tonic-gate exit:
2786*0Sstevel@tonic-gate 	mutex_exit(&as->a_contents);
2787*0Sstevel@tonic-gate 	ASSERT((curproc->p_flag & (SEXITLWPS|SKILLED)));
2788*0Sstevel@tonic-gate 	aston(curthread);	/* make thread do post_syscall */
2789*0Sstevel@tonic-gate 	return (0);
2790*0Sstevel@tonic-gate }
2791*0Sstevel@tonic-gate 
2792*0Sstevel@tonic-gate /*
2793*0Sstevel@tonic-gate  * save a reference to a user's outstanding aio in a hash list.
2794*0Sstevel@tonic-gate  */
2795*0Sstevel@tonic-gate static int
2796*0Sstevel@tonic-gate aio_hash_insert(
2797*0Sstevel@tonic-gate 	aio_req_t *aio_reqp,
2798*0Sstevel@tonic-gate 	aio_t *aiop)
2799*0Sstevel@tonic-gate {
2800*0Sstevel@tonic-gate 	long index;
2801*0Sstevel@tonic-gate 	aio_result_t *resultp = aio_reqp->aio_req_resultp;
2802*0Sstevel@tonic-gate 	aio_req_t *current;
2803*0Sstevel@tonic-gate 	aio_req_t **nextp;
2804*0Sstevel@tonic-gate 
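	/*
	 * The hash is keyed on the user's result pointer.  An existing
	 * entry with the same resultp means that aiocb is already in use,
	 * so the insert is rejected as a DUPLICATE.
	 */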
2805*0Sstevel@tonic-gate 	index = AIO_HASH(resultp);
2806*0Sstevel@tonic-gate 	nextp = &aiop->aio_hash[index];
2807*0Sstevel@tonic-gate 	while ((current = *nextp) != NULL) {
2808*0Sstevel@tonic-gate 		if (current->aio_req_resultp == resultp)
2809*0Sstevel@tonic-gate 			return (DUPLICATE);
2810*0Sstevel@tonic-gate 		nextp = &current->aio_hash_next;
2811*0Sstevel@tonic-gate 	}
2812*0Sstevel@tonic-gate 	*nextp = aio_reqp;
2813*0Sstevel@tonic-gate 	aio_reqp->aio_hash_next = NULL;
2814*0Sstevel@tonic-gate 	return (0);
2815*0Sstevel@tonic-gate }
2816*0Sstevel@tonic-gate 
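/*
 * check_vp() returns a pointer to the async I/O entry point to be used
 * for this vnode (a function taking a vnode, an aio_req and a cred), or
 * NULL when kernel async I/O cannot be used and the request must be
 * handled by libaio instead.
 */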
2817*0Sstevel@tonic-gate static int
2818*0Sstevel@tonic-gate (*check_vp(struct vnode *vp, int mode))(vnode_t *, struct aio_req *,
2819*0Sstevel@tonic-gate     cred_t *)
2820*0Sstevel@tonic-gate {
2821*0Sstevel@tonic-gate 	struct snode *sp;
2822*0Sstevel@tonic-gate 	dev_t		dev;
2823*0Sstevel@tonic-gate 	struct cb_ops  	*cb;
2824*0Sstevel@tonic-gate 	major_t		major;
2825*0Sstevel@tonic-gate 	int		(*aio_func)();
2826*0Sstevel@tonic-gate 
2827*0Sstevel@tonic-gate 	dev = vp->v_rdev;
2828*0Sstevel@tonic-gate 	major = getmajor(dev);
2829*0Sstevel@tonic-gate 
2830*0Sstevel@tonic-gate 	/*
2831*0Sstevel@tonic-gate 	 * return NULL for requests to files and STREAMs so
2832*0Sstevel@tonic-gate 	 * that libaio takes care of them.
2833*0Sstevel@tonic-gate 	 */
2834*0Sstevel@tonic-gate 	if (vp->v_type == VCHR) {
2835*0Sstevel@tonic-gate 		/* no stream device for kaio */
2836*0Sstevel@tonic-gate 		if (STREAMSTAB(major)) {
2837*0Sstevel@tonic-gate 			return (NULL);
2838*0Sstevel@tonic-gate 		}
2839*0Sstevel@tonic-gate 	} else {
2840*0Sstevel@tonic-gate 		return (NULL);
2841*0Sstevel@tonic-gate 	}
2842*0Sstevel@tonic-gate 
2843*0Sstevel@tonic-gate 	/*
2844*0Sstevel@tonic-gate 	 * Check old drivers which do not have async I/O entry points.
2845*0Sstevel@tonic-gate 	 */
2846*0Sstevel@tonic-gate 	if (devopsp[major]->devo_rev < 3)
2847*0Sstevel@tonic-gate 		return (NULL);
2848*0Sstevel@tonic-gate 
2849*0Sstevel@tonic-gate 	cb = devopsp[major]->devo_cb_ops;
2850*0Sstevel@tonic-gate 
2851*0Sstevel@tonic-gate 	if (cb->cb_rev < 1)
2852*0Sstevel@tonic-gate 		return (NULL);
2853*0Sstevel@tonic-gate 
2854*0Sstevel@tonic-gate 	/*
2855*0Sstevel@tonic-gate 	 * Check whether the driver provides a strategy routine.
2856*0Sstevel@tonic-gate 	 * Kaio is not supported for devices, such as ttys, that lack one.
2857*0Sstevel@tonic-gate 	 */
2858*0Sstevel@tonic-gate 	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2859*0Sstevel@tonic-gate 		return (NULL);
2860*0Sstevel@tonic-gate 
2861*0Sstevel@tonic-gate 	/*
2862*0Sstevel@tonic-gate 	 * Clustering: If vnode is a PXFS vnode, then the device may be remote.
2863*0Sstevel@tonic-gate 	 * We cannot call the driver directly. Instead return the
2864*0Sstevel@tonic-gate 	 * PXFS functions.
2865*0Sstevel@tonic-gate 	 */
2866*0Sstevel@tonic-gate 
2867*0Sstevel@tonic-gate 	if (IS_PXFSVP(vp)) {
2868*0Sstevel@tonic-gate 		if (mode & FREAD)
2869*0Sstevel@tonic-gate 			return (clpxfs_aio_read);
2870*0Sstevel@tonic-gate 		else
2871*0Sstevel@tonic-gate 			return (clpxfs_aio_write);
2872*0Sstevel@tonic-gate 	}
2873*0Sstevel@tonic-gate 	if (mode & FREAD)
2874*0Sstevel@tonic-gate 		aio_func = (cb->cb_aread == nodev) ? NULL : driver_aio_read;
2875*0Sstevel@tonic-gate 	else
2876*0Sstevel@tonic-gate 		aio_func = (cb->cb_awrite == nodev) ? NULL : driver_aio_write;
2877*0Sstevel@tonic-gate 
2878*0Sstevel@tonic-gate 	/*
2879*0Sstevel@tonic-gate 	 * Do we need this ?
2880*0Sstevel@tonic-gate 	 * nodev returns ENXIO anyway.
2881*0Sstevel@tonic-gate 	 */
2882*0Sstevel@tonic-gate 	if (aio_func == nodev)
2883*0Sstevel@tonic-gate 		return (NULL);
2884*0Sstevel@tonic-gate 
2885*0Sstevel@tonic-gate 	sp = VTOS(vp);
2886*0Sstevel@tonic-gate 	smark(sp, SACC);
2887*0Sstevel@tonic-gate 	return (aio_func);
2888*0Sstevel@tonic-gate }
2889*0Sstevel@tonic-gate 
2890*0Sstevel@tonic-gate /*
2891*0Sstevel@tonic-gate  * Clustering: We want check_vp to return a function prototyped
2892*0Sstevel@tonic-gate  * correctly that will be common to both PXFS and regular case.
2893*0Sstevel@tonic-gate  * We define this intermediate function that will do the right
2894*0Sstevel@tonic-gate  * thing for driver cases.
2895*0Sstevel@tonic-gate  */
2896*0Sstevel@tonic-gate 
2897*0Sstevel@tonic-gate static int
2898*0Sstevel@tonic-gate driver_aio_write(vnode_t *vp, struct aio_req *aio, cred_t *cred_p)
2899*0Sstevel@tonic-gate {
2900*0Sstevel@tonic-gate 	dev_t dev;
2901*0Sstevel@tonic-gate 	struct cb_ops  	*cb;
2902*0Sstevel@tonic-gate 
2903*0Sstevel@tonic-gate 	ASSERT(vp->v_type == VCHR);
2904*0Sstevel@tonic-gate 	ASSERT(!IS_PXFSVP(vp));
2905*0Sstevel@tonic-gate 	dev = VTOS(vp)->s_dev;
2906*0Sstevel@tonic-gate 	ASSERT(STREAMSTAB(getmajor(dev)) == NULL);
2907*0Sstevel@tonic-gate 
2908*0Sstevel@tonic-gate 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2909*0Sstevel@tonic-gate 
2910*0Sstevel@tonic-gate 	ASSERT(cb->cb_awrite != nodev);
2911*0Sstevel@tonic-gate 	return ((*cb->cb_awrite)(dev, aio, cred_p));
2912*0Sstevel@tonic-gate }
2913*0Sstevel@tonic-gate 
2914*0Sstevel@tonic-gate /*
2915*0Sstevel@tonic-gate  * Clustering: We want check_vp to return a function prototyped
2916*0Sstevel@tonic-gate  * correctly that will be common to both PXFS and regular case.
2917*0Sstevel@tonic-gate  * We define this intermediate function that will do the right
2918*0Sstevel@tonic-gate  * thing for driver cases.
2919*0Sstevel@tonic-gate  */
2920*0Sstevel@tonic-gate 
2921*0Sstevel@tonic-gate static int
2922*0Sstevel@tonic-gate driver_aio_read(vnode_t *vp, struct aio_req *aio, cred_t *cred_p)
2923*0Sstevel@tonic-gate {
2924*0Sstevel@tonic-gate 	dev_t dev;
2925*0Sstevel@tonic-gate 	struct cb_ops  	*cb;
2926*0Sstevel@tonic-gate 
2927*0Sstevel@tonic-gate 	ASSERT(vp->v_type == VCHR);
2928*0Sstevel@tonic-gate 	ASSERT(!IS_PXFSVP(vp));
2929*0Sstevel@tonic-gate 	dev = VTOS(vp)->s_dev;
2930*0Sstevel@tonic-gate 	ASSERT(!STREAMSTAB(getmajor(dev)));
2931*0Sstevel@tonic-gate 
2932*0Sstevel@tonic-gate 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2933*0Sstevel@tonic-gate 
2934*0Sstevel@tonic-gate 	ASSERT(cb->cb_aread != nodev);
2935*0Sstevel@tonic-gate 	return ((*cb->cb_aread)(dev, aio, cred_p));
2936*0Sstevel@tonic-gate }
2937*0Sstevel@tonic-gate 
2938*0Sstevel@tonic-gate /*
2939*0Sstevel@tonic-gate  * This routine is called when a largefile call is made by a 32-bit
2940*0Sstevel@tonic-gate  * process on an ILP32 or LP64 kernel. All 64-bit processes are
2941*0Sstevel@tonic-gate  * largefile by definition and will call alio() instead.
2942*0Sstevel@tonic-gate  */
2943*0Sstevel@tonic-gate static int
2944*0Sstevel@tonic-gate alioLF(
2945*0Sstevel@tonic-gate 	int		mode_arg,
2946*0Sstevel@tonic-gate 	void		*aiocb_arg,
2947*0Sstevel@tonic-gate 	int		nent,
2948*0Sstevel@tonic-gate 	void		*sigev)
2949*0Sstevel@tonic-gate {
2950*0Sstevel@tonic-gate 	file_t		*fp;
2951*0Sstevel@tonic-gate 	file_t		*prev_fp = NULL;
2952*0Sstevel@tonic-gate 	int		prev_mode = -1;
2953*0Sstevel@tonic-gate 	struct vnode	*vp;
2954*0Sstevel@tonic-gate 	aio_lio_t	*head;
2955*0Sstevel@tonic-gate 	aio_req_t	*reqp;
2956*0Sstevel@tonic-gate 	aio_t		*aiop;
2957*0Sstevel@tonic-gate 	caddr_t		cbplist;
2958*0Sstevel@tonic-gate 	aiocb64_32_t	*cbp;
2959*0Sstevel@tonic-gate 	caddr32_t	*ucbp;
2960*0Sstevel@tonic-gate 	aiocb64_32_t	cb64;
2961*0Sstevel@tonic-gate 	aiocb64_32_t	*aiocb = &cb64;
2962*0Sstevel@tonic-gate #ifdef _LP64
2963*0Sstevel@tonic-gate 	aiocb_t		aiocb_n;
2964*0Sstevel@tonic-gate #endif
2965*0Sstevel@tonic-gate 	struct sigevent32	sigevk;
2966*0Sstevel@tonic-gate 	sigqueue_t	*sqp;
2967*0Sstevel@tonic-gate 	int		(*aio_func)();
2968*0Sstevel@tonic-gate 	int		mode;
2969*0Sstevel@tonic-gate 	int		error = 0, aio_errors = 0;
2970*0Sstevel@tonic-gate 	int		i;
2971*0Sstevel@tonic-gate 	size_t		ssize;
2972*0Sstevel@tonic-gate 	int		deadhead = 0;
2973*0Sstevel@tonic-gate 	int		aio_notsupported = 0;
2974*0Sstevel@tonic-gate 	int		aio_use_port = 0;
2975*0Sstevel@tonic-gate 	port_kevent_t	*pkevtp = NULL;
2976*0Sstevel@tonic-gate 	port_notify32_t	pnotify;
2977*0Sstevel@tonic-gate 
2978*0Sstevel@tonic-gate 	aiop = curproc->p_aio;
2979*0Sstevel@tonic-gate 	if (aiop == NULL || nent <= 0 || nent > _AIO_LISTIO_MAX)
2980*0Sstevel@tonic-gate 		return (EINVAL);
2981*0Sstevel@tonic-gate 
2982*0Sstevel@tonic-gate 	ASSERT(get_udatamodel() == DATAMODEL_ILP32);
2983*0Sstevel@tonic-gate 
2984*0Sstevel@tonic-gate 	ssize = (sizeof (caddr32_t) * nent);
2985*0Sstevel@tonic-gate 	cbplist = kmem_alloc(ssize, KM_SLEEP);
2986*0Sstevel@tonic-gate 	ucbp = (caddr32_t *)cbplist;
2987*0Sstevel@tonic-gate 
2988*0Sstevel@tonic-gate 	if (copyin(aiocb_arg, cbplist, ssize)) {
2989*0Sstevel@tonic-gate 		kmem_free(cbplist, ssize);
2990*0Sstevel@tonic-gate 		return (EFAULT);
2991*0Sstevel@tonic-gate 	}
2992*0Sstevel@tonic-gate 
2993*0Sstevel@tonic-gate 	if (sigev) {
2994*0Sstevel@tonic-gate 		if (copyin(sigev, &sigevk, sizeof (sigevk))) {
2995*0Sstevel@tonic-gate 			kmem_free(cbplist, ssize);
2996*0Sstevel@tonic-gate 			return (EFAULT);
2997*0Sstevel@tonic-gate 		}
2998*0Sstevel@tonic-gate 	}
2999*0Sstevel@tonic-gate 
3000*0Sstevel@tonic-gate 	/*
3001*0Sstevel@tonic-gate 	 * a list head should be allocated if notification is
3002*0Sstevel@tonic-gate 	 * enabled for this list.
3003*0Sstevel@tonic-gate 	 */
3004*0Sstevel@tonic-gate 	head = NULL;
3005*0Sstevel@tonic-gate 
3006*0Sstevel@tonic-gate 	/* Event Ports  */
3007*0Sstevel@tonic-gate 
3008*0Sstevel@tonic-gate 	if (sigev && sigevk.sigev_notify == SIGEV_PORT) {
3009*0Sstevel@tonic-gate 		/* Use PORT for completion notification */
3010*0Sstevel@tonic-gate 		if (copyin((void *)(uintptr_t)sigevk.sigev_value.sival_ptr,
3011*0Sstevel@tonic-gate 		    &pnotify, sizeof (port_notify32_t))) {
3012*0Sstevel@tonic-gate 			kmem_free(cbplist, ssize);
3013*0Sstevel@tonic-gate 			return (EFAULT);
3014*0Sstevel@tonic-gate 		}
3015*0Sstevel@tonic-gate 		/* use event ports for the list of aiocbs */
3016*0Sstevel@tonic-gate 		aio_use_port = 1;
3017*0Sstevel@tonic-gate 		error = port_alloc_event(pnotify.portnfy_port,
3018*0Sstevel@tonic-gate 		    PORT_ALLOC_PRIVATE, PORT_SOURCE_AIO, &pkevtp);
3019*0Sstevel@tonic-gate 		if (error) {
3020*0Sstevel@tonic-gate 			if (error == ENOMEM)
3021*0Sstevel@tonic-gate 				error = EAGAIN;
3022*0Sstevel@tonic-gate 			kmem_free(cbplist, ssize);
3023*0Sstevel@tonic-gate 			return (error);
3024*0Sstevel@tonic-gate 		}
3025*0Sstevel@tonic-gate 	} else if ((mode_arg == LIO_WAIT) || sigev) {
3026*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
3027*0Sstevel@tonic-gate 		error = aio_lio_alloc(&head);
3028*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
3029*0Sstevel@tonic-gate 		if (error)
3030*0Sstevel@tonic-gate 			goto done;
3031*0Sstevel@tonic-gate 		deadhead = 1;
3032*0Sstevel@tonic-gate 		head->lio_nent = nent;
3033*0Sstevel@tonic-gate 		head->lio_refcnt = nent;
3034*0Sstevel@tonic-gate 		if (sigev && (sigevk.sigev_notify == SIGEV_SIGNAL) &&
3035*0Sstevel@tonic-gate 		    (sigevk.sigev_signo > 0 && sigevk.sigev_signo < NSIG)) {
3036*0Sstevel@tonic-gate 			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_NOSLEEP);
3037*0Sstevel@tonic-gate 			if (sqp == NULL) {
3038*0Sstevel@tonic-gate 				error = EAGAIN;
3039*0Sstevel@tonic-gate 				goto done;
3040*0Sstevel@tonic-gate 			}
3041*0Sstevel@tonic-gate 			sqp->sq_func = NULL;
3042*0Sstevel@tonic-gate 			sqp->sq_next = NULL;
3043*0Sstevel@tonic-gate 			sqp->sq_info.si_code = SI_ASYNCIO;
3044*0Sstevel@tonic-gate 			sqp->sq_info.si_pid = curproc->p_pid;
3045*0Sstevel@tonic-gate 			sqp->sq_info.si_ctid = PRCTID(curproc);
3046*0Sstevel@tonic-gate 			sqp->sq_info.si_zoneid = getzoneid();
3047*0Sstevel@tonic-gate 			sqp->sq_info.si_uid = crgetuid(curproc->p_cred);
3048*0Sstevel@tonic-gate 			sqp->sq_info.si_signo = sigevk.sigev_signo;
3049*0Sstevel@tonic-gate 			sqp->sq_info.si_value.sival_int =
3050*0Sstevel@tonic-gate 			    sigevk.sigev_value.sival_int;
3051*0Sstevel@tonic-gate 			head->lio_sigqp = sqp;
3052*0Sstevel@tonic-gate 		} else {
3053*0Sstevel@tonic-gate 			head->lio_sigqp = NULL;
3054*0Sstevel@tonic-gate 		}
3055*0Sstevel@tonic-gate 	}
3056*0Sstevel@tonic-gate 
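	/*
	 * Walk the list of aiocb pointers: copy in each control block,
	 * validate its file descriptor and access mode, and hand the
	 * request to the driver's async I/O routine.
	 */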
3057*0Sstevel@tonic-gate 	for (i = 0; i < nent; i++, ucbp++) {
3058*0Sstevel@tonic-gate 
3059*0Sstevel@tonic-gate 		cbp = (aiocb64_32_t *)(uintptr_t)*ucbp;
3060*0Sstevel@tonic-gate 		/* skip entry if it can't be copied. */
3061*0Sstevel@tonic-gate 		if (cbp == NULL || copyin(cbp, aiocb, sizeof (aiocb64_32_t))) {
3062*0Sstevel@tonic-gate 			if (head) {
3063*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
3064*0Sstevel@tonic-gate 				head->lio_nent--;
3065*0Sstevel@tonic-gate 				head->lio_refcnt--;
3066*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3067*0Sstevel@tonic-gate 			}
3068*0Sstevel@tonic-gate 			continue;
3069*0Sstevel@tonic-gate 		}
3070*0Sstevel@tonic-gate 
3071*0Sstevel@tonic-gate 		/* skip if opcode for aiocb is LIO_NOP */
3072*0Sstevel@tonic-gate 
3073*0Sstevel@tonic-gate 		mode = aiocb->aio_lio_opcode;
3074*0Sstevel@tonic-gate 		if (mode == LIO_NOP) {
3075*0Sstevel@tonic-gate 			cbp = NULL;
3076*0Sstevel@tonic-gate 			if (head) {
3077*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
3078*0Sstevel@tonic-gate 				head->lio_nent--;
3079*0Sstevel@tonic-gate 				head->lio_refcnt--;
3080*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3081*0Sstevel@tonic-gate 			}
3082*0Sstevel@tonic-gate 			continue;
3083*0Sstevel@tonic-gate 		}
3084*0Sstevel@tonic-gate 
3085*0Sstevel@tonic-gate 		/* increment file descriptor's ref count. */
3086*0Sstevel@tonic-gate 		if ((fp = getf(aiocb->aio_fildes)) == NULL) {
3087*0Sstevel@tonic-gate 			lio_set_uerror(&cbp->aio_resultp, EBADF);
3088*0Sstevel@tonic-gate 			if (head) {
3089*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
3090*0Sstevel@tonic-gate 				head->lio_nent--;
3091*0Sstevel@tonic-gate 				head->lio_refcnt--;
3092*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3093*0Sstevel@tonic-gate 			}
3094*0Sstevel@tonic-gate 			aio_errors++;
3095*0Sstevel@tonic-gate 			continue;
3096*0Sstevel@tonic-gate 		}
3097*0Sstevel@tonic-gate 
3098*0Sstevel@tonic-gate 		vp = fp->f_vnode;
3099*0Sstevel@tonic-gate 
3100*0Sstevel@tonic-gate 		/*
3101*0Sstevel@tonic-gate 		 * check the permission of the partition
3102*0Sstevel@tonic-gate 		 */
3103*0Sstevel@tonic-gate 		mode = aiocb->aio_lio_opcode;
3104*0Sstevel@tonic-gate 		if ((fp->f_flag & mode) == 0) {
3105*0Sstevel@tonic-gate 			releasef(aiocb->aio_fildes);
3106*0Sstevel@tonic-gate 			lio_set_uerror(&cbp->aio_resultp, EBADF);
3107*0Sstevel@tonic-gate 			if (head) {
3108*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
3109*0Sstevel@tonic-gate 				head->lio_nent--;
3110*0Sstevel@tonic-gate 				head->lio_refcnt--;
3111*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3112*0Sstevel@tonic-gate 			}
3113*0Sstevel@tonic-gate 			aio_errors++;
3114*0Sstevel@tonic-gate 			continue;
3115*0Sstevel@tonic-gate 		}
3116*0Sstevel@tonic-gate 
3117*0Sstevel@tonic-gate 		/*
3118*0Sstevel@tonic-gate 		 * common case where requests are to the same fd
3119*0Sstevel@tonic-gate 		 * for the same r/w operation
3120*0Sstevel@tonic-gate 		 * for UFS, need to set EBADFD
3121*0Sstevel@tonic-gate 		 */
3122*0Sstevel@tonic-gate 		if ((fp != prev_fp) || (mode != prev_mode)) {
3123*0Sstevel@tonic-gate 			aio_func = check_vp(vp, mode);
3124*0Sstevel@tonic-gate 			if (aio_func == NULL) {
3125*0Sstevel@tonic-gate 				prev_fp = NULL;
3126*0Sstevel@tonic-gate 				releasef(aiocb->aio_fildes);
3127*0Sstevel@tonic-gate 				lio_set_uerror(&cbp->aio_resultp, EBADFD);
3128*0Sstevel@tonic-gate 				aio_notsupported++;
3129*0Sstevel@tonic-gate 				if (head) {
3130*0Sstevel@tonic-gate 					mutex_enter(&aiop->aio_mutex);
3131*0Sstevel@tonic-gate 					head->lio_nent--;
3132*0Sstevel@tonic-gate 					head->lio_refcnt--;
3133*0Sstevel@tonic-gate 					mutex_exit(&aiop->aio_mutex);
3134*0Sstevel@tonic-gate 				}
3135*0Sstevel@tonic-gate 				continue;
3136*0Sstevel@tonic-gate 			} else {
3137*0Sstevel@tonic-gate 				prev_fp = fp;
3138*0Sstevel@tonic-gate 				prev_mode = mode;
3139*0Sstevel@tonic-gate 			}
3140*0Sstevel@tonic-gate 		}
3141*0Sstevel@tonic-gate #ifdef	_LP64
3142*0Sstevel@tonic-gate 		aiocb_LFton(aiocb, &aiocb_n);
3143*0Sstevel@tonic-gate 		error = aio_req_setup(&reqp, aiop, &aiocb_n,
3144*0Sstevel@tonic-gate 		    (aio_result_t *)&cbp->aio_resultp, aio_use_port, vp);
3145*0Sstevel@tonic-gate #else
3146*0Sstevel@tonic-gate 		error = aio_req_setupLF(&reqp, aiop, aiocb,
3147*0Sstevel@tonic-gate 		    (aio_result_t *)&cbp->aio_resultp, aio_use_port, vp);
3148*0Sstevel@tonic-gate #endif  /* _LP64 */
3149*0Sstevel@tonic-gate 		if (error) {
3150*0Sstevel@tonic-gate 			releasef(aiocb->aio_fildes);
3151*0Sstevel@tonic-gate 			if (head) {
3152*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
3153*0Sstevel@tonic-gate 				head->lio_nent--;
3154*0Sstevel@tonic-gate 				head->lio_refcnt--;
3155*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3156*0Sstevel@tonic-gate 			}
3157*0Sstevel@tonic-gate 			aio_errors++;
3158*0Sstevel@tonic-gate 			continue;
3159*0Sstevel@tonic-gate 		}
3160*0Sstevel@tonic-gate 
3161*0Sstevel@tonic-gate 		reqp->aio_req_lio = head;
3162*0Sstevel@tonic-gate 		deadhead = 0;
3163*0Sstevel@tonic-gate 
3164*0Sstevel@tonic-gate 		/*
3165*0Sstevel@tonic-gate 		 * Set the errno field now before sending the request to
3166*0Sstevel@tonic-gate 		 * the driver to avoid a race condition
3167*0Sstevel@tonic-gate 		 */
3168*0Sstevel@tonic-gate 		(void) suword32(&cbp->aio_resultp.aio_errno,
3169*0Sstevel@tonic-gate 		    EINPROGRESS);
3170*0Sstevel@tonic-gate 
3171*0Sstevel@tonic-gate 		reqp->aio_req_iocb.iocb32 = *ucbp;
3172*0Sstevel@tonic-gate 
3173*0Sstevel@tonic-gate 		if (aio_use_port) {
3174*0Sstevel@tonic-gate 			reqp->aio_req_port = pnotify.portnfy_port;
3175*0Sstevel@tonic-gate 			error = aio_req_assoc_port32(&aiocb->aio_sigevent,
3176*0Sstevel@tonic-gate 			    (void *)(uintptr_t)pnotify.portnfy_user,
3177*0Sstevel@tonic-gate 			    (aiocb_t *)(uintptr_t)*ucbp, reqp, pkevtp);
3178*0Sstevel@tonic-gate 		}
3179*0Sstevel@tonic-gate 
3180*0Sstevel@tonic-gate 		/*
3181*0Sstevel@tonic-gate 		 * send the request to driver.
3182*0Sstevel@tonic-gate 		 * Clustering: If PXFS vnode, call PXFS function.
3183*0Sstevel@tonic-gate 		 */
3184*0Sstevel@tonic-gate 		if (error == 0) {
3185*0Sstevel@tonic-gate 			if (aiocb->aio_nbytes == 0) {
3186*0Sstevel@tonic-gate 				clear_active_fd(aiocb->aio_fildes);
3187*0Sstevel@tonic-gate 				aio_zerolen(reqp);
3188*0Sstevel@tonic-gate 				continue;
3189*0Sstevel@tonic-gate 			}
3190*0Sstevel@tonic-gate 			error = (*aio_func)(vp, (aio_req_t *)&reqp->aio_req,
3191*0Sstevel@tonic-gate 			    CRED());
3192*0Sstevel@tonic-gate 		}
3193*0Sstevel@tonic-gate 
3194*0Sstevel@tonic-gate 		/*
3195*0Sstevel@tonic-gate 		 * the fd's ref count is not decremented until the IO has
3196*0Sstevel@tonic-gate 		 * completed unless there was an error.
3197*0Sstevel@tonic-gate 		 */
3198*0Sstevel@tonic-gate 		if (error) {
3199*0Sstevel@tonic-gate 			releasef(aiocb->aio_fildes);
3200*0Sstevel@tonic-gate 			lio_set_uerror(&cbp->aio_resultp, error);
3201*0Sstevel@tonic-gate 			if (head) {
3202*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
3203*0Sstevel@tonic-gate 				head->lio_nent--;
3204*0Sstevel@tonic-gate 				head->lio_refcnt--;
3205*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3206*0Sstevel@tonic-gate 			}
3207*0Sstevel@tonic-gate 			if (error == ENOTSUP)
3208*0Sstevel@tonic-gate 				aio_notsupported++;
3209*0Sstevel@tonic-gate 			else
3210*0Sstevel@tonic-gate 				aio_errors++;
3211*0Sstevel@tonic-gate 			lio_set_error(reqp);
3212*0Sstevel@tonic-gate 		} else {
3213*0Sstevel@tonic-gate 			clear_active_fd(aiocb->aio_fildes);
3214*0Sstevel@tonic-gate 		}
3215*0Sstevel@tonic-gate 	}
3216*0Sstevel@tonic-gate 
3217*0Sstevel@tonic-gate 	if (pkevtp)
3218*0Sstevel@tonic-gate 		port_free_event(pkevtp);
3219*0Sstevel@tonic-gate 
3220*0Sstevel@tonic-gate 	if (aio_notsupported) {
3221*0Sstevel@tonic-gate 		error = ENOTSUP;
3222*0Sstevel@tonic-gate 	} else if (aio_errors) {
3223*0Sstevel@tonic-gate 		/*
3224*0Sstevel@tonic-gate 		 * return EIO if any request failed
3225*0Sstevel@tonic-gate 		 */
3226*0Sstevel@tonic-gate 		error = EIO;
3227*0Sstevel@tonic-gate 	}
3228*0Sstevel@tonic-gate 
3229*0Sstevel@tonic-gate 	if (mode_arg == LIO_WAIT) {
3230*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
3231*0Sstevel@tonic-gate 		while (head->lio_refcnt > 0) {
3232*0Sstevel@tonic-gate 			if (!cv_wait_sig(&head->lio_notify, &aiop->aio_mutex)) {
3233*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3234*0Sstevel@tonic-gate 				error = EINTR;
3235*0Sstevel@tonic-gate 				goto done;
3236*0Sstevel@tonic-gate 			}
3237*0Sstevel@tonic-gate 		}
3238*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
3239*0Sstevel@tonic-gate 		alio_cleanup(aiop, (aiocb_t **)cbplist, nent, AIO_LARGEFILE);
3240*0Sstevel@tonic-gate 	}
3241*0Sstevel@tonic-gate 
3242*0Sstevel@tonic-gate done:
3243*0Sstevel@tonic-gate 	kmem_free(cbplist, ssize);
3244*0Sstevel@tonic-gate 	if (deadhead) {
3245*0Sstevel@tonic-gate 		if (head->lio_sigqp)
3246*0Sstevel@tonic-gate 			kmem_free(head->lio_sigqp, sizeof (sigqueue_t));
3247*0Sstevel@tonic-gate 		kmem_free(head, sizeof (aio_lio_t));
3248*0Sstevel@tonic-gate 	}
3249*0Sstevel@tonic-gate 	return (error);
3250*0Sstevel@tonic-gate }
3251*0Sstevel@tonic-gate 
3252*0Sstevel@tonic-gate #ifdef  _SYSCALL32_IMPL
3253*0Sstevel@tonic-gate static void
3254*0Sstevel@tonic-gate aiocb_LFton(aiocb64_32_t *src, aiocb_t *dest)
3255*0Sstevel@tonic-gate {
3256*0Sstevel@tonic-gate 	dest->aio_fildes = src->aio_fildes;
3257*0Sstevel@tonic-gate 	dest->aio_buf = (void *)(uintptr_t)src->aio_buf;
3258*0Sstevel@tonic-gate 	dest->aio_nbytes = (size_t)src->aio_nbytes;
3259*0Sstevel@tonic-gate 	dest->aio_offset = (off_t)src->aio_offset;
3260*0Sstevel@tonic-gate 	dest->aio_reqprio = src->aio_reqprio;
3261*0Sstevel@tonic-gate 	dest->aio_sigevent.sigev_notify = src->aio_sigevent.sigev_notify;
3262*0Sstevel@tonic-gate 	dest->aio_sigevent.sigev_signo = src->aio_sigevent.sigev_signo;
3263*0Sstevel@tonic-gate 
3264*0Sstevel@tonic-gate 	/*
3265*0Sstevel@tonic-gate 	 * See comment in sigqueue32() on handling of 32-bit
3266*0Sstevel@tonic-gate 	 * sigvals in a 64-bit kernel.
3267*0Sstevel@tonic-gate 	 */
3268*0Sstevel@tonic-gate 	dest->aio_sigevent.sigev_value.sival_int =
3269*0Sstevel@tonic-gate 	    (int)src->aio_sigevent.sigev_value.sival_int;
3270*0Sstevel@tonic-gate 	dest->aio_sigevent.sigev_notify_function = (void (*)(union sigval))
3271*0Sstevel@tonic-gate 	    (uintptr_t)src->aio_sigevent.sigev_notify_function;
3272*0Sstevel@tonic-gate 	dest->aio_sigevent.sigev_notify_attributes = (pthread_attr_t *)
3273*0Sstevel@tonic-gate 	    (uintptr_t)src->aio_sigevent.sigev_notify_attributes;
3274*0Sstevel@tonic-gate 	dest->aio_sigevent.__sigev_pad2 = src->aio_sigevent.__sigev_pad2;
3275*0Sstevel@tonic-gate 	dest->aio_lio_opcode = src->aio_lio_opcode;
3276*0Sstevel@tonic-gate 	dest->aio_state = src->aio_state;
3277*0Sstevel@tonic-gate 	dest->aio__pad[0] = src->aio__pad[0];
3278*0Sstevel@tonic-gate }
3279*0Sstevel@tonic-gate #endif
3280*0Sstevel@tonic-gate 
3281*0Sstevel@tonic-gate /*
3282*0Sstevel@tonic-gate  * This function is used only for largefile calls made by
3283*0Sstevel@tonic-gate  * 32-bit applications on a 32-bit kernel.
3284*0Sstevel@tonic-gate  */
3285*0Sstevel@tonic-gate static int
3286*0Sstevel@tonic-gate aio_req_setupLF(
3287*0Sstevel@tonic-gate 	aio_req_t	**reqpp,
3288*0Sstevel@tonic-gate 	aio_t		*aiop,
3289*0Sstevel@tonic-gate 	aiocb64_32_t	*arg,
3290*0Sstevel@tonic-gate 	aio_result_t	*resultp,
3291*0Sstevel@tonic-gate 	int		port,
3292*0Sstevel@tonic-gate 	vnode_t		*vp)
3293*0Sstevel@tonic-gate {
3294*0Sstevel@tonic-gate 	aio_req_t	*reqp;
3295*0Sstevel@tonic-gate 	sigqueue_t	*sqp;
3296*0Sstevel@tonic-gate 	struct	uio	*uio;
3297*0Sstevel@tonic-gate 
3298*0Sstevel@tonic-gate 	struct	sigevent *sigev;
3299*0Sstevel@tonic-gate 	int 		error;
3300*0Sstevel@tonic-gate 
3301*0Sstevel@tonic-gate 	sigev = (struct	sigevent *)&arg->aio_sigevent;
3302*0Sstevel@tonic-gate 	if ((sigev->sigev_notify == SIGEV_SIGNAL) &&
3303*0Sstevel@tonic-gate 	    (sigev->sigev_signo > 0 && sigev->sigev_signo < NSIG)) {
3304*0Sstevel@tonic-gate 		sqp = kmem_zalloc(sizeof (sigqueue_t), KM_NOSLEEP);
3305*0Sstevel@tonic-gate 		if (sqp == NULL)
3306*0Sstevel@tonic-gate 			return (EAGAIN);
3307*0Sstevel@tonic-gate 		sqp->sq_func = NULL;
3308*0Sstevel@tonic-gate 		sqp->sq_next = NULL;
3309*0Sstevel@tonic-gate 		sqp->sq_info.si_code = SI_ASYNCIO;
3310*0Sstevel@tonic-gate 		sqp->sq_info.si_pid = curproc->p_pid;
3311*0Sstevel@tonic-gate 		sqp->sq_info.si_ctid = PRCTID(curproc);
3312*0Sstevel@tonic-gate 		sqp->sq_info.si_zoneid = getzoneid();
3313*0Sstevel@tonic-gate 		sqp->sq_info.si_uid = crgetuid(curproc->p_cred);
3314*0Sstevel@tonic-gate 		sqp->sq_info.si_signo = sigev->sigev_signo;
3315*0Sstevel@tonic-gate 		sqp->sq_info.si_value = sigev->sigev_value;
3316*0Sstevel@tonic-gate 	} else
3317*0Sstevel@tonic-gate 		sqp = NULL;
3318*0Sstevel@tonic-gate 
3319*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
3320*0Sstevel@tonic-gate 
3321*0Sstevel@tonic-gate 	if (aiop->aio_flags & AIO_REQ_BLOCK) {
3322*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
3323*0Sstevel@tonic-gate 		if (sqp)
3324*0Sstevel@tonic-gate 			kmem_free(sqp, sizeof (sigqueue_t));
3325*0Sstevel@tonic-gate 		return (EIO);
3326*0Sstevel@tonic-gate 	}
3327*0Sstevel@tonic-gate 	/*
3328*0Sstevel@tonic-gate 	 * get an aio_reqp from the free list or allocate one
3329*0Sstevel@tonic-gate 	 * from dynamic memory.
3330*0Sstevel@tonic-gate 	 */
3331*0Sstevel@tonic-gate 	if (error = aio_req_alloc(&reqp, resultp)) {
3332*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
3333*0Sstevel@tonic-gate 		if (sqp)
3334*0Sstevel@tonic-gate 			kmem_free(sqp, sizeof (sigqueue_t));
3335*0Sstevel@tonic-gate 		return (error);
3336*0Sstevel@tonic-gate 	}
3337*0Sstevel@tonic-gate 	aiop->aio_pending++;
3338*0Sstevel@tonic-gate 	aiop->aio_outstanding++;
3339*0Sstevel@tonic-gate 	reqp->aio_req_flags = AIO_PENDING;
3340*0Sstevel@tonic-gate 	if (port)
3341*0Sstevel@tonic-gate 		aio_enq_port_pending(aiop, reqp);
3342*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_mutex);
3343*0Sstevel@tonic-gate 	/*
3344*0Sstevel@tonic-gate 	 * initialize aio request.
3345*0Sstevel@tonic-gate 	 */
3346*0Sstevel@tonic-gate 	reqp->aio_req_fd = arg->aio_fildes;
3347*0Sstevel@tonic-gate 	reqp->aio_req_sigqp = sqp;
3348*0Sstevel@tonic-gate 	reqp->aio_req_iocb.iocb = NULL;
3349*0Sstevel@tonic-gate 	reqp->aio_req_buf.b_file = vp;
3350*0Sstevel@tonic-gate 	uio = reqp->aio_req.aio_uio;
3351*0Sstevel@tonic-gate 	uio->uio_iovcnt = 1;
3352*0Sstevel@tonic-gate 	uio->uio_iov->iov_base = (caddr_t)(uintptr_t)arg->aio_buf;
3353*0Sstevel@tonic-gate 	uio->uio_iov->iov_len = arg->aio_nbytes;
3354*0Sstevel@tonic-gate 	uio->uio_loffset = arg->aio_offset;
3355*0Sstevel@tonic-gate 	*reqpp = reqp;
3356*0Sstevel@tonic-gate 	return (0);
3357*0Sstevel@tonic-gate }
3358*0Sstevel@tonic-gate 
3359*0Sstevel@tonic-gate /*
3360*0Sstevel@tonic-gate  * This routine is called when a non-largefile call is made by a 32-bit
3361*0Sstevel@tonic-gate  * process on an ILP32 or LP64 kernel.
3362*0Sstevel@tonic-gate  */
3363*0Sstevel@tonic-gate static int
3364*0Sstevel@tonic-gate alio32(
3365*0Sstevel@tonic-gate 	int		mode_arg,
3366*0Sstevel@tonic-gate 	void		*aiocb_arg,
3367*0Sstevel@tonic-gate 	int		nent,
3368*0Sstevel@tonic-gate 	void		*sigev_arg)
3369*0Sstevel@tonic-gate {
3370*0Sstevel@tonic-gate 	file_t		*fp;
3371*0Sstevel@tonic-gate 	file_t		*prev_fp = NULL;
3372*0Sstevel@tonic-gate 	int		prev_mode = -1;
3373*0Sstevel@tonic-gate 	struct vnode	*vp;
3374*0Sstevel@tonic-gate 	aio_lio_t	*head;
3375*0Sstevel@tonic-gate 	aio_req_t	*reqp;
3376*0Sstevel@tonic-gate 	aio_t		*aiop;
3377*0Sstevel@tonic-gate 	aiocb_t		cb;
3378*0Sstevel@tonic-gate 	aiocb_t		*aiocb = &cb;
3379*0Sstevel@tonic-gate 	caddr_t		cbplist;
3380*0Sstevel@tonic-gate #ifdef	_LP64
3381*0Sstevel@tonic-gate 	aiocb32_t	*cbp;
3382*0Sstevel@tonic-gate 	caddr32_t	*ucbp;
3383*0Sstevel@tonic-gate 	aiocb32_t	cb32;
3384*0Sstevel@tonic-gate 	aiocb32_t	*aiocb32 = &cb32;
3385*0Sstevel@tonic-gate 	struct sigevent32	sigev;
3386*0Sstevel@tonic-gate #else
3387*0Sstevel@tonic-gate 	aiocb_t		*cbp, **ucbp;
3388*0Sstevel@tonic-gate 	struct sigevent	sigev;
3389*0Sstevel@tonic-gate #endif
3390*0Sstevel@tonic-gate 	sigqueue_t	*sqp;
3391*0Sstevel@tonic-gate 	int		(*aio_func)();
3392*0Sstevel@tonic-gate 	int		mode;
3393*0Sstevel@tonic-gate 	int		error = 0, aio_errors = 0;
3394*0Sstevel@tonic-gate 	int		i;
3395*0Sstevel@tonic-gate 	size_t		ssize;
3396*0Sstevel@tonic-gate 	int		deadhead = 0;
3397*0Sstevel@tonic-gate 	int		aio_notsupported = 0;
3398*0Sstevel@tonic-gate 	int		aio_use_port = 0;
3399*0Sstevel@tonic-gate 	port_kevent_t	*pkevtp = NULL;
3400*0Sstevel@tonic-gate #ifdef	_LP64
3401*0Sstevel@tonic-gate 	port_notify32_t	pnotify;
3402*0Sstevel@tonic-gate #else
3403*0Sstevel@tonic-gate 	port_notify_t	pnotify;
3404*0Sstevel@tonic-gate #endif
3405*0Sstevel@tonic-gate 	aiop = curproc->p_aio;
3406*0Sstevel@tonic-gate 	if (aiop == NULL || nent <= 0 || nent > _AIO_LISTIO_MAX)
3407*0Sstevel@tonic-gate 		return (EINVAL);
3408*0Sstevel@tonic-gate 
3409*0Sstevel@tonic-gate #ifdef	_LP64
3410*0Sstevel@tonic-gate 	ssize = (sizeof (caddr32_t) * nent);
3411*0Sstevel@tonic-gate #else
3412*0Sstevel@tonic-gate 	ssize = (sizeof (aiocb_t *) * nent);
3413*0Sstevel@tonic-gate #endif
3414*0Sstevel@tonic-gate 	cbplist = kmem_alloc(ssize, KM_SLEEP);
3415*0Sstevel@tonic-gate 	ucbp = (void *)cbplist;
3416*0Sstevel@tonic-gate 
3417*0Sstevel@tonic-gate 	if (copyin(aiocb_arg, cbplist, ssize)) {
3418*0Sstevel@tonic-gate 		kmem_free(cbplist, ssize);
3419*0Sstevel@tonic-gate 		return (EFAULT);
3420*0Sstevel@tonic-gate 	}
3421*0Sstevel@tonic-gate 
3422*0Sstevel@tonic-gate 	if (sigev_arg) {
3423*0Sstevel@tonic-gate 		if (copyin(sigev_arg, &sigev, sizeof (struct sigevent32))) {
3424*0Sstevel@tonic-gate 			kmem_free(cbplist, ssize);
3425*0Sstevel@tonic-gate 			return (EFAULT);
3426*0Sstevel@tonic-gate 		}
3427*0Sstevel@tonic-gate 	}
3428*0Sstevel@tonic-gate 
3429*0Sstevel@tonic-gate 	/*
3430*0Sstevel@tonic-gate 	 * a list head should be allocated if notification is
3431*0Sstevel@tonic-gate 	 * enabled for this list.
3432*0Sstevel@tonic-gate 	 */
3433*0Sstevel@tonic-gate 	head = NULL;
3434*0Sstevel@tonic-gate 
3435*0Sstevel@tonic-gate 	/* Event Ports  */
3436*0Sstevel@tonic-gate 
3437*0Sstevel@tonic-gate 	if (sigev_arg && sigev.sigev_notify == SIGEV_PORT) {
3438*0Sstevel@tonic-gate 		/* Use PORT for completion notification */
3439*0Sstevel@tonic-gate 		if (copyin((void *)(uintptr_t)sigev.sigev_value.sival_ptr,
3440*0Sstevel@tonic-gate 		    &pnotify, sizeof (port_notify32_t))) {
3441*0Sstevel@tonic-gate 			kmem_free(cbplist, ssize);
3442*0Sstevel@tonic-gate 			return (EFAULT);
3443*0Sstevel@tonic-gate 		}
3444*0Sstevel@tonic-gate 		/* use event ports for the list of aiocbs */
3445*0Sstevel@tonic-gate 		aio_use_port = 1;
3446*0Sstevel@tonic-gate 		error = port_alloc_event(pnotify.portnfy_port,
3447*0Sstevel@tonic-gate 		    PORT_ALLOC_PRIVATE, PORT_SOURCE_AIO, &pkevtp);
3448*0Sstevel@tonic-gate 		if (error) {
3449*0Sstevel@tonic-gate 			if ((error == ENOMEM) || (error == EAGAIN))
3450*0Sstevel@tonic-gate 				error = EAGAIN;
3451*0Sstevel@tonic-gate 			else
3452*0Sstevel@tonic-gate 				error = EINVAL;
3453*0Sstevel@tonic-gate 			kmem_free(cbplist, ssize);
3454*0Sstevel@tonic-gate 			return (error);
3455*0Sstevel@tonic-gate 		}
3456*0Sstevel@tonic-gate 	} else if ((mode_arg == LIO_WAIT) || sigev_arg) {
3457*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
3458*0Sstevel@tonic-gate 		error = aio_lio_alloc(&head);
3459*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
3460*0Sstevel@tonic-gate 		if (error)
3461*0Sstevel@tonic-gate 			goto done;
3462*0Sstevel@tonic-gate 		deadhead = 1;
3463*0Sstevel@tonic-gate 		head->lio_nent = nent;
3464*0Sstevel@tonic-gate 		head->lio_refcnt = nent;
3465*0Sstevel@tonic-gate 		if (sigev_arg && (sigev.sigev_notify == SIGEV_SIGNAL) &&
3466*0Sstevel@tonic-gate 		    (sigev.sigev_signo > 0 && sigev.sigev_signo < NSIG)) {
3467*0Sstevel@tonic-gate 			sqp = kmem_zalloc(sizeof (sigqueue_t), KM_NOSLEEP);
3468*0Sstevel@tonic-gate 			if (sqp == NULL) {
3469*0Sstevel@tonic-gate 				error = EAGAIN;
3470*0Sstevel@tonic-gate 				goto done;
3471*0Sstevel@tonic-gate 			}
3472*0Sstevel@tonic-gate 			sqp->sq_func = NULL;
3473*0Sstevel@tonic-gate 			sqp->sq_next = NULL;
3474*0Sstevel@tonic-gate 			sqp->sq_info.si_code = SI_ASYNCIO;
3475*0Sstevel@tonic-gate 			sqp->sq_info.si_pid = curproc->p_pid;
3476*0Sstevel@tonic-gate 			sqp->sq_info.si_ctid = PRCTID(curproc);
3477*0Sstevel@tonic-gate 			sqp->sq_info.si_zoneid = getzoneid();
3478*0Sstevel@tonic-gate 			sqp->sq_info.si_uid = crgetuid(curproc->p_cred);
3479*0Sstevel@tonic-gate 			sqp->sq_info.si_signo = sigev.sigev_signo;
3480*0Sstevel@tonic-gate 			sqp->sq_info.si_value.sival_int =
3481*0Sstevel@tonic-gate 			    sigev.sigev_value.sival_int;
3482*0Sstevel@tonic-gate 			head->lio_sigqp = sqp;
3483*0Sstevel@tonic-gate 		} else {
3484*0Sstevel@tonic-gate 			head->lio_sigqp = NULL;
3485*0Sstevel@tonic-gate 		}
3486*0Sstevel@tonic-gate 	}
3487*0Sstevel@tonic-gate 
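	/*
	 * Same per-request loop as in alioLF() above, but for the regular
	 * (non-largefile) 32-bit aiocb format.
	 */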
3488*0Sstevel@tonic-gate 	for (i = 0; i < nent; i++, ucbp++) {
3489*0Sstevel@tonic-gate 
3490*0Sstevel@tonic-gate 		/* skip entry if it can't be copied. */
3491*0Sstevel@tonic-gate #ifdef	_LP64
3492*0Sstevel@tonic-gate 		cbp = (aiocb32_t *)(uintptr_t)*ucbp;
3493*0Sstevel@tonic-gate 		if (cbp == NULL || copyin(cbp, aiocb32, sizeof (aiocb32_t))) {
3494*0Sstevel@tonic-gate #else
3495*0Sstevel@tonic-gate 		cbp = (aiocb_t *)*ucbp;
3496*0Sstevel@tonic-gate 		if (cbp == NULL || copyin(cbp, aiocb, sizeof (aiocb_t))) {
3497*0Sstevel@tonic-gate #endif
3498*0Sstevel@tonic-gate 			if (head) {
3499*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
3500*0Sstevel@tonic-gate 				head->lio_nent--;
3501*0Sstevel@tonic-gate 				head->lio_refcnt--;
3502*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3503*0Sstevel@tonic-gate 			}
3504*0Sstevel@tonic-gate 			continue;
3505*0Sstevel@tonic-gate 		}
3506*0Sstevel@tonic-gate #ifdef	_LP64
3507*0Sstevel@tonic-gate 		/*
3508*0Sstevel@tonic-gate 		 * copy the 32-bit structure into the 64-bit structure
3509*0Sstevel@tonic-gate 		 */
3510*0Sstevel@tonic-gate 		aiocb_32ton(aiocb32, aiocb);
3511*0Sstevel@tonic-gate #endif /* _LP64 */
3512*0Sstevel@tonic-gate 
3513*0Sstevel@tonic-gate 		/* skip if opcode for aiocb is LIO_NOP */
3514*0Sstevel@tonic-gate 
3515*0Sstevel@tonic-gate 		mode = aiocb->aio_lio_opcode;
3516*0Sstevel@tonic-gate 		if (mode == LIO_NOP) {
3517*0Sstevel@tonic-gate 			cbp = NULL;
3518*0Sstevel@tonic-gate 			if (head) {
3519*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
3520*0Sstevel@tonic-gate 				head->lio_nent--;
3521*0Sstevel@tonic-gate 				head->lio_refcnt--;
3522*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3523*0Sstevel@tonic-gate 			}
3524*0Sstevel@tonic-gate 			continue;
3525*0Sstevel@tonic-gate 		}
3526*0Sstevel@tonic-gate 
3527*0Sstevel@tonic-gate 		/* increment file descriptor's ref count. */
3528*0Sstevel@tonic-gate 		if ((fp = getf(aiocb->aio_fildes)) == NULL) {
3529*0Sstevel@tonic-gate 			lio_set_uerror(&cbp->aio_resultp, EBADF);
3530*0Sstevel@tonic-gate 			if (head) {
3531*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
3532*0Sstevel@tonic-gate 				head->lio_nent--;
3533*0Sstevel@tonic-gate 				head->lio_refcnt--;
3534*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3535*0Sstevel@tonic-gate 			}
3536*0Sstevel@tonic-gate 			aio_errors++;
3537*0Sstevel@tonic-gate 			continue;
3538*0Sstevel@tonic-gate 		}
3539*0Sstevel@tonic-gate 
3540*0Sstevel@tonic-gate 		vp = fp->f_vnode;
3541*0Sstevel@tonic-gate 
3542*0Sstevel@tonic-gate 		/*
3543*0Sstevel@tonic-gate 		 * check that the file was opened with access permitting this operation
3544*0Sstevel@tonic-gate 		 */
3545*0Sstevel@tonic-gate 		mode = aiocb->aio_lio_opcode;
3546*0Sstevel@tonic-gate 		if ((fp->f_flag & mode) == 0) {
3547*0Sstevel@tonic-gate 			releasef(aiocb->aio_fildes);
3548*0Sstevel@tonic-gate 			lio_set_uerror(&cbp->aio_resultp, EBADF);
3549*0Sstevel@tonic-gate 			if (head) {
3550*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
3551*0Sstevel@tonic-gate 				head->lio_nent--;
3552*0Sstevel@tonic-gate 				head->lio_refcnt--;
3553*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3554*0Sstevel@tonic-gate 			}
3555*0Sstevel@tonic-gate 			aio_errors++;
3556*0Sstevel@tonic-gate 			continue;
3557*0Sstevel@tonic-gate 		}
3558*0Sstevel@tonic-gate 
3559*0Sstevel@tonic-gate 		/*
3560*0Sstevel@tonic-gate 		 * common case: requests go to the same fd for the same r/w
3561*0Sstevel@tonic-gate 		 * operation, so the previous check_vp() result is reused;
3562*0Sstevel@tonic-gate 		 * vnodes without async I/O support (e.g. UFS) get EBADFD
3563*0Sstevel@tonic-gate 		 */
3564*0Sstevel@tonic-gate 		if ((fp != prev_fp) || (mode != prev_mode)) {
3565*0Sstevel@tonic-gate 			aio_func = check_vp(vp, mode);
3566*0Sstevel@tonic-gate 			if (aio_func == NULL) {
3567*0Sstevel@tonic-gate 				prev_fp = NULL;
3568*0Sstevel@tonic-gate 				releasef(aiocb->aio_fildes);
3569*0Sstevel@tonic-gate 				lio_set_uerror(&cbp->aio_resultp,
3570*0Sstevel@tonic-gate 				    EBADFD);
3571*0Sstevel@tonic-gate 				aio_notsupported++;
3572*0Sstevel@tonic-gate 				if (head) {
3573*0Sstevel@tonic-gate 					mutex_enter(&aiop->aio_mutex);
3574*0Sstevel@tonic-gate 					head->lio_nent--;
3575*0Sstevel@tonic-gate 					head->lio_refcnt--;
3576*0Sstevel@tonic-gate 					mutex_exit(&aiop->aio_mutex);
3577*0Sstevel@tonic-gate 				}
3578*0Sstevel@tonic-gate 				continue;
3579*0Sstevel@tonic-gate 			} else {
3580*0Sstevel@tonic-gate 				prev_fp = fp;
3581*0Sstevel@tonic-gate 				prev_mode = mode;
3582*0Sstevel@tonic-gate 			}
3583*0Sstevel@tonic-gate 		}
3584*0Sstevel@tonic-gate 		if (error = aio_req_setup(&reqp, aiop, aiocb,
3585*0Sstevel@tonic-gate 		    (aio_result_t *)&cbp->aio_resultp, aio_use_port, vp)) {
3586*0Sstevel@tonic-gate 			releasef(aiocb->aio_fildes);
3587*0Sstevel@tonic-gate 			lio_set_uerror(&cbp->aio_resultp, error);
3588*0Sstevel@tonic-gate 			if (head) {
3589*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
3590*0Sstevel@tonic-gate 				head->lio_nent--;
3591*0Sstevel@tonic-gate 				head->lio_refcnt--;
3592*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3593*0Sstevel@tonic-gate 			}
3594*0Sstevel@tonic-gate 			aio_errors++;
3595*0Sstevel@tonic-gate 			continue;
3596*0Sstevel@tonic-gate 		}
3597*0Sstevel@tonic-gate 
3598*0Sstevel@tonic-gate 		reqp->aio_req_lio = head;
3599*0Sstevel@tonic-gate 		deadhead = 0;
3600*0Sstevel@tonic-gate 
3601*0Sstevel@tonic-gate 		/*
3602*0Sstevel@tonic-gate 		 * Set the errno field now before sending the request to
3603*0Sstevel@tonic-gate 		 * the driver to avoid a race condition
3604*0Sstevel@tonic-gate 		 */
3605*0Sstevel@tonic-gate 		(void) suword32(&cbp->aio_resultp.aio_errno,
3606*0Sstevel@tonic-gate 		    EINPROGRESS);
3607*0Sstevel@tonic-gate 
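		/* record the user-level aiocb address in the request */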
3608*0Sstevel@tonic-gate 		reqp->aio_req_iocb.iocb32 = ((caddr32_t *)cbplist)[i];
3609*0Sstevel@tonic-gate 
3610*0Sstevel@tonic-gate 		if (aio_use_port) {
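			/*
			 * Associate this request with the caller's event
			 * port; the completion is later retrieved by the
			 * application as a PORT_SOURCE_AIO event.
			 */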
3611*0Sstevel@tonic-gate 			reqp->aio_req_port = pnotify.portnfy_port;
3612*0Sstevel@tonic-gate #ifdef _LP64
3613*0Sstevel@tonic-gate 			error = aio_req_assoc_port32(&aiocb32->aio_sigevent,
3614*0Sstevel@tonic-gate 			    (void *)(uintptr_t)pnotify.portnfy_user,
3615*0Sstevel@tonic-gate 			    (aiocb_t *)(uintptr_t)(((caddr32_t *)cbplist)[i]),
3616*0Sstevel@tonic-gate 			    reqp, pkevtp);
3617*0Sstevel@tonic-gate #else
3618*0Sstevel@tonic-gate 			error = aio_req_assoc_port(&aiocb->aio_sigevent,
3619*0Sstevel@tonic-gate 			    pnotify.portnfy_user,
3620*0Sstevel@tonic-gate 			    (aiocb_t *)(((caddr32_t *)cbplist)[i]),
3621*0Sstevel@tonic-gate 			    reqp, pkevtp);
3622*0Sstevel@tonic-gate #endif
3623*0Sstevel@tonic-gate 		}
3624*0Sstevel@tonic-gate 
3625*0Sstevel@tonic-gate 		/*
3626*0Sstevel@tonic-gate 		 * send the request to the driver.
3627*0Sstevel@tonic-gate 		 * Clustering: If PXFS vnode, call PXFS function.
3628*0Sstevel@tonic-gate 		 */
3629*0Sstevel@tonic-gate 		if (error == 0) {
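			/* a zero-length request is completed here without calling the driver */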
3630*0Sstevel@tonic-gate 			if (aiocb->aio_nbytes == 0) {
3631*0Sstevel@tonic-gate 				clear_active_fd(aiocb->aio_fildes);
3632*0Sstevel@tonic-gate 				aio_zerolen(reqp);
3633*0Sstevel@tonic-gate 				continue;
3634*0Sstevel@tonic-gate 			}
3635*0Sstevel@tonic-gate 			error = (*aio_func)(vp, (aio_req_t *)&reqp->aio_req,
3636*0Sstevel@tonic-gate 			    CRED());
3637*0Sstevel@tonic-gate 		}
3638*0Sstevel@tonic-gate 
3639*0Sstevel@tonic-gate 		/*
3640*0Sstevel@tonic-gate 		 * the fd's ref count is not decremented until the I/O has
3641*0Sstevel@tonic-gate 		 * completed, unless there was an error.
3642*0Sstevel@tonic-gate 		 */
3643*0Sstevel@tonic-gate 		if (error) {
3644*0Sstevel@tonic-gate 			releasef(aiocb->aio_fildes);
3645*0Sstevel@tonic-gate 			lio_set_uerror(&cbp->aio_resultp, error);
3646*0Sstevel@tonic-gate 			if (head) {
3647*0Sstevel@tonic-gate 				mutex_enter(&aiop->aio_mutex);
3648*0Sstevel@tonic-gate 				head->lio_nent--;
3649*0Sstevel@tonic-gate 				head->lio_refcnt--;
3650*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3651*0Sstevel@tonic-gate 			}
3652*0Sstevel@tonic-gate 			if (error == ENOTSUP)
3653*0Sstevel@tonic-gate 				aio_notsupported++;
3654*0Sstevel@tonic-gate 			else
3655*0Sstevel@tonic-gate 				aio_errors++;
3656*0Sstevel@tonic-gate 			lio_set_error(reqp);
3657*0Sstevel@tonic-gate 		} else {
3658*0Sstevel@tonic-gate 			clear_active_fd(aiocb->aio_fildes);
3659*0Sstevel@tonic-gate 		}
3660*0Sstevel@tonic-gate 	}
3661*0Sstevel@tonic-gate 
3662*0Sstevel@tonic-gate 	if (pkevtp)
3663*0Sstevel@tonic-gate 		port_free_event(pkevtp);
3664*0Sstevel@tonic-gate 
3665*0Sstevel@tonic-gate 	if (aio_notsupported) {
3666*0Sstevel@tonic-gate 		error = ENOTSUP;
3667*0Sstevel@tonic-gate 	} else if (aio_errors) {
3668*0Sstevel@tonic-gate 		/*
3669*0Sstevel@tonic-gate 		 * return EIO if any request failed
3670*0Sstevel@tonic-gate 		 */
3671*0Sstevel@tonic-gate 		error = EIO;
3672*0Sstevel@tonic-gate 	}
3673*0Sstevel@tonic-gate 
3674*0Sstevel@tonic-gate 	if (mode_arg == LIO_WAIT) {
3675*0Sstevel@tonic-gate 		mutex_enter(&aiop->aio_mutex);
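		/*
		 * wait for every request in the list to complete; bail out
		 * with EINTR if interrupted by a signal.
		 */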
3676*0Sstevel@tonic-gate 		while (head->lio_refcnt > 0) {
3677*0Sstevel@tonic-gate 			if (!cv_wait_sig(&head->lio_notify, &aiop->aio_mutex)) {
3678*0Sstevel@tonic-gate 				mutex_exit(&aiop->aio_mutex);
3679*0Sstevel@tonic-gate 				error = EINTR;
3680*0Sstevel@tonic-gate 				goto done;
3681*0Sstevel@tonic-gate 			}
3682*0Sstevel@tonic-gate 		}
3683*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
3684*0Sstevel@tonic-gate 		alio_cleanup(aiop, (aiocb_t **)cbplist, nent, AIO_32);
3685*0Sstevel@tonic-gate 	}
3686*0Sstevel@tonic-gate 
3687*0Sstevel@tonic-gate done:
3688*0Sstevel@tonic-gate 	kmem_free(cbplist, ssize);
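	/* deadhead is still set if no request was ever attached to the lio head */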
3689*0Sstevel@tonic-gate 	if (deadhead) {
3690*0Sstevel@tonic-gate 		if (head->lio_sigqp)
3691*0Sstevel@tonic-gate 			kmem_free(head->lio_sigqp, sizeof (sigqueue_t));
3692*0Sstevel@tonic-gate 		kmem_free(head, sizeof (aio_lio_t));
3693*0Sstevel@tonic-gate 	}
3694*0Sstevel@tonic-gate 	return (error);
3695*0Sstevel@tonic-gate }
3696*0Sstevel@tonic-gate 
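/*
 * Illustrative sketch (not part of this file): roughly how an application
 * reaches the list-I/O path above via lio_listio(3C).  The descriptor,
 * buffer and signal number below are placeholders.
 *
 *	struct aiocb cb;
 *	struct aiocb *list[1];
 *	struct sigevent sigev;
 *	char buf[512];
 *
 *	(void) memset(&cb, 0, sizeof (cb));
 *	cb.aio_fildes = fd;			// e.g. a raw device
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof (buf);
 *	cb.aio_offset = 0;
 *	cb.aio_lio_opcode = LIO_READ;
 *	list[0] = &cb;
 *
 *	(void) memset(&sigev, 0, sizeof (sigev));
 *	sigev.sigev_notify = SIGEV_SIGNAL;	// list-completion signal
 *	sigev.sigev_signo = SIGUSR1;
 *
 *	if (lio_listio(LIO_NOWAIT, list, 1, &sigev) != 0)
 *		perror("lio_listio");
 */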
3697*0Sstevel@tonic-gate 
3698*0Sstevel@tonic-gate #ifdef  _SYSCALL32_IMPL
3699*0Sstevel@tonic-gate void
3700*0Sstevel@tonic-gate aiocb_32ton(aiocb32_t *src, aiocb_t *dest)
3701*0Sstevel@tonic-gate {
3702*0Sstevel@tonic-gate 	dest->aio_fildes = src->aio_fildes;
3703*0Sstevel@tonic-gate 	dest->aio_buf = (caddr_t)(uintptr_t)src->aio_buf;
3704*0Sstevel@tonic-gate 	dest->aio_nbytes = (size_t)src->aio_nbytes;
3705*0Sstevel@tonic-gate 	dest->aio_offset = (off_t)src->aio_offset;
3706*0Sstevel@tonic-gate 	dest->aio_reqprio = src->aio_reqprio;
3707*0Sstevel@tonic-gate 	dest->aio_sigevent.sigev_notify = src->aio_sigevent.sigev_notify;
3708*0Sstevel@tonic-gate 	dest->aio_sigevent.sigev_signo = src->aio_sigevent.sigev_signo;
3709*0Sstevel@tonic-gate 
3710*0Sstevel@tonic-gate 	/*
3711*0Sstevel@tonic-gate 	 * See comment in sigqueue32() on handling of 32-bit
3712*0Sstevel@tonic-gate 	 * sigvals in a 64-bit kernel.
3713*0Sstevel@tonic-gate 	 */
3714*0Sstevel@tonic-gate 	dest->aio_sigevent.sigev_value.sival_int =
3715*0Sstevel@tonic-gate 	    (int)src->aio_sigevent.sigev_value.sival_int;
3716*0Sstevel@tonic-gate 	dest->aio_sigevent.sigev_notify_function = (void (*)(union sigval))
3717*0Sstevel@tonic-gate 	    (uintptr_t)src->aio_sigevent.sigev_notify_function;
3718*0Sstevel@tonic-gate 	dest->aio_sigevent.sigev_notify_attributes = (pthread_attr_t *)
3719*0Sstevel@tonic-gate 	    (uintptr_t)src->aio_sigevent.sigev_notify_attributes;
3720*0Sstevel@tonic-gate 	dest->aio_sigevent.__sigev_pad2 = src->aio_sigevent.__sigev_pad2;
3721*0Sstevel@tonic-gate 	dest->aio_lio_opcode = src->aio_lio_opcode;
3722*0Sstevel@tonic-gate 	dest->aio_state = src->aio_state;
3723*0Sstevel@tonic-gate 	dest->aio__pad[0] = src->aio__pad[0];
3724*0Sstevel@tonic-gate }
3725*0Sstevel@tonic-gate #endif /* _SYSCALL32_IMPL */
3726*0Sstevel@tonic-gate 
3727*0Sstevel@tonic-gate /*
3728*0Sstevel@tonic-gate  * aio_port_callback() is called just before the event is retrieved from the
3729*0Sstevel@tonic-gate  * port. The task of this callback function is to finish the work of the
3730*0Sstevel@tonic-gate  * transaction on behalf of the application, which means:
3731*0Sstevel@tonic-gate  * - copy out the transaction data to the application
3732*0Sstevel@tonic-gate  *	(this thread is running in the right process context)
3733*0Sstevel@tonic-gate  * - keep track of the transaction (update counters).
3734*0Sstevel@tonic-gate  * - free allocated buffers
3735*0Sstevel@tonic-gate  * The aiocb pointer is the object element of the port_kevent_t structure.
3736*0Sstevel@tonic-gate  *
3737*0Sstevel@tonic-gate  * flag :
3738*0Sstevel@tonic-gate  *	PORT_CALLBACK_DEFAULT : do copyout and free resources
3739*0Sstevel@tonic-gate  *	PORT_CALLBACK_CLOSE   : don't do copyout, free resources
3740*0Sstevel@tonic-gate  */
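/*
 * Illustrative userland counterpart (a sketch, not part of this file): an
 * application that asked for event-port notification harvests the completion
 * with port_get(3C); the aiocb address travels in portev_object, as noted
 * above.  The port variable below is a placeholder.
 *
 *	port_event_t pev;
 *	aiocb_t *cbp;
 *
 *	if (port_get(port, &pev, NULL) == 0 &&
 *	    pev.portev_source == PORT_SOURCE_AIO) {
 *		cbp = (aiocb_t *)pev.portev_object;
 *		if (aio_error(cbp) == 0)
 *			(void) aio_return(cbp);
 *	}
 */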
3741*0Sstevel@tonic-gate 
3742*0Sstevel@tonic-gate /*ARGSUSED*/
3743*0Sstevel@tonic-gate int
3744*0Sstevel@tonic-gate aio_port_callback(void *arg, int *events, pid_t pid, int flag, void *evp)
3745*0Sstevel@tonic-gate {
3746*0Sstevel@tonic-gate 	aio_t		*aiop = curproc->p_aio;
3747*0Sstevel@tonic-gate 	aio_req_t	*reqp = arg;
3748*0Sstevel@tonic-gate 	struct	iovec	*iov;
3749*0Sstevel@tonic-gate 	struct	buf	*bp;
3750*0Sstevel@tonic-gate 	void		*resultp;
3751*0Sstevel@tonic-gate 
3752*0Sstevel@tonic-gate 	if (pid != curproc->p_pid) {
3753*0Sstevel@tonic-gate 		/* wrong process; cannot deliver the data here */
3754*0Sstevel@tonic-gate 		return (EACCES);
3755*0Sstevel@tonic-gate 	}
3756*0Sstevel@tonic-gate 
3757*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_portq_mutex);
3758*0Sstevel@tonic-gate 	reqp->aio_req_portkev = NULL;
3759*0Sstevel@tonic-gate 	aio_req_remove_portq(aiop, reqp); /* remove request from portq */
3760*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_portq_mutex);
3761*0Sstevel@tonic-gate 	aphysio_unlock(reqp);		/* unlock used pages */
3762*0Sstevel@tonic-gate 	mutex_enter(&aiop->aio_mutex);
3763*0Sstevel@tonic-gate 	if (reqp->aio_req_flags & AIO_COPYOUTDONE) {
3764*0Sstevel@tonic-gate 		aio_req_free_port(aiop, reqp);	/* back to free list */
3765*0Sstevel@tonic-gate 		mutex_exit(&aiop->aio_mutex);
3766*0Sstevel@tonic-gate 		return (0);
3767*0Sstevel@tonic-gate 	}
3768*0Sstevel@tonic-gate 
3769*0Sstevel@tonic-gate 	iov = reqp->aio_req_uio.uio_iov;
3770*0Sstevel@tonic-gate 	bp = &reqp->aio_req_buf;
3771*0Sstevel@tonic-gate 	resultp = (void *)reqp->aio_req_resultp;
3772*0Sstevel@tonic-gate 	aio_req_free_port(aiop, reqp);	/* request struct back to free list */
3773*0Sstevel@tonic-gate 	mutex_exit(&aiop->aio_mutex);
3774*0Sstevel@tonic-gate 	if (flag == PORT_CALLBACK_DEFAULT)
3775*0Sstevel@tonic-gate 		aio_copyout_result_port(iov, bp, resultp);
3776*0Sstevel@tonic-gate 	return (0);
3777*0Sstevel@tonic-gate }