/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <vm/as.h>
#include <vm/page.h>
#include <sys/uio.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/aio_impl.h>
#include <sys/epm.h>
#include <sys/fs/snode.h>
#include <sys/siginfo.h>
#include <sys/cpuvar.h>
#include <sys/tnf_probe.h>
#include <sys/conf.h>
#include <sys/sdt.h>

int aphysio(int (*)(), int (*)(), dev_t, int, void (*)(), struct aio_req *);
void aio_done(struct buf *);
void aphysio_unlock(aio_req_t *);
void aio_cleanup(int);
void aio_cleanup_exit(void);

/*
 * private functions
 */
static void aio_sigev_send(proc_t *, sigqueue_t *);
static void aio_hash_delete(aio_t *, aio_req_t *);
static void aio_lio_free(aio_t *, aio_lio_t *);
static int aio_cleanup_cleanupq(aio_t *, aio_req_t *, int);
static int aio_cleanup_notifyq(aio_t *, aio_req_t *, int);
static void aio_cleanup_pollq(aio_t *, aio_req_t *, int);
static void aio_cleanup_portq(aio_t *, aio_req_t *, int);

/*
 * async version of physio() that doesn't wait synchronously
 * for the driver's strategy routine to complete.
 */
int
aphysio(
        int (*strategy)(struct buf *),
        int (*cancel)(struct buf *),
        dev_t dev,
        int rw,
        void (*mincnt)(struct buf *),
        struct aio_req *aio)
{
        struct uio *uio = aio->aio_uio;
        aio_req_t *reqp = (aio_req_t *)aio->aio_private;
        struct buf *bp = &reqp->aio_req_buf;
        struct iovec *iov;
        struct as *as;
        char *a;
        int error;
        size_t c;
        struct page **pplist;
        struct dev_ops *ops = devopsp[getmajor(dev)];

        if (uio->uio_loffset < 0)
                return (EINVAL);
#ifdef  _ILP32
        /*
         * For 32-bit kernels, check against SPEC_MAXOFFSET_T which represents
         * the maximum size that can be supported by the IO subsystem.
         * XXX this code assumes a D_64BIT driver.
         */
        if (uio->uio_loffset > SPEC_MAXOFFSET_T)
                return (EINVAL);
#endif  /* _ILP32 */

        TNF_PROBE_5(aphysio_start, "kaio", /* CSTYLED */,
            tnf_opaque, bp, bp,
            tnf_device, device, dev,
            tnf_offset, blkno, btodt(uio->uio_loffset),
            tnf_size, size, uio->uio_iov->iov_len,
            tnf_bioflags, rw, rw);

        if (rw == B_READ) {
                CPU_STATS_ADD_K(sys, phread, 1);
        } else {
                CPU_STATS_ADD_K(sys, phwrite, 1);
        }

        iov = uio->uio_iov;
        sema_init(&bp->b_sem, 0, NULL, SEMA_DEFAULT, NULL);
        sema_init(&bp->b_io, 0, NULL, SEMA_DEFAULT, NULL);

        bp->b_error = 0;
        bp->b_flags = B_BUSY | B_PHYS | B_ASYNC | rw;
        bp->b_edev = dev;
        bp->b_dev = cmpdev(dev);
        bp->b_lblkno = btodt(uio->uio_loffset);
        bp->b_offset = uio->uio_loffset;
        (void) ops->devo_getinfo(NULL, DDI_INFO_DEVT2DEVINFO,
            (void *)bp->b_edev, (void **)&bp->b_dip);

        /*
         * Clustering: Clustering can set the b_iodone, b_forw and
         * b_proc fields to cluster-specific values.
         */
        if (bp->b_iodone == NULL) {
                bp->b_iodone = (int (*)()) aio_done;
                /* b_forw points at an aio_req_t structure */
                bp->b_forw = (struct buf *)reqp;
                bp->b_proc = curproc;
        }

        a = bp->b_un.b_addr = iov->iov_base;
        c = bp->b_bcount = iov->iov_len;

        (*mincnt)(bp);
        if (bp->b_bcount != iov->iov_len)
                return (ENOTSUP);

        as = bp->b_proc->p_as;

        error = as_pagelock(as, &pplist, a, c,
            rw == B_READ ? S_WRITE : S_READ);
        if (error != 0) {
                bp->b_flags |= B_ERROR;
                bp->b_error = error;
                bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_SHADOW);
                return (error);
        }
        reqp->aio_req_flags |= AIO_PAGELOCKDONE;
        bp->b_shadow = pplist;
        if (pplist != NULL) {
                bp->b_flags |= B_SHADOW;
        }

        if (cancel != anocancel)
                cmn_err(CE_PANIC,
                    "aphysio: cancellation not supported, use anocancel");

        reqp->aio_req_cancel = cancel;

        DTRACE_IO1(start, struct buf *, bp);

        return ((*strategy)(bp));
}

/*ARGSUSED*/
int
anocancel(struct buf *bp)
{
        return (ENXIO);
}
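/*
 * Illustrative sketch (not part of this file): a character driver
 * exposes kernel async I/O by calling aphysio() from its aread(9E)
 * and awrite(9E) entry points, passing anocancel as the cancel
 * routine since aphysio() panics on any other value.  The xx_*
 * names below are hypothetical driver routines.
 *
 *	static int
 *	xx_aread(dev_t dev, struct aio_req *aio, cred_t *credp)
 *	{
 *		return (aphysio(xx_strategy, anocancel, dev, B_READ,
 *		    minphys, aio));
 *	}
 */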
/*
 * Called from biodone().
 * Notify process that a pending AIO has finished.
 */

/*
 * Clustering: This function is made non-static as it is used
 * by clustering s/w as a contract private interface.
 */
void
aio_done(struct buf *bp)
{
        proc_t *p;
        struct as *as;
        aio_req_t *reqp;
        aio_lio_t *head = NULL;
        aio_t *aiop;
        sigqueue_t *sigev = NULL;
        sigqueue_t *lio_sigev = NULL;
        port_kevent_t *pkevp = NULL;
        port_kevent_t *lio_pkevp = NULL;
        int fd;
        int cleanupqflag;
        int pollqflag;
        int portevpend;
        void (*func)();
        int use_port = 0;
        int reqp_flags = 0;
        int send_signal = 0;

        p = bp->b_proc;
        as = p->p_as;
        reqp = (aio_req_t *)bp->b_forw;
        fd = reqp->aio_req_fd;

        TNF_PROBE_5(aphysio_end, "kaio", /* CSTYLED */,
            tnf_opaque, bp, bp,
            tnf_device, device, bp->b_edev,
            tnf_offset, blkno, btodt(reqp->aio_req_uio.uio_loffset),
            tnf_size, size, reqp->aio_req_uio.uio_iov->iov_len,
            tnf_bioflags, rw, (bp->b_flags & (B_READ|B_WRITE)));

        /*
         * mapout earlier so that more kmem is available when aio is
         * heavily used.  bug #1262082
         */
        if (bp->b_flags & B_REMAPPED)
                bp_mapout(bp);

        /* decrement fd's ref count by one, now that aio request is done. */
        areleasef(fd, P_FINFO(p));

        aiop = p->p_aio;
        ASSERT(aiop != NULL);

        mutex_enter(&aiop->aio_portq_mutex);
        mutex_enter(&aiop->aio_mutex);
        ASSERT(aiop->aio_pending > 0);
        ASSERT(reqp->aio_req_flags & AIO_PENDING);
        aiop->aio_pending--;
        reqp->aio_req_flags &= ~AIO_PENDING;
        reqp_flags = reqp->aio_req_flags;
        if ((pkevp = reqp->aio_req_portkev) != NULL) {
                /* Event port notification is desired for this transaction */
                if (reqp->aio_req_flags & AIO_CLOSE_PORT) {
                        /*
                         * The port is being closed and it is waiting for
                         * pending asynchronous I/O transactions to complete.
                         */
                        portevpend = --aiop->aio_portpendcnt;
                        aio_deq(&aiop->aio_portpending, reqp);
                        aio_enq(&aiop->aio_portq, reqp, 0);
                        mutex_exit(&aiop->aio_mutex);
                        mutex_exit(&aiop->aio_portq_mutex);
                        port_send_event(pkevp);
                        if (portevpend == 0)
                                cv_broadcast(&aiop->aio_portcv);
                        return;
                }

                if (aiop->aio_flags & AIO_CLEANUP) {
                        /*
                         * aio_cleanup_thread() is waiting for completion of
                         * transactions.
                         */
                        mutex_enter(&as->a_contents);
                        aio_deq(&aiop->aio_portpending, reqp);
                        aio_enq(&aiop->aio_portcleanupq, reqp, 0);
                        cv_signal(&aiop->aio_cleanupcv);
                        mutex_exit(&as->a_contents);
                        mutex_exit(&aiop->aio_mutex);
                        mutex_exit(&aiop->aio_portq_mutex);
                        return;
                }

                aio_deq(&aiop->aio_portpending, reqp);
                aio_enq(&aiop->aio_portq, reqp, 0);

                use_port = 1;
        } else {
                /*
                 * when the AIO_CLEANUP flag is enabled for this
                 * process, or when the AIO_POLL bit is set for
                 * this request, special handling is required.
                 * otherwise the request is put onto the doneq.
                 */
                cleanupqflag = (aiop->aio_flags & AIO_CLEANUP);
                pollqflag = (reqp->aio_req_flags & AIO_POLL);
                if (cleanupqflag | pollqflag) {

                        if (cleanupqflag)
                                mutex_enter(&as->a_contents);

                        /*
                         * requests with their AIO_POLL bit set are put
                         * on the pollq, requests with sigevent structures
                         * or with listio heads are put on the notifyq, and
                         * the remaining requests don't require any special
                         * cleanup handling, so they're put onto the default
                         * cleanupq.
                         */
                        if (pollqflag)
                                aio_enq(&aiop->aio_pollq, reqp, AIO_POLLQ);
                        else if (reqp->aio_req_sigqp || reqp->aio_req_lio)
                                aio_enq(&aiop->aio_notifyq, reqp, AIO_NOTIFYQ);
                        else
                                aio_enq(&aiop->aio_cleanupq, reqp,
                                    AIO_CLEANUPQ);

                        if (cleanupqflag) {
                                cv_signal(&aiop->aio_cleanupcv);
                                mutex_exit(&as->a_contents);
                                mutex_exit(&aiop->aio_mutex);
                                mutex_exit(&aiop->aio_portq_mutex);
                        } else {
                                ASSERT(pollqflag);
                                /* block aio_cleanup_exit until we're done */
                                aiop->aio_flags |= AIO_DONE_ACTIVE;
                                mutex_exit(&aiop->aio_mutex);
                                mutex_exit(&aiop->aio_portq_mutex);
                                /*
                                 * let the cleanup processing happen from an
                                 * AST: set an AST on all threads in this
                                 * process.
                                 */
                                mutex_enter(&p->p_lock);
                                set_proc_ast(p);
                                mutex_exit(&p->p_lock);
                                mutex_enter(&aiop->aio_mutex);
                                /* wakeup anybody waiting in aiowait() */
                                cv_broadcast(&aiop->aio_waitcv);

                                /* wakeup aio_cleanup_exit if needed */
                                if (aiop->aio_flags & AIO_CLEANUP)
                                        cv_signal(&aiop->aio_cleanupcv);
                                aiop->aio_flags &= ~AIO_DONE_ACTIVE;
                                mutex_exit(&aiop->aio_mutex);
                        }
                        return;
                }

                /*
                 * save req's sigevent pointer, and check its
                 * value after releasing aio_mutex lock.
                 */
                sigev = reqp->aio_req_sigqp;
                reqp->aio_req_sigqp = NULL;

                /* put request on done queue. */
                aio_enq(&aiop->aio_doneq, reqp, AIO_DONEQ);
        } /* portkevent */

        /*
         * when list IO notification is enabled, a notification or
         * signal is sent only when all entries in the list are done.
         */
        if ((head = reqp->aio_req_lio) != NULL) {
                ASSERT(head->lio_refcnt > 0);
                if (--head->lio_refcnt == 0) {
                        /*
                         * save lio's sigevent pointer, and check
                         * its value after releasing aio_mutex lock.
                         */
                        lio_sigev = head->lio_sigqp;
                        head->lio_sigqp = NULL;
                        cv_signal(&head->lio_notify);
                        if (head->lio_port >= 0 &&
                            (lio_pkevp = head->lio_portkev) != NULL)
                                head->lio_port = -1;
                }
        }

        /*
         * if AIO_WAITN is set, wake up waiters only when we have
         * reached the required number of finished I/Os or when all
         * I/Os are done.
         */
        if (aiop->aio_flags & AIO_WAITN) {
                if (aiop->aio_waitncnt > 0)
                        aiop->aio_waitncnt--;
                if (aiop->aio_pending == 0 ||
                    aiop->aio_waitncnt == 0)
                        cv_broadcast(&aiop->aio_waitcv);
        } else {
                cv_broadcast(&aiop->aio_waitcv);
        }

        /*
         * No need to set this flag for pollq, portq, lio requests.
         * Send a SIGIO signal when the process has a handler enabled.
         */
        if (!sigev && !use_port && head == NULL &&
            (func = PTOU(p)->u_signal[SIGIO - 1]) != SIG_DFL &&
            (func != SIG_IGN)) {
                send_signal = 1;
                reqp->aio_req_flags |= AIO_SIGNALLED;
        }

        mutex_exit(&aiop->aio_mutex);
        mutex_exit(&aiop->aio_portq_mutex);

        /*
         * Could the cleanup thread be waiting for AIO with locked
         * resources to finish?
         * Ideally in that case the cleanup thread should block on
         * cleanupcv, but there is a window where it could miss a new
         * aio request that sneaked in.
         */
        mutex_enter(&as->a_contents);
        if ((reqp_flags & AIO_PAGELOCKDONE) && AS_ISUNMAPWAIT(as))
                cv_broadcast(&as->a_cv);
        mutex_exit(&as->a_contents);

        if (sigev)
                aio_sigev_send(p, sigev);
        else if (send_signal)
                psignal(p, SIGIO);

        if (pkevp)
                port_send_event(pkevp);
        if (lio_sigev)
                aio_sigev_send(p, lio_sigev);
        if (lio_pkevp)
                port_send_event(lio_pkevp);
}
/*
 * send a queued signal to the specified process.  the caller must
 * have already detached the sigqueue_t from its aio request.
 */
static void
aio_sigev_send(proc_t *p, sigqueue_t *sigev)
{
        ASSERT(sigev != NULL);

        mutex_enter(&p->p_lock);
        sigaddqa(p, NULL, sigev);
        mutex_exit(&p->p_lock);
}

/*
 * special case handling for zero length requests.  the aio request
 * short circuits the normal completion path since all that's required
 * to complete this request is to copyout a zero to the aio request's
 * return value.
 */
void
aio_zerolen(aio_req_t *reqp)
{
        struct buf *bp = &reqp->aio_req_buf;

        reqp->aio_req_flags |= AIO_ZEROLEN;

        bp->b_forw = (struct buf *)reqp;
        bp->b_proc = curproc;

        bp->b_resid = 0;
        bp->b_flags = 0;

        aio_done(bp);
}

/*
 * unlock pages previously locked by as_pagelock
 */
void
aphysio_unlock(aio_req_t *reqp)
{
        struct buf *bp;
        struct iovec *iov;
        int flags;

        if (reqp->aio_req_flags & AIO_PHYSIODONE)
                return;

        reqp->aio_req_flags |= AIO_PHYSIODONE;

        if (reqp->aio_req_flags & AIO_ZEROLEN)
                return;

        bp = &reqp->aio_req_buf;
        iov = reqp->aio_req_uio.uio_iov;
        flags = (((bp->b_flags & B_READ) == B_READ) ? S_WRITE : S_READ);
        if (reqp->aio_req_flags & AIO_PAGELOCKDONE) {
                as_pageunlock(bp->b_proc->p_as,
                    bp->b_flags & B_SHADOW ? bp->b_shadow : NULL,
                    iov->iov_base, iov->iov_len, flags);
                reqp->aio_req_flags &= ~AIO_PAGELOCKDONE;
        }
        bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_SHADOW);
        bp->b_flags |= B_DONE;
}

/*
 * deletes a request's id from the hash table of outstanding io.
 */
static void
aio_hash_delete(aio_t *aiop, struct aio_req_t *reqp)
{
        long index;
        aio_result_t *resultp = reqp->aio_req_resultp;
        aio_req_t *current;
        aio_req_t **nextp;

        index = AIO_HASH(resultp);
        nextp = (aiop->aio_hash + index);
        while ((current = *nextp) != NULL) {
                if (current->aio_req_resultp == resultp) {
                        *nextp = current->aio_hash_next;
                        return;
                }
                nextp = &current->aio_hash_next;
        }
}

/*
 * Put a list head struct onto its free list.
 */
static void
aio_lio_free(aio_t *aiop, aio_lio_t *head)
{
        ASSERT(MUTEX_HELD(&aiop->aio_mutex));

        if (head->lio_sigqp != NULL)
                kmem_free(head->lio_sigqp, sizeof (sigqueue_t));
        head->lio_next = aiop->aio_lio_free;
        aiop->aio_lio_free = head;
}

/*
 * Put a reqp onto the freelist.
 */
void
aio_req_free(aio_t *aiop, aio_req_t *reqp)
{
        aio_lio_t *liop;

        ASSERT(MUTEX_HELD(&aiop->aio_mutex));

        if (reqp->aio_req_portkev) {
                port_free_event(reqp->aio_req_portkev);
                reqp->aio_req_portkev = NULL;
        }

        if ((liop = reqp->aio_req_lio) != NULL) {
                if (--liop->lio_nent == 0)
                        aio_lio_free(aiop, liop);
                reqp->aio_req_lio = NULL;
        }
        if (reqp->aio_req_sigqp != NULL) {
                kmem_free(reqp->aio_req_sigqp, sizeof (sigqueue_t));
                reqp->aio_req_sigqp = NULL;
        }
        reqp->aio_req_next = aiop->aio_free;
        reqp->aio_req_prev = NULL;
        aiop->aio_free = reqp;
        aiop->aio_outstanding--;
        if (aiop->aio_outstanding == 0)
                cv_broadcast(&aiop->aio_waitcv);
        aio_hash_delete(aiop, reqp);
}
/*
 * Put a reqp onto the freelist.  This variant does not dispose of
 * the request's sigevent, list-I/O, or port state; callers on the
 * event port path have already taken care of that.
 */
void
aio_req_free_port(aio_t *aiop, aio_req_t *reqp)
{
        ASSERT(MUTEX_HELD(&aiop->aio_mutex));

        reqp->aio_req_next = aiop->aio_free;
        reqp->aio_req_prev = NULL;
        aiop->aio_free = reqp;
        aiop->aio_outstanding--;
        aio_hash_delete(aiop, reqp);
}


/*
 * Verify the integrity of a queue.
 */
#if defined(DEBUG)
static void
aio_verify_queue(aio_req_t *head,
    aio_req_t *entry_present, aio_req_t *entry_missing)
{
        aio_req_t *reqp;
        int found = 0;
        int present = 0;

        if ((reqp = head) != NULL) {
                do {
                        ASSERT(reqp->aio_req_prev->aio_req_next == reqp);
                        ASSERT(reqp->aio_req_next->aio_req_prev == reqp);
                        if (entry_present == reqp)
                                found++;
                        if (entry_missing == reqp)
                                present++;
                } while ((reqp = reqp->aio_req_next) != head);
        }
        ASSERT(entry_present == NULL || found == 1);
        ASSERT(entry_missing == NULL || present == 0);
}
#else
#define aio_verify_queue(x, y, z)
#endif

/*
 * Put a request onto the tail of a queue.
 */
void
aio_enq(aio_req_t **qhead, aio_req_t *reqp, int qflg_new)
{
        aio_req_t *head;
        aio_req_t *prev;

        aio_verify_queue(*qhead, NULL, reqp);

        if ((head = *qhead) == NULL) {
                reqp->aio_req_next = reqp;
                reqp->aio_req_prev = reqp;
                *qhead = reqp;
        } else {
                reqp->aio_req_next = head;
                reqp->aio_req_prev = prev = head->aio_req_prev;
                prev->aio_req_next = reqp;
                head->aio_req_prev = reqp;
        }
        reqp->aio_req_flags |= qflg_new;
}

/*
 * Remove a request from its queue.
 */
void
aio_deq(aio_req_t **qhead, aio_req_t *reqp)
{
        aio_verify_queue(*qhead, reqp, NULL);

        if (reqp->aio_req_next == reqp) {
                *qhead = NULL;
        } else {
                reqp->aio_req_prev->aio_req_next = reqp->aio_req_next;
                reqp->aio_req_next->aio_req_prev = reqp->aio_req_prev;
                if (*qhead == reqp)
                        *qhead = reqp->aio_req_next;
        }
        reqp->aio_req_next = NULL;
        reqp->aio_req_prev = NULL;
}
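/*
 * Representation note (descriptive only): every aio request queue in
 * this file is a circular doubly-linked list threaded through
 * aio_req_next/aio_req_prev, with *qhead pointing at the oldest
 * element and a NULL head meaning empty.  A one-element queue points
 * at itself, which is what the reqp->aio_req_next == reqp test in
 * aio_deq() detects.
 */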
/*
 * concatenate a specified queue with the cleanupq.  the specified
 * queue is put onto the tail of the cleanupq.  all elements on the
 * specified queue should have their aio_req_flags field cleared.
 */
/*ARGSUSED*/
void
aio_cleanupq_concat(aio_t *aiop, aio_req_t *q2, int qflg)
{
        aio_req_t *cleanupqhead, *q2tail;
        aio_req_t *reqp = q2;

        do {
                ASSERT(reqp->aio_req_flags & qflg);
                reqp->aio_req_flags &= ~qflg;
                reqp->aio_req_flags |= AIO_CLEANUPQ;
        } while ((reqp = reqp->aio_req_next) != q2);

        cleanupqhead = aiop->aio_cleanupq;
        if (cleanupqhead == NULL)
                aiop->aio_cleanupq = q2;
        else {
                cleanupqhead->aio_req_prev->aio_req_next = q2;
                q2tail = q2->aio_req_prev;
                q2tail->aio_req_next = cleanupqhead;
                q2->aio_req_prev = cleanupqhead->aio_req_prev;
                cleanupqhead->aio_req_prev = q2tail;
        }
}

/*
 * cleanup aio requests that are on the per-process poll queue.
 */
void
aio_cleanup(int flag)
{
        aio_t *aiop = curproc->p_aio;
        aio_req_t *pollqhead, *cleanupqhead, *notifyqhead;
        aio_req_t *cleanupport;
        aio_req_t *portq = NULL;
        void (*func)();
        int signalled = 0;
        int qflag = 0;
        int exitflg;

        ASSERT(aiop != NULL);

        if (flag == AIO_CLEANUP_EXIT)
                exitflg = AIO_CLEANUP_EXIT;
        else
                exitflg = 0;

        /*
         * We need to get the aio_cleanupq_mutex because we are calling
         * aio_cleanup_cleanupq()
         */
        mutex_enter(&aiop->aio_cleanupq_mutex);
        /*
         * take all the requests off the cleanupq, the notifyq,
         * and the pollq.
         */
        mutex_enter(&aiop->aio_mutex);
        if ((cleanupqhead = aiop->aio_cleanupq) != NULL) {
                aiop->aio_cleanupq = NULL;
                qflag++;
        }
        if ((notifyqhead = aiop->aio_notifyq) != NULL) {
                aiop->aio_notifyq = NULL;
                qflag++;
        }
        if ((pollqhead = aiop->aio_pollq) != NULL) {
                aiop->aio_pollq = NULL;
                qflag++;
        }
        if (flag) {
                if ((portq = aiop->aio_portq) != NULL)
                        qflag++;

                if ((cleanupport = aiop->aio_portcleanupq) != NULL) {
                        aiop->aio_portcleanupq = NULL;
                        qflag++;
                }
        }
        mutex_exit(&aiop->aio_mutex);

        /*
         * return immediately if cleanupq, pollq, and
         * notifyq are all empty.  someone else must have
         * emptied them.
         */
        if (!qflag) {
                mutex_exit(&aiop->aio_cleanupq_mutex);
                return;
        }

        /*
         * do cleanup for the various queues.
         */
        if (cleanupqhead)
                signalled = aio_cleanup_cleanupq(aiop, cleanupqhead, exitflg);
        mutex_exit(&aiop->aio_cleanupq_mutex);
        if (notifyqhead)
                signalled = aio_cleanup_notifyq(aiop, notifyqhead, exitflg);
        if (pollqhead)
                aio_cleanup_pollq(aiop, pollqhead, exitflg);
        if (flag && (cleanupport || portq))
                aio_cleanup_portq(aiop, cleanupport, exitflg);

        if (exitflg)
                return;

        /*
         * If we have an active aio_cleanup_thread it's possible for
         * this routine to push something on to the done queue after
         * an aiowait/aiosuspend thread has already decided to block.
         * This being the case, we need a cv_broadcast here to wake
         * these threads up.  It is simpler and cleaner to do this
         * broadcast here than in the individual cleanup routines.
         */
        mutex_enter(&aiop->aio_mutex);
        cv_broadcast(&aiop->aio_waitcv);
        mutex_exit(&aiop->aio_mutex);

        /*
         * Only if the process wasn't already signalled,
         * determine if a SIGIO signal should be delivered.
         */
        if (!signalled &&
            (func = PTOU(curproc)->u_signal[SIGIO - 1]) != SIG_DFL &&
            func != SIG_IGN)
                psignal(curproc, SIGIO);
}
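/*
 * Lock ordering as practiced in this file (a descriptive note, not a
 * formally documented hierarchy): aio_cleanupq_mutex is acquired
 * before aio_mutex (aio_cleanup(), above), aio_portq_mutex is
 * acquired before aio_mutex (aio_done(), aio_close_port()), and
 * as->a_contents is acquired after aio_mutex where both are held
 * (aio_done()).
 */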
/*
 * Do cleanup for every element of the port cleanup queue.
 */
static void
aio_cleanup_portq(aio_t *aiop, aio_req_t *cleanupq, int exitflag)
{
        aio_req_t *reqp;
        aio_req_t *next;
        aio_req_t *headp;
        aio_lio_t *liop;

        /* first check the portq */
        if (exitflag || ((aiop->aio_flags & AIO_CLEANUP_PORT) == 0)) {
                mutex_enter(&aiop->aio_mutex);
                if (aiop->aio_flags & AIO_CLEANUP)
                        aiop->aio_flags |= AIO_CLEANUP_PORT;
                mutex_exit(&aiop->aio_mutex);

                /*
                 * It is not allowed to hold locks during aphysio_unlock().
                 * The aio_done() interrupt function will try to acquire
                 * aio_mutex and aio_portq_mutex.  Therefore we disconnect
                 * the portq list from the aiop for the duration of the
                 * aphysio_unlock() loop below.
                 */
                mutex_enter(&aiop->aio_portq_mutex);
                headp = aiop->aio_portq;
                aiop->aio_portq = NULL;
                mutex_exit(&aiop->aio_portq_mutex);
                if ((reqp = headp) != NULL) {
                        do {
                                next = reqp->aio_req_next;
                                aphysio_unlock(reqp);
                                if (exitflag) {
                                        mutex_enter(&aiop->aio_mutex);
                                        aio_req_free(aiop, reqp);
                                        mutex_exit(&aiop->aio_mutex);
                                }
                        } while ((reqp = next) != headp);
                }

                if (headp != NULL && exitflag == 0) {
                        /* move unlocked requests back to the port queue */
                        aio_req_t *newq;

                        mutex_enter(&aiop->aio_portq_mutex);
                        if ((newq = aiop->aio_portq) != NULL) {
                                aio_req_t *headprev = headp->aio_req_prev;
                                aio_req_t *newqprev = newq->aio_req_prev;

                                headp->aio_req_prev = newqprev;
                                newq->aio_req_prev = headprev;
                                headprev->aio_req_next = newq;
                                newqprev->aio_req_next = headp;
                        }
                        aiop->aio_portq = headp;
                        cv_broadcast(&aiop->aio_portcv);
                        mutex_exit(&aiop->aio_portq_mutex);
                }
        }

        /* now check the port cleanup queue */
        if ((reqp = cleanupq) == NULL)
                return;
        do {
                next = reqp->aio_req_next;
                aphysio_unlock(reqp);
                if (exitflag) {
                        mutex_enter(&aiop->aio_mutex);
                        aio_req_free(aiop, reqp);
                        mutex_exit(&aiop->aio_mutex);
                } else {
                        mutex_enter(&aiop->aio_portq_mutex);
                        aio_enq(&aiop->aio_portq, reqp, 0);
                        mutex_exit(&aiop->aio_portq_mutex);
                        port_send_event(reqp->aio_req_portkev);
                        if ((liop = reqp->aio_req_lio) != NULL) {
                                int send_event = 0;

                                mutex_enter(&aiop->aio_mutex);
                                ASSERT(liop->lio_refcnt > 0);
                                if (--liop->lio_refcnt == 0) {
                                        if (liop->lio_port >= 0 &&
                                            liop->lio_portkev) {
                                                liop->lio_port = -1;
                                                send_event = 1;
                                        }
                                }
                                mutex_exit(&aiop->aio_mutex);
                                if (send_event)
                                        port_send_event(liop->lio_portkev);
                        }
                }
        } while ((reqp = next) != cleanupq);
}

/*
 * Do cleanup for every element of the cleanupq.
 */
static int
aio_cleanup_cleanupq(aio_t *aiop, aio_req_t *qhead, int exitflg)
{
        aio_req_t *reqp, *next;
        int signalled = 0;

        ASSERT(MUTEX_HELD(&aiop->aio_cleanupq_mutex));

        /*
         * Since aio_req_done() or aio_req_find() use the HASH list to find
         * the required requests, they could potentially take away elements
         * if they are already done (AIO_DONEQ is set).
         * The aio_cleanupq_mutex protects the queue for the duration of the
         * loop from aio_req_done() and aio_req_find().
         */
        if ((reqp = qhead) == NULL)
                return (0);
        do {
                ASSERT(reqp->aio_req_flags & AIO_CLEANUPQ);
                ASSERT(reqp->aio_req_portkev == NULL);
                next = reqp->aio_req_next;
                aphysio_unlock(reqp);
                mutex_enter(&aiop->aio_mutex);
                if (exitflg)
                        aio_req_free(aiop, reqp);
                else
                        aio_enq(&aiop->aio_doneq, reqp, AIO_DONEQ);
                if (!exitflg) {
                        if (reqp->aio_req_flags & AIO_SIGNALLED)
                                signalled++;
                        else
                                reqp->aio_req_flags |= AIO_SIGNALLED;
                }
                mutex_exit(&aiop->aio_mutex);
        } while ((reqp = next) != qhead);
        return (signalled);
}

/*
 * do cleanup for every element of the notify queue.
 */
static int
aio_cleanup_notifyq(aio_t *aiop, aio_req_t *qhead, int exitflg)
{
        aio_req_t *reqp, *next;
        aio_lio_t *liohead;
        sigqueue_t *sigev, *lio_sigev = NULL;
        int signalled = 0;

        if ((reqp = qhead) == NULL)
                return (0);
        do {
                ASSERT(reqp->aio_req_flags & AIO_NOTIFYQ);
                next = reqp->aio_req_next;
                aphysio_unlock(reqp);
                if (exitflg) {
                        mutex_enter(&aiop->aio_mutex);
                        aio_req_free(aiop, reqp);
                        mutex_exit(&aiop->aio_mutex);
                } else {
                        mutex_enter(&aiop->aio_mutex);
                        aio_enq(&aiop->aio_doneq, reqp, AIO_DONEQ);
                        sigev = reqp->aio_req_sigqp;
                        reqp->aio_req_sigqp = NULL;
                        if ((liohead = reqp->aio_req_lio) != NULL) {
                                ASSERT(liohead->lio_refcnt > 0);
                                if (--liohead->lio_refcnt == 0) {
                                        cv_signal(&liohead->lio_notify);
                                        lio_sigev = liohead->lio_sigqp;
                                        liohead->lio_sigqp = NULL;
                                }
                        }
                        mutex_exit(&aiop->aio_mutex);
                        if (sigev) {
                                signalled++;
                                aio_sigev_send(reqp->aio_req_buf.b_proc,
                                    sigev);
                        }
                        if (lio_sigev) {
                                signalled++;
                                aio_sigev_send(reqp->aio_req_buf.b_proc,
                                    lio_sigev);
                                /*
                                 * don't reuse a sigevent that has already
                                 * been queued on a later iteration.
                                 */
                                lio_sigev = NULL;
                        }
                }
        } while ((reqp = next) != qhead);

        return (signalled);
}

/*
 * Do cleanup for every element of the poll queue.
 */
static void
aio_cleanup_pollq(aio_t *aiop, aio_req_t *qhead, int exitflg)
{
        aio_req_t *reqp, *next;

        /*
         * As no other threads should be accessing the queue at this point,
         * it isn't necessary to hold aio_mutex while we traverse its
         * elements.
         */
        if ((reqp = qhead) == NULL)
                return;
        do {
                ASSERT(reqp->aio_req_flags & AIO_POLLQ);
                next = reqp->aio_req_next;
                aphysio_unlock(reqp);
                if (exitflg) {
                        mutex_enter(&aiop->aio_mutex);
                        aio_req_free(aiop, reqp);
                        mutex_exit(&aiop->aio_mutex);
                } else {
                        aio_copyout_result(reqp);
                        mutex_enter(&aiop->aio_mutex);
                        aio_enq(&aiop->aio_doneq, reqp, AIO_DONEQ);
                        mutex_exit(&aiop->aio_mutex);
                }
        } while ((reqp = next) != qhead);
}
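/*
 * Common shape of the four cleanup helpers above (descriptive only):
 * each walks a detached circular request list, aphysio_unlock()s
 * every element, and then either frees the request outright (the
 * process-exit case) or requeues it: the cleanupq/notifyq/pollq
 * variants move it to the doneq, while aio_cleanup_portq() returns
 * it to the port queue.  The helpers that return int report a
 * signalled count that aio_cleanup() uses to decide whether a SIGIO
 * is still needed.
 */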
/*
 * called by exit().  waits for all outstanding kaio to finish
 * before the kaio resources are freed.
 */
void
aio_cleanup_exit(void)
{
        proc_t *p = curproc;
        aio_t *aiop = p->p_aio;
        aio_req_t *reqp, *next, *head;
        aio_lio_t *nxtlio, *liop;

        /*
         * wait for all outstanding kaio to complete.  process
         * is now single-threaded; no other kaio requests can
         * happen once aio_pending is zero.
         */
        mutex_enter(&aiop->aio_mutex);
        aiop->aio_flags |= AIO_CLEANUP;
        while ((aiop->aio_pending != 0) || (aiop->aio_flags & AIO_DONE_ACTIVE))
                cv_wait(&aiop->aio_cleanupcv, &aiop->aio_mutex);
        mutex_exit(&aiop->aio_mutex);

        /* cleanup the cleanup-thread queues. */
        aio_cleanup(AIO_CLEANUP_EXIT);

        /*
         * Although this process is now single-threaded, we
         * still need to protect ourselves against a race with
         * aio_cleanup_dr_delete_memory().
         */
        mutex_enter(&p->p_lock);

        /*
         * free up the done queue's resources.
         */
        if ((head = aiop->aio_doneq) != NULL) {
                aiop->aio_doneq = NULL;
                reqp = head;
                do {
                        next = reqp->aio_req_next;
                        aphysio_unlock(reqp);
                        kmem_free(reqp, sizeof (struct aio_req_t));
                } while ((reqp = next) != head);
        }
        /*
         * release aio request freelist.
         */
        for (reqp = aiop->aio_free; reqp != NULL; reqp = next) {
                next = reqp->aio_req_next;
                kmem_free(reqp, sizeof (struct aio_req_t));
        }

        /*
         * release io list head freelist.
         */
        for (liop = aiop->aio_lio_free; liop != NULL; liop = nxtlio) {
                nxtlio = liop->lio_next;
                kmem_free(liop, sizeof (aio_lio_t));
        }

        if (aiop->aio_iocb)
                kmem_free(aiop->aio_iocb, aiop->aio_iocbsz);

        mutex_destroy(&aiop->aio_mutex);
        mutex_destroy(&aiop->aio_portq_mutex);
        mutex_destroy(&aiop->aio_cleanupq_mutex);
        p->p_aio = NULL;
        mutex_exit(&p->p_lock);
        kmem_free(aiop, sizeof (struct aio));
}

/*
 * copy out aio request's result to a user-level result_t buffer.
 */
void
aio_copyout_result(aio_req_t *reqp)
{
        struct buf *bp;
        struct iovec *iov;
        void *resultp;
        int error;
        size_t retval;

        if (reqp->aio_req_flags & AIO_COPYOUTDONE)
                return;

        reqp->aio_req_flags |= AIO_COPYOUTDONE;

        iov = reqp->aio_req_uio.uio_iov;
        bp = &reqp->aio_req_buf;
        /* "resultp" points to user-level result_t buffer */
        resultp = (void *)reqp->aio_req_resultp;
        if (bp->b_flags & B_ERROR) {
                if (bp->b_error)
                        error = bp->b_error;
                else
                        error = EIO;
                retval = (size_t)-1;
        } else {
                error = 0;
                retval = iov->iov_len - bp->b_resid;
        }
#ifdef  _SYSCALL32_IMPL
        if (get_udatamodel() == DATAMODEL_NATIVE) {
                (void) sulword(&((aio_result_t *)resultp)->aio_return, retval);
                (void) suword32(&((aio_result_t *)resultp)->aio_errno, error);
        } else {
                (void) suword32(&((aio_result32_t *)resultp)->aio_return,
                    (int)retval);
                (void) suword32(&((aio_result32_t *)resultp)->aio_errno,
                    error);
        }
#else
        (void) suword32(&((aio_result_t *)resultp)->aio_return, retval);
        (void) suword32(&((aio_result_t *)resultp)->aio_errno, error);
#endif
}


void
aio_copyout_result_port(struct iovec *iov, struct buf *bp, void *resultp)
{
        int errno;
        size_t retval;

        if (bp->b_flags & B_ERROR) {
                if (bp->b_error)
                        errno = bp->b_error;
                else
                        errno = EIO;
                retval = (size_t)-1;
        } else {
                errno = 0;
                retval = iov->iov_len - bp->b_resid;
        }
#ifdef  _SYSCALL32_IMPL
        if (get_udatamodel() == DATAMODEL_NATIVE) {
                (void) sulword(&((aio_result_t *)resultp)->aio_return, retval);
                (void) suword32(&((aio_result_t *)resultp)->aio_errno, errno);
        } else {
                (void) suword32(&((aio_result32_t *)resultp)->aio_return,
                    (int)retval);
                (void) suword32(&((aio_result32_t *)resultp)->aio_errno,
                    errno);
        }
#else
        (void) suword32(&((aio_result_t *)resultp)->aio_return, retval);
        (void) suword32(&((aio_result_t *)resultp)->aio_errno, errno);
#endif
}
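/*
 * Note on the two copyout routines above: results are written with
 * suword32()/sulword() and the return values are deliberately
 * discarded (hence the (void) casts); a bad user address at this
 * point is simply ignored.  The _SYSCALL32_IMPL branches choose the
 * aio_result_t layout that matches the process data model.
 */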
/*
 * This function is used to remove a request from the done queue.
 */
void
aio_req_remove_portq(aio_t *aiop, aio_req_t *reqp)
{
        ASSERT(MUTEX_HELD(&aiop->aio_portq_mutex));
        while (aiop->aio_portq == NULL) {
                /*
                 * aio_portq is set to NULL when aio_cleanup_portq()
                 * is working with the event queue.
                 * The aio_cleanup_thread() uses aio_cleanup_portq()
                 * to unlock all AIO buffers with completed transactions.
                 * Wait here until aio_cleanup_portq() restores the
                 * list of completed transactions in aio_portq.
                 */
                cv_wait(&aiop->aio_portcv, &aiop->aio_portq_mutex);
        }
        aio_deq(&aiop->aio_portq, reqp);
}

/* ARGSUSED */
void
aio_close_port(void *arg, int port, pid_t pid, int lastclose)
{
        aio_t *aiop;
        aio_req_t *reqp;
        aio_req_t *next;
        aio_req_t *headp;
        int counter;

        if (arg == NULL)
                aiop = curproc->p_aio;
        else
                aiop = (aio_t *)arg;

        /*
         * The PORT_SOURCE_AIO source is always associated with every
         * newly created port by default.
         * If no asynchronous I/O transactions were associated with the
         * port then the aiop pointer will still be set to NULL.
         */
        if (aiop == NULL)
                return;

        /*
         * Within a process event ports can be used to collect events other
         * than PORT_SOURCE_AIO events.  At the same time the process can
         * submit asynchronous I/O transactions which are not associated
         * with the current port.
         * The current process oriented model of AIO uses a single queue
         * for pending events.  On close the pending queue (queue of
         * asynchronous I/O transactions using event port notification)
         * must be scanned to detect and handle pending I/Os using the
         * current port.
         */
        mutex_enter(&aiop->aio_portq_mutex);
        mutex_enter(&aiop->aio_mutex);
        counter = 0;
        if ((headp = aiop->aio_portpending) != NULL) {
                reqp = headp;
                do {
                        if (reqp->aio_req_portkev &&
                            reqp->aio_req_port == port) {
                                reqp->aio_req_flags |= AIO_CLOSE_PORT;
                                counter++;
                        }
                } while ((reqp = reqp->aio_req_next) != headp);
        }
        if (counter == 0) {
                /* no AIOs pending */
                mutex_exit(&aiop->aio_mutex);
                mutex_exit(&aiop->aio_portq_mutex);
                return;
        }
        aiop->aio_portpendcnt += counter;
        mutex_exit(&aiop->aio_mutex);
        while (aiop->aio_portpendcnt)
                cv_wait(&aiop->aio_portcv, &aiop->aio_portq_mutex);

        /*
         * all pending AIOs are completed.
         * check port doneq
         */
        headp = NULL;
        if ((reqp = aiop->aio_portq) != NULL) {
                do {
                        next = reqp->aio_req_next;
                        if (reqp->aio_req_port == port) {
                                /* dequeue request and discard event */
                                aio_req_remove_portq(aiop, reqp);
                                port_free_event(reqp->aio_req_portkev);
                                /* put request in temporary queue */
                                reqp->aio_req_next = headp;
                                headp = reqp;
                        }
                } while ((reqp = next) != aiop->aio_portq);
        }
        mutex_exit(&aiop->aio_portq_mutex);

        /* headp points to the list of requests to be discarded */
        for (reqp = headp; reqp != NULL; reqp = next) {
                next = reqp->aio_req_next;
                aphysio_unlock(reqp);
                mutex_enter(&aiop->aio_mutex);
                aio_req_free_port(aiop, reqp);
                mutex_exit(&aiop->aio_mutex);
        }

        if (aiop->aio_flags & AIO_CLEANUP)
                cv_broadcast(&aiop->aio_waitcv);
}
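/*
 * The close sequence above is two-phase (descriptive only): first,
 * every pending request bound to this port is tagged AIO_CLOSE_PORT
 * and the closing thread waits on aio_portcv until aio_done() has
 * drained them all onto aio_portq; then the portq is swept, and each
 * request belonging to the closing port is unlinked via
 * aio_req_remove_portq(), its event freed, and the request recycled
 * through aio_req_free_port().
 */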
/*
 * aio_cleanup_dr_delete_memory is used by dr's delete_memory_thread
 * to kick start the aio_cleanup_thread for the given process to do
 * the necessary cleanup.
 * This is needed so that delete_memory_thread can obtain writer locks
 * on pages that need to be relocated during a dr memory delete
 * operation, otherwise a deadly embrace may occur.
 */
int
aio_cleanup_dr_delete_memory(proc_t *procp)
{
        struct aio *aiop = procp->p_aio;
        struct as *as = procp->p_as;
        int ret = 0;

        ASSERT(MUTEX_HELD(&procp->p_lock));

        mutex_enter(&as->a_contents);

        if (aiop != NULL) {
                aiop->aio_rqclnup = 1;
                cv_broadcast(&as->a_cv);
                ret = 1;
        }
        mutex_exit(&as->a_contents);
        return (ret);
}