10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 53760Ssp92102 * Common Development and Distribution License (the "License"). 63760Ssp92102 * You may not use this file except in compliance with the License. 70Sstevel@tonic-gate * 80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 100Sstevel@tonic-gate * See the License for the specific language governing permissions 110Sstevel@tonic-gate * and limitations under the License. 120Sstevel@tonic-gate * 130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 180Sstevel@tonic-gate * 190Sstevel@tonic-gate * CDDL HEADER END 200Sstevel@tonic-gate */ 210Sstevel@tonic-gate /* 22*5331Samw * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 230Sstevel@tonic-gate * Use is subject to license terms. 
240Sstevel@tonic-gate */ 250Sstevel@tonic-gate 260Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 270Sstevel@tonic-gate 280Sstevel@tonic-gate #include <sys/types.h> 290Sstevel@tonic-gate #include <sys/devops.h> 300Sstevel@tonic-gate #include <sys/conf.h> 310Sstevel@tonic-gate #include <sys/modctl.h> 320Sstevel@tonic-gate #include <sys/sunddi.h> 330Sstevel@tonic-gate #include <sys/stat.h> 340Sstevel@tonic-gate #include <sys/poll_impl.h> 350Sstevel@tonic-gate #include <sys/errno.h> 360Sstevel@tonic-gate #include <sys/kmem.h> 370Sstevel@tonic-gate #include <sys/mkdev.h> 380Sstevel@tonic-gate #include <sys/debug.h> 390Sstevel@tonic-gate #include <sys/file.h> 400Sstevel@tonic-gate #include <sys/sysmacros.h> 410Sstevel@tonic-gate #include <sys/systm.h> 420Sstevel@tonic-gate #include <sys/bitmap.h> 430Sstevel@tonic-gate #include <sys/devpoll.h> 440Sstevel@tonic-gate #include <sys/rctl.h> 450Sstevel@tonic-gate #include <sys/resource.h> 460Sstevel@tonic-gate 470Sstevel@tonic-gate #define RESERVED 1 480Sstevel@tonic-gate 490Sstevel@tonic-gate /* local data struct */ 500Sstevel@tonic-gate static dp_entry_t **devpolltbl; /* dev poll entries */ 510Sstevel@tonic-gate static size_t dptblsize; 520Sstevel@tonic-gate 530Sstevel@tonic-gate static kmutex_t devpoll_lock; /* lock protecting dev tbl */ 540Sstevel@tonic-gate int devpoll_init; /* is /dev/poll initialized already */ 550Sstevel@tonic-gate 560Sstevel@tonic-gate /* device local functions */ 570Sstevel@tonic-gate 580Sstevel@tonic-gate static int dpopen(dev_t *devp, int flag, int otyp, cred_t *credp); 590Sstevel@tonic-gate static int dpwrite(dev_t dev, struct uio *uiop, cred_t *credp); 600Sstevel@tonic-gate static int dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 610Sstevel@tonic-gate int *rvalp); 620Sstevel@tonic-gate static int dppoll(dev_t dev, short events, int anyyet, short *reventsp, 630Sstevel@tonic-gate struct pollhead **phpp); 640Sstevel@tonic-gate static int dpclose(dev_t dev, int flag, int 
otyp, cred_t *credp); 650Sstevel@tonic-gate static dev_info_t *dpdevi; 660Sstevel@tonic-gate 670Sstevel@tonic-gate 680Sstevel@tonic-gate static struct cb_ops dp_cb_ops = { 690Sstevel@tonic-gate dpopen, /* open */ 700Sstevel@tonic-gate dpclose, /* close */ 710Sstevel@tonic-gate nodev, /* strategy */ 720Sstevel@tonic-gate nodev, /* print */ 730Sstevel@tonic-gate nodev, /* dump */ 740Sstevel@tonic-gate nodev, /* read */ 750Sstevel@tonic-gate dpwrite, /* write */ 760Sstevel@tonic-gate dpioctl, /* ioctl */ 770Sstevel@tonic-gate nodev, /* devmap */ 780Sstevel@tonic-gate nodev, /* mmap */ 790Sstevel@tonic-gate nodev, /* segmap */ 800Sstevel@tonic-gate dppoll, /* poll */ 81258Scth ddi_prop_op, /* prop_op */ 820Sstevel@tonic-gate (struct streamtab *)0, /* streamtab */ 83258Scth D_MP, /* flags */ 84258Scth CB_REV, /* cb_ops revision */ 85258Scth nodev, /* aread */ 86258Scth nodev /* awrite */ 870Sstevel@tonic-gate }; 880Sstevel@tonic-gate 890Sstevel@tonic-gate static int dpattach(dev_info_t *, ddi_attach_cmd_t); 900Sstevel@tonic-gate static int dpdetach(dev_info_t *, ddi_detach_cmd_t); 910Sstevel@tonic-gate static int dpinfo(dev_info_t *, ddi_info_cmd_t, void *, void **); 920Sstevel@tonic-gate 930Sstevel@tonic-gate static struct dev_ops dp_ops = { 940Sstevel@tonic-gate DEVO_REV, /* devo_rev */ 950Sstevel@tonic-gate 0, /* refcnt */ 960Sstevel@tonic-gate dpinfo, /* info */ 970Sstevel@tonic-gate nulldev, /* identify */ 980Sstevel@tonic-gate nulldev, /* probe */ 990Sstevel@tonic-gate dpattach, /* attach */ 1000Sstevel@tonic-gate dpdetach, /* detach */ 1010Sstevel@tonic-gate nodev, /* reset */ 1020Sstevel@tonic-gate &dp_cb_ops, /* driver operations */ 1030Sstevel@tonic-gate (struct bus_ops *)NULL, /* bus operations */ 1040Sstevel@tonic-gate nulldev /* power */ 1050Sstevel@tonic-gate }; 1060Sstevel@tonic-gate 1070Sstevel@tonic-gate 1080Sstevel@tonic-gate static struct modldrv modldrv = { 1090Sstevel@tonic-gate &mod_driverops, /* type of module - a driver */ 1100Sstevel@tonic-gate 
"Dev Poll driver %I%", 1110Sstevel@tonic-gate &dp_ops, 1120Sstevel@tonic-gate }; 1130Sstevel@tonic-gate 1140Sstevel@tonic-gate static struct modlinkage modlinkage = { 1150Sstevel@tonic-gate MODREV_1, 1160Sstevel@tonic-gate (void *)&modldrv, 1170Sstevel@tonic-gate NULL 1180Sstevel@tonic-gate }; 1190Sstevel@tonic-gate 1200Sstevel@tonic-gate /* 1210Sstevel@tonic-gate * Locking Design 1220Sstevel@tonic-gate * 1230Sstevel@tonic-gate * The /dev/poll driver shares most of its code with poll sys call whose 1240Sstevel@tonic-gate * code is in common/syscall/poll.c. In poll(2) design, the pollcache 1250Sstevel@tonic-gate * structure is per lwp. An implicit assumption is made there that some 1260Sstevel@tonic-gate * portion of pollcache will never be touched by other lwps. E.g., in 1270Sstevel@tonic-gate * poll(2) design, no lwp will ever need to grow bitmap of other lwp. 1280Sstevel@tonic-gate * This assumption is not true for /dev/poll; hence the need for extra 1290Sstevel@tonic-gate * locking. 1300Sstevel@tonic-gate * 131*5331Samw * To allow more parallelism, each /dev/poll file descriptor (indexed by 1320Sstevel@tonic-gate * minor number) has its own lock. Since read (dpioctl) is a much more 1330Sstevel@tonic-gate * frequent operation than write, we want to allow multiple reads on same 1340Sstevel@tonic-gate * /dev/poll fd. However, we prevent writes from being starved by giving 1350Sstevel@tonic-gate * priority to write operation. Theoretically writes can starve reads as 136*5331Samw * well. But in practical sense this is not important because (1) writes 1370Sstevel@tonic-gate * happens less often than reads, and (2) write operation defines the 1380Sstevel@tonic-gate * content of poll fd a cache set. If writes happens so often that they 1390Sstevel@tonic-gate * can starve reads, that means the cached set is very unstable. It may 1400Sstevel@tonic-gate * not make sense to read an unstable cache set anyway. 
Therefore, the 1410Sstevel@tonic-gate * writers starving readers case is not handled in this design. 1420Sstevel@tonic-gate */ 1430Sstevel@tonic-gate 1440Sstevel@tonic-gate int 1450Sstevel@tonic-gate _init() 1460Sstevel@tonic-gate { 1470Sstevel@tonic-gate int error; 1480Sstevel@tonic-gate 1490Sstevel@tonic-gate dptblsize = DEVPOLLSIZE; 1500Sstevel@tonic-gate devpolltbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP); 1510Sstevel@tonic-gate mutex_init(&devpoll_lock, NULL, MUTEX_DEFAULT, NULL); 1520Sstevel@tonic-gate devpoll_init = 1; 1530Sstevel@tonic-gate if ((error = mod_install(&modlinkage)) != 0) { 1540Sstevel@tonic-gate mutex_destroy(&devpoll_lock); 1550Sstevel@tonic-gate kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize); 1560Sstevel@tonic-gate devpoll_init = 0; 1570Sstevel@tonic-gate } 1580Sstevel@tonic-gate return (error); 1590Sstevel@tonic-gate } 1600Sstevel@tonic-gate 1610Sstevel@tonic-gate int 1620Sstevel@tonic-gate _fini() 1630Sstevel@tonic-gate { 1640Sstevel@tonic-gate int error; 1650Sstevel@tonic-gate 1660Sstevel@tonic-gate if ((error = mod_remove(&modlinkage)) != 0) { 1670Sstevel@tonic-gate return (error); 1680Sstevel@tonic-gate } 1690Sstevel@tonic-gate mutex_destroy(&devpoll_lock); 1700Sstevel@tonic-gate kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize); 1710Sstevel@tonic-gate return (0); 1720Sstevel@tonic-gate } 1730Sstevel@tonic-gate 1740Sstevel@tonic-gate int 1750Sstevel@tonic-gate _info(struct modinfo *modinfop) 1760Sstevel@tonic-gate { 1770Sstevel@tonic-gate return (mod_info(&modlinkage, modinfop)); 1780Sstevel@tonic-gate } 1790Sstevel@tonic-gate 1800Sstevel@tonic-gate /*ARGSUSED*/ 1810Sstevel@tonic-gate static int 1820Sstevel@tonic-gate dpattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 1830Sstevel@tonic-gate { 1840Sstevel@tonic-gate if (ddi_create_minor_node(devi, "poll", S_IFCHR, 0, DDI_PSEUDO, NULL) 1850Sstevel@tonic-gate == DDI_FAILURE) { 1860Sstevel@tonic-gate ddi_remove_minor_node(devi, NULL); 1870Sstevel@tonic-gate return 
(DDI_FAILURE); 1880Sstevel@tonic-gate } 1890Sstevel@tonic-gate dpdevi = devi; 1900Sstevel@tonic-gate return (DDI_SUCCESS); 1910Sstevel@tonic-gate } 1920Sstevel@tonic-gate 1930Sstevel@tonic-gate static int 1940Sstevel@tonic-gate dpdetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 1950Sstevel@tonic-gate { 1960Sstevel@tonic-gate if (cmd != DDI_DETACH) 1970Sstevel@tonic-gate return (DDI_FAILURE); 1980Sstevel@tonic-gate 1990Sstevel@tonic-gate ddi_remove_minor_node(devi, NULL); 2000Sstevel@tonic-gate return (DDI_SUCCESS); 2010Sstevel@tonic-gate } 2020Sstevel@tonic-gate 2030Sstevel@tonic-gate /* ARGSUSED */ 2040Sstevel@tonic-gate static int 2050Sstevel@tonic-gate dpinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2060Sstevel@tonic-gate { 2070Sstevel@tonic-gate int error; 2080Sstevel@tonic-gate 2090Sstevel@tonic-gate switch (infocmd) { 2100Sstevel@tonic-gate case DDI_INFO_DEVT2DEVINFO: 2110Sstevel@tonic-gate *result = (void *)dpdevi; 2120Sstevel@tonic-gate error = DDI_SUCCESS; 2130Sstevel@tonic-gate break; 2140Sstevel@tonic-gate case DDI_INFO_DEVT2INSTANCE: 2150Sstevel@tonic-gate *result = (void *)0; 2160Sstevel@tonic-gate error = DDI_SUCCESS; 2170Sstevel@tonic-gate break; 2180Sstevel@tonic-gate default: 2190Sstevel@tonic-gate error = DDI_FAILURE; 2200Sstevel@tonic-gate } 2210Sstevel@tonic-gate return (error); 2220Sstevel@tonic-gate } 2230Sstevel@tonic-gate 2240Sstevel@tonic-gate /* 2250Sstevel@tonic-gate * dp_pcache_poll has similar logic to pcache_poll() in poll.c. The major 2260Sstevel@tonic-gate * differences are: (1) /dev/poll requires scanning the bitmap starting at 2270Sstevel@tonic-gate * where it was stopped last time, instead of always starting from 0, 2280Sstevel@tonic-gate * (2) since user may not have cleaned up the cached fds when they are 2290Sstevel@tonic-gate * closed, some polldats in cache may refer to closed or reused fds. We 2300Sstevel@tonic-gate * need to check for those cases. 
 *
 * NOTE: Upon closing an fd, automatic poll cache cleanup is done for
 *	 poll(2) caches but NOT for /dev/poll caches. So expect some
 *	 stale entries!
 */
static int
dp_pcache_poll(pollfd_t *pfdp, pollcache_t *pcp, nfds_t nfds, int *fdcntp)
{
	int		start, ostart, end;	/* bitmap scan window */
	int		fdcnt, fd;
	boolean_t	done;
	file_t		*fp;
	short		revent;
	boolean_t	no_wrap;	/* B_TRUE once no wrap-around is left */
	pollhead_t	*php;
	polldat_t	*pdp;
	int		error = 0;

	/* Caller must already hold the pollcache lock. */
	ASSERT(MUTEX_HELD(&pcp->pc_lock));
	if (pcp->pc_bitmap == NULL) {
		/*
		 * No need to search because no poll fd
		 * has been cached.
		 */
		return (error);
	}
retry:
	start = ostart = pcp->pc_mapstart;
	end = pcp->pc_mapend;
	php = NULL;

	if (start == 0) {
		/*
		 * Started from the very beginning; no need to wrap around.
		 */
		no_wrap = B_TRUE;
	} else {
		no_wrap = B_FALSE;
	}
	done = B_FALSE;
	fdcnt = 0;
	/* Collect up to nfds ready entries into pfdp[]. */
	while ((fdcnt < nfds) && !done) {
		php = NULL;
		revent = 0;
		/*
		 * Examine the bit map in a circular fashion
		 * to avoid starvation. Always resume from
		 * last stop. Scan till end of the map. Then
		 * wrap around.
		 */
		fd = bt_getlowbit(pcp->pc_bitmap, start, end);
		ASSERT(fd <= end);
		if (fd >= 0) {
			/* Advance the window; switch to the wrapped half. */
			if (fd == end) {
				if (no_wrap) {
					done = B_TRUE;
				} else {
					start = 0;
					end = ostart - 1;
					no_wrap = B_TRUE;
				}
			} else {
				start = fd + 1;
			}
			pdp = pcache_lookup_fd(pcp, fd);
			ASSERT(pdp != NULL);
			ASSERT(pdp->pd_fd == fd);
			if (pdp->pd_fp == NULL) {
				/*
				 * The fd is POLLREMOVed. This fd is
				 * logically no longer cached. So move
				 * on to the next one.
				 */
				continue;
			}
			if ((fp = getf(fd)) == NULL) {
				/*
				 * The fd has been closed, but user has not
				 * done a POLLREMOVE on this fd yet. Instead
				 * of cleaning it here implicitly, we return
				 * POLLNVAL. This is consistent with poll(2)
				 * polling a closed fd. Hope this will remind
				 * user to do a POLLREMOVE.
				 */
				pfdp[fdcnt].fd = fd;
				pfdp[fdcnt].revents = POLLNVAL;
				fdcnt++;
				continue;
			}
			if (fp != pdp->pd_fp) {
				/*
				 * user is polling on a cached fd which was
				 * closed and then reused. Unfortunately
				 * there is no good way to inform user.
				 * If the file struct is also reused, we
				 * may not be able to detect the fd reuse
				 * at all. As long as this does not
				 * cause system failure and/or memory leak,
				 * we will play along. Man page states if
				 * user does not clean up closed fds, polling
				 * results will be indeterministic.
				 *
				 * XXX - perhaps log the detection of fd
				 * reuse?
				 */
				pdp->pd_fp = fp;
			}
			/*
			 * XXX - pollrelock() logic needs to know
			 * which pollcache lock to grab. It'd be a
			 * cleaner solution if we could pass pcp as
			 * an argument in VOP_POLL interface instead
			 * of implicitly passing it using thread_t
			 * struct. On the other hand, changing VOP_POLL
			 * interface will require all driver/file system
			 * poll routine to change. May want to revisit
			 * the tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pdp->pd_events, 0,
			    &revent, &php, NULL);
			curthread->t_pollcache = NULL;
			releasef(fd);
			if (error != 0) {
				break;
			}
			/*
			 * layered devices (e.g. console driver)
			 * may change the vnode and thus the pollhead
			 * pointer out from underneath us.
			 */
			if (php != NULL && pdp->pd_php != NULL &&
			    php != pdp->pd_php) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = php;
				pollhead_insert(php, pdp);
				/*
				 * The bit should still be set.
				 */
				ASSERT(BT_TEST(pcp->pc_bitmap, fd));
				/* Restart the whole scan on a new pollhead. */
				goto retry;
			}

			if (revent != 0) {
				/* Ready: report this fd to the caller. */
				pfdp[fdcnt].fd = fd;
				pfdp[fdcnt].events = pdp->pd_events;
				pfdp[fdcnt].revents = revent;
				fdcnt++;
			} else if (php != NULL) {
				/*
				 * We clear a bit or cache a poll fd if
				 * the driver returns a poll head ptr,
				 * which is expected in the case of 0
				 * revents. Some buggy driver may return
				 * NULL php pointer with 0 revents. In
				 * this case, we just treat the driver as
				 * "noncachable" and not clearing the bit
				 * in bitmap.
				 */
				if ((pdp->pd_php != NULL) &&
				    ((pcp->pc_flag & T_POLLWAKE) == 0)) {
					BT_CLEAR(pcp->pc_bitmap, fd);
				}
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
				}
			}
		} else {
			/*
			 * No bit set in the range. Check for wrap around.
			 */
			if (!no_wrap) {
				start = 0;
				end = ostart - 1;
				no_wrap = B_TRUE;
			} else {
				done = B_TRUE;
			}
		}
	}

	/* Remember where to resume next time, for fairness. */
	if (!done) {
		pcp->pc_mapstart = start;
	}
	ASSERT(*fdcntp == 0);
	*fdcntp = fdcnt;
	return (error);
}

/*
 * open(9E) entry point: behaves as a self-cloning driver. Each open
 * claims a free minor number (growing the table if needed) and binds a
 * fresh dp_entry_t/pollcache to it.
 */
/*ARGSUSED*/
static int
dpopen(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	minor_t		minordev;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;

	ASSERT(devpoll_init);
	ASSERT(dptblsize <= MAXMIN);
	/* Find a free slot; RESERVED marks it claimed while unlocked below. */
	mutex_enter(&devpoll_lock);
	for (minordev = 0; minordev < dptblsize; minordev++) {
		if (devpolltbl[minordev] == NULL) {
			devpolltbl[minordev] = (dp_entry_t *)RESERVED;
			break;
		}
	}
	if (minordev == dptblsize) {
		dp_entry_t	**newtbl;
		size_t		oldsize;

		/*
		 * Used up every entry in the existing devpoll table.
		 * Grow the table by DEVPOLLSIZE.
		 */
		if ((oldsize = dptblsize) >= MAXMIN) {
			mutex_exit(&devpoll_lock);
			return (ENXIO);
		}
		dptblsize += DEVPOLLSIZE;
		if (dptblsize > MAXMIN) {
			dptblsize = MAXMIN;
		}
		newtbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
		bcopy(devpolltbl, newtbl, sizeof (caddr_t) * oldsize);
		kmem_free(devpolltbl, sizeof (caddr_t) * oldsize);
		devpolltbl = newtbl;
		devpolltbl[minordev] = (dp_entry_t *)RESERVED;
	}
	mutex_exit(&devpoll_lock);

	dpep = kmem_zalloc(sizeof (dp_entry_t), KM_SLEEP);
	/*
	 * allocate a pollcache skeleton here. Delay allocating bitmap
	 * structures until dpwrite() time, since we don't know the
	 * optimal size yet.
	 */
	pcp = pcache_alloc();
	dpep->dpe_pcache = pcp;
	/* The cache is owned by (and restricted to) the opening process. */
	pcp->pc_pid = curproc->p_pid;
	*devp = makedevice(getmajor(*devp), minordev); /* clone the driver */
	mutex_enter(&devpoll_lock);
	ASSERT(minordev < dptblsize);
	ASSERT(devpolltbl[minordev] == (dp_entry_t *)RESERVED);
	devpolltbl[minordev] = dpep;
	mutex_exit(&devpoll_lock);
	return (0);
}

/*
 * Write to dev/poll add/remove fd's to/from a cached poll fd set,
 * or change poll events for a watched fd.
 */
/*ARGSUSED*/
static int
dpwrite(dev_t dev, struct uio *uiop, cred_t *credp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	pollfd_t	*pollfdp, *pfdp;
	int		error;
	ssize_t		uiosize;
	nfds_t		pollfdnum;
	struct pollhead	*php = NULL;
	polldat_t	*pdp;
	int		fd;
	file_t		*fp;

	minor = getminor(dev);

	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	mutex_exit(&devpoll_lock);
	pcp = dpep->dpe_pcache;
	/* Only the process that opened this /dev/poll fd may write it. */
	if (curproc->p_pid != pcp->pc_pid) {
		return (EACCES);
	}
	/*
	 * NOTE(review): uiosize is not validated to be a non-zero multiple
	 * of sizeof (pollfd_t); a short trailing fragment is silently
	 * ignored by the division below — confirm this is intended.
	 */
	uiosize = uiop->uio_resid;
	pollfdnum = uiosize / sizeof (pollfd_t);
	mutex_enter(&curproc->p_lock);
	/* Cap the request by the process's RLIMIT_NOFILE resource control. */
	if (pollfdnum > (uint_t)rctl_enforced_value(
	    rctlproc_legacy[RLIMIT_NOFILE], curproc->p_rctls, curproc)) {
		(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
		    curproc->p_rctls, curproc, RCA_SAFE);
		mutex_exit(&curproc->p_lock);
		/*
		 * NOTE(review): other error paths here return the errno
		 * value directly; the set_errno() calls in this function
		 * look inconsistent with that — verify against the
		 * syscall/cb_ops error convention.
		 */
		return (set_errno(EINVAL));
	}
	mutex_exit(&curproc->p_lock);
	/*
	 * Copy in the pollfd array. Walk through the array and add
	 * each polled fd to the cached set.
	 */
	pollfdp = kmem_alloc(uiosize, KM_SLEEP);

	/*
	 * Although /dev/poll uses the write(2) interface to cache fds, it's
	 * not supposed to function as a seekable device. To prevent offset
	 * from growing and eventually exceed the maximum, reset the offset
	 * here for every call.
	 */
	uiop->uio_loffset = 0;
	if ((error = uiomove((caddr_t)pollfdp, uiosize, UIO_WRITE, uiop))
	    != 0) {
		kmem_free(pollfdp, uiosize);
		return (error);
	}
	/*
	 * We are about to enter the core portion of dpwrite(). Make sure this
	 * write has exclusive access in this portion of the code, i.e., no
	 * other writers in this code and no other readers in dpioctl.
	 */
	mutex_enter(&dpep->dpe_lock);
	dpep->dpe_writerwait++;
	/* Wait for all readers (and any other writer) to drain. */
	while (dpep->dpe_refcnt != 0) {
		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
			/* Interrupted by a signal: back out and bail. */
			dpep->dpe_writerwait--;
			mutex_exit(&dpep->dpe_lock);
			kmem_free(pollfdp, uiosize);
			return (set_errno(EINTR));
		}
	}
	dpep->dpe_writerwait--;
	dpep->dpe_flag |= DP_WRITER_PRESENT;
	dpep->dpe_refcnt++;
	mutex_exit(&dpep->dpe_lock);

	mutex_enter(&pcp->pc_lock);
	if (pcp->pc_bitmap == NULL) {
		/* First write: size the cache for this batch of fds. */
		pcache_create(pcp, pollfdnum);
	}
	for (pfdp = pollfdp; pfdp < pollfdp + pollfdnum; pfdp++) {
		fd = pfdp->fd;
		/* Ignore fds beyond the process's file table. */
		if ((uint_t)fd >= P_FINFO(curproc)->fi_nfiles)
			continue;
		pdp = pcache_lookup_fd(pcp, fd);
		if (pfdp->events != POLLREMOVE) {
			/* Add a new entry, or update an existing one. */
			if (pdp == NULL) {
				pdp = pcache_alloc_fd(0);
				pdp->pd_fd = fd;
				pdp->pd_pcache = pcp;
				pcache_insert_fd(pcp, pdp, pollfdnum);
			}
			ASSERT(pdp->pd_fd == fd);
			ASSERT(pdp->pd_pcache == pcp);
			if (fd >= pcp->pc_mapsize) {
				/* pcache_grow_map needs pc_lock dropped. */
				mutex_exit(&pcp->pc_lock);
				pcache_grow_map(pcp, fd);
				mutex_enter(&pcp->pc_lock);
			}
			if (fd > pcp->pc_mapend) {
				pcp->pc_mapend = fd;
			}
			if ((fp = getf(fd)) == NULL) {
				/*
				 * The fd is not valid. Since we can't pass
				 * this error back in the write() call, set
				 * the bit in bitmap to force DP_POLL ioctl
				 * to examine it.
				 */
				BT_SET(pcp->pc_bitmap, fd);
				pdp->pd_events |= pfdp->events;
				continue;
			}
			/*
			 * Don't do VOP_POLL for an already cached fd with
			 * same poll events.
			 */
			if ((pdp->pd_events == pfdp->events) &&
			    (pdp->pd_fp != NULL)) {
				/*
				 * the events are already cached
				 */
				releasef(fd);
				continue;
			}

			/*
			 * do VOP_POLL and cache this poll fd.
			 */
			/*
			 * XXX - pollrelock() logic needs to know
			 * which pollcache lock to grab. It'd be a
			 * cleaner solution if we could pass pcp as
			 * an argument in VOP_POLL interface instead
			 * of implicitly passing it using thread_t
			 * struct. On the other hand, changing VOP_POLL
			 * interface will require all driver/file system
			 * poll routine to change. May want to revisit
			 * the tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pfdp->events, 0,
			    &pfdp->revents, &php, NULL);
			curthread->t_pollcache = NULL;
			/*
			 * We always set the bit when this fd is cached.
			 * So we don't have to worry about missing a
			 * pollwakeup between VOP_POLL and pollhead_insert.
			 * This forces the first DP_POLL to poll this fd.
			 * Real performance gain comes from subsequent
			 * DP_POLL.
			 */
			BT_SET(pcp->pc_bitmap, fd);
			if (error != 0) {
				releasef(fd);
				break;
			}
			pdp->pd_fp = fp;
			pdp->pd_events |= pfdp->events;
			if (php != NULL) {
				/* Track the (possibly changed) pollhead. */
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
				} else {
					if (pdp->pd_php != php) {
						pollhead_delete(pdp->pd_php,
						    pdp);
						pollhead_insert(php, pdp);
						pdp->pd_php = php;
					}
				}

			}
			releasef(fd);
		} else {
			/* POLLREMOVE: detach this fd from the cached set. */
			if (pdp == NULL) {
				continue;
			}
			ASSERT(pdp->pd_fd == fd);
			pdp->pd_fp = NULL;
			pdp->pd_events = 0;
			ASSERT(pdp->pd_thread == NULL);
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
			}
			BT_CLEAR(pcp->pc_bitmap, fd);
		}
	}
	mutex_exit(&pcp->pc_lock);
	/* Drop writer status and wake any waiting readers/writers. */
	mutex_enter(&dpep->dpe_lock);
	dpep->dpe_flag &= ~DP_WRITER_PRESENT;
	ASSERT(dpep->dpe_refcnt == 1);
	dpep->dpe_refcnt--;
	cv_broadcast(&dpep->dpe_cv);
	mutex_exit(&dpep->dpe_lock);
	kmem_free(pollfdp, uiosize);
	return (error);
}

/*
 * ioctl(9E) entry point: DP_POLL (and friends) run here as "readers"
 * of the cached set, under the reader/writer protocol described in
 * the Locking Design comment above.
 */
/*ARGSUSED*/
static int
dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	timestruc_t	now;
	timestruc_t	rqtime;
	timestruc_t	*rqtp = NULL;
	int		timecheck = 0;
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	int		error = 0;
	STRUCT_DECL(dvpoll, dvpoll);

	if (cmd == DP_POLL) {
		/* do this now, before we sleep on DP_WRITER_PRESENT below */
		timecheck = timechanged;
		gethrestime(&now);
	}
	minor = getminor(dev);
	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	mutex_exit(&devpoll_lock);
	ASSERT(dpep != NULL);
	pcp = dpep->dpe_pcache;
	/* Same ownership check as dpwrite(). */
	if (curproc->p_pid != pcp->pc_pid)
		return (EACCES);

	mutex_enter(&dpep->dpe_lock);
7140Sstevel@tonic-gate while ((dpep->dpe_flag & DP_WRITER_PRESENT) || 7150Sstevel@tonic-gate (dpep->dpe_writerwait != 0)) { 7160Sstevel@tonic-gate if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) { 7170Sstevel@tonic-gate mutex_exit(&dpep->dpe_lock); 7180Sstevel@tonic-gate return (EINTR); 7190Sstevel@tonic-gate } 7200Sstevel@tonic-gate } 7210Sstevel@tonic-gate dpep->dpe_refcnt++; 7220Sstevel@tonic-gate mutex_exit(&dpep->dpe_lock); 7230Sstevel@tonic-gate 7240Sstevel@tonic-gate switch (cmd) { 7250Sstevel@tonic-gate case DP_POLL: 7260Sstevel@tonic-gate { 7270Sstevel@tonic-gate pollstate_t *ps; 7280Sstevel@tonic-gate nfds_t nfds; 7290Sstevel@tonic-gate int fdcnt = 0; 7300Sstevel@tonic-gate int time_out; 7310Sstevel@tonic-gate int rval; 7320Sstevel@tonic-gate 7330Sstevel@tonic-gate STRUCT_INIT(dvpoll, mode); 7340Sstevel@tonic-gate error = copyin((caddr_t)arg, STRUCT_BUF(dvpoll), 7350Sstevel@tonic-gate STRUCT_SIZE(dvpoll)); 7360Sstevel@tonic-gate if (error) { 7370Sstevel@tonic-gate DP_REFRELE(dpep); 7380Sstevel@tonic-gate return (EFAULT); 7390Sstevel@tonic-gate } 7400Sstevel@tonic-gate 7410Sstevel@tonic-gate time_out = STRUCT_FGET(dvpoll, dp_timeout); 7420Sstevel@tonic-gate if (time_out > 0) { 7430Sstevel@tonic-gate /* 7440Sstevel@tonic-gate * Determine the future time of the requested timeout. 7450Sstevel@tonic-gate */ 7460Sstevel@tonic-gate rqtp = &rqtime; 7470Sstevel@tonic-gate rqtp->tv_sec = time_out / MILLISEC; 7480Sstevel@tonic-gate rqtp->tv_nsec = (time_out % MILLISEC) * MICROSEC; 7490Sstevel@tonic-gate timespecadd(rqtp, &now); 7500Sstevel@tonic-gate } 7510Sstevel@tonic-gate 7520Sstevel@tonic-gate if ((nfds = STRUCT_FGET(dvpoll, dp_nfds)) == 0) { 7530Sstevel@tonic-gate /* 7540Sstevel@tonic-gate * We are just using DP_POLL to sleep, so 7550Sstevel@tonic-gate * we don't any of the devpoll apparatus. 7560Sstevel@tonic-gate * Do not check for signals if we have a zero timeout. 
7570Sstevel@tonic-gate */ 7580Sstevel@tonic-gate DP_REFRELE(dpep); 7590Sstevel@tonic-gate if (time_out == 0) 7600Sstevel@tonic-gate return (0); 7610Sstevel@tonic-gate mutex_enter(&curthread->t_delay_lock); 7620Sstevel@tonic-gate while ((rval = cv_waituntil_sig(&curthread->t_delay_cv, 7634123Sdm120769 &curthread->t_delay_lock, rqtp, timecheck)) > 0) 7640Sstevel@tonic-gate continue; 7650Sstevel@tonic-gate mutex_exit(&curthread->t_delay_lock); 7660Sstevel@tonic-gate return ((rval == 0)? EINTR : 0); 7670Sstevel@tonic-gate } 7680Sstevel@tonic-gate 7690Sstevel@tonic-gate /* 7700Sstevel@tonic-gate * XXX It'd be nice not to have to alloc each time. 7710Sstevel@tonic-gate * But it requires another per thread structure hook. 7720Sstevel@tonic-gate * Do it later if there is data suggest that. 7730Sstevel@tonic-gate */ 7740Sstevel@tonic-gate if ((ps = curthread->t_pollstate) == NULL) { 7750Sstevel@tonic-gate curthread->t_pollstate = pollstate_create(); 7760Sstevel@tonic-gate ps = curthread->t_pollstate; 7770Sstevel@tonic-gate } 7780Sstevel@tonic-gate if (ps->ps_dpbufsize < nfds) { 7790Sstevel@tonic-gate struct proc *p = ttoproc(curthread); 7800Sstevel@tonic-gate /* 7810Sstevel@tonic-gate * The maximum size should be no large than 7820Sstevel@tonic-gate * current maximum open file count. 
7830Sstevel@tonic-gate */ 7840Sstevel@tonic-gate mutex_enter(&p->p_lock); 7853760Ssp92102 if (nfds > p->p_fno_ctl) { 7860Sstevel@tonic-gate mutex_exit(&p->p_lock); 7870Sstevel@tonic-gate DP_REFRELE(dpep); 7880Sstevel@tonic-gate return (EINVAL); 7890Sstevel@tonic-gate } 7900Sstevel@tonic-gate mutex_exit(&p->p_lock); 7910Sstevel@tonic-gate kmem_free(ps->ps_dpbuf, sizeof (pollfd_t) * 7920Sstevel@tonic-gate ps->ps_dpbufsize); 7930Sstevel@tonic-gate ps->ps_dpbuf = kmem_zalloc(sizeof (pollfd_t) * 7940Sstevel@tonic-gate nfds, KM_SLEEP); 7950Sstevel@tonic-gate ps->ps_dpbufsize = nfds; 7960Sstevel@tonic-gate } 7970Sstevel@tonic-gate 7980Sstevel@tonic-gate mutex_enter(&pcp->pc_lock); 7990Sstevel@tonic-gate for (;;) { 8000Sstevel@tonic-gate pcp->pc_flag = 0; 8010Sstevel@tonic-gate error = dp_pcache_poll(ps->ps_dpbuf, pcp, nfds, &fdcnt); 8020Sstevel@tonic-gate if (fdcnt > 0 || error != 0) 8030Sstevel@tonic-gate break; 8040Sstevel@tonic-gate 8050Sstevel@tonic-gate /* 8060Sstevel@tonic-gate * A pollwake has happened since we polled cache. 8070Sstevel@tonic-gate */ 8080Sstevel@tonic-gate if (pcp->pc_flag & T_POLLWAKE) 8090Sstevel@tonic-gate continue; 8100Sstevel@tonic-gate 8110Sstevel@tonic-gate /* 812*5331Samw * Sleep until we are notified, signaled, or timed out. 8130Sstevel@tonic-gate * Do not check for signals if we have a zero timeout. 8140Sstevel@tonic-gate */ 8150Sstevel@tonic-gate if (time_out == 0) /* immediate timeout */ 8160Sstevel@tonic-gate break; 8170Sstevel@tonic-gate rval = cv_waituntil_sig(&pcp->pc_cv, &pcp->pc_lock, 8184123Sdm120769 rqtp, timecheck); 8190Sstevel@tonic-gate /* 8200Sstevel@tonic-gate * If we were awakened by a signal or timeout 8210Sstevel@tonic-gate * then break the loop, else poll again. 
8220Sstevel@tonic-gate */ 8230Sstevel@tonic-gate if (rval <= 0) { 8240Sstevel@tonic-gate if (rval == 0) /* signal */ 8250Sstevel@tonic-gate error = EINTR; 8260Sstevel@tonic-gate break; 8270Sstevel@tonic-gate } 8280Sstevel@tonic-gate } 8290Sstevel@tonic-gate mutex_exit(&pcp->pc_lock); 8300Sstevel@tonic-gate 8310Sstevel@tonic-gate if (error == 0 && fdcnt > 0) { 8320Sstevel@tonic-gate if (copyout(ps->ps_dpbuf, STRUCT_FGETP(dvpoll, 8330Sstevel@tonic-gate dp_fds), sizeof (pollfd_t) * fdcnt)) { 8340Sstevel@tonic-gate DP_REFRELE(dpep); 8350Sstevel@tonic-gate return (EFAULT); 8360Sstevel@tonic-gate } 8370Sstevel@tonic-gate *rvalp = fdcnt; 8380Sstevel@tonic-gate } 8390Sstevel@tonic-gate break; 8400Sstevel@tonic-gate } 8410Sstevel@tonic-gate 8420Sstevel@tonic-gate case DP_ISPOLLED: 8430Sstevel@tonic-gate { 8440Sstevel@tonic-gate pollfd_t pollfd; 8450Sstevel@tonic-gate polldat_t *pdp; 8460Sstevel@tonic-gate 8470Sstevel@tonic-gate STRUCT_INIT(dvpoll, mode); 8480Sstevel@tonic-gate error = copyin((caddr_t)arg, &pollfd, sizeof (pollfd_t)); 8490Sstevel@tonic-gate if (error) { 8500Sstevel@tonic-gate DP_REFRELE(dpep); 8510Sstevel@tonic-gate return (EFAULT); 8520Sstevel@tonic-gate } 8530Sstevel@tonic-gate mutex_enter(&pcp->pc_lock); 8540Sstevel@tonic-gate if (pcp->pc_hash == NULL) { 8550Sstevel@tonic-gate /* 8560Sstevel@tonic-gate * No Need to search because no poll fd 8570Sstevel@tonic-gate * has been cached. 
8580Sstevel@tonic-gate */ 8590Sstevel@tonic-gate mutex_exit(&pcp->pc_lock); 8600Sstevel@tonic-gate DP_REFRELE(dpep); 8610Sstevel@tonic-gate return (0); 8620Sstevel@tonic-gate } 8630Sstevel@tonic-gate if (pollfd.fd < 0) { 8640Sstevel@tonic-gate mutex_exit(&pcp->pc_lock); 8650Sstevel@tonic-gate break; 8660Sstevel@tonic-gate } 8670Sstevel@tonic-gate pdp = pcache_lookup_fd(pcp, pollfd.fd); 8680Sstevel@tonic-gate if ((pdp != NULL) && (pdp->pd_fd == pollfd.fd) && 8690Sstevel@tonic-gate (pdp->pd_fp != NULL)) { 8700Sstevel@tonic-gate pollfd.revents = pdp->pd_events; 8710Sstevel@tonic-gate if (copyout(&pollfd, (caddr_t)arg, sizeof (pollfd_t))) { 8720Sstevel@tonic-gate mutex_exit(&pcp->pc_lock); 8730Sstevel@tonic-gate DP_REFRELE(dpep); 8740Sstevel@tonic-gate return (EFAULT); 8750Sstevel@tonic-gate } 8760Sstevel@tonic-gate *rvalp = 1; 8770Sstevel@tonic-gate } 8780Sstevel@tonic-gate mutex_exit(&pcp->pc_lock); 8790Sstevel@tonic-gate break; 8800Sstevel@tonic-gate } 8810Sstevel@tonic-gate 8820Sstevel@tonic-gate default: 8830Sstevel@tonic-gate DP_REFRELE(dpep); 8840Sstevel@tonic-gate return (EINVAL); 8850Sstevel@tonic-gate } 8860Sstevel@tonic-gate DP_REFRELE(dpep); 8870Sstevel@tonic-gate return (error); 8880Sstevel@tonic-gate } 8890Sstevel@tonic-gate 8900Sstevel@tonic-gate /*ARGSUSED*/ 8910Sstevel@tonic-gate static int 8920Sstevel@tonic-gate dppoll(dev_t dev, short events, int anyyet, short *reventsp, 8930Sstevel@tonic-gate struct pollhead **phpp) 8940Sstevel@tonic-gate { 8950Sstevel@tonic-gate /* 8960Sstevel@tonic-gate * Polling on a /dev/poll fd is not fully supported yet. 8970Sstevel@tonic-gate */ 8980Sstevel@tonic-gate *reventsp = POLLERR; 8990Sstevel@tonic-gate return (0); 9000Sstevel@tonic-gate } 9010Sstevel@tonic-gate 9020Sstevel@tonic-gate /* 9030Sstevel@tonic-gate * devpoll close should do enough clean up before the pollcache is deleted, 9040Sstevel@tonic-gate * i.e., it should ensure no one still references the pollcache later. 
9050Sstevel@tonic-gate * There is no "permission" check in here. Any process having the last 9060Sstevel@tonic-gate * reference of this /dev/poll fd can close. 9070Sstevel@tonic-gate */ 9080Sstevel@tonic-gate /*ARGSUSED*/ 9090Sstevel@tonic-gate static int 9100Sstevel@tonic-gate dpclose(dev_t dev, int flag, int otyp, cred_t *credp) 9110Sstevel@tonic-gate { 9120Sstevel@tonic-gate minor_t minor; 9130Sstevel@tonic-gate dp_entry_t *dpep; 9140Sstevel@tonic-gate pollcache_t *pcp; 9150Sstevel@tonic-gate int i; 9160Sstevel@tonic-gate polldat_t **hashtbl; 9170Sstevel@tonic-gate polldat_t *pdp; 9180Sstevel@tonic-gate 9190Sstevel@tonic-gate minor = getminor(dev); 9200Sstevel@tonic-gate 9210Sstevel@tonic-gate mutex_enter(&devpoll_lock); 9220Sstevel@tonic-gate dpep = devpolltbl[minor]; 9230Sstevel@tonic-gate ASSERT(dpep != NULL); 9240Sstevel@tonic-gate devpolltbl[minor] = NULL; 9250Sstevel@tonic-gate mutex_exit(&devpoll_lock); 9260Sstevel@tonic-gate pcp = dpep->dpe_pcache; 9270Sstevel@tonic-gate ASSERT(pcp != NULL); 9280Sstevel@tonic-gate /* 9290Sstevel@tonic-gate * At this point, no other lwp can access this pollcache via the 9300Sstevel@tonic-gate * /dev/poll fd. This pollcache is going away, so do the clean 9310Sstevel@tonic-gate * up without the pc_lock. 9320Sstevel@tonic-gate */ 9330Sstevel@tonic-gate hashtbl = pcp->pc_hash; 9340Sstevel@tonic-gate for (i = 0; i < pcp->pc_hashsize; i++) { 9350Sstevel@tonic-gate for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) { 9360Sstevel@tonic-gate if (pdp->pd_php != NULL) { 9370Sstevel@tonic-gate pollhead_delete(pdp->pd_php, pdp); 9380Sstevel@tonic-gate pdp->pd_php = NULL; 9390Sstevel@tonic-gate pdp->pd_fp = NULL; 9400Sstevel@tonic-gate } 9410Sstevel@tonic-gate } 9420Sstevel@tonic-gate } 9430Sstevel@tonic-gate /* 9440Sstevel@tonic-gate * pollwakeup() may still interact with this pollcache. Wait until 9450Sstevel@tonic-gate * it is done. 
9460Sstevel@tonic-gate */ 9470Sstevel@tonic-gate mutex_enter(&pcp->pc_no_exit); 9480Sstevel@tonic-gate ASSERT(pcp->pc_busy >= 0); 9490Sstevel@tonic-gate while (pcp->pc_busy > 0) 9500Sstevel@tonic-gate cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit); 9510Sstevel@tonic-gate mutex_exit(&pcp->pc_no_exit); 9520Sstevel@tonic-gate pcache_destroy(pcp); 9530Sstevel@tonic-gate ASSERT(dpep->dpe_refcnt == 0); 9540Sstevel@tonic-gate kmem_free(dpep, sizeof (dp_entry_t)); 9550Sstevel@tonic-gate return (0); 9560Sstevel@tonic-gate } 957