/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/devops.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/sunddi.h>
#include <sys/stat.h>
#include <sys/poll_impl.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/mkdev.h>
#include <sys/debug.h>
#include <sys/file.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/bitmap.h>
#include <sys/devpoll.h>
#include <sys/rctl.h>
#include <sys/resource.h>

/* Sentinel stored in devpolltbl[] to claim a minor number before use. */
#define	RESERVED	1

/* local data struct */
static	dp_entry_t	**devpolltbl;	/* dev poll entries, indexed by minor */
static	size_t		dptblsize;	/* current capacity of devpolltbl */

static	kmutex_t	devpoll_lock;	/* lock protecting dev tbl */
int	devpoll_init;			/* is /dev/poll initialized already */

/* device local functions */

static int dpopen(dev_t *devp, int flag, int otyp, cred_t *credp);
static int dpwrite(dev_t dev, struct uio *uiop, cred_t *credp);
static int dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp);
static int dppoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp);
static int dpclose(dev_t dev, int flag, int otyp, cred_t *credp);
static dev_info_t *dpdevi;	/* our dev_info node, set at attach time */


/* Character device entry points for /dev/poll. */
static struct cb_ops    dp_cb_ops = {
	dpopen,			/* open */
	dpclose,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	dpwrite,		/* write */
	dpioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	dppoll,			/* poll */
	ddi_prop_op,		/* prop_op */
	(struct streamtab *)0,	/* streamtab */
	D_MP,			/* flags */
	CB_REV,			/* cb_ops revision */
	nodev,			/* aread */
	nodev			/* awrite */
};

static int dpattach(dev_info_t *, ddi_attach_cmd_t);
static int dpdetach(dev_info_t *, ddi_detach_cmd_t);
static int dpinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);

static struct dev_ops dp_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dpinfo,			/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dpattach,		/* attach */
	dpdetach,		/* detach */
	nodev,			/* reset */
	&dp_cb_ops,		/* driver operations */
	(struct bus_ops *)NULL,	/* bus operations */
	nulldev			/* power */
};


static struct modldrv modldrv = {
	&mod_driverops,		/* type of module - a driver */
	"/dev/poll driver",
	&dp_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

/*
 * Locking Design
 *
 * The /dev/poll driver shares most of its code with poll sys call whose
 * code is in common/syscall/poll.c. In poll(2) design, the pollcache
 * structure is per lwp. An implicit assumption is made there that some
 * portion of pollcache will never be touched by other lwps. E.g., in
 * poll(2) design, no lwp will ever need to grow bitmap of other lwp.
 * This assumption is not true for /dev/poll; hence the need for extra
 * locking.
 *
 * To allow more parallelism, each /dev/poll file descriptor (indexed by
 * minor number) has its own lock. Since read (dpioctl) is a much more
 * frequent operation than write, we want to allow multiple reads on same
 * /dev/poll fd. However, we prevent writes from being starved by giving
 * priority to write operation. Theoretically writes can starve reads as
 * well. But in practical sense this is not important because (1) writes
 * happens less often than reads, and (2) write operation defines the
 * content of poll fd a cache set. If writes happens so often that they
 * can starve reads, that means the cached set is very unstable. It may
 * not make sense to read an unstable cache set anyway.
Therefore, the 1410Sstevel@tonic-gate * writers starving readers case is not handled in this design. 1420Sstevel@tonic-gate */ 1430Sstevel@tonic-gate 1440Sstevel@tonic-gate int 1450Sstevel@tonic-gate _init() 1460Sstevel@tonic-gate { 1470Sstevel@tonic-gate int error; 1480Sstevel@tonic-gate 1490Sstevel@tonic-gate dptblsize = DEVPOLLSIZE; 1500Sstevel@tonic-gate devpolltbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP); 1510Sstevel@tonic-gate mutex_init(&devpoll_lock, NULL, MUTEX_DEFAULT, NULL); 1520Sstevel@tonic-gate devpoll_init = 1; 1530Sstevel@tonic-gate if ((error = mod_install(&modlinkage)) != 0) { 1540Sstevel@tonic-gate mutex_destroy(&devpoll_lock); 1550Sstevel@tonic-gate kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize); 1560Sstevel@tonic-gate devpoll_init = 0; 1570Sstevel@tonic-gate } 1580Sstevel@tonic-gate return (error); 1590Sstevel@tonic-gate } 1600Sstevel@tonic-gate 1610Sstevel@tonic-gate int 1620Sstevel@tonic-gate _fini() 1630Sstevel@tonic-gate { 1640Sstevel@tonic-gate int error; 1650Sstevel@tonic-gate 1660Sstevel@tonic-gate if ((error = mod_remove(&modlinkage)) != 0) { 1670Sstevel@tonic-gate return (error); 1680Sstevel@tonic-gate } 1690Sstevel@tonic-gate mutex_destroy(&devpoll_lock); 1700Sstevel@tonic-gate kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize); 1710Sstevel@tonic-gate return (0); 1720Sstevel@tonic-gate } 1730Sstevel@tonic-gate 1740Sstevel@tonic-gate int 1750Sstevel@tonic-gate _info(struct modinfo *modinfop) 1760Sstevel@tonic-gate { 1770Sstevel@tonic-gate return (mod_info(&modlinkage, modinfop)); 1780Sstevel@tonic-gate } 1790Sstevel@tonic-gate 1800Sstevel@tonic-gate /*ARGSUSED*/ 1810Sstevel@tonic-gate static int 1820Sstevel@tonic-gate dpattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 1830Sstevel@tonic-gate { 1840Sstevel@tonic-gate if (ddi_create_minor_node(devi, "poll", S_IFCHR, 0, DDI_PSEUDO, NULL) 1850Sstevel@tonic-gate == DDI_FAILURE) { 1860Sstevel@tonic-gate ddi_remove_minor_node(devi, NULL); 1870Sstevel@tonic-gate return 
(DDI_FAILURE); 1880Sstevel@tonic-gate } 1890Sstevel@tonic-gate dpdevi = devi; 1900Sstevel@tonic-gate return (DDI_SUCCESS); 1910Sstevel@tonic-gate } 1920Sstevel@tonic-gate 1930Sstevel@tonic-gate static int 1940Sstevel@tonic-gate dpdetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 1950Sstevel@tonic-gate { 1960Sstevel@tonic-gate if (cmd != DDI_DETACH) 1970Sstevel@tonic-gate return (DDI_FAILURE); 1980Sstevel@tonic-gate 1990Sstevel@tonic-gate ddi_remove_minor_node(devi, NULL); 2000Sstevel@tonic-gate return (DDI_SUCCESS); 2010Sstevel@tonic-gate } 2020Sstevel@tonic-gate 2030Sstevel@tonic-gate /* ARGSUSED */ 2040Sstevel@tonic-gate static int 2050Sstevel@tonic-gate dpinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2060Sstevel@tonic-gate { 2070Sstevel@tonic-gate int error; 2080Sstevel@tonic-gate 2090Sstevel@tonic-gate switch (infocmd) { 2100Sstevel@tonic-gate case DDI_INFO_DEVT2DEVINFO: 2110Sstevel@tonic-gate *result = (void *)dpdevi; 2120Sstevel@tonic-gate error = DDI_SUCCESS; 2130Sstevel@tonic-gate break; 2140Sstevel@tonic-gate case DDI_INFO_DEVT2INSTANCE: 2150Sstevel@tonic-gate *result = (void *)0; 2160Sstevel@tonic-gate error = DDI_SUCCESS; 2170Sstevel@tonic-gate break; 2180Sstevel@tonic-gate default: 2190Sstevel@tonic-gate error = DDI_FAILURE; 2200Sstevel@tonic-gate } 2210Sstevel@tonic-gate return (error); 2220Sstevel@tonic-gate } 2230Sstevel@tonic-gate 2240Sstevel@tonic-gate /* 2250Sstevel@tonic-gate * dp_pcache_poll has similar logic to pcache_poll() in poll.c. The major 2260Sstevel@tonic-gate * differences are: (1) /dev/poll requires scanning the bitmap starting at 2270Sstevel@tonic-gate * where it was stopped last time, instead of always starting from 0, 2280Sstevel@tonic-gate * (2) since user may not have cleaned up the cached fds when they are 2290Sstevel@tonic-gate * closed, some polldats in cache may refer to closed or reused fds. We 2300Sstevel@tonic-gate * need to check for those cases. 
 *
 * NOTE: Upon closing an fd, automatic poll cache cleanup is done for
 *	 poll(2) caches but NOT for /dev/poll caches. So expect some
 *	 stale entries!
 */
static int
dp_pcache_poll(pollfd_t *pfdp, pollcache_t *pcp, nfds_t nfds, int *fdcntp)
{
	int		start, ostart, end;
	int		fdcnt, fd;
	boolean_t	done;
	file_t		*fp;
	short		revent;
	boolean_t	no_wrap;
	pollhead_t	*php;
	polldat_t	*pdp;
	int		error = 0;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));
	if (pcp->pc_bitmap == NULL) {
		/*
		 * No need to search because no poll fd
		 * has been cached.
		 */
		return (error);
	}
retry:
	start = ostart = pcp->pc_mapstart;
	end = pcp->pc_mapend;
	php = NULL;

	if (start == 0) {
		/*
		 * Started from the very beginning, no need to wrap around.
		 */
		no_wrap = B_TRUE;
	} else {
		no_wrap = B_FALSE;
	}
	done = B_FALSE;
	fdcnt = 0;
	while ((fdcnt < nfds) && !done) {
		php = NULL;
		revent = 0;
		/*
		 * Examine the bit map in a circular fashion
		 * to avoid starvation. Always resume from
		 * last stop. Scan till end of the map. Then
		 * wrap around.
		 */
		fd = bt_getlowbit(pcp->pc_bitmap, start, end);
		ASSERT(fd <= end);
		if (fd >= 0) {
			if (fd == end) {
				if (no_wrap) {
					done = B_TRUE;
				} else {
					/* wrap: rescan [0, ostart) next */
					start = 0;
					end = ostart - 1;
					no_wrap = B_TRUE;
				}
			} else {
				start = fd + 1;
			}
			pdp = pcache_lookup_fd(pcp, fd);
repoll:
			ASSERT(pdp != NULL);
			ASSERT(pdp->pd_fd == fd);
			if (pdp->pd_fp == NULL) {
				/*
				 * The fd is POLLREMOVed. This fd is
				 * logically no longer cached. So move
				 * on to the next one.
				 */
				continue;
			}
			if ((fp = getf(fd)) == NULL) {
				/*
				 * The fd has been closed, but user has not
				 * done a POLLREMOVE on this fd yet. Instead
				 * of cleaning it here implicitly, we return
				 * POLLNVAL. This is consistent with poll(2)
				 * polling a closed fd. Hope this will remind
				 * user to do a POLLREMOVE.
				 */
				pfdp[fdcnt].fd = fd;
				pfdp[fdcnt].revents = POLLNVAL;
				fdcnt++;
				continue;
			}
			if (fp != pdp->pd_fp) {
				/*
				 * User is polling on a cached fd which was
				 * closed and then reused. Unfortunately
				 * there is no good way to inform user.
				 * If the file struct is also reused, we
				 * may not be able to detect the fd reuse
				 * at all. As long as this does not
				 * cause system failure and/or memory leak,
				 * we will play along. Man page states if
				 * user does not clean up closed fds, polling
				 * results will be indeterministic.
				 *
				 * XXX - perhaps log the detection of fd
				 * reuse?
				 */
				pdp->pd_fp = fp;
			}
			/*
			 * XXX - pollrelock() logic needs to know which
			 * pollcache lock to grab. It'd be a
			 * cleaner solution if we could pass pcp as
			 * an argument in VOP_POLL interface instead
			 * of implicitly passing it using thread_t
			 * struct. On the other hand, changing VOP_POLL
			 * interface will require all driver/file system
			 * poll routine to change. May want to revisit
			 * the tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pdp->pd_events, 0,
			    &revent, &php, NULL);
			curthread->t_pollcache = NULL;
			releasef(fd);
			if (error != 0) {
				break;
			}
			/*
			 * layered devices (e.g. console driver)
			 * may change the vnode and thus the pollhead
			 * pointer out from underneath us.
			 */
			if (php != NULL && pdp->pd_php != NULL &&
			    php != pdp->pd_php) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = php;
				pollhead_insert(php, pdp);
				/*
				 * The bit should still be set.
				 */
				ASSERT(BT_TEST(pcp->pc_bitmap, fd));
				goto retry;
			}

			if (revent != 0) {
				/* Event fired: report it to the caller. */
				pfdp[fdcnt].fd = fd;
				pfdp[fdcnt].events = pdp->pd_events;
				pfdp[fdcnt].revents = revent;
				fdcnt++;
			} else if (php != NULL) {
				/*
				 * We clear a bit or cache a poll fd if
				 * the driver returns a poll head ptr,
				 * which is expected in the case of 0
				 * revents. Some buggy driver may return
				 * NULL php pointer with 0 revents. In
				 * this case, we just treat the driver as
				 * "noncachable" and not clearing the bit
				 * in bitmap.
				 */
				if ((pdp->pd_php != NULL) &&
				    ((pcp->pc_flag & T_POLLWAKE) == 0)) {
					BT_CLEAR(pcp->pc_bitmap, fd);
				}
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
					/*
					 * An event of interest may have
					 * arrived between the VOP_POLL() and
					 * the pollhead_insert(); check again.
					 */
					goto repoll;
				}
			}
		} else {
			/*
			 * No bit set in the range. Check for wrap around.
			 */
			if (!no_wrap) {
				start = 0;
				end = ostart - 1;
				no_wrap = B_TRUE;
			} else {
				done = B_TRUE;
			}
		}
	}

	if (!done) {
		/* Remember where we stopped for the next circular scan. */
		pcp->pc_mapstart = start;
	}
	ASSERT(*fdcntp == 0);
	*fdcntp = fdcnt;
	return (error);
}

/*
 * Open of /dev/poll behaves like a cloning driver: each open claims a
 * fresh minor number and gets its own dp_entry_t / pollcache.
 */
/*ARGSUSED*/
static int
dpopen(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	minor_t		minordev;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;

	ASSERT(devpoll_init);
	ASSERT(dptblsize <= MAXMIN);
	mutex_enter(&devpoll_lock);
	/* Find a free minor slot and reserve it under devpoll_lock. */
	for (minordev = 0; minordev < dptblsize; minordev++) {
		if (devpolltbl[minordev] == NULL) {
			devpolltbl[minordev] = (dp_entry_t *)RESERVED;
			break;
		}
	}
	if (minordev == dptblsize) {
		dp_entry_t	**newtbl;
		size_t		oldsize;

		/*
		 * Used up every entry in the existing devpoll table.
		 * Grow the table by DEVPOLLSIZE.
		 */
		if ((oldsize = dptblsize) >= MAXMIN) {
			mutex_exit(&devpoll_lock);
			return (ENXIO);
		}
		dptblsize += DEVPOLLSIZE;
		if (dptblsize > MAXMIN) {
			dptblsize = MAXMIN;
		}
		newtbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
		bcopy(devpolltbl, newtbl, sizeof (caddr_t) * oldsize);
		kmem_free(devpolltbl, sizeof (caddr_t) * oldsize);
		devpolltbl = newtbl;
		devpolltbl[minordev] = (dp_entry_t *)RESERVED;
	}
	mutex_exit(&devpoll_lock);

	dpep = kmem_zalloc(sizeof (dp_entry_t), KM_SLEEP);
	/*
	 * allocate a pollcache skeleton here. Delay allocating bitmap
	 * structures until dpwrite() time, since we don't know the
	 * optimal size yet.
	 */
	pcp = pcache_alloc();
	dpep->dpe_pcache = pcp;
	/* Remember the owner; dpwrite/dpioctl refuse other processes. */
	pcp->pc_pid = curproc->p_pid;
	*devp = makedevice(getmajor(*devp), minordev);	/* clone the driver */
	mutex_enter(&devpoll_lock);
	ASSERT(minordev < dptblsize);
	ASSERT(devpolltbl[minordev] == (dp_entry_t *)RESERVED);
	devpolltbl[minordev] = dpep;
	mutex_exit(&devpoll_lock);
	return (0);
}

/*
 * Write to dev/poll add/remove fd's to/from a cached poll fd set,
 * or change poll events for a watched fd.
 */
/*ARGSUSED*/
static int
dpwrite(dev_t dev, struct uio *uiop, cred_t *credp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	pollfd_t	*pollfdp, *pfdp;
	int		error;
	ssize_t		uiosize;
	nfds_t		pollfdnum;
	struct pollhead	*php = NULL;
	polldat_t	*pdp;
	int		fd;
	file_t		*fp;

	minor = getminor(dev);

	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	mutex_exit(&devpoll_lock);
	pcp = dpep->dpe_pcache;
	/* Only the process that opened this /dev/poll fd may write to it. */
	if (curproc->p_pid != pcp->pc_pid) {
		return (EACCES);
	}
	uiosize = uiop->uio_resid;
	pollfdnum = uiosize / sizeof (pollfd_t);
	mutex_enter(&curproc->p_lock);
	/* Enforce RLIMIT_NOFILE on the number of pollfds in one write. */
	if (pollfdnum > (uint_t)rctl_enforced_value(
	    rctlproc_legacy[RLIMIT_NOFILE], curproc->p_rctls, curproc)) {
		(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
		    curproc->p_rctls, curproc, RCA_SAFE);
		mutex_exit(&curproc->p_lock);
		return (set_errno(EINVAL));
	}
	mutex_exit(&curproc->p_lock);
	/*
	 * Copy in the pollfd array. Walk through the array and add
	 * each polled fd to the cached set.
	 */
	pollfdp = kmem_alloc(uiosize, KM_SLEEP);

	/*
	 * Although /dev/poll uses the write(2) interface to cache fds, it's
	 * not supposed to function as a seekable device. To prevent offset
	 * from growing and eventually exceed the maximum, reset the offset
	 * here for every call.
	 */
	uiop->uio_loffset = 0;
	if ((error = uiomove((caddr_t)pollfdp, uiosize, UIO_WRITE, uiop))
	    != 0) {
		kmem_free(pollfdp, uiosize);
		return (error);
	}
	/*
	 * We are about to enter the core portion of dpwrite(). Make sure this
	 * write has exclusive access in this portion of the code, i.e., no
	 * other writers in this code and no other readers in dpioctl.
	 */
	mutex_enter(&dpep->dpe_lock);
	dpep->dpe_writerwait++;
	while (dpep->dpe_refcnt != 0) {
		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
			/* Interrupted by a signal; undo and bail out. */
			dpep->dpe_writerwait--;
			mutex_exit(&dpep->dpe_lock);
			kmem_free(pollfdp, uiosize);
			return (set_errno(EINTR));
		}
	}
	dpep->dpe_writerwait--;
	dpep->dpe_flag |= DP_WRITER_PRESENT;
	dpep->dpe_refcnt++;
	mutex_exit(&dpep->dpe_lock);

	mutex_enter(&pcp->pc_lock);
	if (pcp->pc_bitmap == NULL) {
		/* First write: size the bitmap to this request. */
		pcache_create(pcp, pollfdnum);
	}
	for (pfdp = pollfdp; pfdp < pollfdp + pollfdnum; pfdp++) {
		fd = pfdp->fd;
		if ((uint_t)fd >= P_FINFO(curproc)->fi_nfiles)
			continue;
		pdp = pcache_lookup_fd(pcp, fd);
		if (pfdp->events != POLLREMOVE) {
			if (pdp == NULL) {
				/* New fd: allocate and cache a polldat. */
				pdp = pcache_alloc_fd(0);
				pdp->pd_fd = fd;
				pdp->pd_pcache = pcp;
				pcache_insert_fd(pcp, pdp, pollfdnum);
			}
			ASSERT(pdp->pd_fd == fd);
			ASSERT(pdp->pd_pcache == pcp);
			if (fd >= pcp->pc_mapsize) {
				/* pcache_grow_map() needs pc_lock dropped */
				mutex_exit(&pcp->pc_lock);
				pcache_grow_map(pcp, fd);
				mutex_enter(&pcp->pc_lock);
			}
			if (fd > pcp->pc_mapend) {
				pcp->pc_mapend = fd;
			}
			if ((fp = getf(fd)) == NULL) {
				/*
				 * The fd is not valid. Since we can't pass
				 * this error back in the write() call, set
				 * the bit in bitmap to force DP_POLL ioctl
				 * to examine it.
				 */
				BT_SET(pcp->pc_bitmap, fd);
				pdp->pd_events |= pfdp->events;
				continue;
			}
			/*
			 * Don't do VOP_POLL for an already cached fd with
			 * same poll events.
			 */
			if ((pdp->pd_events == pfdp->events) &&
			    (pdp->pd_fp != NULL)) {
				/*
				 * the events are already cached
				 */
				releasef(fd);
				continue;
			}

			/*
			 * do VOP_POLL and cache this poll fd.
			 */
			/*
			 * XXX - pollrelock() logic needs to know which
			 * pollcache lock to grab. It'd be a
			 * cleaner solution if we could pass pcp as
			 * an argument in VOP_POLL interface instead
			 * of implicitly passing it using thread_t
			 * struct. On the other hand, changing VOP_POLL
			 * interface will require all driver/file system
			 * poll routine to change. May want to revisit
			 * the tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pfdp->events, 0,
			    &pfdp->revents, &php, NULL);
			curthread->t_pollcache = NULL;
			/*
			 * We always set the bit when this fd is cached;
			 * this forces the first DP_POLL to poll this fd.
			 * Real performance gain comes from subsequent
			 * DP_POLL. We also attempt a pollhead_insert();
			 * if it's not possible, we'll do it in dpioctl().
			 */
			BT_SET(pcp->pc_bitmap, fd);
			if (error != 0) {
				releasef(fd);
				break;
			}
			pdp->pd_fp = fp;
			pdp->pd_events |= pfdp->events;
			if (php != NULL) {
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
				} else {
					if (pdp->pd_php != php) {
						/* vnode/pollhead changed */
						pollhead_delete(pdp->pd_php,
						    pdp);
						pollhead_insert(php, pdp);
						pdp->pd_php = php;
					}
				}

			}
			releasef(fd);
		} else {
			/* POLLREMOVE: drop this fd from the cached set. */
			if (pdp == NULL) {
				continue;
			}
			ASSERT(pdp->pd_fd == fd);
			pdp->pd_fp = NULL;
			pdp->pd_events = 0;
			ASSERT(pdp->pd_thread == NULL);
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
			}
			BT_CLEAR(pcp->pc_bitmap, fd);
		}
	}
	mutex_exit(&pcp->pc_lock);
	mutex_enter(&dpep->dpe_lock);
	dpep->dpe_flag &= ~DP_WRITER_PRESENT;
	ASSERT(dpep->dpe_refcnt == 1);
	dpep->dpe_refcnt--;
	cv_broadcast(&dpep->dpe_cv);
	mutex_exit(&dpep->dpe_lock);
	kmem_free(pollfdp, uiosize);
	return (error);
}

/*
 * Ioctl entry point for /dev/poll.  Two commands are supported:
 *
 *   DP_POLL	 - wait for events on the fds cached in this minor's
 *		   pollcache; ready pollfds are copied out to the
 *		   user-supplied buffer and their count returned via *rvalp.
 *   DP_ISPOLLED - report whether a single fd is cached in this pollcache;
 *		   *rvalp is set to 1 and the cached events copied out
 *		   when it is.
 *
 * Only the process that created the pollcache (pc_pid) may issue ioctls
 * on it; anyone else gets EACCES.
 */
/*ARGSUSED*/
static int
dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	timestruc_t	now;
	timestruc_t	rqtime;
	timestruc_t	*rqtp = NULL;
	int		timecheck = 0;
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	int		error = 0;
	STRUCT_DECL(dvpoll, dvpoll);

	if (cmd == DP_POLL) {
		/* do this now, before we sleep on DP_WRITER_PRESENT below */
		timecheck = timechanged;
		gethrestime(&now);
	}
	minor = getminor(dev);
	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	mutex_exit(&devpoll_lock);
	ASSERT(dpep != NULL);
	pcp = dpep->dpe_pcache;
	/* only the creating process may use this pollcache */
	if (curproc->p_pid != pcp->pc_pid)
		return (EACCES);

	/*
	 * Block while a writer (dpwrite) owns or is waiting for the cache;
	 * writers get priority over new readers.  A caught signal aborts
	 * the wait with EINTR.
	 */
	mutex_enter(&dpep->dpe_lock);
	while ((dpep->dpe_flag & DP_WRITER_PRESENT) ||
	    (dpep->dpe_writerwait != 0)) {
		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
			mutex_exit(&dpep->dpe_lock);
			return (EINTR);
		}
	}
	/* hold the entry; every exit path below must DP_REFRELE() it */
	dpep->dpe_refcnt++;
	mutex_exit(&dpep->dpe_lock);

	switch (cmd) {
	case DP_POLL:
	{
		pollstate_t	*ps;
		nfds_t		nfds;
		int		fdcnt = 0;
		int		time_out;
		int		rval;

		STRUCT_INIT(dvpoll, mode);
		error = copyin((caddr_t)arg, STRUCT_BUF(dvpoll),
		    STRUCT_SIZE(dvpoll));
		if (error) {
			DP_REFRELE(dpep);
			return (EFAULT);
		}

		time_out = STRUCT_FGET(dvpoll, dp_timeout);
		if (time_out > 0) {
			/*
			 * Determine the future time of the requested timeout.
			 * dp_timeout is in milliseconds; rqtime becomes the
			 * absolute deadline (now + timeout).
			 */
			rqtp = &rqtime;
			rqtp->tv_sec = time_out / MILLISEC;
			rqtp->tv_nsec = (time_out % MILLISEC) * MICROSEC;
			timespecadd(rqtp, &now);
		}

		if ((nfds = STRUCT_FGET(dvpoll, dp_nfds)) == 0) {
			/*
			 * We are just using DP_POLL to sleep, so
			 * we don't need any of the devpoll apparatus.
			 * Do not check for signals if we have a zero timeout.
			 */
			DP_REFRELE(dpep);
			if (time_out == 0)
				return (0);
			mutex_enter(&curthread->t_delay_lock);
			while ((rval = cv_waituntil_sig(&curthread->t_delay_cv,
			    &curthread->t_delay_lock, rqtp, timecheck)) > 0)
				continue;
			mutex_exit(&curthread->t_delay_lock);
			/* rval == 0 means a signal woke us; <0 is timeout */
			return ((rval == 0)? EINTR : 0);
		}

		/*
		 * XXX It'd be nice not to have to alloc each time.
		 * But it requires another per thread structure hook.
		 * Do it later if there is data to suggest that.
		 */
		if ((ps = curthread->t_pollstate) == NULL) {
			curthread->t_pollstate = pollstate_create();
			ps = curthread->t_pollstate;
		}
		if (ps->ps_dpbufsize < nfds) {
			struct proc *p = ttoproc(curthread);
			/*
			 * The maximum size should be no larger than
			 * current maximum open file count.
			 */
			mutex_enter(&p->p_lock);
			if (nfds > p->p_fno_ctl) {
				mutex_exit(&p->p_lock);
				DP_REFRELE(dpep);
				return (EINVAL);
			}
			mutex_exit(&p->p_lock);
			/* grow the per-thread result buffer to nfds entries */
			kmem_free(ps->ps_dpbuf, sizeof (pollfd_t) *
			    ps->ps_dpbufsize);
			ps->ps_dpbuf = kmem_zalloc(sizeof (pollfd_t) *
			    nfds, KM_SLEEP);
			ps->ps_dpbufsize = nfds;
		}

		mutex_enter(&pcp->pc_lock);
		for (;;) {
			pcp->pc_flag = 0;
			error = dp_pcache_poll(ps->ps_dpbuf, pcp, nfds, &fdcnt);
			if (fdcnt > 0 || error != 0)
				break;

			/*
			 * A pollwake has happened since we polled cache.
			 * Re-poll rather than sleeping, or we could miss it.
			 */
			if (pcp->pc_flag & T_POLLWAKE)
				continue;

			/*
			 * Sleep until we are notified, signaled, or timed out.
			 * Do not check for signals if we have a zero timeout.
			 */
			if (time_out == 0)	/* immediate timeout */
				break;
			rval = cv_waituntil_sig(&pcp->pc_cv, &pcp->pc_lock,
			    rqtp, timecheck);
			/*
			 * If we were awakened by a signal or timeout
			 * then break the loop, else poll again.
			 */
			if (rval <= 0) {
				if (rval == 0)	/* signal */
					error = EINTR;
				break;
			}
		}
		mutex_exit(&pcp->pc_lock);

		if (error == 0 && fdcnt > 0) {
			if (copyout(ps->ps_dpbuf, STRUCT_FGETP(dvpoll,
			    dp_fds), sizeof (pollfd_t) * fdcnt)) {
				DP_REFRELE(dpep);
				return (EFAULT);
			}
			*rvalp = fdcnt;
		}
		break;
	}

	case DP_ISPOLLED:
	{
		pollfd_t	pollfd;
		polldat_t	*pdp;

		STRUCT_INIT(dvpoll, mode);
		error = copyin((caddr_t)arg, &pollfd, sizeof (pollfd_t));
		if (error) {
			DP_REFRELE(dpep);
			return (EFAULT);
		}
		mutex_enter(&pcp->pc_lock);
		if (pcp->pc_hash == NULL) {
			/*
			 * No need to search because no poll fd
			 * has been cached.
			 */
			mutex_exit(&pcp->pc_lock);
			DP_REFRELE(dpep);
			return (0);
		}
		if (pollfd.fd < 0) {
			mutex_exit(&pcp->pc_lock);
			break;
		}
		pdp = pcache_lookup_fd(pcp, pollfd.fd);
		if ((pdp != NULL) && (pdp->pd_fd == pollfd.fd) &&
		    (pdp->pd_fp != NULL)) {
			/* fd is cached; report the cached event set */
			pollfd.revents = pdp->pd_events;
			if (copyout(&pollfd, (caddr_t)arg, sizeof (pollfd_t))) {
				mutex_exit(&pcp->pc_lock);
				DP_REFRELE(dpep);
				return (EFAULT);
			}
			*rvalp = 1;
		}
		mutex_exit(&pcp->pc_lock);
		break;
	}

	default:
		DP_REFRELE(dpep);
		return (EINVAL);
	}
	DP_REFRELE(dpep);
	return (error);
}

/*
 * Poll entry point for the /dev/poll driver itself (i.e., poll(2) on a
 * /dev/poll fd).  Always reports POLLERR; real support is not implemented.
 */
/*ARGSUSED*/
static int
dppoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	/*
	 * Polling on a /dev/poll fd is not fully supported yet.
	 */
	*reventsp = POLLERR;
	return (0);
}

/*
 * devpoll close should do enough clean up before the pollcache is deleted,
 * i.e., it should ensure no one still references the pollcache later.
 * There is no "permission" check in here. Any process having the last
 * reference of this /dev/poll fd can close.
 */
/*ARGSUSED*/
static int
dpclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	int		i;
	polldat_t	**hashtbl;
	polldat_t	*pdp;

	minor = getminor(dev);

	/*
	 * Remove the entry from the minor table first so no new open/ioctl
	 * can find this pollcache while we tear it down.
	 */
	mutex_enter(&devpoll_lock);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	devpolltbl[minor] = NULL;
	mutex_exit(&devpoll_lock);
	pcp = dpep->dpe_pcache;
	ASSERT(pcp != NULL);
	/*
	 * At this point, no other lwp can access this pollcache via the
	 * /dev/poll fd. This pollcache is going away, so do the clean
	 * up without the pc_lock.
	 */
	hashtbl = pcp->pc_hash;
	for (i = 0; i < pcp->pc_hashsize; i++) {
		/*
		 * Unlink every cached polldat from its driver pollhead so
		 * future pollwakeup() calls no longer reference us.
		 */
		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
				pdp->pd_fp = NULL;
			}
		}
	}
	/*
	 * pollwakeup() may still interact with this pollcache. Wait until
	 * it is done (pc_busy drops to zero) before destroying it.
	 */
	mutex_enter(&pcp->pc_no_exit);
	ASSERT(pcp->pc_busy >= 0);
	while (pcp->pc_busy > 0)
		cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit);
	mutex_exit(&pcp->pc_no_exit);
	pcache_destroy(pcp);
	ASSERT(dpep->dpe_refcnt == 0);
	kmem_free(dpep, sizeof (dp_entry_t));
	return (0);
}