Lines Matching defs:kq
82 * This lock is used if multiple kq locks are required. This possibly
102 static int kqueue_register(struct kqueue *kq, struct kevent *kev,
105 static void kqueue_release(struct kqueue *kq, int locked);
106 static void kqueue_destroy(struct kqueue *kq);
107 static void kqueue_drain(struct kqueue *kq, struct thread *td);
108 static int kqueue_expand(struct kqueue *kq, const struct filterops *fops,
111 static int kqueue_scan(struct kqueue *kq, int maxevents,
115 static void kqueue_wakeup(struct kqueue *kq);
146 static int knote_attach(struct knote *kn, struct kqueue *kq);
224 #define KQ_LOCK(kq) do { \
225 mtx_lock(&(kq)->kq_lock); \
227 #define KQ_FLUX_WAKEUP(kq) do { \
228 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \
229 (kq)->kq_state &= ~KQ_FLUXWAIT; \
230 wakeup((kq)); \
233 #define KQ_UNLOCK_FLUX(kq) do { \
234 KQ_FLUX_WAKEUP(kq); \
235 mtx_unlock(&(kq)->kq_lock); \
237 #define KQ_UNLOCK(kq) do { \
238 mtx_unlock(&(kq)->kq_lock); \
240 #define KQ_OWNED(kq) do { \
241 mtx_assert(&(kq)->kq_lock, MA_OWNED); \
243 #define KQ_NOTOWNED(kq) do { \
244 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \
379 struct kqueue *kq = kn->kn_fp->f_data;
386 knlist_add(&kq->kq_sel.si_note, kn, 0);
394 struct kqueue *kq = kn->kn_fp->f_data;
396 knlist_remove(&kq->kq_sel.si_note, kn, 0);
403 struct kqueue *kq = kn->kn_fp->f_data;
405 kn->kn_data = kq->kq_count;
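The filt_kqattach/filt_kqdetach/filt_kqueue lines above implement EVFILT_READ on a kqueue descriptor itself: the filter attaches to the kqueue's own kq_sel.si_note list and reports kq_count through kn_data. A minimal userspace sketch of one kqueue watching another; the inner/outer naming and the pipe used as the inner event source are illustrative, not from the source file:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev;
	int pfd[2], inner, outer, n;

	if ((inner = kqueue()) == -1 || (outer = kqueue()) == -1)
		err(1, "kqueue");
	if (pipe(pfd) == -1)
		err(1, "pipe");

	/* The outer kqueue watches the inner kqueue descriptor for readability. */
	EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(outer, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent: add inner kq");

	/* Arm an event on the inner kqueue and make it fire. */
	EV_SET(&kev, pfd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(inner, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent: add pipe");
	(void)write(pfd[1], "x", 1);

	/* filt_kqueue() reports the inner kqueue's pending-event count in data. */
	n = kevent(outer, NULL, 0, &kev, 1, NULL);
	if (n > 0)
		printf("inner kq has %jd pending event(s)\n", (intmax_t)kev.data);
	return (0);
}

The notification to the outer kqueue travels through the kqueue_schedtask/kqueue_task lines listed further down (KNOTE_LOCKED on kq_sel.si_note), which is why it arrives asynchronously rather than from the thread that enqueued the inner event.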
530 struct kqueue *kq;
542 kq = kn->kn_kq;
543 KQ_LOCK(kq);
545 KQ_UNLOCK(kq);
555 KQ_UNLOCK(kq);
566 KQ_UNLOCK(kq);
585 error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
599 error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
605 KQ_LOCK(kq);
607 KQ_UNLOCK_FLUX(kq);
893 struct kqueue *kq;
916 * This has to be done while the kq is locked:
921 kq = kn->kn_kq;
922 KQ_LOCK(kq);
928 KQ_UNLOCK(kq);
1072 kqueue_init(struct kqueue *kq)
1075 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
1076 TAILQ_INIT(&kq->kq_head);
1077 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
1078 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
1085 struct kqueue *kq;
1102 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
1103 kqueue_init(kq);
1104 kq->kq_fdp = fdp;
1105 kq->kq_cred = crhold(cred);
1108 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
1111 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
1323 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
1345 error = kqueue_register(kq, kevp, td, M_WAITOK);
1363 return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
1370 struct kqueue *kq;
1373 error = kqueue_acquire(fp, &kq);
1376 error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
1377 kqueue_release(kq, 0);
1389 struct kqueue kq = {};
1392 kqueue_init(&kq);
1393 kq.kq_refcnt = 1;
1394 error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
1395 kqueue_drain(&kq, td);
1396 kqueue_destroy(&kq);
1486 * A ref to kq (obtained via kqueue_acquire) must be held.
1489 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
1542 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
1547 error = kqueue_expand(kq, fops, kev->ident, mflag);
1558 * getting both the knlist lock and the kq lock since
1561 if (fp->f_data == kq) {
1576 KQ_LOCK(kq);
1577 if (kev->ident < kq->kq_knlistsize) {
1578 SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
1584 error = kqueue_expand(kq, fops, kev->ident, mflag);
1589 KQ_LOCK(kq);
1601 } else if (kq->kq_knhashmask != 0) {
1604 list = &kq->kq_knhash[
1605 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1620 kq->kq_state |= KQ_FLUXWAIT;
1621 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
1637 KQ_UNLOCK(kq);
1642 kn->kn_kq = kq;
1663 error = knote_attach(kn, kq);
1664 KQ_UNLOCK(kq);
1678 KQ_UNLOCK(kq);
1686 KQ_UNLOCK(kq);
1708 KQ_UNLOCK(kq);
1735 KQ_LOCK(kq);
1744 KQ_UNLOCK_FLUX(kq);
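kqueue_register() above is where each changelist entry lands: EV_ADD attaches a new knote (after kqueue_expand() reserves list or hash storage), while EV_DELETE locates and drops an existing one. A small sketch exercising both flags with EVFILT_TIMER; the ident value 1 and the 500 ms period are arbitrary:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	struct kevent kev;
	int kq;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");

	/* EV_ADD: kqueue_register() attaches a new knote for ident 1. */
	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500 /* ms */, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent: EV_ADD");

	/* Wait for the timer knote to be enqueued and harvested. */
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
		printf("timer %lu fired\n", (unsigned long)kev.ident);

	/* EV_DELETE: kqueue_register() finds the existing knote and drops it. */
	EV_SET(&kev, 1, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent: EV_DELETE");
	return (0);
}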
1762 struct kqueue *kq;
1766 kq = fp->f_data;
1767 if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
1769 *kqp = kq;
1770 KQ_LOCK(kq);
1771 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
1772 KQ_UNLOCK(kq);
1775 kq->kq_refcnt++;
1776 KQ_UNLOCK(kq);
1782 kqueue_release(struct kqueue *kq, int locked)
1785 KQ_OWNED(kq);
1787 KQ_LOCK(kq);
1788 kq->kq_refcnt--;
1789 if (kq->kq_refcnt == 1)
1790 wakeup(&kq->kq_refcnt);
1792 KQ_UNLOCK(kq);
1802 kqueue_schedtask(struct kqueue *kq)
1804 KQ_OWNED(kq);
1805 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
1808 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
1809 taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
1810 kq->kq_state |= KQ_TASKSCHED;
1816 * Expand the kq to make sure we have storage for fops/ident pair.
1821 kqueue_expand(struct kqueue *kq, const struct filterops *fops, uintptr_t ident,
1828 KQ_NOTOWNED(kq);
1834 if (kq->kq_knlistsize <= fd) {
1835 size = kq->kq_knlistsize;
1841 KQ_LOCK(kq);
1842 if ((kq->kq_state & KQ_CLOSING) != 0) {
1845 } else if (kq->kq_knlistsize > fd) {
1848 if (kq->kq_knlist != NULL) {
1849 bcopy(kq->kq_knlist, list,
1850 kq->kq_knlistsize * sizeof(*list));
1851 to_free = kq->kq_knlist;
1852 kq->kq_knlist = NULL;
1855 kq->kq_knlistsize * sizeof(*list),
1856 (size - kq->kq_knlistsize) * sizeof(*list));
1857 kq->kq_knlistsize = size;
1858 kq->kq_knlist = list;
1860 KQ_UNLOCK(kq);
1863 if (kq->kq_knhashmask == 0) {
1869 KQ_LOCK(kq);
1870 if ((kq->kq_state & KQ_CLOSING) != 0) {
1873 } else if (kq->kq_knhashmask == 0) {
1874 kq->kq_knhash = tmp_knhash;
1875 kq->kq_knhashmask = tmp_knhashmask;
1879 KQ_UNLOCK(kq);
1884 KQ_NOTOWNED(kq);
1891 struct kqueue *kq;
1895 kq = arg;
1898 KQ_LOCK(kq);
1900 KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
1902 kq->kq_state &= ~KQ_TASKSCHED;
1903 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
1904 wakeup(&kq->kq_state);
1906 KQ_UNLOCK(kq);
1915 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
1960 KQ_LOCK(kq);
1964 if (kq->kq_count == 0) {
1968 kq->kq_state |= KQ_SLEEP;
1969 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
1982 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
1985 KQ_OWNED(kq);
1986 kn = TAILQ_FIRST(&kq->kq_head);
1992 KQ_FLUX_WAKEUP(kq);
1994 kq->kq_state |= KQ_FLUXWAIT;
1995 error = msleep(kq, &kq->kq_lock, PSOCK,
2000 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2003 kq->kq_count--;
2007 KQ_FLUX_WAKEUP(kq);
2018 kq->kq_count--;
2019 KQ_UNLOCK(kq);
2025 KQ_LOCK(kq);
2030 kq->kq_count--;
2031 KQ_UNLOCK(kq);
2038 KQ_LOCK(kq);
2043 KQ_UNLOCK(kq);
2048 KQ_LOCK(kq);
2053 kq->kq_count--;
2064 KQ_LOCK(kq);
2078 kq->kq_count--;
2080 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2095 KQ_UNLOCK_FLUX(kq);
2099 KQ_LOCK(kq);
2104 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
2106 KQ_OWNED(kq);
2107 KQ_UNLOCK_FLUX(kq);
2110 KQ_NOTOWNED(kq);
2141 struct kqueue *kq;
2143 kq = fp->f_data;
2147 kq->kq_state |= KQ_ASYNC;
2149 kq->kq_state &= ~KQ_ASYNC;
2154 return (fsetown(*(int *)data, &kq->kq_sigio));
2157 *(int *)data = fgetown(&kq->kq_sigio);
2170 struct kqueue *kq;
2174 if ((error = kqueue_acquire(fp, &kq)))
2177 KQ_LOCK(kq);
2179 if (kq->kq_count) {
2182 selrecord(td, &kq->kq_sel);
2183 if (SEL_WAITING(&kq->kq_sel))
2184 kq->kq_state |= KQ_SEL;
2187 kqueue_release(kq, 1);
2188 KQ_UNLOCK(kq);
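kqueue_poll() above is what makes a kqueue descriptor usable with poll(2)/select(2): it reports the descriptor readable while kq_count is nonzero and otherwise records the caller in kq_sel. A brief sketch of that usage, again with a pipe as the illustrative event source:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev;
	struct pollfd pfd;
	int kq, p[2];

	if ((kq = kqueue()) == -1 || pipe(p) == -1)
		err(1, "setup");

	EV_SET(&kev, p[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent");
	(void)write(p[1], "x", 1);

	/* poll(2) on the kqueue fd reaches kqueue_poll(); POLLIN means kq_count > 0. */
	pfd.fd = kq;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN) != 0)
		printf("kqueue has pending events\n");
	return (0);
}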
2210 kqueue_drain(struct kqueue *kq, struct thread *td)
2215 KQ_LOCK(kq);
2217 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
2219 kq->kq_state |= KQ_CLOSING;
2220 if (kq->kq_refcnt > 1)
2221 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
2223 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
2225 KASSERT(knlist_empty(&kq->kq_sel.si_note),
2228 for (i = 0; i < kq->kq_knlistsize; i++) {
2229 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
2231 kq->kq_state |= KQ_FLUXWAIT;
2232 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
2236 KQ_UNLOCK(kq);
2238 KQ_LOCK(kq);
2241 if (kq->kq_knhashmask != 0) {
2242 for (i = 0; i <= kq->kq_knhashmask; i++) {
2243 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
2245 kq->kq_state |= KQ_FLUXWAIT;
2246 msleep(kq, &kq->kq_lock, PSOCK,
2251 KQ_UNLOCK(kq);
2253 KQ_LOCK(kq);
2258 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
2259 kq->kq_state |= KQ_TASKDRAIN;
2260 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
2263 if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2264 selwakeuppri(&kq->kq_sel, PSOCK);
2265 if (!SEL_WAITING(&kq->kq_sel))
2266 kq->kq_state &= ~KQ_SEL;
2269 KQ_UNLOCK(kq);
2273 kqueue_destroy(struct kqueue *kq)
2276 KASSERT(kq->kq_fdp == NULL,
2278 seldrain(&kq->kq_sel);
2279 knlist_destroy(&kq->kq_sel.si_note);
2280 mtx_destroy(&kq->kq_lock);
2282 if (kq->kq_knhash != NULL)
2283 free(kq->kq_knhash, M_KQUEUE);
2284 if (kq->kq_knlist != NULL)
2285 free(kq->kq_knlist, M_KQUEUE);
2287 funsetown(&kq->kq_sigio);
2294 struct kqueue *kq = fp->f_data;
2299 if ((error = kqueue_acquire(fp, &kq)))
2301 kqueue_drain(kq, td);
2309 fdp = kq->kq_fdp;
2310 kq->kq_fdp = NULL;
2316 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
2320 kqueue_destroy(kq);
2321 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
2322 crfree(kq->kq_cred);
2323 free(kq, M_KQUEUE);
2332 struct kqueue *kq = fp->f_data;
2335 kif->kf_un.kf_kqueue.kf_kqueue_addr = (uintptr_t)kq;
2336 kif->kf_un.kf_kqueue.kf_kqueue_count = kq->kq_count;
2337 kif->kf_un.kf_kqueue.kf_kqueue_state = kq->kq_state;
2342 kqueue_wakeup(struct kqueue *kq)
2344 KQ_OWNED(kq);
2346 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
2347 kq->kq_state &= ~KQ_SLEEP;
2348 wakeup(kq);
2350 if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2351 selwakeuppri(&kq->kq_sel, PSOCK);
2352 if (!SEL_WAITING(&kq->kq_sel))
2353 kq->kq_state &= ~KQ_SEL;
2355 if (!knlist_empty(&kq->kq_sel.si_note))
2356 kqueue_schedtask(kq);
2357 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
2358 pgsigio(&kq->kq_sigio, SIGIO, 0);
2365 * There is a possibility to optimize in the case of one kq watching another.
2373 struct kqueue *kq;
2393 kq = kn->kn_kq;
2394 KQ_LOCK(kq);
2398 * the influx coming from the kq unlock in the
2404 KQ_UNLOCK(kq);
2407 KQ_UNLOCK(kq);
2409 KQ_LOCK(kq);
2413 KQ_UNLOCK_FLUX(kq);
2417 KQ_UNLOCK(kq);
2452 KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked"));
2592 struct kqueue *kq;
2604 kq = kn->kn_kq;
2605 KQ_LOCK(kq);
2607 KQ_UNLOCK(kq);
2613 KQ_UNLOCK(kq);
2618 KQ_UNLOCK(kq);
2620 kq = NULL;
2626 kq = kn->kn_kq;
2627 KQ_LOCK(kq);
2630 kq->kq_state |= KQ_FLUXWAIT;
2631 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
2632 kq = NULL;
2653 struct kqueue *kq;
2663 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
2664 KQ_LOCK(kq);
2668 while (kq->kq_knlistsize > fd &&
2669 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
2673 wakeup(kq);
2674 kq->kq_state |= KQ_FLUXWAIT;
2675 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
2679 KQ_UNLOCK(kq);
2682 KQ_LOCK(kq);
2684 KQ_UNLOCK_FLUX(kq);
2689 knote_attach(struct knote *kn, struct kqueue *kq)
2694 KQ_OWNED(kq);
2696 if ((kq->kq_state & KQ_CLOSING) != 0)
2699 if (kn->kn_id >= kq->kq_knlistsize)
2701 list = &kq->kq_knlist[kn->kn_id];
2703 if (kq->kq_knhash == NULL)
2705 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2723 struct kqueue *kq;
2726 kq = kn->kn_kq;
2730 KQ_NOTOWNED(kq);
2732 KQ_LOCK(kq);
2737 list = &kq->kq_knlist[kn->kn_id];
2739 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2745 KQ_UNLOCK_FLUX(kq);
2759 struct kqueue *kq = kn->kn_kq;
2764 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2766 kq->kq_count++;
2767 kqueue_wakeup(kq);
2773 struct kqueue *kq = kn->kn_kq;
2778 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2780 kq->kq_count--;
2808 * Register the kev w/ the kq specified by fd.
2813 struct kqueue *kq;
2822 if ((error = kqueue_acquire(fp, &kq)) != 0)
2825 error = kqueue_register(kq, kev, td, mflag);
2826 kqueue_release(kq, 0);