/*	$NetBSD: kern_event.c,v 1.13 2003/03/21 21:13:51 dsl Exp $	*/
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/pool.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/filedesc.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>

static int	kqueue_scan(struct file *fp, size_t maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    struct proc *p, register_t *retval);
static void	kqueue_wakeup(struct kqueue *kq);

static int	kqueue_read(struct file *fp, off_t *offset, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_write(struct file *fp, off_t *offset, struct uio *uio,
		    struct ucred *cred, int flags);
static int	kqueue_ioctl(struct file *fp, u_long com, void *data,
		    struct proc *p);
static int	kqueue_fcntl(struct file *fp, u_int com, void *data,
		    struct proc *p);
static int	kqueue_poll(struct file *fp, int events, struct proc *p);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *sp, struct proc *p);
static int	kqueue_close(struct file *fp, struct proc *p);

static struct fileops kqueueops = {
	kqueue_read, kqueue_write, kqueue_ioctl, kqueue_fcntl, kqueue_poll,
	kqueue_stat, kqueue_close, kqueue_kqfilter
};

static void	knote_attach(struct knote *kn, struct filedesc *fdp);
static void	knote_drop(struct knote *kn, struct proc *p,
		    struct filedesc *fdp);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);

static const struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
static const struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static const struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };
static struct filterops timer_filtops =
	{ 0, filt_timerattach, filt_timerdetach, filt_timer };

struct pool	kqueue_pool;
struct pool	knote_pool;
static int	kq_ncallouts = 0;
static int	kq_calloutmax = (4 * 1024);

MALLOC_DEFINE(M_KEVENT, "kevent", "kevents/knotes");

#define	KNOTE_ACTIVATE(kn)						\
do {									\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern const struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 * These should be listed in the numeric order of the EVFILT_* defines.
 * If filtops is NULL, the filter isn't implemented in NetBSD.
 * End of list is when name is NULL.
 */
struct kfilter {
	const char	*name;		/* name of filter */
	uint32_t	filter;		/* id of filter */
	const struct filterops *filtops; /* operations for filter */
};

/* System defined filters */
static const struct kfilter sys_kfilters[] = {
	{ "EVFILT_READ",	EVFILT_READ,	&file_filtops },
	{ "EVFILT_WRITE",	EVFILT_WRITE,	&file_filtops },
	{ "EVFILT_AIO",		EVFILT_AIO,	NULL },
	{ "EVFILT_VNODE",	EVFILT_VNODE,	&file_filtops },
	{ "EVFILT_PROC",	EVFILT_PROC,	&proc_filtops },
	{ "EVFILT_SIGNAL",	EVFILT_SIGNAL,	&sig_filtops },
	{ "EVFILT_TIMER",	EVFILT_TIMER,	&timer_filtops },
	{ NULL,			0,		NULL },	/* end of list */
};

/* User defined kfilters */
static struct kfilter	*user_kfilters;		/* array */
static int		user_kfilterc;		/* current offset */
static int		user_kfiltermaxc;	/* max size so far */

/*
 * kqueue_init:
 *
 *	Initialize the kqueue/knote facility.
 */
void
kqueue_init(void)
{

	pool_init(&kqueue_pool, sizeof(struct kqueue), 0, 0, 0, "kqueuepl",
	    NULL);
	pool_init(&knote_pool, sizeof(struct knote), 0, 0, 0, "knotepl",
	    NULL);
}

/*
 * Find kfilter entry by name, or NULL if not found.
 */
static const struct kfilter *
kfilter_byname_sys(const char *name)
{
	int i;

	for (i = 0; sys_kfilters[i].name != NULL; i++) {
		if (strcmp(name, sys_kfilters[i].name) == 0)
			return (&sys_kfilters[i]);
	}
	return (NULL);
}

static struct kfilter *
kfilter_byname_user(const char *name)
{
	int i;

	/* user_kfilters[] could be NULL if no filters were registered */
	if (!user_kfilters)
		return (NULL);

	for (i = 0; user_kfilters[i].name != NULL; i++) {
		/* skip entries marked deleted (empty name) */
		if (user_kfilters[i].name[0] != '\0' &&
		    strcmp(name, user_kfilters[i].name) == 0)
			return (&user_kfilters[i]);
	}
	return (NULL);
}

static const struct kfilter *
kfilter_byname(const char *name)
{
	const struct kfilter *kfilter;

	if ((kfilter = kfilter_byname_sys(name)) != NULL)
		return (kfilter);

	return (kfilter_byname_user(name));
}

/*
 * Find kfilter entry by filter id, or NULL if not found.
 * Assumes entries are indexed in filter id order, for speed.
 */
static const struct kfilter *
kfilter_byfilter(uint32_t filter)
{
	const struct kfilter *kfilter;

	if (filter < EVFILT_SYSCOUNT)	/* it's a system filter */
		kfilter = &sys_kfilters[filter];
	else if (user_kfilters != NULL &&
	    filter < EVFILT_SYSCOUNT + user_kfilterc)
		/* it's a user filter */
		kfilter = &user_kfilters[filter - EVFILT_SYSCOUNT];
	else
		return (NULL);		/* out of range */
	KASSERT(kfilter->filter == filter);	/* sanity check! */
	return (kfilter);
}

/*
 * Register a new kfilter. Stores the entry in user_kfilters.
 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
 * If retfilter != NULL, the new filterid is returned in it.
 */
int
kfilter_register(const char *name, const struct filterops *filtops,
    int *retfilter)
{
	struct kfilter *kfilter;
	void *space;
	int len;

	if (name == NULL || name[0] == '\0' || filtops == NULL)
		return (EINVAL);	/* invalid args */
	if (kfilter_byname(name) != NULL)
		return (EEXIST);	/* already exists */
	if (user_kfilterc > 0xffffffff - EVFILT_SYSCOUNT)
		return (EINVAL);	/* too many */

	/* check if need to grow user_kfilters */
	if (user_kfilterc + 1 > user_kfiltermaxc) {
		/*
		 * Grow in KFILTER_EXTENT chunks. Use malloc(9), because we
		 * want to traverse user_kfilters as an array.
		 */
		user_kfiltermaxc += KFILTER_EXTENT;
		kfilter = malloc(user_kfiltermaxc * sizeof(struct kfilter),
		    M_KEVENT, M_WAITOK);

		/* copy existing user_kfilters */
		if (user_kfilters != NULL)
			memcpy((caddr_t)kfilter, (caddr_t)user_kfilters,
			    user_kfilterc * sizeof(struct kfilter));
		/* zero new sections */
		memset((caddr_t)kfilter +
		    user_kfilterc * sizeof(struct kfilter), 0,
		    (user_kfiltermaxc - user_kfilterc) *
		    sizeof(struct kfilter));
		/* switch to new kfilter */
		if (user_kfilters != NULL)
			free(user_kfilters, M_KEVENT);
		user_kfilters = kfilter;
	}
	len = strlen(name) + 1;		/* copy name */
	space = malloc(len, M_KEVENT, M_WAITOK);
	memcpy(space, name, len);
	user_kfilters[user_kfilterc].name = space;

	user_kfilters[user_kfilterc].filter = user_kfilterc + EVFILT_SYSCOUNT;

	len = sizeof(struct filterops);	/* copy filtops */
	space = malloc(len, M_KEVENT, M_WAITOK);
	memcpy(space, filtops, len);
	user_kfilters[user_kfilterc].filtops = space;

	if (retfilter != NULL)
		*retfilter = user_kfilters[user_kfilterc].filter;
	user_kfilterc++;		/* finally, increment count */
	return (0);
}

/*
 * Unregister a kfilter previously registered with kfilter_register.
 * This retains the filter id, but clears the name and frees filtops (filter
 * operations), so that the number isn't reused during a boot.
 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
 */
int
kfilter_unregister(const char *name)
{
	struct kfilter *kfilter;

	if (name == NULL || name[0] == '\0')
		return (EINVAL);	/* invalid name */

	if (kfilter_byname_sys(name) != NULL)
		return (EINVAL);	/* can't detach system filters */

	kfilter = kfilter_byname_user(name);
	if (kfilter == NULL)		/* not found */
		return (ENOENT);

	if (kfilter->name[0] != '\0') {
		/* XXX Cast away const (but we know it's safe). */
		free((void *) kfilter->name, M_KEVENT);
		kfilter->name = "";	/* mark as `not implemented' */
	}
	if (kfilter->filtops != NULL) {
		/* XXX Cast away const (but we know it's safe). */
		free((void *) kfilter->filtops, M_KEVENT);
		kfilter->filtops = NULL; /* mark as `not implemented' */
	}
	return (0);
}
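
/*
 * Example (illustrative sketch, not part of the original source): a
 * hypothetical kernel component could register and later unregister its
 * own filter type roughly as follows, assuming it defines a filterops
 * instance "example_filtops" of its own:
 *
 *	int filter, error;
 *
 *	error = kfilter_register("EVFILT_EXAMPLE", &example_filtops, &filter);
 *	if (error == 0) {
 *		... export "filter" so userland can pass it to kevent(2) ...
 *		error = kfilter_unregister("EVFILT_EXAMPLE");
 *	}
 */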

/*
 * Filter attach method for EVFILT_READ and EVFILT_WRITE on normal file
 * descriptors. Calls the struct fileops kqfilter method for the given
 * file descriptor.
 */
static int
filt_fileattach(struct knote *kn)
{
	struct file *fp;

	fp = kn->kn_fp;
	return ((*fp->f_ops->fo_kqfilter)(fp, kn));
}

/*
 * Filter detach method for EVFILT_READ on kqueue descriptor.
 */
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq;

	kq = (struct kqueue *)kn->kn_fp->f_data;
	SLIST_REMOVE(&kq->kq_sel.sel_klist, kn, knote, kn_selnext);
}

/*
 * Filter event method for EVFILT_READ on kqueue descriptor.
 */
/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq;

	kq = (struct kqueue *)kn->kn_fp->f_data;
	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

/*
 * Filter attach method for EVFILT_PROC.
 */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;

	p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);

	/*
	 * Fail if it's not owned by you, or the last exec gave us
	 * setuid/setgid privs (unless you're root).
	 */
	if ((p->p_cred->p_ruid != curproc->p_cred->p_ruid ||
	    (p->p_flag & P_SUGID))
	    && suser(curproc->p_ucred, &curproc->p_acflag) != 0)
		return (EACCES);

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	/* XXXSMP lock the process? */
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}

/*
 * Filter detach method for EVFILT_PROC.
 *
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to. So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out. However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process might not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;

	p = kn->kn_ptr.p_proc;
	KASSERT(p->p_stat == SDEAD || pfind(kn->kn_id) == p);

	/* XXXSMP lock the process? */
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

/*
 * Filter event method for EVFILT_PROC.
 */
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		/*
		 * Detach the knote from the watched process and mark
		 * it as such. We can't leave this to kqueue_scan(),
		 * since the process might not exist by then. And we
		 * have to do this now, since the psignal KNOTE() is
		 * also called for zombies, and we might end up reading
		 * freed memory if the kevent had already been picked up
		 * and the knote g/c'ed.
		 */
		kn->kn_fop->f_detach(kn);
		kn->kn_status |= KN_DETACHED;

		/* Mark as ONESHOT, so that the knote is g/c'ed when read */
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
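
/*
 * Example (illustrative sketch, not part of the original source): from
 * userland, a monitor wanting the NOTE_TRACK behaviour handled above
 * registers an EVFILT_PROC kevent for a pid it owns, roughly as follows
 * (kq obtained from kqueue(2), error handling omitted):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * Children created afterwards are then reported as NOTE_CHILD events whose
 * data field carries the parent's pid, via the EV_FLAG1 registration path
 * in filt_proc()/filt_procattach() above.
 */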

static void
filt_timerexpire(void *knx)
{
	struct knote *kn = knx;
	int tticks;

	kn->kn_data++;
	KNOTE_ACTIVATE(kn);

	if ((kn->kn_flags & EV_ONESHOT) == 0) {
		tticks = mstohz(kn->kn_sdata);
		callout_schedule((struct callout *)kn->kn_hook, tticks);
	}
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	int tticks;

	if (kq_ncallouts >= kq_calloutmax)
		return (ENOMEM);
	kq_ncallouts++;

	tticks = mstohz(kn->kn_sdata);

	/* if the supplied value is under our resolution, use 1 tick */
	if (tticks == 0) {
		if (kn->kn_sdata == 0) {
			kq_ncallouts--;
			return (EINVAL);
		}
		tticks = 1;
	}

	kn->kn_flags |= EV_CLEAR;	/* automatically set */
	MALLOC(calloutp, struct callout *, sizeof(*calloutp),
	    M_KEVENT, 0);
	callout_init(calloutp);
	callout_reset(calloutp, tticks, filt_timerexpire, kn);
	kn->kn_hook = calloutp;

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;

	calloutp = (struct callout *)kn->kn_hook;
	callout_stop(calloutp);
	FREE(calloutp, M_KEVENT);
	kq_ncallouts--;
}

static int
filt_timer(struct knote *kn, long hint)
{
	return (kn->kn_data != 0);
}

/*
 * filt_seltrue:
 *
 *	This filter "event" routine simulates seltrue().
 */
int
filt_seltrue(struct knote *kn, long hint)
{

	/*
	 * We don't know how much data can be read/written,
	 * but we know that it *can* be.  This is about as
	 * good as select/poll does as well.
	 */
	kn->kn_data = 0;
	return (1);
}

/*
 * This provides a full kqfilter entry for device switch tables, with the
 * same effect as a filter using filt_seltrue() as its event method.
 */
static void
filt_seltruedetach(struct knote *kn)
{
	/* Nothing to do */
}

static const struct filterops seltrue_filtops =
	{ 1, NULL, filt_seltruedetach, filt_seltrue };

int
seltrue_kqfilter(dev_t dev, struct knote *kn)
{
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		kn->kn_fop = &seltrue_filtops;
		break;
	default:
		return (1);
	}

	/* Nothing more to do */
	return (0);
}
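
/*
 * Example (illustrative sketch, not part of the original source): a
 * character device driver with no meaningful notion of "not ready" can
 * point the kqfilter entry of its device switch at seltrue_kqfilter, or
 * wrap it in a trivial function of its own:
 *
 *	static int
 *	exampledev_kqfilter(dev_t dev, struct knote *kn)
 *	{
 *
 *		return (seltrue_kqfilter(dev, kn));
 *	}
 *
 * EVFILT_READ/EVFILT_WRITE knotes on such a device then always report
 * ready, just as seltrue() does for poll(2)/select(2).
 */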

/*
 * kqueue(2) system call.
 */
int
sys_kqueue(struct lwp *l, void *v, register_t *retval)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	struct proc *p;
	int fd, error;

	p = l->l_proc;
	fdp = p->p_fd;
	error = falloc(p, &fp, &fd);	/* setup a new file descriptor */
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = pool_get(&kqueue_pool, PR_WAITOK);
	memset((char *)kq, 0, sizeof(struct kqueue));
	simple_lock_init(&kq->kq_lock);
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = (caddr_t)kq;	/* store the kqueue with the fp */
	*retval = fd;
	if (fdp->fd_knlistsize < 0)
		fdp->fd_knlistsize = 0;	/* this process has a kq */
	kq->kq_fdp = fdp;
	FILE_SET_MATURE(fp);
	FILE_UNUSE(fp, p);		/* falloc() does FILE_USE() */
	return (error);
}

/*
 * kevent(2) system call.
 */
int
sys_kevent(struct lwp *l, void *v, register_t *retval)
{
	struct sys_kevent_args /* {
		syscallarg(int) fd;
		syscallarg(const struct kevent *) changelist;
		syscallarg(size_t) nchanges;
		syscallarg(struct kevent *) eventlist;
		syscallarg(size_t) nevents;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp;
	struct timespec ts;
	struct proc *p;
	size_t i, n;
	int nerrors, error;

	p = l->l_proc;
	/* check that we're dealing with a kq */
	fp = fd_getfile(p->p_fd, SCARG(uap, fd));
	if (fp == NULL)
		return (EBADF);

	if (fp->f_type != DTYPE_KQUEUE) {
		simple_unlock(&fp->f_slock);
		return (EBADF);
	}

	FILE_USE(fp);

	if (SCARG(uap, timeout) != NULL) {
		error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
		if (error)
			goto done;
		SCARG(uap, timeout) = &ts;
	}

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	/* traverse list of events to register */
	while (SCARG(uap, nchanges) > 0) {
		/* copyin a maximum of KQ_NEVENTS at each pass */
		n = MIN(SCARG(uap, nchanges), KQ_NEVENTS);
		error = copyin(SCARG(uap, changelist), kq->kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kq->kq_kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			/* register each knote */
			error = kqueue_register(kq, kevp, p);
			if (error) {
				if (SCARG(uap, nevents) != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					error = copyout((caddr_t)kevp,
					    (caddr_t)SCARG(uap, eventlist),
					    sizeof(*kevp));
					if (error)
						goto done;
					SCARG(uap, eventlist)++;
					SCARG(uap, nevents)--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		SCARG(uap, nchanges) -= n;	/* update the results */
		SCARG(uap, changelist) += n;
	}
	if (nerrors) {
		*retval = nerrors;
		error = 0;
		goto done;
	}

	/* actually scan through the events */
	error = kqueue_scan(fp, SCARG(uap, nevents), SCARG(uap, eventlist),
	    SCARG(uap, timeout), p, retval);
 done:
	FILE_UNUSE(fp, p);
	return (error);
}
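
/*
 * Example (illustrative sketch, not part of the original source): the
 * typical userland pattern served by the two system calls above is
 *
 *	struct kevent ev;
 *	int kq, n;
 *
 *	kq = kqueue();
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	(register the change)
 *	n = kevent(kq, NULL, 0, &ev, 1, NULL);	(wait for one event)
 *
 * where "fd" is some descriptor of interest; registrations travel through
 * kqueue_register() below and retrieval through kqueue_scan().
 */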

/*
 * Register a given kevent kev onto the kqueue
 */
int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
{
	const struct kfilter *kfilter;
	struct filedesc *fdp;
	struct file *fp;
	struct knote *kn;
	int s, error;

	fdp = kq->kq_fdp;
	fp = NULL;
	kn = NULL;
	error = 0;
	kfilter = kfilter_byfilter(kev->filter);
	if (kfilter == NULL || kfilter->filtops == NULL) {
		/* filter not found or not implemented */
		return (EINVAL);
	}

	/* search if knote already exists */
	if (kfilter->filtops->f_isfd) {
		/* monitoring a file descriptor */
		if ((fp = fd_getfile(fdp, kev->ident)) == NULL)
			return (EBADF);	/* validate descriptor */
		FILE_USE(fp);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		/*
		 * not monitoring a file descriptor, so
		 * lookup knotes in internal hash table
		 */
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;		/* filter not found */
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		/* add knote */

		if (kn == NULL) {
			/* create new knote */
			kn = pool_get(&knote_pool, PR_WAITOK);
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = kfilter->filtops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn, fdp);
			if ((error = kfilter->filtops->f_attach(kn)) != 0) {
				knote_drop(kn, p, fdp);
				goto done;
			}
		} else {
			/* modify existing knote */

			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters which have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splsched();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);

	} else if (kev->flags & EV_DELETE) {	/* delete knote */
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p, fdp);
		goto done;
	}

	/* disable knote */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splsched();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	/* enable knote */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splsched();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

 done:
	if (fp != NULL)
		FILE_UNUSE(fp, p);
	return (error);
}

/*
 * Scan through the list of events on fp (for a maximum of maxevents),
 * returning the results into ulistp. Timeout is determined by tsp; if
 * NULL, wait indefinitely, if 0 valued, perform a poll, otherwise wait
 * as appropriate.
 */
static int
kqueue_scan(struct file *fp, size_t maxevents, struct kevent *ulistp,
    const struct timespec *tsp, struct proc *p, register_t *retval)
{
	struct kqueue *kq;
	struct kevent *kevp;
	struct timeval atv;
	struct knote *kn, marker;
	size_t count, nkev;
	int s, timeout, error;

	kq = (struct kqueue *)fp->f_data;
	count = maxevents;
	nkev = error = 0;
	if (count == 0)
		goto done;

	if (tsp) {				/* timeout supplied */
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		s = splclock();
		timeradd(&atv, &time, &atv);	/* calc. time to wait until */
		splx(s);
		timeout = hzto(&atv);
		if (timeout <= 0)
			timeout = -1;		/* do poll */
	} else {
		/* no timeout, wait forever */
		timeout = 0;
	}
	goto start;

 retry:
	if (tsp) {
		/*
		 * We have to recalculate the timeout on every retry.
		 */
		timeout = hzto(&atv);
		if (timeout <= 0)
			goto done;
	}

 start:
	kevp = kq->kq_kev;
	s = splsched();
	simple_lock(&kq->kq_lock);
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = ltsleep(kq, PSOCK | PCATCH | PNORELOCK,
			    "kqread", timeout, &kq->kq_lock);
		}
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	/* mark end of knote list */
	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	simple_unlock(&kq->kq_lock);

	while (count) {				/* while user wants data ... */
		simple_lock(&kq->kq_lock);
		kn = TAILQ_FIRST(&kq->kq_head);	/* get next knote */
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {		/* if it's our marker, stop */
			/* What if it's someone else's marker? */
			simple_unlock(&kq->kq_lock);
			splx(s);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		kq->kq_count--;
		simple_unlock(&kq->kq_lock);

		if (kn->kn_status & KN_DISABLED) {
			/* don't want disabled events */
			kn->kn_status &= ~KN_QUEUED;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			/*
			 * non-ONESHOT event that hasn't
			 * triggered again, so de-queue.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			/* delete ONESHOT events after retrieval */
			kn->kn_status &= ~KN_QUEUED;
			splx(s);
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p, p->p_fd);
			s = splsched();
		} else if (kn->kn_flags & EV_CLEAR) {
			/* clear state after retrieval */
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
		} else {
			/* add event back on list */
			simple_lock(&kq->kq_lock);
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
			kq->kq_count++;
			simple_unlock(&kq->kq_lock);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			/* do copyouts in KQ_NEVENTS chunks */
			splx(s);
			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			s = splsched();
			if (error)
				break;
		}
	}

	/* remove marker */
	simple_lock(&kq->kq_lock);
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	simple_unlock(&kq->kq_lock);
	splx(s);
 done:
	if (nkev != 0) {
		/* copyout remaining events */
		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
	}
	*retval = maxevents - count;

	return (error);
}

/*
 * struct fileops read method for a kqueue descriptor.
 * Not implemented.
 * XXX: This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, off_t *offset, struct uio *uio,
    struct ucred *cred, int flags)
{

	return (ENXIO);
}

/*
 * struct fileops write method for a kqueue descriptor.
 * Not implemented.
 */
/*ARGSUSED*/
static int
kqueue_write(struct file *fp, off_t *offset, struct uio *uio,
    struct ucred *cred, int flags)
{

	return (ENXIO);
}

/*
 * struct fileops ioctl method for a kqueue descriptor.
 *
 * Two ioctls are currently supported. They both use struct kfilter_mapping:
 *	KFILTER_BYFILTER	find name for filter, and return result in
 *				name, which is of size len.
 *	KFILTER_BYNAME		find filter for name. len is ignored.
 */
/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long com, void *data, struct proc *p)
{
	struct kfilter_mapping *km;
	const struct kfilter *kfilter;
	char *name;
	int error;

	km = (struct kfilter_mapping *)data;
	error = 0;

	switch (com) {
	case KFILTER_BYFILTER:	/* convert filter -> name */
		kfilter = kfilter_byfilter(km->filter);
		if (kfilter != NULL)
			error = copyoutstr(kfilter->name, km->name, km->len,
			    NULL);
		else
			error = ENOENT;
		break;

	case KFILTER_BYNAME:	/* convert name -> filter */
		MALLOC(name, char *, KFILTER_MAXNAME, M_KEVENT, M_WAITOK);
		error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
		if (error) {
			FREE(name, M_KEVENT);
			break;
		}
		kfilter = kfilter_byname(name);
		if (kfilter != NULL)
			km->filter = kfilter->filter;
		else
			error = ENOENT;
		FREE(name, M_KEVENT);
		break;

	default:
		error = ENOTTY;
	}
	return (error);
}
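
/*
 * Example (illustrative sketch, not part of the original source): userland
 * can translate between filter names and ids on a kqueue descriptor with
 * the two ioctls above, e.g. to look up a dynamically registered filter
 * (the name "EVFILT_EXAMPLE" is hypothetical):
 *
 *	struct kfilter_mapping km;
 *	char name[KFILTER_MAXNAME] = "EVFILT_EXAMPLE";
 *
 *	km.name = name;
 *	km.len = sizeof(name);
 *	if (ioctl(kq, KFILTER_BYNAME, &km) == 0)
 *		... km.filter now holds the filter id ...
 */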

/*
 * struct fileops fcntl method for a kqueue descriptor.
 * Not implemented.
 */
/*ARGSUSED*/
static int
kqueue_fcntl(struct file *fp, u_int com, void *data, struct proc *p)
{

	return (ENOTTY);
}

/*
 * struct fileops poll method for a kqueue descriptor.
 * Determine if kqueue has events pending.
 */
static int
kqueue_poll(struct file *fp, int events, struct proc *p)
{
	struct kqueue *kq;
	int revents;

	kq = (struct kqueue *)fp->f_data;
	revents = 0;
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(p, &kq->kq_sel);
		}
	}
	return (revents);
}

/*
 * struct fileops stat method for a kqueue descriptor.
 * Returns dummy info, with st_size being number of events pending.
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct kqueue *kq;

	kq = (struct kqueue *)fp->f_data;
	memset((void *)st, 0, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*
 * struct fileops close method for a kqueue descriptor.
 * Cleans up kqueue.
 */
static int
kqueue_close(struct file *fp, struct proc *p)
{
	struct kqueue *kq;
	struct filedesc *fdp;
	struct knote **knp, *kn, *kn0;
	int i;

	kq = (struct kqueue *)fp->f_data;
	fdp = p->p_fd;
	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				FILE_UNUSE(kn->kn_fp, p);
				pool_put(&knote_pool, kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
					/* XXX non-fd release of kn->kn_ptr */
					pool_put(&knote_pool, kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	pool_put(&kqueue_pool, kq);
	fp->f_data = NULL;

	return (0);
}

/*
 * wakeup a kqueue
 */
static void
kqueue_wakeup(struct kqueue *kq)
{
	int s;

	s = splsched();
	simple_lock(&kq->kq_lock);
	if (kq->kq_state & KQ_SLEEP) {	/* if currently sleeping ... */
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);		/* ... wakeup */
	}

	/* Notify select/poll and kevent. */
	selnotify(&kq->kq_sel, 0);
	simple_unlock(&kq->kq_lock);
	splx(s);
}

/*
 * struct fileops kqfilter method for a kqueue descriptor.
 * Event triggered when monitored kqueue changes.
 */
/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq;

	KASSERT(fp == kn->kn_fp);
	kq = (struct kqueue *)kn->kn_fp->f_data;
	if (kn->kn_filter != EVFILT_READ)
		return (1);
	kn->kn_fop = &kqread_filtops;
	SLIST_INSERT_HEAD(&kq->kq_sel.sel_klist, kn, kn_selnext);
	return (0);
}
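
/*
 * Example (illustrative sketch, not part of the original source): the
 * producer side of knote() below is a driver or subsystem that keeps a
 * klist, usually embedded in its struct selinfo, and notifies any attached
 * knotes when its state changes.  A hypothetical driver might do:
 *
 *	static void
 *	exampledev_rxintr(struct exampledev_softc *sc)
 *	{
 *		... new data is now readable ...
 *		selnotify(&sc->sc_rsel, 0);
 *	}
 *
 * As in kqueue_wakeup() above, selnotify() notifies both select/poll and
 * kevent; code that manages a bare klist itself can call knote() (or the
 * KNOTE() macro) directly, as the process exit/signal paths do for
 * EVFILT_PROC and EVFILT_SIGNAL.
 */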

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}

/*
 * Remove all knotes from a specified klist
 */
void
knote_remove(struct proc *p, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p, p->p_fd);
	}
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp;
	struct klist *list;

	fdp = p->p_fd;
	list = &fdp->fd_knlist[fd];
	knote_remove(p, list);
}

/*
 * Attach a new knote to a file descriptor
 */
static void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (! kn->kn_fop->f_isfd) {
		/* if knote is not on an fd, store on internal hash table */
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, HASH_LIST,
			    M_KEVENT, M_WAITOK, &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	/*
	 * otherwise, knote is on an fd.
	 * knotes are stored in fd_knlist indexed by kn->kn_id.
	 */
	if (fdp->fd_knlistsize <= kn->kn_id) {
		/* expand list, it's too small */
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id) {
			/* grow in KQ_EXTENT chunks */
			size += KQ_EXTENT;
		}
		list = malloc(size * sizeof(struct klist *), M_KEVENT,
		    M_WAITOK);
		if (fdp->fd_knlist) {
			/* copy existing knlist */
			memcpy((caddr_t)list, (caddr_t)fdp->fd_knlist,
			    fdp->fd_knlistsize * sizeof(struct klist *));
		}
		/*
		 * Zero new memory. Stylistically, SLIST_INIT() should be
		 * used here, but that does the same thing as the memset()
		 * anyway.
		 */
		memset(&list[fdp->fd_knlistsize], 0,
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));

		/* switch to new knlist */
		if (fdp->fd_knlist != NULL)
			free(fdp->fd_knlist, M_KEVENT);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}

	/* get list head for this fd */
	list = &fdp->fd_knlist[kn->kn_id];
 done:
	/* add new knote */
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}

/*
 * Drop knote.
 * Should be called at spl == 0, since we don't want to hold spl
 * while calling FILE_UNUSE and free.
 */
static void
knote_drop(struct knote *kn, struct proc *p, struct filedesc *fdp)
{
	struct klist *list;

	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		FILE_UNUSE(kn->kn_fp, p);
	pool_put(&knote_pool, kn);
}

/*
 * Queue new event for knote.
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq;
	int s;

	kq = kn->kn_kq;
	KASSERT((kn->kn_status & KN_QUEUED) == 0);

	s = splsched();
	simple_lock(&kq->kq_lock);
	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	simple_unlock(&kq->kq_lock);
	splx(s);
	kqueue_wakeup(kq);
}

/*
 * Dequeue event for knote.
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq;
	int s;

	KASSERT(kn->kn_status & KN_QUEUED);
	kq = kn->kn_kq;

	s = splsched();
	simple_lock(&kq->kq_lock);
	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	simple_unlock(&kq->kq_lock);
	splx(s);
}