1 /* $OpenBSD: kern_unveil.c,v 1.53 2022/01/11 07:31:50 semarie Exp $ */ 2 3 /* 4 * Copyright (c) 2017-2019 Bob Beck <beck@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <sys/param.h> 20 21 #include <sys/acct.h> 22 #include <sys/mount.h> 23 #include <sys/filedesc.h> 24 #include <sys/proc.h> 25 #include <sys/namei.h> 26 #include <sys/pool.h> 27 #include <sys/vnode.h> 28 #include <sys/ktrace.h> 29 #include <sys/types.h> 30 #include <sys/malloc.h> 31 #include <sys/tree.h> 32 #include <sys/lock.h> 33 34 #include <sys/conf.h> 35 #include <sys/syscall.h> 36 #include <sys/syscallargs.h> 37 #include <sys/systm.h> 38 39 #include <sys/pledge.h> 40 41 struct unvname { 42 char *un_name; 43 size_t un_namesize; 44 u_char un_flags; 45 RBT_ENTRY(unvnmae) un_rbt; 46 }; 47 48 RBT_HEAD(unvname_rbt, unvname); 49 50 struct unveil { 51 struct vnode *uv_vp; 52 ssize_t uv_cover; 53 struct unvname_rbt uv_names; 54 struct rwlock uv_lock; 55 u_char uv_flags; 56 }; 57 58 /* #define DEBUG_UNVEIL */ 59 #ifdef DEBUG_UNVEIL 60 #define DPRINTF(x...) do { printf(x); } while (0) 61 #else 62 #define DPRINTF(x...) 
63 #endif 64 65 #define UNVEIL_MAX_VNODES 128 66 #define UNVEIL_MAX_NAMES 128 67 68 static inline int 69 unvname_compare(const struct unvname *n1, const struct unvname *n2) 70 { 71 if (n1->un_namesize == n2->un_namesize) 72 return (memcmp(n1->un_name, n2->un_name, n1->un_namesize)); 73 else 74 return (n1->un_namesize - n2->un_namesize); 75 } 76 77 struct unvname * 78 unvname_new(const char *name, size_t size, u_char flags) 79 { 80 struct unvname *ret = malloc(sizeof(struct unvname), M_PROC, M_WAITOK); 81 ret->un_name = malloc(size, M_PROC, M_WAITOK); 82 memcpy(ret->un_name, name, size); 83 ret->un_namesize = size; 84 ret->un_flags = flags; 85 return ret; 86 } 87 88 void 89 unvname_delete(struct unvname *name) 90 { 91 free(name->un_name, M_PROC, name->un_namesize); 92 free(name, M_PROC, sizeof(struct unvname)); 93 } 94 95 RBT_PROTOTYPE(unvname_rbt, unvname, un_rbt, unvname_compare); 96 RBT_GENERATE(unvname_rbt, unvname, un_rbt, unvname_compare); 97 98 int 99 unveil_delete_names(struct unveil *uv) 100 { 101 struct unvname *unvn, *next; 102 int ret = 0; 103 104 rw_enter_write(&uv->uv_lock); 105 RBT_FOREACH_SAFE(unvn, unvname_rbt, &uv->uv_names, next) { 106 RBT_REMOVE(unvname_rbt, &uv->uv_names, unvn); 107 unvname_delete(unvn); 108 ret++; 109 } 110 rw_exit_write(&uv->uv_lock); 111 112 DPRINTF("deleted %d names\n", ret); 113 return ret; 114 } 115 116 int 117 unveil_add_name_unlocked(struct unveil *uv, char *name, u_char flags) 118 { 119 struct unvname *unvn; 120 121 unvn = unvname_new(name, strlen(name) + 1, flags); 122 if (RBT_INSERT(unvname_rbt, &uv->uv_names, unvn) != NULL) { 123 /* Name already present. 
*/ 124 unvname_delete(unvn); 125 return 0; 126 } 127 128 DPRINTF("added name %s underneath vnode %p\n", name, uv->uv_vp); 129 return 1; 130 } 131 132 int 133 unveil_add_name(struct unveil *uv, char *name, u_char flags) 134 { 135 int ret; 136 137 rw_enter_write(&uv->uv_lock); 138 ret = unveil_add_name_unlocked(uv, name, flags); 139 rw_exit_write(&uv->uv_lock); 140 return ret; 141 } 142 143 struct unvname * 144 unveil_namelookup(struct unveil *uv, char *name) 145 { 146 struct unvname n, *ret = NULL; 147 148 rw_enter_read(&uv->uv_lock); 149 150 DPRINTF("%s: looking up name %s (%p) in vnode %p\n", 151 __func__, name, name, uv->uv_vp); 152 153 KASSERT(uv->uv_vp != NULL); 154 155 n.un_name = name; 156 n.un_namesize = strlen(name) + 1; 157 158 ret = RBT_FIND(unvname_rbt, &uv->uv_names, &n); 159 160 rw_exit_read(&uv->uv_lock); 161 162 DPRINTF("%s: %s name %s in vnode %p\n", __func__, 163 (ret == NULL) ? "no match for" : "matched", 164 name, uv->uv_vp); 165 return ret; 166 } 167 168 void 169 unveil_destroy(struct process *ps) 170 { 171 size_t i; 172 173 for (i = 0; ps->ps_uvpaths != NULL && i < ps->ps_uvvcount; i++) { 174 struct unveil *uv = ps->ps_uvpaths + i; 175 176 struct vnode *vp = uv->uv_vp; 177 /* skip any vnodes zapped by unveil_removevnode */ 178 if (vp != NULL) { 179 vp->v_uvcount--; 180 181 DPRINTF("unveil: %s(%d): removing vnode %p uvcount %d " 182 "in position %ld\n", 183 ps->ps_comm, ps->ps_pid, vp, vp->v_uvcount, i); 184 vrele(vp); 185 } 186 ps->ps_uvncount -= unveil_delete_names(uv); 187 uv->uv_vp = NULL; 188 uv->uv_flags = 0; 189 } 190 191 KASSERT(ps->ps_uvncount == 0); 192 free(ps->ps_uvpaths, M_PROC, UNVEIL_MAX_VNODES * 193 sizeof(struct unveil)); 194 ps->ps_uvvcount = 0; 195 ps->ps_uvpaths = NULL; 196 } 197 198 void 199 unveil_copy(struct process *parent, struct process *child) 200 { 201 size_t i; 202 203 child->ps_uvdone = parent->ps_uvdone; 204 if (parent->ps_uvvcount == 0) 205 return; 206 207 child->ps_uvpaths = mallocarray(UNVEIL_MAX_VNODES, 208 
sizeof(struct unveil), M_PROC, M_WAITOK|M_ZERO); 209 210 child->ps_uvncount = 0; 211 for (i = 0; parent->ps_uvpaths != NULL && i < parent->ps_uvvcount; 212 i++) { 213 struct unveil *from = parent->ps_uvpaths + i; 214 struct unveil *to = child->ps_uvpaths + i; 215 struct unvname *unvn, *next; 216 217 to->uv_vp = from->uv_vp; 218 if (to->uv_vp != NULL) { 219 vref(to->uv_vp); 220 to->uv_vp->v_uvcount++; 221 } 222 rw_init(&to->uv_lock, "unveil"); 223 RBT_INIT(unvname_rbt, &to->uv_names); 224 rw_enter_read(&from->uv_lock); 225 RBT_FOREACH_SAFE(unvn, unvname_rbt, &from->uv_names, next) { 226 if (unveil_add_name_unlocked(&child->ps_uvpaths[i], 227 unvn->un_name, unvn->un_flags)) 228 child->ps_uvncount++; 229 } 230 rw_exit_read(&from->uv_lock); 231 to->uv_flags = from->uv_flags; 232 to->uv_cover = from->uv_cover; 233 } 234 child->ps_uvvcount = parent->ps_uvvcount; 235 } 236 237 /* 238 * Walk up from vnode dp, until we find a matching unveil, or the root vnode 239 * returns -1 if no unveil to be found above dp or if dp is the root vnode. 240 */ 241 ssize_t 242 unveil_find_cover(struct vnode *dp, struct proc *p) 243 { 244 struct vnode *vp = NULL, *parent = NULL, *root; 245 ssize_t ret = -1; 246 int error; 247 248 /* use the correct root to stop at, chrooted or not.. */ 249 root = p->p_fd->fd_rdir ? p->p_fd->fd_rdir : rootvnode; 250 vp = dp; 251 252 while (vp != root) { 253 struct componentname cn = { 254 .cn_nameiop = LOOKUP, 255 .cn_flags = ISLASTCN | ISDOTDOT | RDONLY, 256 .cn_proc = p, 257 .cn_cred = p->p_ucred, 258 .cn_pnbuf = NULL, 259 .cn_nameptr = "..", 260 .cn_namelen = 2, 261 .cn_consume = 0 262 }; 263 264 /* 265 * If we are at the root of a filesystem, and we are 266 * still mounted somewhere, take the .. in the above 267 * filesystem. 268 */ 269 if (vp != root && (vp->v_flag & VROOT)) { 270 if (vp->v_mount == NULL) 271 return -1; 272 vp = vp->v_mount->mnt_vnodecovered ? 
273 vp->v_mount->mnt_vnodecovered : vp; 274 } 275 276 if (vget(vp, LK_EXCLUSIVE|LK_RETRY) != 0) 277 return -1; 278 /* Get parent vnode of vp using lookup of '..' */ 279 /* This returns with vp unlocked but ref'ed*/ 280 error = VOP_LOOKUP(vp, &parent, &cn); 281 if (error) { 282 if (!(cn.cn_flags & PDIRUNLOCK)) 283 vput(vp); 284 else { 285 /* 286 * This corner case should not happen because 287 * we have not set LOCKPARENT in the flags 288 */ 289 DPRINTF("vnode %p PDIRUNLOCK on error\n", vp); 290 vrele(vp); 291 } 292 break; 293 } 294 295 vrele(vp); 296 (void) unveil_lookup(parent, p->p_p, &ret); 297 vput(parent); 298 299 if (ret >= 0) 300 break; 301 302 if (vp == parent) { 303 ret = -1; 304 break; 305 } 306 vp = parent; 307 parent = NULL; 308 } 309 return ret; 310 } 311 312 313 struct unveil * 314 unveil_lookup(struct vnode *vp, struct process *pr, ssize_t *position) 315 { 316 struct unveil *uv = pr->ps_uvpaths; 317 ssize_t i; 318 319 if (position != NULL) 320 *position = -1; 321 322 if (vp->v_uvcount == 0) 323 return NULL; 324 325 for (i = 0; i < pr->ps_uvvcount; i++) { 326 if (vp == uv[i].uv_vp) { 327 KASSERT(uv[i].uv_vp->v_uvcount > 0); 328 KASSERT(uv[i].uv_vp->v_usecount > 0); 329 if (position != NULL) 330 *position = i; 331 return &uv[i]; 332 } 333 } 334 return NULL; 335 } 336 337 int 338 unveil_parsepermissions(const char *permissions, u_char *perms) 339 { 340 size_t i = 0; 341 char c; 342 343 *perms = UNVEIL_USERSET; 344 while ((c = permissions[i++]) != '\0') { 345 switch (c) { 346 case 'r': 347 *perms |= UNVEIL_READ; 348 break; 349 case 'w': 350 *perms |= UNVEIL_WRITE; 351 break; 352 case 'x': 353 *perms |= UNVEIL_EXEC; 354 break; 355 case 'c': 356 *perms |= UNVEIL_CREATE; 357 break; 358 default: 359 return -1; 360 } 361 } 362 return 0; 363 } 364 365 int 366 unveil_setflags(u_char *flags, u_char nflags) 367 { 368 #if 0 369 if (((~(*flags)) & nflags) != 0) { 370 DPRINTF("Flags escalation %llX -> %llX\n", *flags, nflags); 371 return 1; 372 } 373 #endif 374 
	*flags = nflags;
	return 1;
}

/*
 * Claim the next free slot in the process's unveil array for vnode
 * `vp', compute which existing unveil covers it, and re-check the
 * cover of every other unveil, since the new entry may now be
 * interposed between an unveil and its former cover.
 */
struct unveil *
unveil_add_vnode(struct proc *p, struct vnode *vp)
{
	struct process *pr = p->p_p;
	struct unveil *uv = NULL;
	ssize_t i;

	KASSERT(pr->ps_uvvcount < UNVEIL_MAX_VNODES);

	uv = &pr->ps_uvpaths[pr->ps_uvvcount++];
	rw_init(&uv->uv_lock, "unveil");
	RBT_INIT(unvname_rbt, &uv->uv_names);
	uv->uv_vp = vp;
	uv->uv_flags = 0;

	/* find out what we are covered by */
	uv->uv_cover = unveil_find_cover(vp, p);

	/*
	 * Find anyone covered by what we are covered by
	 * and re-check what covers them (we could have
	 * interposed a cover)
	 */
	for (i = 0; i < pr->ps_uvvcount - 1; i++) {
		if (pr->ps_uvpaths[i].uv_cover == uv->uv_cover)
			pr->ps_uvpaths[i].uv_cover =
			    unveil_find_cover(pr->ps_uvpaths[i].uv_vp, p);
	}

	return (uv);
}

/*
 * Record one unveil(2) call for process `p': the path looked up in
 * `ndp' with permission string `permissions'.  A directory target is
 * unveiled as a whole; a non-directory target is recorded as a name
 * beneath its parent directory.  Returns 0 on success, EINVAL for a
 * bad permission string, E2BIG when the per-process limits are hit,
 * or EPERM when flags may not be changed.
 */
int
unveil_add(struct proc *p, struct nameidata *ndp, const char *permissions)
{
	struct process *pr = p->p_p;
	struct vnode *vp;
	struct unveil *uv;
	int directory_add;
	int ret = EINVAL;
	u_char flags;

	KASSERT(ISSET(ndp->ni_cnd.cn_flags, HASBUF)); /* must have SAVENAME */

	if (unveil_parsepermissions(permissions, &flags) == -1)
		goto done;

	/* lazily allocate the per-process unveil array */
	if (pr->ps_uvpaths == NULL) {
		pr->ps_uvpaths = mallocarray(UNVEIL_MAX_VNODES,
		    sizeof(struct unveil), M_PROC, M_WAITOK|M_ZERO);
	}

	if (pr->ps_uvvcount >= UNVEIL_MAX_VNODES ||
	    pr->ps_uvncount >= UNVEIL_MAX_NAMES) {
		ret = E2BIG;
		goto done;
	}

	/* Are we a directory? or something else */
	directory_add = ndp->ni_vp != NULL && ndp->ni_vp->v_type == VDIR;

	/* for a non-directory we unveil a name under the parent dir */
	if (directory_add)
		vp = ndp->ni_vp;
	else
		vp = ndp->ni_dvp;

	KASSERT(vp->v_type == VDIR);
	vref(vp);
	vp->v_uvcount++;
	if ((uv = unveil_lookup(vp, pr, NULL)) != NULL) {
		/*
		 * We already have unveiled this directory
		 * vnode; drop the extra reference taken above.
		 */
		vp->v_uvcount--;
		vrele(vp);

		/*
		 * If we are adding a directory which was already
		 * unveiled containing only specific terminals,
		 * unrestrict it.
		 */
		if (directory_add) {
			DPRINTF("unveil: %s(%d): updating directory vnode %p"
			    " to unrestricted uvcount %d\n",
			    pr->ps_comm, pr->ps_pid, vp, vp->v_uvcount);

			if (!unveil_setflags(&uv->uv_flags, flags))
				ret = EPERM;
			else
				ret = 0;
			goto done;
		}

		/*
		 * If we are adding a terminal that is already unveiled, just
		 * replace the flags and we are done
		 */
		if (!directory_add) {
			struct unvname *tname;
			if ((tname = unveil_namelookup(uv,
			    ndp->ni_cnd.cn_nameptr)) != NULL) {
				DPRINTF("unveil: %s(%d): changing flags for %s"
				    "in vnode %p, uvcount %d\n",
				    pr->ps_comm, pr->ps_pid, tname->un_name, vp,
				    vp->v_uvcount);

				if (!unveil_setflags(&tname->un_flags, flags))
					ret = EPERM;
				else
					ret = 0;
				goto done;
			}
		}

	} else {
		/*
		 * New unveil involving this directory vnode.
		 */
		uv = unveil_add_vnode(p, vp);
	}

	/*
	 * At this stage we have an unveil in uv with a vnode for a
	 * directory. If the component we are adding is a directory,
	 * we are done. Otherwise, we add the component name to the
	 * name list in uv.
	 */

	if (directory_add) {
		uv->uv_flags = flags;
		ret = 0;

		DPRINTF("unveil: %s(%d): added unrestricted directory vnode %p"
		    ", uvcount %d\n",
		    pr->ps_comm, pr->ps_pid, vp, vp->v_uvcount);
		goto done;
	}

	if (unveil_add_name(uv, ndp->ni_cnd.cn_nameptr, flags))
		pr->ps_uvncount++;
	ret = 0;

	DPRINTF("unveil: %s(%d): added name %s beneath %s vnode %p,"
	    " uvcount %d\n",
	    pr->ps_comm, pr->ps_pid, ndp->ni_cnd.cn_nameptr,
	    uv->uv_flags ? "unrestricted" : "restricted",
	    vp, vp->v_uvcount);

done:
	return ret;
}

/*
 * Check the UNVEIL_* bits requested by the lookup in `ni' against the
 * `flags' of a matched unveil/name.  Returns 1 when every requested
 * permission is granted, 0 otherwise (flags == 0 forbids everything).
 *
 * XXX this will probably change.
 * XXX collapse down later once debug surely unneeded
 */
int
unveil_flagmatch(struct nameidata *ni, u_char flags)
{
	if (flags == 0) {
		DPRINTF("All operations forbidden for 0 flags\n");
		return 0;
	}
	if (ni->ni_unveil & UNVEIL_READ) {
		if ((flags & UNVEIL_READ) == 0) {
			DPRINTF("unveil lacks UNVEIL_READ\n");
			return 0;
		}
	}
	if (ni->ni_unveil & UNVEIL_WRITE) {
		if ((flags & UNVEIL_WRITE) == 0) {
			DPRINTF("unveil lacks UNVEIL_WRITE\n");
			return 0;
		}
	}
	if (ni->ni_unveil & UNVEIL_EXEC) {
		if ((flags & UNVEIL_EXEC) == 0) {
			DPRINTF("unveil lacks UNVEIL_EXEC\n");
			return 0;
		}
	}
	if (ni->ni_unveil & UNVEIL_CREATE) {
		if ((flags & UNVEIL_CREATE) == 0) {
			DPRINTF("unveil lacks UNVEIL_CREATE\n");
			return 0;
		}
	}
	return 1;
}

/*
 * When traversing up towards the root figure out the proper unveil for
 * the parent directory.
 */
struct unveil *
unveil_covered(struct unveil *uv, struct vnode *dvp, struct proc *p)
{
	if (uv && uv->uv_vp == dvp) {
		/* if at the root, chrooted or not, return the current uv */
		if (dvp == (p->p_fd->fd_rdir ? p->p_fd->fd_rdir : rootvnode))
			return uv;
		if (uv->uv_cover >=0) {
			KASSERT(uv->uv_cover < p->p_p->ps_uvvcount);
			return &p->p_p->ps_uvpaths[uv->uv_cover];
		}
		return NULL;
	}
	return uv;
}


/*
 * Start a relative path lookup. Ensure we find whatever unveil covered
 * where we start from, either by having a saved current working directory
 * unveil, or by walking up and finding a cover the hard way if we are
 * doing a non AT_FDCWD relative lookup. Caller passes a NULL dp
 * if we are using AT_FDCWD.
 */
void
unveil_start_relative(struct proc *p, struct nameidata *ni, struct vnode *dp)
{
	struct process *pr = p->p_p;
	struct unveil *uv = NULL;
	ssize_t uvi;

	/* nothing unveiled: nothing to match against */
	if (pr->ps_uvpaths == NULL)
		return;

	uv = unveil_lookup(dp, pr, NULL);
	if (uv == NULL) {
		/* no direct match: walk up and find what covers dp */
		uvi = unveil_find_cover(dp, p);
		if (uvi >= 0) {
			KASSERT(uvi < pr->ps_uvvcount);
			uv = &pr->ps_uvpaths[uvi];
		}
	}

	/*
	 * Store this match for later use. Flags are checked at the end.
	 */
	if (uv) {
		DPRINTF("unveil: %s(%d): relative unveil at %p matches",
		    pr->ps_comm, pr->ps_pid, uv);

		ni->ni_unveil_match = uv;
	}
}

/*
 * unveil checking - for component directories in a namei lookup.
 */
void
unveil_check_component(struct proc *p, struct nameidata *ni, struct vnode *dp)
{
	struct process *pr = p->p_p;
	struct unveil *uv = NULL;

	/* unveil(2) itself and processes with no unveils are exempt */
	if (ni->ni_pledge == PLEDGE_UNVEIL || pr->ps_uvpaths == NULL)
		return;
	if (ni->ni_cnd.cn_flags & BYPASSUNVEIL)
		return;

	if (ni->ni_cnd.cn_flags & ISDOTDOT) {
		/*
		 * adjust unveil match as necessary
		 */
		uv = unveil_covered(ni->ni_unveil_match, dp, p);

		/* clear the match when we DOTDOT above it */
		if (ni->ni_unveil_match && ni->ni_unveil_match->uv_vp == dp)
			ni->ni_unveil_match = NULL;
	} else
		uv = unveil_lookup(dp, pr, NULL);

	if (uv != NULL) {
		/* update match */
		ni->ni_unveil_match = uv;

		DPRINTF("unveil: %s(%d): component directory match for "
		    "vnode %p\n", pr->ps_comm, pr->ps_pid, dp);
	}
}

/*
 * unveil checking - only done after namei lookup has succeeded on
 * the last component of a namei lookup.
 */
int
unveil_check_final(struct proc *p, struct nameidata *ni)
{
	struct process *pr = p->p_p;
	struct unveil *uv = NULL, *nuv;
	struct unvname *tname = NULL;

	/* unveil(2) itself and processes with no unveils are exempt */
	if (ni->ni_pledge == PLEDGE_UNVEIL || pr->ps_uvpaths == NULL)
		return (0);

	if (ni->ni_cnd.cn_flags & BYPASSUNVEIL) {
		DPRINTF("unveil: %s(%d): BYPASSUNVEIL.\n",
		    pr->ps_comm, pr->ps_pid);

		return (0);
	}

	if (ni->ni_vp != NULL && ni->ni_vp->v_type == VDIR) {
		/* We are matching a directory terminal component */
		uv = unveil_lookup(ni->ni_vp, pr, NULL);
		if (uv == NULL || (uv->uv_flags & UNVEIL_USERSET) == 0) {
			DPRINTF("unveil: %s(%d) no match for vnode %p\n",
			    pr->ps_comm, pr->ps_pid, ni->ni_vp);

			/* remember the unveil for cover backtracking */
			if (uv != NULL)
				ni->ni_unveil_match = uv;
			goto done;
		}
		if (!unveil_flagmatch(ni, uv->uv_flags)) {
			DPRINTF("unveil: %s(%d) flag mismatch for directory"
			    " vnode %p\n",
			    pr->ps_comm, pr->ps_pid, ni->ni_vp);

			pr->ps_acflag |= AUNVEIL;
			if (uv->uv_flags & UNVEIL_MASK)
				return EACCES;
			else
				return ENOENT;

		}
		/* directory and flags match, success */
		DPRINTF("unveil: %s(%d): matched directory \"%s\" at vnode %p\n",
		    pr->ps_comm, pr->ps_pid, ni->ni_cnd.cn_nameptr,
		    uv->uv_vp);

		return (0);
	}

	/* Otherwise, we are matching a non-terminal component */
	uv = unveil_lookup(ni->ni_dvp, pr, NULL);
	if (uv == NULL) {
		DPRINTF("unveil: %s(%d) no match for directory vnode %p\n",
		    pr->ps_comm, pr->ps_pid, ni->ni_dvp);

		goto done;
	}
	if ((tname = unveil_namelookup(uv, ni->ni_cnd.cn_nameptr)) == NULL) {
		DPRINTF("unveil: %s(%d) no match for terminal '%s' in "
		    "directory vnode %p\n",
		    pr->ps_comm, pr->ps_pid,
		    ni->ni_cnd.cn_nameptr, ni->ni_dvp);

		/* no specific name, so check unveil directory flags */
		if (!unveil_flagmatch(ni, uv->uv_flags)) {
			DPRINTF("unveil: %s(%d) terminal "
			    "'%s' flags mismatch in directory "
			    "vnode %p\n",
			    pr->ps_comm, pr->ps_pid,
			    ni->ni_cnd.cn_nameptr, ni->ni_dvp);

			/*
			 * If dir has user set restrictions fail with
			 * EACCES or ENOENT. Otherwise, use any covering
			 * match that we found above this dir.
			 */
			if (uv->uv_flags & UNVEIL_USERSET) {
				pr->ps_acflag |= AUNVEIL;
				if (uv->uv_flags & UNVEIL_MASK)
					return EACCES;
				else
					return ENOENT;
			}
			/* start backtrack from this node */
			ni->ni_unveil_match = uv;
			goto done;
		}
		/* directory flags match, success */
		DPRINTF("unveil: %s(%d): matched \"%s\" underneath vnode %p\n",
		    pr->ps_comm, pr->ps_pid, ni->ni_cnd.cn_nameptr,
		    uv->uv_vp);

		return (0);
	}
	if (!unveil_flagmatch(ni, tname->un_flags)) {
		/* do flags match for matched name */
		DPRINTF("unveil: %s(%d) flag mismatch for terminal '%s'\n",
		    pr->ps_comm, pr->ps_pid, tname->un_name);

		pr->ps_acflag |= AUNVEIL;
		return EACCES;
	}
	/* name and flags match. success */
	DPRINTF("unveil: %s(%d) matched terminal '%s'\n",
	    pr->ps_comm, pr->ps_pid, tname->un_name);

	return (0);

done:
	/*
	 * last component did not match, check previous matches if
	 * access is allowed or not.
	 */
	for (uv = ni->ni_unveil_match; uv != NULL; uv = nuv) {
		if (unveil_flagmatch(ni, uv->uv_flags)) {
			DPRINTF("unveil: %s(%d): matched \"%s\" underneath/at "
			    "vnode %p\n", pr->ps_comm, pr->ps_pid,
			    ni->ni_cnd.cn_nameptr, uv->uv_vp);

			return (0);
		}
		/* if node has any flags set then this is an access violation */
		if (uv->uv_flags & UNVEIL_USERSET) {
			DPRINTF("unveil: %s(%d) flag mismatch for vnode %p\n",
			    pr->ps_comm, pr->ps_pid, uv->uv_vp);

			pr->ps_acflag |= AUNVEIL;
			if (uv->uv_flags & UNVEIL_MASK)
				return EACCES;
			else
				return ENOENT;
		}

		DPRINTF("unveil: %s(%d) check cover for vnode %p, uv_cover %zd\n",
		    pr->ps_comm, pr->ps_pid, uv->uv_vp, uv->uv_cover);

		/* walk to the covering unveil; stop when we reach a fixpoint */
		nuv = unveil_covered(uv, uv->uv_vp, p);
		if (nuv == uv)
			break;
	}
	pr->ps_acflag |= AUNVEIL;
	return ENOENT;
}

/*
 * Scan all active processes to see if any of them have a unveil
 * to this vnode. If so, NULL the vnode in their unveil list,
 * vrele, drop the reference, and mark their unveil list
 * as needing to have the hole shrunk the next time the process
 * uses it for lookup.
 */
void
unveil_removevnode(struct vnode *vp)
{
	struct process *pr;

	/* nothing unveils this vnode */
	if (vp->v_uvcount == 0)
		return;

	DPRINTF("%s: found vnode %p with count %d\n",
	    __func__, vp, vp->v_uvcount);

	vref(vp); /* make sure it is held till we are done */

	LIST_FOREACH(pr, &allprocess, ps_list) {
		struct unveil * uv;

		if ((uv = unveil_lookup(vp, pr, NULL)) != NULL &&
		    uv->uv_vp != NULL) {
			uv->uv_vp = NULL;
			uv->uv_flags = 0;

			DPRINTF("%s: vnode %p now count %d\n",
			    __func__, vp, vp->v_uvcount);

			if (vp->v_uvcount > 0) {
				vrele(vp);
				vp->v_uvcount--;
			} else
				panic("vp %p, v_uvcount of %d should be 0",
				    vp, vp->v_uvcount);
		}
	}
	KASSERT(vp->v_uvcount == 0);

	vrele(vp); /* release our ref */
}