/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: mem.c 1.13 89/10/08$
 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.79.2.9 2003/01/04 22:58:01 njl Exp $
 */

/*
 * Memory special file
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
#include <sys/caps.h>
#include <sys/queue.h>
#include <sys/random.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>

#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>


static d_open_t mmopen;
static d_close_t mmclose;
static d_read_t mmread;
static d_write_t mmwrite;
static d_ioctl_t mmioctl;
#if 0
static d_mmap_t memmmap;
#endif
static d_kqfilter_t mmkqfilter;
static int memuksmap(vm_map_backing_t ba, int op, cdev_t dev, vm_page_t fake);

#define CDEV_MAJOR 2
static struct dev_ops mem_ops = {
	{ "mem", 0, D_MPSAFE | D_QUICK },
	.d_open = mmopen,
	.d_close = mmclose,
	.d_read = mmread,
	.d_write = mmwrite,
	.d_ioctl = mmioctl,
	.d_kqfilter = mmkqfilter,
#if 0
	.d_mmap = memmmap,
#endif
	.d_uksmap = memuksmap
};

static struct dev_ops mem_ops_mem = {
	{ "mem", 0, D_MEM | D_MPSAFE | D_QUICK },
	.d_open = mmopen,
	.d_close = mmclose,
	.d_read = mmread,
	.d_write = mmwrite,
	.d_ioctl = mmioctl,
	.d_kqfilter = mmkqfilter,
#if 0
	.d_mmap = memmmap,
#endif
	.d_uksmap = memuksmap
};

static struct dev_ops mem_ops_noq = {
	{ "mem", 0, D_MPSAFE },
	.d_open = mmopen,
	.d_close = mmclose,
	.d_read = mmread,
	.d_write = mmwrite,
	.d_ioctl = mmioctl,
	.d_kqfilter = mmkqfilter,
#if 0
	.d_mmap = memmmap,
#endif
	.d_uksmap = memuksmap
};

static int rand_bolt;
static caddr_t zbuf;
static cdev_t zerodev = NULL;
static struct lock mem_lock = LOCK_INITIALIZER("memlk", 0, 0);

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
static int mem_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
static int random_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);

struct mem_range_softc mem_range_softc;

static int seedenable;
SYSCTL_INT(_kern, OID_AUTO, seedenable, CTLFLAG_RW, &seedenable, 0, "");

static int
mmopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 0:
	case 1:
		/*
		 * /dev/mem and /dev/kmem
		 */
		error = caps_priv_check(ap->a_cred, SYSCAP_RESTRICTEDROOT);
		if (error == 0) {
			if (ap->a_oflags & FWRITE) {
				if (securelevel > 0 || kernel_mem_readonly)
					error = EPERM;
			}
		}
		break;
	case 3:
	case 4:
		/*
		 * /dev/random
		 * /dev/urandom
		 *
		 * Cannot be written to from RESTRICTEDROOT environments.
		 */
		error = 0;
		if (ap->a_oflags & FWRITE) {
			error = caps_priv_check(ap->a_cred,
						SYSCAP_RESTRICTEDROOT);
		}
		break;
	case 6:
		/*
		 * /dev/kpmap can only be opened for reading.
		 */
		error = 0;
		if (ap->a_oflags & FWRITE)
			error = EPERM;
		break;
	case 14:
		/*
		 * /dev/io
		 */
		error = caps_priv_check(ap->a_cred, SYSCAP_RESTRICTEDROOT);
		if (error == 0) {
			if (securelevel > 0 || kernel_mem_readonly)
				error = EPERM;
			else
				error = cpu_set_iopl();
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static int
mmclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 14:
		error = cpu_clr_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}


static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	int o;
	u_int c;
	u_int poolsize;
	u_long v;
	struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory, /dev/mem
			 */
			v = uio->uio_offset;
			v &= ~(long)PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE -
				    ((uintptr_t)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			continue;

		case 1: {
			/*
			 * minor device 1 is kernel memory, /dev/kmem
			 */
			vm_offset_t saddr, eaddr;
			int prot;

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			if (saddr > eaddr)
				return (EFAULT);

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			prot = VM_PROT_READ;
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			if (error)
				return (error);
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
					(int)c, uio);
			continue;
		}
		case 2:
			/*
			 * minor device 2 (/dev/null) is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;
		case 3:
			/*
			 * minor device 3 (/dev/random) is a source of filth
			 * on read, a seeder on write
			 */
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				if (error == 0 &&
				    seedenable &&
				    securelevel <= 0) {
					error = add_buffer_randomness_src(buf,
						    c, RAND_SRC_SEEDING);
				} else if (error == 0) {
					error = EPERM;
				}
			} else {
				poolsize = read_random(buf, c, 0);
				if (poolsize == 0) {
					if (buf)
						kfree(buf, M_TEMP);
					if ((flags & IO_NDELAY) != 0)
						return (EWOULDBLOCK);
					return (0);
				}
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			}
			continue;
		case 4:
			/*
			 * minor device 4 (/dev/urandom) is a source of muck
			 * on read; writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			poolsize = read_random(buf, c, 1);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;
		/* case 5: read/write not supported, mmap only */
		/* case 6: read/write not supported, mmap only */
		case 12:
			/*
			 * minor device 12 (/dev/zero) is a source of nulls
			 * on read; writes are accepted but discarded.
			 */
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
							M_WAITOK | M_ZERO);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;
		default:
			return (ENODEV);
		}
		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		kfree(buf, M_TEMP);
	return (error);
}

static int
mmread(struct dev_read_args *ap)
{
	return (mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

static int
mmwrite(struct dev_write_args *ap)
{
	return (mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}
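
/*
 * Example (illustrative sketch only, not part of this driver): a
 * sufficiently privileged userland process reads physical memory
 * through the mmrw() loop above, which maps one page at a time via
 * ptvmmap.  The physical address "pa" here is an assumed,
 * caller-supplied value:
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	char buf[256];
 *
 *	if (fd >= 0 && lseek(fd, (off_t)pa, SEEK_SET) != -1)
 *		read(fd, buf, sizeof(buf));
 */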

/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/

static int user_kernel_mapping(vm_map_backing_t ba, int num,
			vm_ooffset_t offset, vm_ooffset_t *resultp);

static int
memuksmap(vm_map_backing_t ba, int op, cdev_t dev, vm_page_t fake)
{
	vm_ooffset_t result;
	int error;
	struct lwp *lp;

	error = 0;

	switch(op) {
	case UKSMAPOP_ADD:
		/*
		 * We only need to track mappings for /dev/lpmap, all process
		 * mappings will be deleted when the process exits and we
		 * do not need to track kernel mappings.
		 */
		if (minor(dev) == 7) {
			lp = ba->aux_info;
			spin_lock(&lp->lwp_spin);
			TAILQ_INSERT_TAIL(&lp->lwp_lpmap_backing_list,
					  ba, entry);
			spin_unlock(&lp->lwp_spin);
		}
		break;
	case UKSMAPOP_REM:
		/*
		 * We only need to track mappings for /dev/lpmap, all process
		 * mappings will be deleted when the process exits and we
		 * do not need to track kernel mappings.
		 */
		if (minor(dev) == 7) {
			lp = ba->aux_info;
			spin_lock(&lp->lwp_spin);
			TAILQ_REMOVE(&lp->lwp_lpmap_backing_list, ba, entry);
			spin_unlock(&lp->lwp_spin);
		}
		break;
	case UKSMAPOP_FAULT:
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory
			 */
			fake->phys_addr = ptoa(fake->pindex);
			break;
		case 1:
			/*
			 * minor device 1 is kernel memory
			 */
			fake->phys_addr = vtophys(ptoa(fake->pindex));
			break;
		case 5:
		case 6:
		case 7:
			/*
			 * minor device 5 is /dev/upmap (see sys/upmap.h)
			 * minor device 6 is /dev/kpmap (see sys/upmap.h)
			 * minor device 7 is /dev/lpmap (see sys/upmap.h)
			 */
			result = 0;
			error = user_kernel_mapping(ba,
						    minor(dev),
						    ptoa(fake->pindex),
						    &result);
			fake->phys_addr = result;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static int
mmioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	lockmgr(&mem_lock, LK_EXCLUSIVE);

	switch (minor(dev)) {
	case 0:
		error = mem_ioctl(dev, ap->a_cmd, ap->a_data,
				  ap->a_fflag, ap->a_cred);
		break;
	case 3:
	case 4:
		error = random_ioctl(dev, ap->a_cmd, ap->a_data,
				     ap->a_fflag, ap->a_cred);
		break;
	default:
		error = ENODEV;
		break;
	}

	lockmgr(&mem_lock, LK_RELEASE);

	return (error);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
static int
mem_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (ENOTTY);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = (struct mem_range_desc *)
				kmalloc(nd * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (!error)
				error = copyout(md, mo->mo_desc,
					nd * sizeof(struct mem_range_desc));
			kfree(md, M_MEMDESC);
		} else {
			nd = mem_range_softc.mr_ndesc;
		}
		mo->mo_arg[0] = nd;
		break;

	case MEMRANGE_SET:
		md = (struct mem_range_desc *)
			kmalloc(sizeof(struct mem_range_desc),
				M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		kfree(md, M_MEMDESC);
		break;
	}
	return (error);
}
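
/*
 * Example (sketch of a hypothetical userland caller): mem_ioctl()
 * above returns the descriptor count when MEMRANGE_GET is issued with
 * mo_arg[0] == 0, so callers size their buffer first and then re-issue
 * the ioctl; "fd" is assumed to be an open descriptor for /dev/mem:
 *
 *	struct mem_range_op mo;
 *
 *	mo.mo_arg[0] = 0;
 *	ioctl(fd, MEMRANGE_GET, &mo);	(mo.mo_arg[0] now holds the count)
 *	mo.mo_desc = calloc(mo.mo_arg[0], sizeof(struct mem_range_desc));
 *	ioctl(fd, MEMRANGE_GET, &mo);	(fills mo.mo_desc)
 */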

/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	if (*arg == 0) {
		*arg = mem_range_softc.mr_ndesc;
	} else {
		bcopy(mem_range_softc.mr_desc, mrd,
		      (*arg) * sizeof(struct mem_range_desc));
	}
	return (0);
}

int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
}

void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

static int
random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags,
	     struct ucred *cred)
{
	int error;
	int intr;

	/*
	 * Even inspecting the state is privileged, since it gives a hint
	 * about how easily the randomness might be guessed.
	 */
	error = 0;

	switch (cmd) {
	/* Really handled in upper layer */
	case FIOASYNC:
		break;
	case MEM_SETIRQ:
		intr = *(int16_t *)data;
		if ((error = caps_priv_check(cred, SYSCAP_RESTRICTEDROOT)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		register_randintr(intr);
		break;
	case MEM_CLEARIRQ:
		intr = *(int16_t *)data;
		if ((error = caps_priv_check(cred, SYSCAP_RESTRICTEDROOT)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		unregister_randintr(intr);
		break;
	case MEM_RETURNIRQ:
		error = ENOTSUP;
		break;
	case MEM_FINDIRQ:
		intr = *(int16_t *)data;
		if ((error = caps_priv_check(cred, SYSCAP_RESTRICTEDROOT)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		intr = next_registered_randintr(intr);
		if (intr == MAX_INTS)
			return (ENOENT);
		*(u_int16_t *)data = intr;
		break;
	default:
		error = ENOTSUP;
		break;
	}
	return (error);
}

static int
mm_filter_read(struct knote *kn, long hint)
{
	return (1);
}

static int
mm_filter_write(struct knote *kn, long hint)
{
	return (1);
}

static void
dummy_filter_detach(struct knote *kn) {}

/* Implemented in kern_nrandom.c */
static struct filterops random_read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL,
	  dummy_filter_detach, random_filter_read };

static struct filterops mm_read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL,
	  dummy_filter_detach, mm_filter_read };

static struct filterops mm_write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE, NULL,
	  dummy_filter_detach, mm_filter_write };

static int
mmkqfilter(struct dev_kqfilter_args *ap)
{
	struct knote *kn = ap->a_kn;
	cdev_t dev = ap->a_head.a_dev;

	ap->a_result = 0;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		switch (minor(dev)) {
		case 3:
			kn->kn_fop = &random_read_filtops;
			break;
		default:
			kn->kn_fop = &mm_read_filtops;
			break;
		}
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &mm_write_filtops;
		break;
	default:
		ap->a_result = EOPNOTSUPP;
		return (0);
	}

	return (0);
}

int
iszerodev(cdev_t dev)
{
	return (zerodev == dev);
}

/*
 * /dev/lpmap, /dev/upmap, /dev/kpmap.
 */
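
/*
 * Example (sketch; assumes sys/upmap.h, referenced by the comments in
 * memuksmap() above, exports a sys_kpmap structure describing the page
 * layout; the size calculation mirrors the roundup2() checks in
 * user_kernel_mapping() below): these devices are used via mmap(2)
 * rather than read/write, e.g. for the global kernel page:
 *
 *	int fd = open("/dev/kpmap", O_RDONLY);
 *	struct sys_kpmap *kp;
 *
 *	kp = mmap(NULL, roundup2(sizeof(*kp), PAGE_SIZE),
 *		  PROT_READ, MAP_SHARED, fd, 0);
 */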
722 */ 723 static int 724 user_kernel_mapping(vm_map_backing_t ba, int num, vm_ooffset_t offset, 725 vm_ooffset_t *resultp) 726 { 727 struct proc *p; 728 struct lwp *lp; 729 int error; 730 int invfork; 731 732 if (offset < 0) 733 return (EINVAL); 734 735 error = EINVAL; 736 737 switch(num) { 738 case 5: 739 /* 740 * /dev/upmap - maps RW per-process shared user-kernel area. 741 */ 742 743 /* 744 * If this is a child currently in vfork the pmap is shared 745 * with the parent! We need to actually set-up the parent's 746 * p_upmap, not the child's, and we need to set the invfork 747 * flag. Userland will probably adjust its static state so 748 * it must be consistent with the parent or userland will be 749 * really badly confused. 750 * 751 * (this situation can happen when user code in vfork() calls 752 * libc's getpid() or some other function which then decides 753 * it wants the upmap). 754 */ 755 p = ba->aux_info; 756 if (p == NULL) 757 break; 758 if (p->p_flags & P_PPWAIT) { 759 p = p->p_pptr; 760 if (p == NULL) 761 return (EINVAL); 762 invfork = 1; 763 } else { 764 invfork = 0; 765 } 766 767 /* 768 * Create the kernel structure as required, set the invfork 769 * flag if we are faulting in on a vfork(). 770 */ 771 if (p->p_upmap == NULL) 772 proc_usermap(p, invfork); 773 if (p->p_upmap && invfork) 774 p->p_upmap->invfork = invfork; 775 776 /* 777 * Extract address for pmap 778 */ 779 if (p->p_upmap && 780 offset < roundup2(sizeof(*p->p_upmap), PAGE_SIZE)) { 781 /* only good for current process */ 782 *resultp = pmap_kextract((vm_offset_t)p->p_upmap + 783 offset); 784 error = 0; 785 } 786 break; 787 case 6: 788 /* 789 * /dev/kpmap - maps RO shared kernel global page 790 * 791 * Extract address for pmap 792 */ 793 if (kpmap && 794 offset < roundup2(sizeof(*kpmap), PAGE_SIZE)) { 795 *resultp = pmap_kextract((vm_offset_t)kpmap + offset); 796 error = 0; 797 } 798 break; 799 case 7: 800 /* 801 * /dev/lpmap - maps RW per-thread shared user-kernel area. 802 */ 803 lp = ba->aux_info; 804 if (lp == NULL) 805 break; 806 807 /* 808 * Create the kernel structure as required 809 */ 810 if (lp->lwp_lpmap == NULL) 811 lwp_usermap(lp, -1); /* second arg not yet XXX */ 812 813 /* 814 * Extract address for pmap 815 */ 816 if (lp->lwp_lpmap && 817 offset < roundup2(sizeof(*lp->lwp_lpmap), PAGE_SIZE)) { 818 /* only good for current process */ 819 *resultp = pmap_kextract((vm_offset_t)lp->lwp_lpmap + 820 offset); 821 error = 0; 822 } 823 break; 824 default: 825 break; 826 } 827 return error; 828 } 829 830 static void 831 mem_drvinit(void *unused) 832 { 833 834 /* Initialise memory range handling */ 835 if (mem_range_softc.mr_op != NULL) 836 mem_range_softc.mr_op->init(&mem_range_softc); 837 838 make_dev(&mem_ops_mem, 0, UID_ROOT, GID_KMEM, 0640, "mem"); 839 make_dev(&mem_ops_mem, 1, UID_ROOT, GID_KMEM, 0640, "kmem"); 840 make_dev(&mem_ops, 2, UID_ROOT, GID_WHEEL, 0666, "null"); 841 make_dev(&mem_ops, 3, UID_ROOT, GID_WHEEL, 0644, "random"); 842 make_dev(&mem_ops, 4, UID_ROOT, GID_WHEEL, 0644, "urandom"); 843 make_dev(&mem_ops, 5, UID_ROOT, GID_WHEEL, 0666, "upmap"); 844 make_dev(&mem_ops, 6, UID_ROOT, GID_WHEEL, 0444, "kpmap"); 845 make_dev(&mem_ops, 7, UID_ROOT, GID_WHEEL, 0666, "lpmap"); 846 zerodev = make_dev(&mem_ops, 12, UID_ROOT, GID_WHEEL, 0666, "zero"); 847 make_dev(&mem_ops_noq, 14, UID_ROOT, GID_WHEEL, 0600, "io"); 848 } 849 850 SYSINIT(memdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, mem_drvinit, 851 NULL); 852 853