/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: mem.c 1.13 89/10/08$
 * from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.79.2.9 2003/01/04 22:58:01 njl Exp $
 * $DragonFly: src/sys/kern/kern_memio.c,v 1.32 2008/07/23 16:39:28 dillon Exp $
 */

/*
 * Memory special file
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/random.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

static d_open_t		mmopen;
static d_close_t	mmclose;
static d_read_t		mmread;
static d_write_t	mmwrite;
static d_ioctl_t	mmioctl;
static d_mmap_t		memmmap;
static d_poll_t		mmpoll;
static d_kqfilter_t	mmkqfilter;

#define CDEV_MAJOR 2
static struct dev_ops mem_ops = {
	{ "mem", CDEV_MAJOR, D_MEM | D_MPSAFE_READ | D_MPSAFE_WRITE | D_KQFILTER },
	.d_open =	mmopen,
	.d_close =	mmclose,
	.d_read =	mmread,
	.d_write =	mmwrite,
	.d_ioctl =	mmioctl,
	.d_poll =	mmpoll,
	.d_kqfilter =	mmkqfilter,
	.d_mmap =	memmmap,
};

static int rand_bolt;
static caddr_t	zbuf;
static cdev_t	zerodev = NULL;

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
static int mem_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
static int random_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);

struct mem_range_softc mem_range_softc;

static int
mmopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 0:
	case 1:
		if (ap->a_oflags & FWRITE) {
			if (securelevel > 0 || kernel_mem_readonly)
				return (EPERM);
		}
		error = 0;
		break;
	case 14:
		error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0);
		if (error != 0)
			break;
		if (securelevel > 0 || kernel_mem_readonly) {
			error = EPERM;
			break;
		}
		error = cpu_set_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static int
mmclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 14:
		error = cpu_clr_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}
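
/*
 * mmrw() is the shared read/write backend for all of the memory minor
 * devices: /dev/mem (0), /dev/kmem (1), /dev/null (2), /dev/random (3),
 * /dev/urandom (4) and /dev/zero (12).  It consumes the uio one iovec at
 * a time; the physical memory, random and zero cases clamp each pass to
 * at most one page.
 */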
static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	int o;
	u_int c, v;
	u_int poolsize;
	struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory, /dev/mem
			 */
			v = uio->uio_offset;
			v &= ~PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((uintptr_t)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			continue;

		case 1: {
			/*
			 * minor device 1 is kernel memory, /dev/kmem
			 */
			vm_offset_t saddr, eaddr;
			int prot;

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			if (saddr > eaddr)
				return EFAULT;

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			prot = VM_PROT_READ;
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			if (error)
				return (error);
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
					(int)c, uio);
			continue;
		}
		case 2:
			/*
			 * minor device 2 is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;
		case 3:
			/*
			 * minor device 3 (/dev/random) is source of filth
			 * on read, seeder on write
			 */
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				if (error == 0)
					error = add_buffer_randomness(buf, c);
			} else {
				poolsize = read_random(buf, c);
				if (poolsize == 0) {
					if (buf)
						kfree(buf, M_TEMP);
					if ((flags & IO_NDELAY) != 0)
						return (EWOULDBLOCK);
					return (0);
				}
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			}
			continue;
		case 4:
			/*
			 * minor device 4 (/dev/urandom) is source of muck
			 * on read, writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;
		case 12:
			/*
			 * minor device 12 (/dev/zero) is source of nulls
			 * on read, writes are disallowed.
			 */
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK | M_ZERO);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;
		default:
			return (ENODEV);
		}
		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		kfree(buf, M_TEMP);
	return (error);
}

static int
mmread(struct dev_read_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

static int
mmwrite(struct dev_write_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/

static int
memmmap(struct dev_mmap_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
#if defined(__i386__)
		ap->a_result = i386_btop(ap->a_offset);
#elif defined(__x86_64__)
		ap->a_result = x86_64_btop(ap->a_offset);
#endif
		return 0;
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
#if defined(__i386__)
		ap->a_result = i386_btop(vtophys(ap->a_offset));
#elif defined(__x86_64__)
		ap->a_result = x86_64_btop(vtophys(ap->a_offset));
#endif
		return 0;

	default:
		return EINVAL;
	}
}
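
/*
 * memmmap() above is what backs mmap(2) on /dev/mem and /dev/kmem: the
 * file offset is interpreted as a physical (or kernel virtual) address
 * and converted to a page index.  A minimal userland sketch, mapping one
 * page of physical memory at the purely illustrative address PHYS_ADDR
 * (needs read access to /dev/mem, error handling omitted, not part of
 * this driver):
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	void *p = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED,
 *		       fd, PHYS_ADDR);
 */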

static int
mmioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	switch (minor(dev)) {
	case 0:
		return mem_ioctl(dev, ap->a_cmd, ap->a_data,
				 ap->a_fflag, ap->a_cred);
	case 3:
	case 4:
		return random_ioctl(dev, ap->a_cmd, ap->a_data,
				    ap->a_fflag, ap->a_cred);
	}
	return (ENODEV);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
static int
mem_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (ENOTTY);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = (struct mem_range_desc *)
				kmalloc(nd * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (!error)
				error = copyout(md, mo->mo_desc,
						nd * sizeof(struct mem_range_desc));
			kfree(md, M_MEMDESC);
		} else {
			nd = mem_range_softc.mr_ndesc;
		}
		mo->mo_arg[0] = nd;
		break;

	case MEMRANGE_SET:
		md = (struct mem_range_desc *)kmalloc(sizeof(struct mem_range_desc),
						      M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		kfree(md, M_MEMDESC);
		break;
	}
	return (error);
}
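
/*
 * Userland reaches the shim above through the MEMRANGE_GET/MEMRANGE_SET
 * ioctls on /dev/mem (minor 0).  The sketch below shows the two-pass
 * MEMRANGE_GET protocol: a first call with mo_arg[0] == 0 just returns
 * the descriptor count, a second call with a buffer fetches that many
 * descriptors.  Error checking is omitted and the snippet is illustrative
 * only, it is not part of this driver.
 *
 *	#include <sys/memrange.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *
 *	struct mem_range_op mo;
 *	int fd = open("/dev/mem", O_RDONLY);
 *
 *	mo.mo_desc = NULL;
 *	mo.mo_arg[0] = 0;
 *	ioctl(fd, MEMRANGE_GET, &mo);
 *
 *	mo.mo_desc = calloc(mo.mo_arg[0], sizeof(struct mem_range_desc));
 *	ioctl(fd, MEMRANGE_GET, &mo);
 */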

/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	if (*arg == 0) {
		*arg = mem_range_softc.mr_ndesc;
	} else {
		bcopy(mem_range_softc.mr_desc, mrd,
		      (*arg) * sizeof(struct mem_range_desc));
	}
	return (0);
}

int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
}

#ifdef SMP
void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		return (mem_range_softc.mr_op->initAP(&mem_range_softc));
}
#endif

static int
random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int error;
	int intr;

	/*
	 * Even inspecting the state is privileged, since it gives a hint
	 * about how easily the randomness might be guessed.
	 */
	error = 0;

	switch (cmd) {
	/* Really handled in upper layer */
	case FIOASYNC:
		break;
	case MEM_SETIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		register_randintr(intr);
		break;
	case MEM_CLEARIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		unregister_randintr(intr);
		break;
	case MEM_RETURNIRQ:
		error = ENOTSUP;
		break;
	case MEM_FINDIRQ:
		intr = *(int16_t *)data;
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		intr = next_registered_randintr(intr);
		if (intr == MAX_INTS)
			return (ENOENT);
		*(u_int16_t *)data = intr;
		break;
	default:
		error = ENOTSUP;
		break;
	}
	return (error);
}
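
/*
 * poll/kqfilter backends.  /dev/random (minor 3) defers to the entropy
 * subsystem (random_poll() and random_filter_read()) so a reader can wait
 * for entropy to arrive; every other minor device always reports ready.
 */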
488 */ 489 error = 0; 490 491 switch (cmd) { 492 /* Really handled in upper layer */ 493 case FIOASYNC: 494 break; 495 case MEM_SETIRQ: 496 intr = *(int16_t *)data; 497 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 498 break; 499 if (intr < 0 || intr >= MAX_INTS) 500 return (EINVAL); 501 register_randintr(intr); 502 break; 503 case MEM_CLEARIRQ: 504 intr = *(int16_t *)data; 505 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 506 break; 507 if (intr < 0 || intr >= MAX_INTS) 508 return (EINVAL); 509 unregister_randintr(intr); 510 break; 511 case MEM_RETURNIRQ: 512 error = ENOTSUP; 513 break; 514 case MEM_FINDIRQ: 515 intr = *(int16_t *)data; 516 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 517 break; 518 if (intr < 0 || intr >= MAX_INTS) 519 return (EINVAL); 520 intr = next_registered_randintr(intr); 521 if (intr == MAX_INTS) 522 return (ENOENT); 523 *(u_int16_t *)data = intr; 524 break; 525 default: 526 error = ENOTSUP; 527 break; 528 } 529 return (error); 530 } 531 532 int 533 mmpoll(struct dev_poll_args *ap) 534 { 535 cdev_t dev = ap->a_head.a_dev; 536 int revents; 537 538 switch (minor(dev)) { 539 case 3: /* /dev/random */ 540 revents = random_poll(dev, ap->a_events); 541 break; 542 case 4: /* /dev/urandom */ 543 default: 544 revents = seltrue(dev, ap->a_events); 545 break; 546 } 547 ap->a_events = revents; 548 return (0); 549 } 550 551 static int 552 mm_filter_read(struct knote *kn, long hint) 553 { 554 return (1); 555 } 556 557 static void 558 dummy_filter_detach(struct knote *kn) {} 559 560 static struct filterops random_read_filtops = 561 { 1, NULL, dummy_filter_detach, random_filter_read }; 562 563 static struct filterops mm_read_filtops = 564 { 1, NULL, dummy_filter_detach, mm_filter_read }; 565 566 int 567 mmkqfilter(struct dev_kqfilter_args *ap) 568 { 569 struct knote *kn = ap->a_kn; 570 cdev_t dev = ap->a_head.a_dev; 571 572 ap->a_result = 0; 573 switch (kn->kn_filter) { 574 case EVFILT_READ: 575 switch (minor(dev)) { 576 case 3: 577 kn->kn_fop = &random_read_filtops; 578 break; 579 default: 580 kn->kn_fop = &mm_read_filtops; 581 break; 582 } 583 break; 584 default: 585 ap->a_result = 1; 586 return (0); 587 } 588 589 return (0); 590 } 591 592 int 593 iszerodev(cdev_t dev) 594 { 595 return (zerodev == dev); 596 } 597 598 static void 599 mem_drvinit(void *unused) 600 { 601 602 /* Initialise memory range handling */ 603 if (mem_range_softc.mr_op != NULL) 604 mem_range_softc.mr_op->init(&mem_range_softc); 605 606 make_dev(&mem_ops, 0, UID_ROOT, GID_KMEM, 0640, "mem"); 607 make_dev(&mem_ops, 1, UID_ROOT, GID_KMEM, 0640, "kmem"); 608 make_dev(&mem_ops, 2, UID_ROOT, GID_WHEEL, 0666, "null"); 609 make_dev(&mem_ops, 3, UID_ROOT, GID_WHEEL, 0644, "random"); 610 make_dev(&mem_ops, 4, UID_ROOT, GID_WHEEL, 0644, "urandom"); 611 zerodev = make_dev(&mem_ops, 12, UID_ROOT, GID_WHEEL, 0666, "zero"); 612 make_dev(&mem_ops, 14, UID_ROOT, GID_WHEEL, 0600, "io"); 613 } 614 615 SYSINIT(memdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,mem_drvinit,NULL) 616 617