/*	$OpenBSD: drm_linux.c,v 1.32 2018/09/11 20:25:58 kettenis Exp $	*/
/*
 * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
 * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <dev/pci/drm/drmP.h>
#include <dev/pci/ppbreg.h>
#include <sys/event.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/stat.h>
#include <sys/unistd.h>

struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
void *sch_ident;
int sch_priority;

void
flush_barrier(void *arg)
{
	int *barrier = arg;

	*barrier = 1;
	wakeup(barrier);
}

void
flush_workqueue(struct workqueue_struct *wq)
{
	struct sleep_state sls;
	struct task task;
	int barrier = 0;

	if (cold)
		return;

	task_set(&task, flush_barrier, &barrier);
	task_add((struct taskq *)wq, &task);
	while (!barrier) {
		sleep_setup(&sls, &barrier, PWAIT, "flwqbar");
		sleep_finish(&sls, !barrier);
	}
}

void
flush_work(struct work_struct *work)
{
	struct sleep_state sls;
	struct task task;
	int barrier = 0;

	if (cold)
		return;

	task_set(&task, flush_barrier, &barrier);
	task_add(work->tq, &task);
	while (!barrier) {
		sleep_setup(&sls, &barrier, PWAIT, "flwkbar");
		sleep_finish(&sls, !barrier);
	}
}

void
flush_delayed_work(struct delayed_work *dwork)
{
	struct sleep_state sls;
	struct task task;
	int barrier = 0;

	if (cold)
		return;

	while (timeout_pending(&dwork->to))
		tsleep(&barrier, PWAIT, "fldwto", 1);

	task_set(&task, flush_barrier, &barrier);
	task_add(dwork->tq ? dwork->tq : systq, &task);
	while (!barrier) {
		sleep_setup(&sls, &barrier, PWAIT, "fldwbar");
		sleep_finish(&sls, !barrier);
	}
}
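
/*
 * Linux-style time conversion helpers.  The remainder handling below
 * keeps tv_nsec/tv_usec non-negative for negative inputs; for
 * illustration, ns_to_timespec(-1500000000) yields
 * { .tv_sec = -2, .tv_nsec = 500000000 }.
 */
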
struct timespec
ns_to_timespec(const int64_t nsec)
{
	struct timespec ts;
	int32_t rem;

	if (nsec == 0) {
		ts.tv_sec = 0;
		ts.tv_nsec = 0;
		return (ts);
	}

	ts.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		ts.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	ts.tv_nsec = rem;
	return (ts);
}

int64_t
timeval_to_ns(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
	    tv->tv_usec * NSEC_PER_USEC;
}

struct timeval
ns_to_timeval(const int64_t nsec)
{
	struct timeval tv;
	int32_t rem;

	if (nsec == 0) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		return (tv);
	}

	tv.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		tv.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	tv.tv_usec = rem / 1000;
	return (tv);
}

int64_t
timeval_to_us(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * 1000000) + tv->tv_usec;
}

extern char *hw_vendor, *hw_prod, *hw_ver;

bool
dmi_match(int slot, const char *str)
{
	switch (slot) {
	case DMI_SYS_VENDOR:
	case DMI_BOARD_VENDOR:
		if (hw_vendor != NULL &&
		    !strcmp(hw_vendor, str))
			return true;
		break;
	case DMI_PRODUCT_NAME:
	case DMI_BOARD_NAME:
		if (hw_prod != NULL &&
		    !strcmp(hw_prod, str))
			return true;
		break;
	case DMI_PRODUCT_VERSION:
	case DMI_BOARD_VERSION:
		if (hw_ver != NULL &&
		    !strcmp(hw_ver, str))
			return true;
		break;
	case DMI_NONE:
	default:
		return false;
	}

	return false;
}

static bool
dmi_found(const struct dmi_system_id *dsi)
{
	int i, slot;

	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		if (slot == DMI_NONE)
			break;
		if (!dmi_match(slot, dsi->matches[i].substr))
			return false;
	}

	return true;
}

int
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	int num = 0;

	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi)) {
			num++;
			if (dsi->callback && dsi->callback(dsi))
				break;
		}
	}
	return (num);
}
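
/*
 * Page allocation and kernel mapping helpers modelled on the Linux
 * alloc_pages()/kmap()/vmap() interfaces, implemented on top of UVM.
 * Illustrative use only, with the flag names alloc_pages() itself
 * interprets below:
 *
 *	struct vm_page *pg = alloc_pages(M_ZERO, 0);	(one zeroed page)
 *	void *va = kmap(pg);
 *	...use va...
 *	kunmap(va);
 *	__free_pages(pg, 0);
 */
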
struct vm_page *
alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	struct pglist mlist;

	if (gfp_mask & M_CANFAIL)
		flags |= UVM_PLA_FAILOK;
	if (gfp_mask & M_ZERO)
		flags |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(PAGE_SIZE << order, dma_constraint.ucr_low,
	    dma_constraint.ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
		return NULL;
	return TAILQ_FIRST(&mlist);
}

void
__free_pages(struct vm_page *page, unsigned int order)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < (1 << order); i++)
		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
	uvm_pglistfree(&mlist);
}

void *
kmap(struct vm_page *pg)
{
	vaddr_t va;

#if defined (__HAVE_PMAP_DIRECT)
	va = pmap_map_direct(pg);
#else
	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
#endif
	return (void *)va;
}

void
kunmap(void *addr)
{
	vaddr_t va = (vaddr_t)addr;

#if defined (__HAVE_PMAP_DIRECT)
	pmap_unmap_direct(va);
#else
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
#endif
}

void *
vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
    pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
	if (va == 0)
		return NULL;
	for (i = 0; i < npages; i++) {
		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

void
vunmap(void *addr, size_t size)
{
	vaddr_t va = (vaddr_t)addr;

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size);
}

void
print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
    int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
{
	const uint8_t *cbuf = buf;
	int i;

	for (i = 0; i < len; i++) {
		if ((i % rowsize) == 0)
			printf("%s", prefix_str);
		printf("%02x", cbuf[i]);
		if ((i % rowsize) == (rowsize - 1))
			printf("\n");
		else
			printf(" ");
	}
}

void *
memchr_inv(const void *s, int c, size_t n)
{
	if (n != 0) {
		const unsigned char *p = s;

		do {
			if (*p++ != (unsigned char)c)
				return ((void *)(p - 1));
		} while (--n != 0);
	}
	return (NULL);
}

int
panic_cmp(struct rb_node *a, struct rb_node *b)
{
	panic(__func__);
}

#undef RB_ROOT
#define RB_ROOT(head)	(head)->rbh_root

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

/*
 * This is a fairly minimal implementation of the Linux "idr" API.  It
 * probably isn't very efficient, and definitely isn't RCU safe.  The
 * pre-load buffer is global instead of per-cpu; we rely on the kernel
 * lock to make this work.  We do randomize our IDs in order to make
 * them harder to guess.
 */
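
/*
 * Illustrative caller, under the kernel lock.  Only the idr_* calls and
 * the GFP_* flags used by this file are real; the other names are
 * hypothetical:
 *
 *	struct idr objects;
 *	int handle;
 *
 *	idr_init(&objects);
 *	idr_preload(GFP_KERNEL);
 *	handle = idr_alloc(&objects, obj, 1, 0, GFP_NOWAIT);
 *	...
 *	obj = idr_find(&objects, handle);
 *	idr_remove(&objects, handle);
 *	idr_destroy(&objects);
 */
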
int idr_cmp(struct idr_entry *, struct idr_entry *);
SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);

struct pool idr_pool;
struct idr_entry *idr_entry_cache;

void
idr_init(struct idr *idr)
{
	static int initialized;

	if (!initialized) {
		pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
		    "idrpl", NULL);
		initialized = 1;
	}
	SPLAY_INIT(&idr->tree);
}

void
idr_destroy(struct idr *idr)
{
	struct idr_entry *id;

	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
		SPLAY_REMOVE(idr_tree, &idr->tree, id);
		pool_put(&idr_pool, id);
	}
}

void
idr_preload(unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache == NULL)
		idr_entry_cache = pool_get(&idr_pool, flags);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end,
    unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	struct idr_entry *id;
	int begin;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache) {
		id = idr_entry_cache;
		idr_entry_cache = NULL;
	} else {
		id = pool_get(&idr_pool, flags);
		if (id == NULL)
			return -ENOMEM;
	}

	if (end <= 0)
		end = INT_MAX;

#ifdef notyet
	id->id = begin = start + arc4random_uniform(end - start);
#else
	id->id = begin = start;
#endif
	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
		if (++id->id == end)
			id->id = start;
		if (id->id == begin) {
			pool_put(&idr_pool, id);
			return -ENOSPC;
		}
	}
	id->ptr = ptr;
	return id->id;
}

void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_entry find, *res;
	void *old;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return ERR_PTR(-ENOENT);
	old = res->ptr;
	res->ptr = ptr;
	return old;
}

void
idr_remove(struct idr *idr, int id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res) {
		SPLAY_REMOVE(idr_tree, &idr->tree, res);
		pool_put(&idr_pool, res);
	}
}

void *
idr_find(struct idr *idr, int id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return NULL;
	return res->ptr;
}

void *
idr_get_next(struct idr *idr, int *id)
{
	struct idr_entry find, *res;

	find.id = *id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		res = SPLAY_MIN(idr_tree, &idr->tree);
	else
		res = SPLAY_NEXT(idr_tree, &idr->tree, res);
	if (res == NULL)
		return NULL;
	*id = res->id;
	return res->ptr;
}

int
idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
{
	struct idr_entry *id;
	int ret;

	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
		ret = func(id->id, id->ptr, data);
		if (ret)
			return ret;
	}

	return 0;
}

int
idr_cmp(struct idr_entry *a, struct idr_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
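
/*
 * Minimal "ida" (ID allocator) emulation: a bare counter.  IDs are
 * handed out in increasing order and never reused; ida_remove() and
 * ida_destroy() are intentionally no-ops.
 */
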
void
ida_init(struct ida *ida)
{
	ida->counter = 0;
}

void
ida_destroy(struct ida *ida)
{
}

void
ida_remove(struct ida *ida, int id)
{
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    int flags)
{
	if (end <= 0)
		end = INT_MAX;

	if (start > ida->counter)
		ida->counter = start;

	if (ida->counter >= end)
		return -ENOSPC;

	return ida->counter++;
}

int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
	    M_DRM, gfp_mask);
	if (table->sgl == NULL)
		return -ENOMEM;
	table->nents = table->orig_nents = nents;
	return 0;
}

void
sg_free_table(struct sg_table *table)
{
	free(table->sgl, M_DRM,
	    table->orig_nents * sizeof(struct scatterlist));
}

size_t
sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen)
{
	panic("%s", __func__);
}

int
i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	void *cmd = NULL;
	int cmdlen = 0;
	int err, ret = 0;
	int op;

	iic_acquire_bus(&adap->ic, 0);

	while (num > 2) {
		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
		    msgs->buf, msgs->len, 0);
		if (err) {
			ret = -err;
			goto fail;
		}
		msgs++;
		num--;
		ret++;
	}

	if (num > 1) {
		cmd = msgs->buf;
		cmdlen = msgs->len;
		msgs++;
		num--;
		ret++;
	}

	op = (msgs->flags & I2C_M_RD) ?
	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
	    msgs->buf, msgs->len, 0);
	if (err) {
		ret = -err;
		goto fail;
	}
	msgs++;
	ret++;

fail:
	iic_release_bus(&adap->ic, 0);

	return ret;
}

int
i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	if (adap->algo)
		return adap->algo->master_xfer(adap, msgs, num);

	return i2c_master_xfer(adap, msgs, num);
}

int
i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct i2c_algo_bit_data *algo = adap->algo_data;
	struct i2c_adapter bb;

	memset(&bb, 0, sizeof(bb));
	bb.ic = algo->ic;
	bb.retries = adap->retries;
	return i2c_master_xfer(&bb, msgs, num);
}

uint32_t
i2c_bb_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

struct i2c_algorithm i2c_bit_algo = {
	.master_xfer = i2c_bb_master_xfer,
	.functionality = i2c_bb_functionality
};

int
i2c_bit_add_bus(struct i2c_adapter *adap)
{
	adap->algo = &i2c_bit_algo;
	adap->retries = 3;

	return 0;
}
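
/*
 * The i2c_* helpers above translate Linux-style message arrays into
 * iic_exec() operations.  Illustrative caller; names other than the
 * i2c_* functions and struct fields are hypothetical:
 *
 *	uint8_t reg = 0x00, val[2];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 2, .buf = val },
 *	};
 *
 *	if (i2c_transfer(adap, msgs, 2) != 2)
 *		...handle error...
 */
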
#if defined(__amd64__) || defined(__i386__)

/*
 * This is a minimal implementation of the Linux vga_get/vga_put
 * interface.  In all likelihood, it will only work for inteldrm(4) as
 * it assumes that if there is another active VGA device in the
 * system, it is sitting behind a PCI bridge.
 */

extern int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);

pcitag_t vga_bridge_tag;
int vga_bridge_disabled;

int
vga_disable_bridge(struct pci_attach_args *pa)
{
	pcireg_t bhlc, bc;

	if (pa->pa_domain != 0)
		return 0;

	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return 0;

	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
	if ((bc & PPB_BC_VGA_ENABLE) == 0)
		return 0;
	bc &= ~PPB_BC_VGA_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_tag = pa->pa_tag;
	vga_bridge_disabled = 1;

	return 1;
}

void
vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
{
	KASSERT(pdev->pci->sc_bridgetag == NULL);
	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
}

void
vga_put(struct pci_dev *pdev, int rsrc)
{
	pcireg_t bc;

	if (!vga_bridge_disabled)
		return;

	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
	bc |= PPB_BC_VGA_ENABLE;
	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_disabled = 0;
}

#endif

/*
 * ACPI types and interfaces.
 */

#ifdef __HAVE_ACPI
#include "acpi.h"
#endif

#if NACPI > 0

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>

acpi_status
acpi_get_table_with_size(const char *sig, int instance,
    struct acpi_table_header **hdr, acpi_size *size)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpi_q *entry;

	KASSERT(instance == 1);

	if (sc == NULL)
		return AE_NOT_FOUND;

	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
			*hdr = entry->q_table;
			*size = (*hdr)->length;
			return 0;
		}
	}

	return AE_NOT_FOUND;
}

#endif

void
backlight_do_update_status(void *arg)
{
	backlight_update_status(arg);
}

struct backlight_device *
backlight_device_register(const char *name, void *kdev, void *data,
    const struct backlight_ops *ops, struct backlight_properties *props)
{
	struct backlight_device *bd;

	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
	bd->ops = ops;
	bd->props = *props;
	bd->data = data;

	task_set(&bd->task, backlight_do_update_status, bd);

	return bd;
}

void
backlight_device_unregister(struct backlight_device *bd)
{
	free(bd, M_DRM, sizeof(*bd));
}

void
backlight_schedule_update_status(struct backlight_device *bd)
{
	task_add(systq, &bd->task);
}

void
drm_sysfs_hotplug_event(struct drm_device *dev)
{
	KNOTE(&dev->note, NOTE_CHANGE);
}

unsigned int drm_fence_count;

unsigned int
fence_context_alloc(unsigned int num)
{
	return __sync_add_and_fetch(&drm_fence_count, num) - num;
}
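
/*
 * dma-buf emulation: an exported dma_buf is handed to userland as a
 * plain file descriptor.  The dmabuf_* functions below provide the
 * fileops for such descriptors; read/write/ioctl/poll/kqfilter are
 * stubs, only stat, seek and close do real work.
 */
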
int
dmabuf_read(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
dmabuf_write(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

int
dmabuf_poll(struct file *fp, int events, struct proc *p)
{
	return (0);
}

int
dmabuf_kqfilter(struct file *fp, struct knote *kn)
{
	return (EINVAL);
}

int
dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;

	memset(st, 0, sizeof(*st));
	st->st_size = dmabuf->size;
	st->st_mode = S_IFIFO;	/* XXX */
	return (0);
}

int
dmabuf_close(struct file *fp, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;

	fp->f_data = NULL;
	KERNEL_LOCK();
	dmabuf->ops->release(dmabuf);
	KERNEL_UNLOCK();
	free(dmabuf, M_DRM, sizeof(struct dma_buf));
	return (0);
}

int
dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;
	off_t newoff;

	if (*offset != 0)
		return (EINVAL);

	switch (whence) {
	case SEEK_SET:
		newoff = 0;
		break;
	case SEEK_END:
		newoff = dmabuf->size;
		break;
	default:
		return (EINVAL);
	}
	fp->f_offset = *offset = newoff;
	return (0);
}

struct fileops dmabufops = {
	.fo_read = dmabuf_read,
	.fo_write = dmabuf_write,
	.fo_ioctl = dmabuf_ioctl,
	.fo_poll = dmabuf_poll,
	.fo_kqfilter = dmabuf_kqfilter,
	.fo_stat = dmabuf_stat,
	.fo_close = dmabuf_close,
	.fo_seek = dmabuf_seek,
};

struct dma_buf *
dma_buf_export(const struct dma_buf_export_info *info)
{
	struct proc *p = curproc;
	struct dma_buf *dmabuf;
	struct file *fp;

	fp = fnew(p);
	if (fp == NULL)
		return ERR_PTR(-ENFILE);
	fp->f_type = DTYPE_DMABUF;
	fp->f_ops = &dmabufops;
	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
	dmabuf->priv = info->priv;
	dmabuf->ops = info->ops;
	dmabuf->size = info->size;
	dmabuf->file = fp;
	fp->f_data = dmabuf;
	return dmabuf;
}

struct dma_buf *
dma_buf_get(int fd)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return ERR_PTR(-EBADF);

	if (fp->f_type != DTYPE_DMABUF) {
		FRELE(fp, p);
		return ERR_PTR(-EINVAL);
	}

	return fp->f_data;
}

void
dma_buf_put(struct dma_buf *dmabuf)
{
	KASSERT(dmabuf);
	KASSERT(dmabuf->file);

	FRELE(dmabuf->file, curproc);
}

int
dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp = dmabuf->file;
	int fd, cloexec, error;

	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;

	fdplock(fdp);
restart:
	if ((error = fdalloc(p, 0, &fd)) != 0) {
		if (error == ENOSPC) {
			fdexpand(p);
			goto restart;
		}
		fdpunlock(fdp);
		return -error;
	}

	fdinsert(fdp, fd, cloexec, fp);
	fdpunlock(fdp);

	return fd;
}

void
get_dma_buf(struct dma_buf *dmabuf)
{
	FREF(dmabuf->file);
}
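
/*
 * Typical exporter/importer flow, for illustration only; error handling
 * and the exporter-specific ops are omitted, and "info" stands for a
 * hypothetical, fully filled-in dma_buf_export_info:
 *
 *	struct dma_buf *dmabuf = dma_buf_export(&info);
 *	int fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	...fd is handed to another process or driver...
 *	struct dma_buf *imported = dma_buf_get(fd);
 *	...
 *	dma_buf_put(imported);
 */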