/*	$OpenBSD: drm_linux.c,v 1.105 2023/12/23 14:18:27 kettenis Exp $	*/
/*
 * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
 * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/event.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/fcntl.h>

#include <dev/pci/ppbreg.h>

#include <linux/dma-buf.h>
#include <linux/mod_devicetable.h>
#include <linux/acpi.h>
#include <linux/pagevec.h>
#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/scatterlist.h>
#include <linux/i2c.h>
#include <linux/pci.h>
#include <linux/notifier.h>
#include <linux/backlight.h>
#include <linux/shrinker.h>
#include <linux/fb.h>
#include <linux/xarray.h>
#include <linux/interval_tree.h>
#include <linux/kthread.h>
#include <linux/processor.h>
#include <linux/sync_file.h>

#include <drm/drm_device.h>
#include <drm/drm_connector.h>
#include <drm/drm_print.h>

#if defined(__amd64__) || defined(__i386__)
#include "bios.h"
#endif

/* allowed to sleep */
void
tasklet_unlock_wait(struct tasklet_struct *ts)
{
	while (test_bit(TASKLET_STATE_RUN, &ts->state))
		cpu_relax();
}

/* must not sleep */
void
tasklet_unlock_spin_wait(struct tasklet_struct *ts)
{
	while (test_bit(TASKLET_STATE_RUN, &ts->state))
		cpu_relax();
}

void
tasklet_run(void *arg)
{
	struct tasklet_struct *ts = arg;

	clear_bit(TASKLET_STATE_SCHED, &ts->state);
	if (tasklet_trylock(ts)) {
		if (!atomic_read(&ts->count)) {
			if (ts->use_callback)
				ts->callback(ts);
			else
				ts->func(ts->data);
		}
		tasklet_unlock(ts);
	}
}

/* 32 bit powerpc lacks 64 bit atomics */
#if defined(__powerpc__) && !defined(__powerpc64__)
struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
#endif

void
set_current_state(int state)
{
	int prio = state;

	KASSERT(state != TASK_RUNNING);
	/* check if already on the sleep list */
	if (curproc->p_wchan != NULL)
		return;
	sleep_setup(curproc, prio, "schto");
}

void
__set_current_state(int state)
{
	struct proc *p = curproc;
	int s;

	KASSERT(state == TASK_RUNNING);
	SCHED_LOCK(s);
	unsleep(p);
	p->p_stat = SONPROC;
	atomic_clearbits_int(&p->p_flag, P_WSLEEP);
	SCHED_UNLOCK(s);
}

void
schedule(void)
{
	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

long
schedule_timeout(long timeout)
{
	unsigned long deadline;
	int timo = 0;

	KASSERT(!cold);

	if (timeout != MAX_SCHEDULE_TIMEOUT)
		timo = timeout;
	if (timeout != MAX_SCHEDULE_TIMEOUT)
		deadline = jiffies + timeout;
	sleep_finish(timo, timeout > 0);
	if (timeout != MAX_SCHEDULE_TIMEOUT)
		timeout = deadline - jiffies;

	return timeout > 0 ? timeout : 0;
}

long
schedule_timeout_uninterruptible(long timeout)
{
	tsleep(curproc, PWAIT, "schtou", timeout);
	return 0;
}

int
wake_up_process(struct proc *p)
{
	int s, rv;

	SCHED_LOCK(s);
	rv = wakeup_proc(p, NULL, 0);
	SCHED_UNLOCK(s);
	return rv;
}

int
autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
    int sync, void *key)
{
	if (wqe->private)
		wake_up_process(wqe->private);
	list_del_init(&wqe->entry);
	return 0;
}

void
prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
{
	mtx_enter(&wqh->lock);
	if (list_empty(&wqe->entry))
		__add_wait_queue(wqh, wqe);
	mtx_leave(&wqh->lock);

	set_current_state(state);
}

void
finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	__set_current_state(TASK_RUNNING);

	mtx_enter(&wqh->lock);
	if (!list_empty(&wqe->entry))
		list_del_init(&wqe->entry);
	mtx_leave(&wqh->lock);
}

void
flush_workqueue(struct workqueue_struct *wq)
{
	if (cold)
		return;

	if (wq)
		taskq_barrier((struct taskq *)wq);
}

bool
flush_work(struct work_struct *work)
{
	if (cold)
		return false;

	if (work->tq)
		taskq_barrier(work->tq);
	return false;
}

bool
flush_delayed_work(struct delayed_work *dwork)
{
	bool ret = false;

	if (cold)
		return false;

	while (timeout_pending(&dwork->to)) {
		tsleep(dwork, PWAIT, "fldwto", 1);
		ret = true;
	}

	if (dwork->tq)
		taskq_barrier(dwork->tq);
	return ret;
}

struct kthread {
	int (*func)(void *);
	void *data;
	struct proc *proc;
	volatile u_int flags;
#define KTHREAD_SHOULDSTOP	0x0000001
#define KTHREAD_STOPPED		0x0000002
#define KTHREAD_SHOULDPARK	0x0000004
#define KTHREAD_PARKED		0x0000008
	LIST_ENTRY(kthread) next;
};

LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);

void
kthread_func(void *arg)
{
	struct kthread *thread = arg;
	int ret;

	ret = thread->func(thread->data);
	thread->flags |= KTHREAD_STOPPED;
	wakeup(thread);
	kthread_exit(ret);
}

struct proc *
kthread_run(int (*func)(void *), void *data, const char *name)
{
	struct kthread *thread;

	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
	thread->func = func;
	thread->data = data;
	thread->flags = 0;

	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
		free(thread, M_DRM, sizeof(*thread));
		return ERR_PTR(-ENOMEM);
	}

	LIST_INSERT_HEAD(&kthread_list, thread, next);
	return thread->proc;
}

struct kthread_worker *
kthread_create_worker(unsigned int flags, const char *fmt, ...)
{
	char name[MAXCOMLEN+1];
	va_list ap;

	struct kthread_worker *w = malloc(sizeof(*w), M_DRM, M_WAITOK);
	va_start(ap, fmt);
	vsnprintf(name, sizeof(name), fmt, ap);
	va_end(ap);
	w->tq = taskq_create(name, 1, IPL_HIGH, 0);

	return w;
}

void
kthread_destroy_worker(struct kthread_worker *worker)
{
	taskq_destroy(worker->tq);
	free(worker, M_DRM, sizeof(*worker));
}

void
kthread_init_work(struct kthread_work *work, void (*func)(struct kthread_work *))
{
	work->tq = NULL;
	task_set(&work->task, (void (*)(void *))func, work);
}

bool
kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
{
	work->tq = worker->tq;
	return task_add(work->tq, &work->task);
}

bool
kthread_cancel_work_sync(struct kthread_work *work)
{
	return task_del(work->tq, &work->task);
}

void
kthread_flush_work(struct kthread_work *work)
{
	if (cold)
		return;

	if (work->tq)
		taskq_barrier(work->tq);
}

void
kthread_flush_worker(struct kthread_worker *worker)
{
	if (cold)
		return;

	if (worker->tq)
		taskq_barrier(worker->tq);
}

struct kthread *
kthread_lookup(struct proc *p)
{
	struct kthread *thread;

	LIST_FOREACH(thread, &kthread_list, next) {
		if (thread->proc == p)
			break;
	}
	KASSERT(thread);

	return thread;
}

int
kthread_should_park(void)
{
	struct kthread *thread = kthread_lookup(curproc);
	return (thread->flags & KTHREAD_SHOULDPARK);
}

void
kthread_parkme(void)
{
	struct kthread *thread = kthread_lookup(curproc);

	while (thread->flags & KTHREAD_SHOULDPARK) {
		thread->flags |= KTHREAD_PARKED;
		wakeup(thread);
		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
		thread->flags &= ~KTHREAD_PARKED;
	}
}

void
kthread_park(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	while ((thread->flags & KTHREAD_PARKED) == 0) {
		thread->flags |= KTHREAD_SHOULDPARK;
		wake_up_process(thread->proc);
		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
	}
}

void
kthread_unpark(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	thread->flags &= ~KTHREAD_SHOULDPARK;
	wakeup(thread);
}

int
kthread_should_stop(void)
{
	struct kthread *thread = kthread_lookup(curproc);
	return (thread->flags & KTHREAD_SHOULDSTOP);
}

void
kthread_stop(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	while ((thread->flags & KTHREAD_STOPPED) == 0) {
		thread->flags |= KTHREAD_SHOULDSTOP;
		kthread_unpark(p);
		wake_up_process(thread->proc);
		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
	}
	LIST_REMOVE(thread, next);
	free(thread, M_DRM, sizeof(*thread));
}

#if NBIOS > 0
extern char smbios_board_vendor[];
extern char smbios_board_prod[];
extern char smbios_board_serial[];
#endif

bool
dmi_match(int slot, const char *str)
{
	switch (slot) {
	case DMI_SYS_VENDOR:
		if (hw_vendor != NULL &&
		    !strcmp(hw_vendor, str))
			return true;
		break;
	case DMI_PRODUCT_NAME:
		if (hw_prod != NULL &&
		    !strcmp(hw_prod, str))
			return true;
		break;
	case DMI_PRODUCT_VERSION:
		if (hw_ver != NULL &&
		    !strcmp(hw_ver, str))
			return true;
		break;
#if NBIOS > 0
	case
DMI_BOARD_VENDOR: 453 if (strcmp(smbios_board_vendor, str) == 0) 454 return true; 455 break; 456 case DMI_BOARD_NAME: 457 if (strcmp(smbios_board_prod, str) == 0) 458 return true; 459 break; 460 case DMI_BOARD_SERIAL: 461 if (strcmp(smbios_board_serial, str) == 0) 462 return true; 463 break; 464 #else 465 case DMI_BOARD_VENDOR: 466 if (hw_vendor != NULL && 467 !strcmp(hw_vendor, str)) 468 return true; 469 break; 470 case DMI_BOARD_NAME: 471 if (hw_prod != NULL && 472 !strcmp(hw_prod, str)) 473 return true; 474 break; 475 #endif 476 case DMI_NONE: 477 default: 478 return false; 479 } 480 481 return false; 482 } 483 484 static bool 485 dmi_found(const struct dmi_system_id *dsi) 486 { 487 int i, slot; 488 489 for (i = 0; i < nitems(dsi->matches); i++) { 490 slot = dsi->matches[i].slot; 491 if (slot == DMI_NONE) 492 break; 493 if (!dmi_match(slot, dsi->matches[i].substr)) 494 return false; 495 } 496 497 return true; 498 } 499 500 const struct dmi_system_id * 501 dmi_first_match(const struct dmi_system_id *sysid) 502 { 503 const struct dmi_system_id *dsi; 504 505 for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) { 506 if (dmi_found(dsi)) 507 return dsi; 508 } 509 510 return NULL; 511 } 512 513 #if NBIOS > 0 514 extern char smbios_bios_date[]; 515 extern char smbios_bios_version[]; 516 #endif 517 518 const char * 519 dmi_get_system_info(int slot) 520 { 521 #if NBIOS > 0 522 switch (slot) { 523 case DMI_BIOS_DATE: 524 return smbios_bios_date; 525 case DMI_BIOS_VERSION: 526 return smbios_bios_version; 527 default: 528 printf("%s slot %d not handled\n", __func__, slot); 529 } 530 #endif 531 return NULL; 532 } 533 534 int 535 dmi_check_system(const struct dmi_system_id *sysid) 536 { 537 const struct dmi_system_id *dsi; 538 int num = 0; 539 540 for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) { 541 if (dmi_found(dsi)) { 542 num++; 543 if (dsi->callback && dsi->callback(dsi)) 544 break; 545 } 546 } 547 return (num); 548 } 549 550 struct vm_page * 551 alloc_pages(unsigned int gfp_mask, unsigned int order) 552 { 553 int flags = (gfp_mask & M_NOWAIT) ? 
UVM_PLA_NOWAIT : UVM_PLA_WAITOK; 554 struct uvm_constraint_range *constraint = &no_constraint; 555 struct pglist mlist; 556 557 if (gfp_mask & M_CANFAIL) 558 flags |= UVM_PLA_FAILOK; 559 if (gfp_mask & M_ZERO) 560 flags |= UVM_PLA_ZERO; 561 if (gfp_mask & __GFP_DMA32) 562 constraint = &dma_constraint; 563 564 TAILQ_INIT(&mlist); 565 if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low, 566 constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags)) 567 return NULL; 568 return TAILQ_FIRST(&mlist); 569 } 570 571 void 572 __free_pages(struct vm_page *page, unsigned int order) 573 { 574 struct pglist mlist; 575 int i; 576 577 TAILQ_INIT(&mlist); 578 for (i = 0; i < (1 << order); i++) 579 TAILQ_INSERT_TAIL(&mlist, &page[i], pageq); 580 uvm_pglistfree(&mlist); 581 } 582 583 void 584 __pagevec_release(struct pagevec *pvec) 585 { 586 struct pglist mlist; 587 int i; 588 589 TAILQ_INIT(&mlist); 590 for (i = 0; i < pvec->nr; i++) 591 TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq); 592 uvm_pglistfree(&mlist); 593 pagevec_reinit(pvec); 594 } 595 596 static struct kmem_va_mode kv_physwait = { 597 .kv_map = &phys_map, 598 .kv_wait = 1, 599 }; 600 601 void * 602 kmap(struct vm_page *pg) 603 { 604 vaddr_t va; 605 606 #if defined (__HAVE_PMAP_DIRECT) 607 va = pmap_map_direct(pg); 608 #else 609 va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok); 610 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE); 611 pmap_update(pmap_kernel()); 612 #endif 613 return (void *)va; 614 } 615 616 void 617 kunmap_va(void *addr) 618 { 619 vaddr_t va = (vaddr_t)addr; 620 621 #if defined (__HAVE_PMAP_DIRECT) 622 pmap_unmap_direct(va); 623 #else 624 pmap_kremove(va, PAGE_SIZE); 625 pmap_update(pmap_kernel()); 626 km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none); 627 #endif 628 } 629 630 vaddr_t kmap_atomic_va; 631 int kmap_atomic_inuse; 632 633 void * 634 kmap_atomic_prot(struct vm_page *pg, pgprot_t prot) 635 { 636 KASSERT(!kmap_atomic_inuse); 637 638 kmap_atomic_inuse = 1; 639 pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot, 640 PROT_READ | PROT_WRITE); 641 return (void *)kmap_atomic_va; 642 } 643 644 void 645 kunmap_atomic(void *addr) 646 { 647 KASSERT(kmap_atomic_inuse); 648 649 pmap_kremove(kmap_atomic_va, PAGE_SIZE); 650 kmap_atomic_inuse = 0; 651 } 652 653 void * 654 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags, 655 pgprot_t prot) 656 { 657 vaddr_t va; 658 paddr_t pa; 659 int i; 660 661 va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none, 662 &kd_nowait); 663 if (va == 0) 664 return NULL; 665 for (i = 0; i < npages; i++) { 666 pa = VM_PAGE_TO_PHYS(pages[i]) | prot; 667 pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa, 668 PROT_READ | PROT_WRITE, 669 PROT_READ | PROT_WRITE | PMAP_WIRED); 670 pmap_update(pmap_kernel()); 671 } 672 673 return (void *)va; 674 } 675 676 void 677 vunmap(void *addr, size_t size) 678 { 679 vaddr_t va = (vaddr_t)addr; 680 681 pmap_remove(pmap_kernel(), va, va + size); 682 pmap_update(pmap_kernel()); 683 km_free((void *)va, size, &kv_any, &kp_none); 684 } 685 686 bool 687 is_vmalloc_addr(const void *p) 688 { 689 vaddr_t min, max, addr; 690 691 min = vm_map_min(kernel_map); 692 max = vm_map_max(kernel_map); 693 addr = (vaddr_t)p; 694 695 if (addr >= min && addr <= max) 696 return true; 697 else 698 return false; 699 } 700 701 void 702 print_hex_dump(const char *level, const char *prefix_str, int prefix_type, 703 int rowsize, int groupsize, const void *buf, size_t len, bool ascii) 704 { 705 const uint8_t *cbuf = buf; 
	int i;

	for (i = 0; i < len; i++) {
		if ((i % rowsize) == 0)
			printf("%s", prefix_str);
		printf("%02x", cbuf[i]);
		if ((i % rowsize) == (rowsize - 1))
			printf("\n");
		else
			printf(" ");
	}
}

void *
memchr_inv(const void *s, int c, size_t n)
{
	if (n != 0) {
		const unsigned char *p = s;

		do {
			if (*p++ != (unsigned char)c)
				return ((void *)(p - 1));
		} while (--n != 0);
	}
	return (NULL);
}

int
panic_cmp(struct rb_node *a, struct rb_node *b)
{
	panic(__func__);
}

#undef RB_ROOT
#define RB_ROOT(head)	(head)->rbh_root

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

/*
 * This is a fairly minimal implementation of the Linux "idr" API. It
 * probably isn't very efficient, and definitely isn't RCU safe. The
 * pre-load buffer is global instead of per-cpu; we rely on the kernel
 * lock to make this work. We do randomize our IDs in order to make
 * them harder to guess.
 */

int idr_cmp(struct idr_entry *, struct idr_entry *);
SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);

struct pool idr_pool;
struct idr_entry *idr_entry_cache;

void
idr_init(struct idr *idr)
{
	SPLAY_INIT(&idr->tree);
}

void
idr_destroy(struct idr *idr)
{
	struct idr_entry *id;

	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
		SPLAY_REMOVE(idr_tree, &idr->tree, id);
		pool_put(&idr_pool, id);
	}
}

void
idr_preload(unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache == NULL)
		idr_entry_cache = pool_get(&idr_pool, flags);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	struct idr_entry *id;
	int begin;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache) {
		id = idr_entry_cache;
		idr_entry_cache = NULL;
	} else {
		id = pool_get(&idr_pool, flags);
		if (id == NULL)
			return -ENOMEM;
	}

	if (end <= 0)
		end = INT_MAX;

#ifdef notyet
	id->id = begin = start + arc4random_uniform(end - start);
#else
	id->id = begin = start;
#endif
	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
		if (id->id == end)
			id->id = start;
		else
			id->id++;
		if (id->id == begin) {
			pool_put(&idr_pool, id);
			return -ENOSPC;
		}
	}
	id->ptr = ptr;
	return id->id;
}

void *
idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
	struct idr_entry find, *res;
	void *old;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return ERR_PTR(-ENOENT);
	old = res->ptr;
	res->ptr = ptr;
	return old;
}

void *
idr_remove(struct idr *idr, unsigned long id)
{
	struct idr_entry find, *res;
	void *ptr = NULL;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res) {
		SPLAY_REMOVE(idr_tree, &idr->tree, res);
		ptr = res->ptr;
		pool_put(&idr_pool, res);
	}
	return ptr;
}

void *
idr_find(struct idr *idr, unsigned long id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return NULL;
	return res->ptr;
}

void *
idr_get_next(struct idr *idr, int *id)
{
	struct idr_entry *res;

	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
		if (res->id >= *id) {
			*id = res->id;
			return res->ptr;
		}
	}

	return NULL;
}

int
idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
{
	struct idr_entry *id;
	int ret;

	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
		ret = func(id->id, id->ptr, data);
		if (ret)
			return ret;
	}

	return 0;
}

int
idr_cmp(struct idr_entry *a, struct idr_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);

void
ida_init(struct ida *ida)
{
	idr_init(&ida->idr);
}

void
ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t gfp_mask)
{
	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
}

void
ida_simple_remove(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

int
ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
	return idr_alloc(&ida->idr, NULL, min, INT_MAX, gfp);
}

int
ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
	return idr_alloc(&ida->idr, NULL, 0, max - 1, gfp);
}

void
ida_free(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

int
xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
{
	return (a->id < b->id ?
-1 : a->id > b->id); 954 } 955 956 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp); 957 struct pool xa_pool; 958 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp); 959 960 void 961 xa_init_flags(struct xarray *xa, gfp_t flags) 962 { 963 static int initialized; 964 965 if (!initialized) { 966 pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_NONE, 0, 967 "xapl", NULL); 968 initialized = 1; 969 } 970 SPLAY_INIT(&xa->xa_tree); 971 if (flags & XA_FLAGS_LOCK_IRQ) 972 mtx_init(&xa->xa_lock, IPL_TTY); 973 else 974 mtx_init(&xa->xa_lock, IPL_NONE); 975 } 976 977 void 978 xa_destroy(struct xarray *xa) 979 { 980 struct xarray_entry *id; 981 982 while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) { 983 SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id); 984 pool_put(&xa_pool, id); 985 } 986 } 987 988 /* Don't wrap ids. */ 989 int 990 __xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp) 991 { 992 struct xarray_entry *xid; 993 int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0; 994 int begin; 995 996 if (gfp & GFP_NOWAIT) { 997 xid = pool_get(&xa_pool, PR_NOWAIT); 998 } else { 999 mtx_leave(&xa->xa_lock); 1000 xid = pool_get(&xa_pool, PR_WAITOK); 1001 mtx_enter(&xa->xa_lock); 1002 } 1003 1004 if (xid == NULL) 1005 return -ENOMEM; 1006 1007 if (limit <= 0) 1008 limit = INT_MAX; 1009 1010 xid->id = begin = start; 1011 1012 while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) { 1013 if (xid->id == limit) 1014 xid->id = start; 1015 else 1016 xid->id++; 1017 if (xid->id == begin) { 1018 pool_put(&xa_pool, xid); 1019 return -EBUSY; 1020 } 1021 } 1022 xid->ptr = entry; 1023 *id = xid->id; 1024 return 0; 1025 } 1026 1027 /* 1028 * Wrap ids and store next id. 1029 * We walk the entire tree so don't special case wrapping. 1030 * The only caller of this (i915_drm_client.c) doesn't use next id. 
1031 */ 1032 int 1033 __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, int limit, u32 *next, 1034 gfp_t gfp) 1035 { 1036 int r = __xa_alloc(xa, id, entry, limit, gfp); 1037 *next = *id + 1; 1038 return r; 1039 } 1040 1041 void * 1042 __xa_erase(struct xarray *xa, unsigned long index) 1043 { 1044 struct xarray_entry find, *res; 1045 void *ptr = NULL; 1046 1047 find.id = index; 1048 res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find); 1049 if (res) { 1050 SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res); 1051 ptr = res->ptr; 1052 pool_put(&xa_pool, res); 1053 } 1054 return ptr; 1055 } 1056 1057 void * 1058 __xa_load(struct xarray *xa, unsigned long index) 1059 { 1060 struct xarray_entry find, *res; 1061 1062 find.id = index; 1063 res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find); 1064 if (res == NULL) 1065 return NULL; 1066 return res->ptr; 1067 } 1068 1069 void * 1070 __xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) 1071 { 1072 struct xarray_entry find, *res; 1073 void *prev; 1074 1075 if (entry == NULL) 1076 return __xa_erase(xa, index); 1077 1078 find.id = index; 1079 res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find); 1080 if (res != NULL) { 1081 /* index exists */ 1082 /* XXX Multislot entries updates not implemented yet */ 1083 prev = res->ptr; 1084 res->ptr = entry; 1085 return prev; 1086 } 1087 1088 /* index not found, add new */ 1089 if (gfp & GFP_NOWAIT) { 1090 res = pool_get(&xa_pool, PR_NOWAIT); 1091 } else { 1092 mtx_leave(&xa->xa_lock); 1093 res = pool_get(&xa_pool, PR_WAITOK); 1094 mtx_enter(&xa->xa_lock); 1095 } 1096 if (res == NULL) 1097 return XA_ERROR(-ENOMEM); 1098 res->id = index; 1099 res->ptr = entry; 1100 if (SPLAY_INSERT(xarray_tree, &xa->xa_tree, res) != NULL) 1101 return XA_ERROR(-EINVAL); 1102 return NULL; /* no prev entry at index */ 1103 } 1104 1105 void * 1106 xa_get_next(struct xarray *xa, unsigned long *index) 1107 { 1108 struct xarray_entry *res; 1109 1110 SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) { 1111 if (res->id >= *index) { 1112 *index = res->id; 1113 return res->ptr; 1114 } 1115 } 1116 1117 return NULL; 1118 } 1119 1120 int 1121 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) 1122 { 1123 table->sgl = mallocarray(nents, sizeof(struct scatterlist), 1124 M_DRM, gfp_mask | M_ZERO); 1125 if (table->sgl == NULL) 1126 return -ENOMEM; 1127 table->nents = table->orig_nents = nents; 1128 sg_mark_end(&table->sgl[nents - 1]); 1129 return 0; 1130 } 1131 1132 void 1133 sg_free_table(struct sg_table *table) 1134 { 1135 free(table->sgl, M_DRM, 1136 table->orig_nents * sizeof(struct scatterlist)); 1137 table->orig_nents = 0; 1138 table->sgl = NULL; 1139 } 1140 1141 size_t 1142 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, 1143 const void *buf, size_t buflen) 1144 { 1145 panic("%s", __func__); 1146 } 1147 1148 int 1149 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 1150 { 1151 void *cmd = NULL; 1152 int cmdlen = 0; 1153 int err, ret = 0; 1154 int op; 1155 1156 iic_acquire_bus(&adap->ic, 0); 1157 1158 while (num > 2) { 1159 op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE; 1160 err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0, 1161 msgs->buf, msgs->len, 0); 1162 if (err) { 1163 ret = -err; 1164 goto fail; 1165 } 1166 msgs++; 1167 num--; 1168 ret++; 1169 } 1170 1171 if (num > 1) { 1172 cmd = msgs->buf; 1173 cmdlen = msgs->len; 1174 msgs++; 1175 num--; 1176 ret++; 1177 } 1178 1179 op = (msgs->flags & I2C_M_RD) ? 
1180 I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP; 1181 err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen, 1182 msgs->buf, msgs->len, 0); 1183 if (err) { 1184 ret = -err; 1185 goto fail; 1186 } 1187 msgs++; 1188 ret++; 1189 1190 fail: 1191 iic_release_bus(&adap->ic, 0); 1192 1193 return ret; 1194 } 1195 1196 int 1197 __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 1198 { 1199 int ret, retries; 1200 1201 retries = adap->retries; 1202 retry: 1203 if (adap->algo) 1204 ret = adap->algo->master_xfer(adap, msgs, num); 1205 else 1206 ret = i2c_master_xfer(adap, msgs, num); 1207 if (ret == -EAGAIN && retries > 0) { 1208 retries--; 1209 goto retry; 1210 } 1211 1212 return ret; 1213 } 1214 1215 int 1216 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 1217 { 1218 int ret; 1219 1220 if (adap->lock_ops) 1221 adap->lock_ops->lock_bus(adap, 0); 1222 1223 ret = __i2c_transfer(adap, msgs, num); 1224 1225 if (adap->lock_ops) 1226 adap->lock_ops->unlock_bus(adap, 0); 1227 1228 return ret; 1229 } 1230 1231 int 1232 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 1233 { 1234 struct i2c_algo_bit_data *algo = adap->algo_data; 1235 struct i2c_adapter bb; 1236 1237 memset(&bb, 0, sizeof(bb)); 1238 bb.ic = algo->ic; 1239 bb.retries = adap->retries; 1240 return i2c_master_xfer(&bb, msgs, num); 1241 } 1242 1243 uint32_t 1244 i2c_bb_functionality(struct i2c_adapter *adap) 1245 { 1246 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 1247 } 1248 1249 struct i2c_algorithm i2c_bit_algo = { 1250 .master_xfer = i2c_bb_master_xfer, 1251 .functionality = i2c_bb_functionality 1252 }; 1253 1254 int 1255 i2c_bit_add_bus(struct i2c_adapter *adap) 1256 { 1257 adap->algo = &i2c_bit_algo; 1258 adap->retries = 3; 1259 1260 return 0; 1261 } 1262 1263 #if defined(__amd64__) || defined(__i386__) 1264 1265 /* 1266 * This is a minimal implementation of the Linux vga_get/vga_put 1267 * interface. In all likelihood, it will only work for inteldrm(4) as 1268 * it assumes that if there is another active VGA device in the 1269 * system, it is sitting behind a PCI bridge. 1270 */ 1271 1272 extern int pci_enumerate_bus(struct pci_softc *, 1273 int (*)(struct pci_attach_args *), struct pci_attach_args *); 1274 1275 pcitag_t vga_bridge_tag; 1276 int vga_bridge_disabled; 1277 1278 int 1279 vga_disable_bridge(struct pci_attach_args *pa) 1280 { 1281 pcireg_t bhlc, bc; 1282 1283 if (pa->pa_domain != 0) 1284 return 0; 1285 1286 bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 1287 if (PCI_HDRTYPE_TYPE(bhlc) != 1) 1288 return 0; 1289 1290 bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL); 1291 if ((bc & PPB_BC_VGA_ENABLE) == 0) 1292 return 0; 1293 bc &= ~PPB_BC_VGA_ENABLE; 1294 pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc); 1295 1296 vga_bridge_tag = pa->pa_tag; 1297 vga_bridge_disabled = 1; 1298 1299 return 1; 1300 } 1301 1302 void 1303 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc) 1304 { 1305 KASSERT(pdev->pci->sc_bridgetag == NULL); 1306 pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL); 1307 } 1308 1309 void 1310 vga_put(struct pci_dev *pdev, int rsrc) 1311 { 1312 pcireg_t bc; 1313 1314 if (!vga_bridge_disabled) 1315 return; 1316 1317 bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL); 1318 bc |= PPB_BC_VGA_ENABLE; 1319 pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc); 1320 1321 vga_bridge_disabled = 0; 1322 } 1323 1324 #endif 1325 1326 /* 1327 * ACPI types and interfaces. 
 */

#ifdef __HAVE_ACPI
#include "acpi.h"
#endif

#if NACPI > 0

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/amltypes.h>
#include <dev/acpi/dsdt.h>

acpi_status
acpi_get_table(const char *sig, int instance,
    struct acpi_table_header **hdr)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpi_q *entry;

	KASSERT(instance == 1);

	if (sc == NULL)
		return AE_NOT_FOUND;

	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
			*hdr = entry->q_table;
			return 0;
		}
	}

	return AE_NOT_FOUND;
}

void
acpi_put_table(struct acpi_table_header *hdr)
{
}

acpi_status
acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
{
	node = aml_searchname(node, name);
	if (node == NULL)
		return AE_NOT_FOUND;

	*rnode = node;
	return 0;
}

acpi_status
acpi_get_name(acpi_handle node, int type, struct acpi_buffer *buffer)
{
	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
	KASSERT(type == ACPI_FULL_PATHNAME);
	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
	return 0;
}

acpi_status
acpi_evaluate_object(acpi_handle node, const char *name,
    struct acpi_object_list *params, struct acpi_buffer *result)
{
	struct aml_value args[4], res;
	union acpi_object *obj;
	uint8_t *data;
	int i;

	KASSERT(params->count <= nitems(args));

	for (i = 0; i < params->count; i++) {
		args[i].type = params->pointer[i].type;
		switch (args[i].type) {
		case AML_OBJTYPE_INTEGER:
			args[i].v_integer = params->pointer[i].integer.value;
			break;
		case AML_OBJTYPE_BUFFER:
			args[i].length = params->pointer[i].buffer.length;
			args[i].v_buffer = params->pointer[i].buffer.pointer;
			break;
		default:
			printf("%s: arg type 0x%02x", __func__, args[i].type);
			return AE_BAD_PARAMETER;
		}
	}

	if (name) {
		node = aml_searchname(node, name);
		if (node == NULL)
			return AE_NOT_FOUND;
	}
	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
		aml_freevalue(&res);
		return AE_ERROR;
	}

	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);

	result->length = sizeof(union acpi_object);
	switch (res.type) {
	case AML_OBJTYPE_BUFFER:
		result->length += res.length;
		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
		obj = (union acpi_object *)result->pointer;
		data = (uint8_t *)(obj + 1);
		obj->type = res.type;
		obj->buffer.length = res.length;
		obj->buffer.pointer = data;
		memcpy(data, res.v_buffer, res.length);
		break;
	default:
		printf("%s: return type 0x%02x", __func__, res.type);
		aml_freevalue(&res);
		return AE_ERROR;
	}

	aml_freevalue(&res);
	return 0;
}

SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
    SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);

int
drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
{
	struct acpi_bus_event event;
	struct notifier_block *nb;

	event.device_class = ACPI_VIDEO_CLASS;
	event.type = notify;

	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
		nb->notifier_call(nb, 0, &event);
	return 0;
}

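/*
 * Usage sketch for the acpi_evaluate_object() shim above; the handle and
 * method name here are placeholders, not taken from a real driver.  Two
 * points the sketch illustrates: an argument list must always be passed
 * (the shim dereferences it, even when empty), and the result buffer uses
 * ACPI_ALLOCATE_BUFFER, so the caller frees the returned object:
 *
 *	struct acpi_object_list args = { .count = 0, .pointer = NULL };
 *	struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
 *	union acpi_object *obj;
 *
 *	if (acpi_evaluate_object(handle, "MTHD", &args, &result) == 0) {
 *		obj = result.pointer;
 *		(use obj->buffer.pointer / obj->buffer.length here)
 *		free(result.pointer, M_DRM, result.length);
 *	}
 */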
int
register_acpi_notifier(struct notifier_block *nb)
{
	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
	return 0;
}

int
unregister_acpi_notifier(struct notifier_block *nb)
{
	struct notifier_block *tmp;

	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
		if (tmp == nb) {
			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
			    notifier_block, link);
			return 0;
		}
	}

	return -ENOENT;
}

const char *
acpi_format_exception(acpi_status status)
{
	switch (status) {
	case AE_NOT_FOUND:
		return "not found";
	case AE_BAD_PARAMETER:
		return "bad parameter";
	default:
		return "unknown";
	}
}

#endif

SLIST_HEAD(,backlight_device) backlight_device_list =
    SLIST_HEAD_INITIALIZER(backlight_device_list);

void
backlight_do_update_status(void *arg)
{
	backlight_update_status(arg);
}

struct backlight_device *
backlight_device_register(const char *name, void *kdev, void *data,
    const struct backlight_ops *ops, const struct backlight_properties *props)
{
	struct backlight_device *bd;

	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
	bd->ops = ops;
	bd->props = *props;
	bd->data = data;

	task_set(&bd->task, backlight_do_update_status, bd);

	SLIST_INSERT_HEAD(&backlight_device_list, bd, next);
	bd->name = name;

	return bd;
}

void
backlight_device_unregister(struct backlight_device *bd)
{
	SLIST_REMOVE(&backlight_device_list, bd, backlight_device, next);
	free(bd, M_DRM, sizeof(*bd));
}

void
backlight_schedule_update_status(struct backlight_device *bd)
{
	task_add(systq, &bd->task);
}

int
backlight_enable(struct backlight_device *bd)
{
	if (bd == NULL)
		return 0;

	bd->props.power = FB_BLANK_UNBLANK;

	return bd->ops->update_status(bd);
}

int
backlight_disable(struct backlight_device *bd)
{
	if (bd == NULL)
		return 0;

	bd->props.power = FB_BLANK_POWERDOWN;

	return bd->ops->update_status(bd);
}

struct backlight_device *
backlight_device_get_by_name(const char *name)
{
	struct backlight_device *bd;

	SLIST_FOREACH(bd, &backlight_device_list, next) {
		if (strcmp(name, bd->name) == 0)
			return bd;
	}

	return NULL;
}

struct drvdata {
	struct device *dev;
	void *data;
	SLIST_ENTRY(drvdata) next;
};

SLIST_HEAD(,drvdata) drvdata_list = SLIST_HEAD_INITIALIZER(drvdata_list);

void
dev_set_drvdata(struct device *dev, void *data)
{
	struct drvdata *drvdata;

	SLIST_FOREACH(drvdata, &drvdata_list, next) {
		if (drvdata->dev == dev) {
			drvdata->data = data;
			return;
		}
	}

	if (data == NULL)
		return;

	drvdata = malloc(sizeof(*drvdata), M_DRM, M_WAITOK);
	drvdata->dev = dev;
	drvdata->data = data;

	SLIST_INSERT_HEAD(&drvdata_list, drvdata, next);
}

void *
dev_get_drvdata(struct device *dev)
{
	struct drvdata *drvdata;

	SLIST_FOREACH(drvdata, &drvdata_list, next) {
		if (drvdata->dev == dev)
			return drvdata->data;
	}

	return NULL;
}

void
drm_sysfs_hotplug_event(struct drm_device *dev)
{
	knote_locked(&dev->note,
NOTE_CHANGE); 1627 } 1628 1629 void 1630 drm_sysfs_connector_hotplug_event(struct drm_connector *connector) 1631 { 1632 knote_locked(&connector->dev->note, NOTE_CHANGE); 1633 } 1634 1635 void 1636 drm_sysfs_connector_status_event(struct drm_connector *connector, 1637 struct drm_property *property) 1638 { 1639 STUB(); 1640 } 1641 1642 struct dma_fence * 1643 dma_fence_get(struct dma_fence *fence) 1644 { 1645 if (fence) 1646 kref_get(&fence->refcount); 1647 return fence; 1648 } 1649 1650 struct dma_fence * 1651 dma_fence_get_rcu(struct dma_fence *fence) 1652 { 1653 if (fence) 1654 kref_get(&fence->refcount); 1655 return fence; 1656 } 1657 1658 struct dma_fence * 1659 dma_fence_get_rcu_safe(struct dma_fence **dfp) 1660 { 1661 struct dma_fence *fence; 1662 if (dfp == NULL) 1663 return NULL; 1664 fence = *dfp; 1665 if (fence) 1666 kref_get(&fence->refcount); 1667 return fence; 1668 } 1669 1670 void 1671 dma_fence_release(struct kref *ref) 1672 { 1673 struct dma_fence *fence = container_of(ref, struct dma_fence, refcount); 1674 if (fence->ops && fence->ops->release) 1675 fence->ops->release(fence); 1676 else 1677 free(fence, M_DRM, 0); 1678 } 1679 1680 void 1681 dma_fence_put(struct dma_fence *fence) 1682 { 1683 if (fence) 1684 kref_put(&fence->refcount, dma_fence_release); 1685 } 1686 1687 int 1688 dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp) 1689 { 1690 struct dma_fence_cb *cur, *tmp; 1691 struct list_head cb_list; 1692 1693 if (fence == NULL) 1694 return -EINVAL; 1695 1696 if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 1697 return -EINVAL; 1698 1699 list_replace(&fence->cb_list, &cb_list); 1700 1701 fence->timestamp = timestamp; 1702 set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); 1703 1704 list_for_each_entry_safe(cur, tmp, &cb_list, node) { 1705 INIT_LIST_HEAD(&cur->node); 1706 cur->func(fence, cur); 1707 } 1708 1709 return 0; 1710 } 1711 1712 int 1713 dma_fence_signal(struct dma_fence *fence) 1714 { 1715 int r; 1716 1717 if (fence == NULL) 1718 return -EINVAL; 1719 1720 mtx_enter(fence->lock); 1721 r = dma_fence_signal_timestamp_locked(fence, ktime_get()); 1722 mtx_leave(fence->lock); 1723 1724 return r; 1725 } 1726 1727 int 1728 dma_fence_signal_locked(struct dma_fence *fence) 1729 { 1730 if (fence == NULL) 1731 return -EINVAL; 1732 1733 return dma_fence_signal_timestamp_locked(fence, ktime_get()); 1734 } 1735 1736 int 1737 dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp) 1738 { 1739 int r; 1740 1741 if (fence == NULL) 1742 return -EINVAL; 1743 1744 mtx_enter(fence->lock); 1745 r = dma_fence_signal_timestamp_locked(fence, timestamp); 1746 mtx_leave(fence->lock); 1747 1748 return r; 1749 } 1750 1751 bool 1752 dma_fence_is_signaled(struct dma_fence *fence) 1753 { 1754 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 1755 return true; 1756 1757 if (fence->ops->signaled && fence->ops->signaled(fence)) { 1758 dma_fence_signal(fence); 1759 return true; 1760 } 1761 1762 return false; 1763 } 1764 1765 bool 1766 dma_fence_is_signaled_locked(struct dma_fence *fence) 1767 { 1768 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 1769 return true; 1770 1771 if (fence->ops->signaled && fence->ops->signaled(fence)) { 1772 dma_fence_signal_locked(fence); 1773 return true; 1774 } 1775 1776 return false; 1777 } 1778 1779 ktime_t 1780 dma_fence_timestamp(struct dma_fence *fence) 1781 { 1782 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { 1783 while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, 
&fence->flags)) 1784 CPU_BUSY_CYCLE(); 1785 return fence->timestamp; 1786 } else { 1787 return ktime_get(); 1788 } 1789 } 1790 1791 long 1792 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout) 1793 { 1794 if (timeout < 0) 1795 return -EINVAL; 1796 1797 if (fence->ops->wait) 1798 return fence->ops->wait(fence, intr, timeout); 1799 else 1800 return dma_fence_default_wait(fence, intr, timeout); 1801 } 1802 1803 long 1804 dma_fence_wait(struct dma_fence *fence, bool intr) 1805 { 1806 long ret; 1807 1808 ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); 1809 if (ret < 0) 1810 return ret; 1811 1812 return 0; 1813 } 1814 1815 void 1816 dma_fence_enable_sw_signaling(struct dma_fence *fence) 1817 { 1818 if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) && 1819 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && 1820 fence->ops->enable_signaling) { 1821 mtx_enter(fence->lock); 1822 if (!fence->ops->enable_signaling(fence)) 1823 dma_fence_signal_locked(fence); 1824 mtx_leave(fence->lock); 1825 } 1826 } 1827 1828 void 1829 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, 1830 struct mutex *lock, uint64_t context, uint64_t seqno) 1831 { 1832 fence->ops = ops; 1833 fence->lock = lock; 1834 fence->context = context; 1835 fence->seqno = seqno; 1836 fence->flags = 0; 1837 fence->error = 0; 1838 kref_init(&fence->refcount); 1839 INIT_LIST_HEAD(&fence->cb_list); 1840 } 1841 1842 int 1843 dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, 1844 dma_fence_func_t func) 1845 { 1846 int ret = 0; 1847 bool was_set; 1848 1849 if (WARN_ON(!fence || !func)) 1850 return -EINVAL; 1851 1852 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { 1853 INIT_LIST_HEAD(&cb->node); 1854 return -ENOENT; 1855 } 1856 1857 mtx_enter(fence->lock); 1858 1859 was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); 1860 1861 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 1862 ret = -ENOENT; 1863 else if (!was_set && fence->ops->enable_signaling) { 1864 if (!fence->ops->enable_signaling(fence)) { 1865 dma_fence_signal_locked(fence); 1866 ret = -ENOENT; 1867 } 1868 } 1869 1870 if (!ret) { 1871 cb->func = func; 1872 list_add_tail(&cb->node, &fence->cb_list); 1873 } else 1874 INIT_LIST_HEAD(&cb->node); 1875 mtx_leave(fence->lock); 1876 1877 return ret; 1878 } 1879 1880 bool 1881 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb) 1882 { 1883 bool ret; 1884 1885 mtx_enter(fence->lock); 1886 1887 ret = !list_empty(&cb->node); 1888 if (ret) 1889 list_del_init(&cb->node); 1890 1891 mtx_leave(fence->lock); 1892 1893 return ret; 1894 } 1895 1896 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1); 1897 1898 uint64_t 1899 dma_fence_context_alloc(unsigned int num) 1900 { 1901 return atomic64_add_return(num, &drm_fence_context_count) - num; 1902 } 1903 1904 struct default_wait_cb { 1905 struct dma_fence_cb base; 1906 struct proc *proc; 1907 }; 1908 1909 static void 1910 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) 1911 { 1912 struct default_wait_cb *wait = 1913 container_of(cb, struct default_wait_cb, base); 1914 wake_up_process(wait->proc); 1915 } 1916 1917 long 1918 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) 1919 { 1920 long ret = timeout ? 
timeout : 1; 1921 unsigned long end; 1922 int err; 1923 struct default_wait_cb cb; 1924 bool was_set; 1925 1926 KASSERT(timeout <= INT_MAX); 1927 1928 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 1929 return ret; 1930 1931 mtx_enter(fence->lock); 1932 1933 was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, 1934 &fence->flags); 1935 1936 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 1937 goto out; 1938 1939 if (!was_set && fence->ops->enable_signaling) { 1940 if (!fence->ops->enable_signaling(fence)) { 1941 dma_fence_signal_locked(fence); 1942 goto out; 1943 } 1944 } 1945 1946 if (timeout == 0) { 1947 ret = 0; 1948 goto out; 1949 } 1950 1951 cb.base.func = dma_fence_default_wait_cb; 1952 cb.proc = curproc; 1953 list_add(&cb.base.node, &fence->cb_list); 1954 1955 end = jiffies + timeout; 1956 for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) { 1957 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 1958 break; 1959 err = msleep(curproc, fence->lock, intr ? PCATCH : 0, 1960 "dmafence", ret); 1961 if (err == EINTR || err == ERESTART) { 1962 ret = -ERESTARTSYS; 1963 break; 1964 } 1965 } 1966 1967 if (!list_empty(&cb.base.node)) 1968 list_del(&cb.base.node); 1969 out: 1970 mtx_leave(fence->lock); 1971 1972 return ret; 1973 } 1974 1975 static bool 1976 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count, 1977 uint32_t *idx) 1978 { 1979 int i; 1980 1981 for (i = 0; i < count; ++i) { 1982 struct dma_fence *fence = fences[i]; 1983 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { 1984 if (idx) 1985 *idx = i; 1986 return true; 1987 } 1988 } 1989 return false; 1990 } 1991 1992 long 1993 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, 1994 bool intr, long timeout, uint32_t *idx) 1995 { 1996 struct default_wait_cb *cb; 1997 long ret = timeout; 1998 unsigned long end; 1999 int i, err; 2000 2001 KASSERT(timeout <= INT_MAX); 2002 2003 if (timeout == 0) { 2004 for (i = 0; i < count; i++) { 2005 if (dma_fence_is_signaled(fences[i])) { 2006 if (idx) 2007 *idx = i; 2008 return 1; 2009 } 2010 } 2011 return 0; 2012 } 2013 2014 cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO); 2015 if (cb == NULL) 2016 return -ENOMEM; 2017 2018 for (i = 0; i < count; i++) { 2019 struct dma_fence *fence = fences[i]; 2020 cb[i].proc = curproc; 2021 if (dma_fence_add_callback(fence, &cb[i].base, 2022 dma_fence_default_wait_cb)) { 2023 if (idx) 2024 *idx = i; 2025 goto cb_cleanup; 2026 } 2027 } 2028 2029 end = jiffies + timeout; 2030 for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) { 2031 if (dma_fence_test_signaled_any(fences, count, idx)) 2032 break; 2033 err = tsleep(curproc, intr ? 
PCATCH : 0, "dfwat", ret); 2034 if (err == EINTR || err == ERESTART) { 2035 ret = -ERESTARTSYS; 2036 break; 2037 } 2038 } 2039 2040 cb_cleanup: 2041 while (i-- > 0) 2042 dma_fence_remove_callback(fences[i], &cb[i].base); 2043 free(cb, M_DRM, count * sizeof(*cb)); 2044 return ret; 2045 } 2046 2047 static struct dma_fence dma_fence_stub; 2048 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY); 2049 2050 static const char * 2051 dma_fence_stub_get_name(struct dma_fence *fence) 2052 { 2053 return "stub"; 2054 } 2055 2056 static const struct dma_fence_ops dma_fence_stub_ops = { 2057 .get_driver_name = dma_fence_stub_get_name, 2058 .get_timeline_name = dma_fence_stub_get_name, 2059 }; 2060 2061 struct dma_fence * 2062 dma_fence_get_stub(void) 2063 { 2064 mtx_enter(&dma_fence_stub_mtx); 2065 if (dma_fence_stub.ops == NULL) { 2066 dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops, 2067 &dma_fence_stub_mtx, 0, 0); 2068 dma_fence_signal_locked(&dma_fence_stub); 2069 } 2070 mtx_leave(&dma_fence_stub_mtx); 2071 2072 return dma_fence_get(&dma_fence_stub); 2073 } 2074 2075 struct dma_fence * 2076 dma_fence_allocate_private_stub(ktime_t ts) 2077 { 2078 struct dma_fence *f = malloc(sizeof(*f), M_DRM, 2079 M_ZERO | M_WAITOK | M_CANFAIL); 2080 if (f == NULL) 2081 return NULL; 2082 dma_fence_init(f, &dma_fence_stub_ops, &dma_fence_stub_mtx, 0, 0); 2083 dma_fence_signal_timestamp(f, ts); 2084 return f; 2085 } 2086 2087 static const char * 2088 dma_fence_array_get_driver_name(struct dma_fence *fence) 2089 { 2090 return "dma_fence_array"; 2091 } 2092 2093 static const char * 2094 dma_fence_array_get_timeline_name(struct dma_fence *fence) 2095 { 2096 return "unbound"; 2097 } 2098 2099 static void 2100 irq_dma_fence_array_work(void *arg) 2101 { 2102 struct dma_fence_array *dfa = (struct dma_fence_array *)arg; 2103 dma_fence_signal(&dfa->base); 2104 dma_fence_put(&dfa->base); 2105 } 2106 2107 static void 2108 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb) 2109 { 2110 struct dma_fence_array_cb *array_cb = 2111 container_of(cb, struct dma_fence_array_cb, cb); 2112 struct dma_fence_array *dfa = array_cb->array; 2113 2114 if (atomic_dec_and_test(&dfa->num_pending)) 2115 timeout_add(&dfa->to, 1); 2116 else 2117 dma_fence_put(&dfa->base); 2118 } 2119 2120 static bool 2121 dma_fence_array_enable_signaling(struct dma_fence *fence) 2122 { 2123 struct dma_fence_array *dfa = to_dma_fence_array(fence); 2124 struct dma_fence_array_cb *cb = (void *)(&dfa[1]); 2125 int i; 2126 2127 for (i = 0; i < dfa->num_fences; ++i) { 2128 cb[i].array = dfa; 2129 dma_fence_get(&dfa->base); 2130 if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb, 2131 dma_fence_array_cb_func)) { 2132 dma_fence_put(&dfa->base); 2133 if (atomic_dec_and_test(&dfa->num_pending)) 2134 return false; 2135 } 2136 } 2137 2138 return true; 2139 } 2140 2141 static bool 2142 dma_fence_array_signaled(struct dma_fence *fence) 2143 { 2144 struct dma_fence_array *dfa = to_dma_fence_array(fence); 2145 2146 return atomic_read(&dfa->num_pending) <= 0; 2147 } 2148 2149 static void 2150 dma_fence_array_release(struct dma_fence *fence) 2151 { 2152 struct dma_fence_array *dfa = to_dma_fence_array(fence); 2153 int i; 2154 2155 for (i = 0; i < dfa->num_fences; ++i) 2156 dma_fence_put(dfa->fences[i]); 2157 2158 free(dfa->fences, M_DRM, 0); 2159 dma_fence_free(fence); 2160 } 2161 2162 struct dma_fence_array * 2163 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context, 2164 unsigned seqno, bool signal_on_any) 2165 { 2166 
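	/*
	 * Note: the dma_fence_array_cb slots that
	 * dma_fence_array_enable_signaling() reaches through
	 * (void *)(&dfa[1]) are allocated here as a trailing array in
	 * the same block as the struct dma_fence_array itself.
	 */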
struct dma_fence_array *dfa = malloc(sizeof(*dfa) + 2167 (num_fences * sizeof(struct dma_fence_array_cb)), 2168 M_DRM, M_WAITOK|M_CANFAIL|M_ZERO); 2169 if (dfa == NULL) 2170 return NULL; 2171 2172 mtx_init(&dfa->lock, IPL_TTY); 2173 dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock, 2174 context, seqno); 2175 timeout_set(&dfa->to, irq_dma_fence_array_work, dfa); 2176 2177 dfa->num_fences = num_fences; 2178 atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences); 2179 dfa->fences = fences; 2180 2181 return dfa; 2182 } 2183 2184 struct dma_fence * 2185 dma_fence_array_first(struct dma_fence *f) 2186 { 2187 struct dma_fence_array *dfa; 2188 2189 if (f == NULL) 2190 return NULL; 2191 2192 if ((dfa = to_dma_fence_array(f)) == NULL) 2193 return f; 2194 2195 if (dfa->num_fences > 0) 2196 return dfa->fences[0]; 2197 2198 return NULL; 2199 } 2200 2201 struct dma_fence * 2202 dma_fence_array_next(struct dma_fence *f, unsigned int i) 2203 { 2204 struct dma_fence_array *dfa; 2205 2206 if (f == NULL) 2207 return NULL; 2208 2209 if ((dfa = to_dma_fence_array(f)) == NULL) 2210 return NULL; 2211 2212 if (i < dfa->num_fences) 2213 return dfa->fences[i]; 2214 2215 return NULL; 2216 } 2217 2218 const struct dma_fence_ops dma_fence_array_ops = { 2219 .get_driver_name = dma_fence_array_get_driver_name, 2220 .get_timeline_name = dma_fence_array_get_timeline_name, 2221 .enable_signaling = dma_fence_array_enable_signaling, 2222 .signaled = dma_fence_array_signaled, 2223 .release = dma_fence_array_release, 2224 }; 2225 2226 int 2227 dma_fence_chain_find_seqno(struct dma_fence **df, uint64_t seqno) 2228 { 2229 struct dma_fence_chain *chain; 2230 struct dma_fence *fence; 2231 2232 if (seqno == 0) 2233 return 0; 2234 2235 if ((chain = to_dma_fence_chain(*df)) == NULL) 2236 return -EINVAL; 2237 2238 fence = &chain->base; 2239 if (fence->seqno < seqno) 2240 return -EINVAL; 2241 2242 dma_fence_chain_for_each(*df, fence) { 2243 if ((*df)->context != fence->context) 2244 break; 2245 2246 chain = to_dma_fence_chain(*df); 2247 if (chain->prev_seqno < seqno) 2248 break; 2249 } 2250 dma_fence_put(fence); 2251 2252 return 0; 2253 } 2254 2255 void 2256 dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev, 2257 struct dma_fence *fence, uint64_t seqno) 2258 { 2259 uint64_t context; 2260 2261 chain->fence = fence; 2262 chain->prev = prev; 2263 mtx_init(&chain->lock, IPL_TTY); 2264 2265 /* if prev is a chain */ 2266 if (to_dma_fence_chain(prev) != NULL) { 2267 if (__dma_fence_is_later(seqno, prev->seqno, prev->ops)) { 2268 chain->prev_seqno = prev->seqno; 2269 context = prev->context; 2270 } else { 2271 chain->prev_seqno = 0; 2272 context = dma_fence_context_alloc(1); 2273 seqno = prev->seqno; 2274 } 2275 } else { 2276 chain->prev_seqno = 0; 2277 context = dma_fence_context_alloc(1); 2278 } 2279 2280 dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock, 2281 context, seqno); 2282 } 2283 2284 static const char * 2285 dma_fence_chain_get_driver_name(struct dma_fence *fence) 2286 { 2287 return "dma_fence_chain"; 2288 } 2289 2290 static const char * 2291 dma_fence_chain_get_timeline_name(struct dma_fence *fence) 2292 { 2293 return "unbound"; 2294 } 2295 2296 static bool dma_fence_chain_enable_signaling(struct dma_fence *); 2297 2298 static void 2299 dma_fence_chain_timo(void *arg) 2300 { 2301 struct dma_fence_chain *chain = (struct dma_fence_chain *)arg; 2302 2303 if (dma_fence_chain_enable_signaling(&chain->base) == false) 2304 dma_fence_signal(&chain->base); 2305 
dma_fence_put(&chain->base); 2306 } 2307 2308 static void 2309 dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb) 2310 { 2311 struct dma_fence_chain *chain = 2312 container_of(cb, struct dma_fence_chain, cb); 2313 timeout_set(&chain->to, dma_fence_chain_timo, chain); 2314 timeout_add(&chain->to, 1); 2315 dma_fence_put(f); 2316 } 2317 2318 static bool 2319 dma_fence_chain_enable_signaling(struct dma_fence *fence) 2320 { 2321 struct dma_fence_chain *chain, *h; 2322 struct dma_fence *f; 2323 2324 h = to_dma_fence_chain(fence); 2325 dma_fence_get(&h->base); 2326 dma_fence_chain_for_each(fence, &h->base) { 2327 chain = to_dma_fence_chain(fence); 2328 if (chain == NULL) 2329 f = fence; 2330 else 2331 f = chain->fence; 2332 2333 dma_fence_get(f); 2334 if (!dma_fence_add_callback(f, &h->cb, dma_fence_chain_cb)) { 2335 dma_fence_put(fence); 2336 return true; 2337 } 2338 dma_fence_put(f); 2339 } 2340 dma_fence_put(&h->base); 2341 return false; 2342 } 2343 2344 static bool 2345 dma_fence_chain_signaled(struct dma_fence *fence) 2346 { 2347 struct dma_fence_chain *chain; 2348 struct dma_fence *f; 2349 2350 dma_fence_chain_for_each(fence, fence) { 2351 chain = to_dma_fence_chain(fence); 2352 if (chain == NULL) 2353 f = fence; 2354 else 2355 f = chain->fence; 2356 2357 if (dma_fence_is_signaled(f) == false) { 2358 dma_fence_put(fence); 2359 return false; 2360 } 2361 } 2362 return true; 2363 } 2364 2365 static void 2366 dma_fence_chain_release(struct dma_fence *fence) 2367 { 2368 struct dma_fence_chain *chain = to_dma_fence_chain(fence); 2369 struct dma_fence_chain *prev_chain; 2370 struct dma_fence *prev; 2371 2372 for (prev = chain->prev; prev != NULL; prev = chain->prev) { 2373 if (kref_read(&prev->refcount) > 1) 2374 break; 2375 if ((prev_chain = to_dma_fence_chain(prev)) == NULL) 2376 break; 2377 chain->prev = prev_chain->prev; 2378 prev_chain->prev = NULL; 2379 dma_fence_put(prev); 2380 } 2381 dma_fence_put(prev); 2382 dma_fence_put(chain->fence); 2383 dma_fence_free(fence); 2384 } 2385 2386 struct dma_fence * 2387 dma_fence_chain_walk(struct dma_fence *fence) 2388 { 2389 struct dma_fence_chain *chain = to_dma_fence_chain(fence), *prev_chain; 2390 struct dma_fence *prev, *new_prev, *tmp; 2391 2392 if (chain == NULL) { 2393 dma_fence_put(fence); 2394 return NULL; 2395 } 2396 2397 while ((prev = dma_fence_get(chain->prev)) != NULL) { 2398 prev_chain = to_dma_fence_chain(prev); 2399 if (prev_chain != NULL) { 2400 if (!dma_fence_is_signaled(prev_chain->fence)) 2401 break; 2402 new_prev = dma_fence_get(prev_chain->prev); 2403 } else { 2404 if (!dma_fence_is_signaled(prev)) 2405 break; 2406 new_prev = NULL; 2407 } 2408 tmp = atomic_cas_ptr(&chain->prev, prev, new_prev); 2409 dma_fence_put(tmp == prev ? 
prev : new_prev); 2410 dma_fence_put(prev); 2411 } 2412 2413 dma_fence_put(fence); 2414 return prev; 2415 } 2416 2417 const struct dma_fence_ops dma_fence_chain_ops = { 2418 .get_driver_name = dma_fence_chain_get_driver_name, 2419 .get_timeline_name = dma_fence_chain_get_timeline_name, 2420 .enable_signaling = dma_fence_chain_enable_signaling, 2421 .signaled = dma_fence_chain_signaled, 2422 .release = dma_fence_chain_release, 2423 .use_64bit_seqno = true, 2424 }; 2425 2426 bool 2427 dma_fence_is_container(struct dma_fence *fence) 2428 { 2429 return (fence->ops == &dma_fence_chain_ops) || 2430 (fence->ops == &dma_fence_array_ops); 2431 } 2432 2433 int 2434 dmabuf_read(struct file *fp, struct uio *uio, int fflags) 2435 { 2436 return (ENXIO); 2437 } 2438 2439 int 2440 dmabuf_write(struct file *fp, struct uio *uio, int fflags) 2441 { 2442 return (ENXIO); 2443 } 2444 2445 int 2446 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p) 2447 { 2448 return (ENOTTY); 2449 } 2450 2451 int 2452 dmabuf_kqfilter(struct file *fp, struct knote *kn) 2453 { 2454 return (EINVAL); 2455 } 2456 2457 int 2458 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p) 2459 { 2460 struct dma_buf *dmabuf = fp->f_data; 2461 2462 memset(st, 0, sizeof(*st)); 2463 st->st_size = dmabuf->size; 2464 st->st_mode = S_IFIFO; /* XXX */ 2465 return (0); 2466 } 2467 2468 int 2469 dmabuf_close(struct file *fp, struct proc *p) 2470 { 2471 struct dma_buf *dmabuf = fp->f_data; 2472 2473 fp->f_data = NULL; 2474 KERNEL_LOCK(); 2475 dmabuf->ops->release(dmabuf); 2476 KERNEL_UNLOCK(); 2477 free(dmabuf, M_DRM, sizeof(struct dma_buf)); 2478 return (0); 2479 } 2480 2481 int 2482 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p) 2483 { 2484 struct dma_buf *dmabuf = fp->f_data; 2485 off_t newoff; 2486 2487 if (*offset != 0) 2488 return (EINVAL); 2489 2490 switch (whence) { 2491 case SEEK_SET: 2492 newoff = 0; 2493 break; 2494 case SEEK_END: 2495 newoff = dmabuf->size; 2496 break; 2497 default: 2498 return (EINVAL); 2499 } 2500 mtx_enter(&fp->f_mtx); 2501 fp->f_offset = newoff; 2502 mtx_leave(&fp->f_mtx); 2503 *offset = newoff; 2504 return (0); 2505 } 2506 2507 const struct fileops dmabufops = { 2508 .fo_read = dmabuf_read, 2509 .fo_write = dmabuf_write, 2510 .fo_ioctl = dmabuf_ioctl, 2511 .fo_kqfilter = dmabuf_kqfilter, 2512 .fo_stat = dmabuf_stat, 2513 .fo_close = dmabuf_close, 2514 .fo_seek = dmabuf_seek, 2515 }; 2516 2517 struct dma_buf * 2518 dma_buf_export(const struct dma_buf_export_info *info) 2519 { 2520 struct proc *p = curproc; 2521 struct dma_buf *dmabuf; 2522 struct file *fp; 2523 2524 fp = fnew(p); 2525 if (fp == NULL) 2526 return ERR_PTR(-ENFILE); 2527 fp->f_type = DTYPE_DMABUF; 2528 fp->f_ops = &dmabufops; 2529 dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO); 2530 dmabuf->priv = info->priv; 2531 dmabuf->ops = info->ops; 2532 dmabuf->size = info->size; 2533 dmabuf->file = fp; 2534 fp->f_data = dmabuf; 2535 INIT_LIST_HEAD(&dmabuf->attachments); 2536 return dmabuf; 2537 } 2538 2539 struct dma_buf * 2540 dma_buf_get(int fd) 2541 { 2542 struct proc *p = curproc; 2543 struct filedesc *fdp = p->p_fd; 2544 struct file *fp; 2545 2546 if ((fp = fd_getfile(fdp, fd)) == NULL) 2547 return ERR_PTR(-EBADF); 2548 2549 if (fp->f_type != DTYPE_DMABUF) { 2550 FRELE(fp, p); 2551 return ERR_PTR(-EINVAL); 2552 } 2553 2554 return fp->f_data; 2555 } 2556 2557 void 2558 dma_buf_put(struct dma_buf *dmabuf) 2559 { 2560 KASSERT(dmabuf); 2561 KASSERT(dmabuf->file); 2562 2563 FRELE(dmabuf->file, 
enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc;
	pcitag_t tag;
	int pos;
	pcireg_t xcap, lnkcap = 0, lnkcap2 = 0;
	pcireg_t id;
	enum pci_bus_speed cap = PCI_SPEED_UNKNOWN;
	int bus, device, function;

	if (pdev == NULL)
		return PCI_SPEED_UNKNOWN;

	pc = pdev->pc;
	tag = pdev->tag;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return PCI_SPEED_UNKNOWN;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* we've been informed VIA and ServerWorks don't make the cut */
	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
		return PCI_SPEED_UNKNOWN;

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);

	lnkcap &= 0x0f;
	lnkcap2 &= 0xfe;

	if (lnkcap2) { /* PCIE GEN 3.0 */
		if (lnkcap2 & 0x02)
			cap = PCIE_SPEED_2_5GT;
		if (lnkcap2 & 0x04)
			cap = PCIE_SPEED_5_0GT;
		if (lnkcap2 & 0x08)
			cap = PCIE_SPEED_8_0GT;
		if (lnkcap2 & 0x10)
			cap = PCIE_SPEED_16_0GT;
		if (lnkcap2 & 0x20)
			cap = PCIE_SPEED_32_0GT;
		if (lnkcap2 & 0x40)
			cap = PCIE_SPEED_64_0GT;
	} else {
		if (lnkcap & 0x01)
			cap = PCIE_SPEED_2_5GT;
		if (lnkcap & 0x02)
			cap = PCIE_SPEED_5_0GT;
	}

	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
	    lnkcap2);
	return cap;
}

enum pcie_link_width
pcie_get_width_cap(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc = pdev->pc;
	pcitag_t tag = pdev->tag;
	int pos;
	pcireg_t lnkcap = 0;
	pcireg_t id;
	int bus, device, function;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return PCIE_LNK_WIDTH_UNKNOWN;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	pci_decompose_tag(pc, tag, &bus, &device, &function);

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);

	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);

	if (lnkcap)
		return (lnkcap & 0x3f0) >> 4;
	return PCIE_LNK_WIDTH_UNKNOWN;
}

bool
pcie_aspm_enabled(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc = pdev->pc;
	pcitag_t tag = pdev->tag;
	int pos;
	pcireg_t lcsr;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return false;

	lcsr = pci_conf_read(pc, tag, pos + PCI_PCIE_LCSR);
	if ((lcsr & (PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1)) != 0)
		return true;

	return false;
}
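/*
 * Hedged usage note (assumption, not taken from this file): DRM
 * drivers typically combine the two probes above,
 *
 *	speed = pcie_get_speed_cap(pdev);
 *	width = pcie_get_width_cap(pdev);
 *
 * and fall back to conservative defaults when either returns its
 * *_UNKNOWN sentinel.  The decoding follows the PCIe register layout:
 * the Link Capabilities 2 supported-speeds bits when the capability
 * version is >= 2, otherwise the legacy Max Link Speed field in Link
 * Capabilities, and bits 9:4 of Link Capabilities for the width.
 */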
static wait_queue_head_t bit_waitq;
wait_queue_head_t var_waitq;
struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);

int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
	int err;

	if (!test_bit(bit, word))
		return 0;

	mtx_enter(&wait_bit_mtx);
	while (test_bit(bit, word)) {
		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
		    INFSLP);
		if (err) {
			mtx_leave(&wait_bit_mtx);
			return 1;
		}
	}
	mtx_leave(&wait_bit_mtx);
	return 0;
}

int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
{
	int err;

	if (!test_bit(bit, word))
		return 0;

	mtx_enter(&wait_bit_mtx);
	while (test_bit(bit, word)) {
		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
		if (err) {
			mtx_leave(&wait_bit_mtx);
			return 1;
		}
	}
	mtx_leave(&wait_bit_mtx);
	return 0;
}

void
wake_up_bit(void *word, int bit)
{
	mtx_enter(&wait_bit_mtx);
	wakeup(word);
	mtx_leave(&wait_bit_mtx);
}

void
clear_and_wake_up_bit(int bit, void *word)
{
	clear_bit(bit, word);
	wake_up_bit(word, bit);
}

wait_queue_head_t *
bit_waitqueue(void *word, int bit)
{
	/* XXX hash table of wait queues? */
	return &bit_waitq;
}

wait_queue_head_t *
__var_waitqueue(void *p)
{
	/* XXX hash table of wait queues? */
	return &bit_waitq;
}
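/*
 * Illustrative sketch only (not compiled in): the usual pairing of the
 * bit-wait helpers above.  One context sleeps until a flag bit clears,
 * another clears the bit and issues the wakeup.  In this shim the mode
 * argument is OR'ed into the msleep(9) priority, so passing PCATCH
 * makes the wait interruptible.  "MY_BUSY_BIT" and "flags" are
 * placeholder names, not part of this file.
 */
#if 0
	/* waiter: wait_on_bit() returns nonzero if the sleep was interrupted */
	if (wait_on_bit(&flags, MY_BUSY_BIT, PCATCH))
		return -EINTR;

	/* waker: clear the bit, then wake anyone sleeping on the word */
	clear_and_wake_up_bit(MY_BUSY_BIT, &flags);
#endif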
struct workqueue_struct *system_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_long_wq;
struct taskq *taskletq;

void
drm_linux_init(void)
{
	system_wq = (struct workqueue_struct *)
	    taskq_create("drmwq", 4, IPL_HIGH, 0);
	system_highpri_wq = (struct workqueue_struct *)
	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
	system_unbound_wq = (struct workqueue_struct *)
	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
	system_long_wq = (struct workqueue_struct *)
	    taskq_create("drmlwq", 4, IPL_HIGH, 0);

	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);

	init_waitqueue_head(&bit_waitq);
	init_waitqueue_head(&var_waitq);

	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
	    "idrpl", NULL);

	kmap_atomic_va =
	    (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
}

void
drm_linux_exit(void)
{
	pool_destroy(&idr_pool);

	taskq_destroy(taskletq);

	taskq_destroy((struct taskq *)system_long_wq);
	taskq_destroy((struct taskq *)system_unbound_wq);
	taskq_destroy((struct taskq *)system_highpri_wq);
	taskq_destroy((struct taskq *)system_wq);
}

#define PCIE_ECAP_RESIZE_BAR	0x15
#define RBCAP0			0x04
#define RBCTRL0			0x08
#define RBCTRL_BARINDEX_MASK	0x07
#define RBCTRL_BARSIZE_MASK	0x1f00
#define RBCTRL_BARSIZE_SHIFT	8

/* size in MB is 1 << nsize */
int
pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
{
	pcireg_t reg;
	uint32_t offset, capid;

	KASSERT(bar == 0);

	offset = PCI_PCIE_ECAP;

	/* search PCI Express Extended Capabilities */
	do {
		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
		capid = PCI_PCIE_ECAP_ID(reg);
		if (capid == PCIE_ECAP_RESIZE_BAR)
			break;
		offset = PCI_PCIE_ECAP_NEXT(reg);
	} while (capid != 0);

	if (capid == 0) {
		printf("%s: could not find resize bar cap!\n", __func__);
		return -ENOTSUP;
	}

	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);

	if ((reg & (1 << (nsize + 4))) == 0) {
		printf("%s size not supported\n", __func__);
		return -ENOTSUP;
	}

	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
		printf("%s BAR index not 0\n", __func__);
		return -EINVAL;
	}

	reg &= ~RBCTRL_BARSIZE_MASK;
	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;

	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);

	return 0;
}
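/*
 * Worked example for the helper above: since the BAR size in MB is
 * 1 << nsize, a driver asking for a 256 MB BAR0 passes nsize = 8
 * (1 << 8 == 256), and the call fails with -ENOTSUP unless bit
 * (nsize + 4) is set in the Resizable BAR capability register.
 */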
TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);

int
register_shrinker(struct shrinker *shrinker, const char *format, ...)
{
	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
	return 0;
}

void
unregister_shrinker(struct shrinker *shrinker)
{
	TAILQ_REMOVE(&shrinkers, shrinker, next);
}

void
drmbackoff(long npages)
{
	struct shrink_control sc;
	struct shrinker *shrinker;
	u_long ret;

	shrinker = TAILQ_FIRST(&shrinkers);
	while (shrinker && npages > 0) {
		sc.nr_to_scan = npages;
		ret = shrinker->scan_objects(shrinker, &sc);
		npages -= ret;
		shrinker = TAILQ_NEXT(shrinker, next);
	}
}

void *
bitmap_zalloc(u_int n, gfp_t flags)
{
	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
}

void
bitmap_free(void *p)
{
	kfree(p);
}

int
atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
{
	if (atomic_add_unless(v, -1, 1))
		return 0;

	rw_enter_write(lock);
	if (atomic_dec_return(v) == 0)
		return 1;
	rw_exit_write(lock);
	return 0;
}

int
printk(const char *fmt, ...)
{
	int ret, level;
	va_list ap;

	if (fmt != NULL && *fmt == '\001') {
		level = fmt[1];
#ifndef DRMDEBUG
		if (level >= KERN_INFO[1] && level <= '9')
			return 0;
#endif
		fmt += 2;
	}

	va_start(ap, fmt);
	ret = vprintf(fmt, ap);
	va_end(ap);

	return ret;
}

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

struct interval_tree_node *
interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
    unsigned long last)
{
	struct interval_tree_node *node;
	struct rb_node *rb;

	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
		node = rb_entry(rb, typeof(*node), rb);
		if (LAST(node) >= start && START(node) <= last)
			return node;
	}
	return NULL;
}

void
interval_tree_remove(struct interval_tree_node *node,
    struct rb_root_cached *root)
{
	rb_erase_cached(&node->rb, root);
}

void
interval_tree_insert(struct interval_tree_node *node,
    struct rb_root_cached *root)
{
	struct rb_node **iter = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct interval_tree_node *iter_node;

	while (*iter) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct interval_tree_node, rb);

		if (node->start < iter_node->start)
			iter = &(*iter)->rb_left;
		else
			iter = &(*iter)->rb_right;
	}

	rb_link_node(&node->rb, parent, iter);
	rb_insert_color_cached(&node->rb, root, false);
}

int
syncfile_read(struct file *fp, struct uio *uio, int fflags)
{
	return ENXIO;
}

int
syncfile_write(struct file *fp, struct uio *uio, int fflags)
{
	return ENXIO;
}

int
syncfile_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return ENOTTY;
}

int
syncfile_kqfilter(struct file *fp, struct knote *kn)
{
	return EINVAL;
}

int
syncfile_stat(struct file *fp, struct stat *st, struct proc *p)
{
	memset(st, 0, sizeof(*st));
	st->st_mode = S_IFIFO;	/* XXX */
	return 0;
}

int
syncfile_close(struct file *fp, struct proc *p)
{
	struct sync_file *sf = fp->f_data;

	dma_fence_put(sf->fence);
	fp->f_data = NULL;
	free(sf, M_DRM, sizeof(struct sync_file));
	return 0;
}

int
syncfile_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
{
	off_t newoff;

	if (*offset != 0)
		return EINVAL;

	switch (whence) {
	case SEEK_SET:
		newoff = 0;
		break;
	case SEEK_END:
		newoff = 0;
		break;
	default:
		return EINVAL;
	}
	mtx_enter(&fp->f_mtx);
	fp->f_offset = newoff;
	mtx_leave(&fp->f_mtx);
	*offset = newoff;
	return 0;
}

const struct fileops syncfileops = {
	.fo_read = syncfile_read,
	.fo_write = syncfile_write,
	.fo_ioctl = syncfile_ioctl,
	.fo_kqfilter = syncfile_kqfilter,
	.fo_stat = syncfile_stat,
	.fo_close = syncfile_close,
	.fo_seek = syncfile_seek,
};
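/*
 * Note on the Linux-style fd helpers below: fd_install() and fput()
 * deliberately ignore anything that is not DTYPE_SYNC, and
 * get_unused_fd_flags()/put_unused_fd() reserve and release a
 * descriptor slot; in practice they are used around
 * sync_file_create(), as sketched at the end of this file.
 */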
void
fd_install(int fd, struct file *fp)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;

	if (fp->f_type != DTYPE_SYNC)
		return;

	fdplock(fdp);
	/* all callers use get_unused_fd_flags(O_CLOEXEC) */
	fdinsert(fdp, fd, UF_EXCLOSE, fp);
	fdpunlock(fdp);
}

void
fput(struct file *fp)
{
	if (fp->f_type != DTYPE_SYNC)
		return;

	FRELE(fp, curproc);
}

int
get_unused_fd_flags(unsigned int flags)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	int error, fd;

	KASSERT((flags & O_CLOEXEC) != 0);

	fdplock(fdp);
retryalloc:
	if ((error = fdalloc(p, 0, &fd)) != 0) {
		if (error == ENOSPC) {
			fdexpand(p);
			goto retryalloc;
		}
		fdpunlock(fdp);
		return -1;
	}
	fdpunlock(fdp);

	return fd;
}

void
put_unused_fd(int fd)
{
	struct filedesc *fdp = curproc->p_fd;

	fdplock(fdp);
	fdremove(fdp, fd);
	fdpunlock(fdp);
}

struct dma_fence *
sync_file_get_fence(int fd)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct sync_file *sf;
	struct dma_fence *f;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return NULL;

	if (fp->f_type != DTYPE_SYNC) {
		FRELE(fp, p);
		return NULL;
	}
	sf = fp->f_data;
	f = dma_fence_get(sf->fence);
	FRELE(sf->file, p);
	return f;
}

struct sync_file *
sync_file_create(struct dma_fence *fence)
{
	struct proc *p = curproc;
	struct sync_file *sf;
	struct file *fp;

	fp = fnew(p);
	if (fp == NULL)
		return NULL;
	fp->f_type = DTYPE_SYNC;
	fp->f_ops = &syncfileops;
	sf = malloc(sizeof(struct sync_file), M_DRM, M_WAITOK | M_ZERO);
	sf->file = fp;
	sf->fence = dma_fence_get(fence);
	fp->f_data = sf;
	return sf;
}

bool
drm_firmware_drivers_only(void)
{
	return false;
}
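/*
 * Illustrative sketch only (not compiled in): the export/import flow
 * the sync_file helpers above are built for.  A fence is wrapped in a
 * sync_file and published to userland as a close-on-exec descriptor;
 * an imported descriptor is turned back into a fence reference.
 * "fence" and "fd" are placeholders, not part of this file.
 */
#if 0
	/* export: wrap a fence and publish it as a file descriptor */
	int fd = get_unused_fd_flags(O_CLOEXEC);
	struct sync_file *sf = sync_file_create(fence);
	if (sf == NULL) {
		put_unused_fd(fd);
		return -ENOMEM;
	}
	fd_install(fd, sf->file);

	/* import: take a reference to the fence behind a descriptor */
	struct dma_fence *in = sync_file_get_fence(fd);
	if (in != NULL)
		dma_fence_put(in);
#endif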