/*	$OpenBSD: drm_linux.c,v 1.74 2020/12/31 06:31:55 jsg Exp $	*/
/*
 * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
 * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/event.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/fcntl.h>

#include <dev/pci/ppbreg.h>

#include <linux/dma-buf.h>
#include <linux/mod_devicetable.h>
#include <linux/acpi.h>
#include <linux/pagevec.h>
#include <linux/dma-fence-array.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/scatterlist.h>
#include <linux/i2c.h>
#include <linux/pci.h>
#include <linux/notifier.h>
#include <linux/backlight.h>
#include <linux/shrinker.h>
#include <linux/fb.h>
#include <linux/xarray.h>
#include <linux/interval_tree.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>

#if defined(__amd64__) || defined(__i386__)
#include "bios.h"
#endif

void
tasklet_run(void *arg)
{
	struct tasklet_struct *ts = arg;

	clear_bit(TASKLET_STATE_SCHED, &ts->state);
	if (tasklet_trylock(ts)) {
		if (!atomic_read(&ts->count))
			ts->func(ts->data);
		tasklet_unlock(ts);
	}
}

/* 32 bit powerpc lacks 64 bit atomics */
#if defined(__powerpc__) && !defined(__powerpc64__)
struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
#endif

struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
volatile struct proc *sch_proc;
volatile void *sch_ident;
int sch_priority;

void
set_current_state(int state)
{
	if (sch_ident != curproc)
		mtx_enter(&sch_mtx);
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	sch_ident = sch_proc = curproc;
	sch_priority = state;
}

void
__set_current_state(int state)
{
	KASSERT(state == TASK_RUNNING);
	if (sch_ident == curproc) {
		MUTEX_ASSERT_LOCKED(&sch_mtx);
		sch_ident = NULL;
		mtx_leave(&sch_mtx);
	}
}

void
schedule(void)
{
	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

long
schedule_timeout(long timeout)
{
	struct sleep_state sls;
	unsigned long deadline;
	int wait, spl;

	MUTEX_ASSERT_LOCKED(&sch_mtx);
	KASSERT(!cold);

	sleep_setup(&sls, sch_ident, sch_priority, "schto");
	if (timeout != MAX_SCHEDULE_TIMEOUT)
		sleep_setup_timeout(&sls, timeout);

	wait = (sch_proc == curproc && timeout > 0);

	spl = MUTEX_OLDIPL(&sch_mtx);
	MUTEX_OLDIPL(&sch_mtx) = splsched();
	mtx_leave(&sch_mtx);

	sleep_setup_signal(&sls);

	if (timeout != MAX_SCHEDULE_TIMEOUT)
		deadline = jiffies + timeout;
	sleep_finish_all(&sls, wait);
	if (timeout != MAX_SCHEDULE_TIMEOUT)
		timeout = deadline - jiffies;

	mtx_enter(&sch_mtx);
	MUTEX_OLDIPL(&sch_mtx) = spl;
	sch_ident = curproc;

	return timeout > 0 ? timeout : 0;
}

long
schedule_timeout_uninterruptible(long timeout)
{
	tsleep(curproc, PWAIT, "schtou", timeout);
	return 0;
}

int
wake_up_process(struct proc *p)
{
	atomic_cas_ptr(&sch_proc, p, NULL);
	return wakeup_proc(p, NULL);
}

void
flush_workqueue(struct workqueue_struct *wq)
{
	if (cold)
		return;

	taskq_barrier((struct taskq *)wq);
}

bool
flush_work(struct work_struct *work)
{
	if (cold)
		return false;

	taskq_barrier(work->tq);
	return false;
}

bool
flush_delayed_work(struct delayed_work *dwork)
{
	bool ret = false;

	if (cold)
		return false;

	while (timeout_pending(&dwork->to)) {
		tsleep(dwork, PWAIT, "fldwto", 1);
		ret = true;
	}

	taskq_barrier(dwork->tq ? dwork->tq : (struct taskq *)system_wq);
	return ret;
}

struct kthread {
	int (*func)(void *);
	void *data;
	struct proc *proc;
	volatile u_int flags;
#define KTHREAD_SHOULDSTOP	0x0000001
#define KTHREAD_STOPPED		0x0000002
#define KTHREAD_SHOULDPARK	0x0000004
#define KTHREAD_PARKED		0x0000008
	LIST_ENTRY(kthread) next;
};

LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);

void
kthread_func(void *arg)
{
	struct kthread *thread = arg;
	int ret;

	ret = thread->func(thread->data);
	thread->flags |= KTHREAD_STOPPED;
	wakeup(thread);
	kthread_exit(ret);
}

struct proc *
kthread_run(int (*func)(void *), void *data, const char *name)
{
	struct kthread *thread;

	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
	thread->func = func;
	thread->data = data;
	thread->flags = 0;

	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
		free(thread, M_DRM, sizeof(*thread));
		return ERR_PTR(-ENOMEM);
	}

	LIST_INSERT_HEAD(&kthread_list, thread, next);
	return thread->proc;
}

struct kthread *
kthread_lookup(struct proc *p)
{
	struct kthread *thread;

	LIST_FOREACH(thread, &kthread_list, next) {
		if (thread->proc == p)
			break;
	}
	KASSERT(thread);

	return thread;
}

int
kthread_should_park(void)
{
	struct kthread *thread = kthread_lookup(curproc);
	return (thread->flags & KTHREAD_SHOULDPARK);
}

void
kthread_parkme(void)
{
	struct kthread *thread = kthread_lookup(curproc);

	while (thread->flags & KTHREAD_SHOULDPARK) {
		thread->flags |= KTHREAD_PARKED;
		wakeup(thread);
		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
		thread->flags &= ~KTHREAD_PARKED;
	}
}

void
kthread_park(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	while ((thread->flags & KTHREAD_PARKED) == 0) {
		thread->flags |= KTHREAD_SHOULDPARK;
		wake_up_process(thread->proc);
		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
	}
}

void
kthread_unpark(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	thread->flags &= ~KTHREAD_SHOULDPARK;
	wakeup(thread);
}

int
kthread_should_stop(void)
{
	struct kthread *thread = kthread_lookup(curproc);
	return (thread->flags & KTHREAD_SHOULDSTOP);
}

void
kthread_stop(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	while ((thread->flags & KTHREAD_STOPPED) == 0) {
		thread->flags |= KTHREAD_SHOULDSTOP;
		kthread_unpark(p);
		wake_up_process(thread->proc);
		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
	}
	LIST_REMOVE(thread, next);
	free(thread, M_DRM, sizeof(*thread));
}

#if NBIOS > 0
extern char smbios_board_vendor[];
extern char smbios_board_prod[];
extern char smbios_board_serial[];
#endif

bool
dmi_match(int slot, const char *str)
{
	switch (slot) {
	case DMI_SYS_VENDOR:
		if (hw_vendor != NULL &&
		    !strcmp(hw_vendor, str))
			return true;
		break;
	case DMI_PRODUCT_NAME:
		if (hw_prod != NULL &&
		    !strcmp(hw_prod, str))
			return true;
		break;
	case DMI_PRODUCT_VERSION:
		if (hw_ver != NULL &&
		    !strcmp(hw_ver, str))
			return true;
		break;
#if NBIOS > 0
	case DMI_BOARD_VENDOR:
		if (strcmp(smbios_board_vendor, str) == 0)
			return true;
		break;
	case DMI_BOARD_NAME:
		if (strcmp(smbios_board_prod, str) == 0)
			return true;
		break;
	case DMI_BOARD_SERIAL:
		if (strcmp(smbios_board_serial, str) == 0)
			return true;
		break;
#else
	case DMI_BOARD_VENDOR:
		if (hw_vendor != NULL &&
		    !strcmp(hw_vendor, str))
			return true;
		break;
	case DMI_BOARD_NAME:
		if (hw_prod != NULL &&
		    !strcmp(hw_prod, str))
			return true;
		break;
#endif
	case DMI_NONE:
	default:
		return false;
	}

	return false;
}

static bool
dmi_found(const struct dmi_system_id *dsi)
{
	int i, slot;

	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		if (slot == DMI_NONE)
			break;
		if (!dmi_match(slot, dsi->matches[i].substr))
			return false;
	}

	return true;
}

const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;

	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
		if (dmi_found(dsi))
			return dsi;
	}

	return NULL;
}

#if NBIOS > 0
extern char smbios_bios_date[];
#endif

const char *
dmi_get_system_info(int slot)
{
	WARN_ON(slot != DMI_BIOS_DATE);
#if NBIOS > 0
	if (slot == DMI_BIOS_DATE)
		return smbios_bios_date;
#endif
	return NULL;
}

int
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	int num = 0;

	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
		if (dmi_found(dsi)) {
			num++;
			if (dsi->callback && dsi->callback(dsi))
				break;
		}
	}
	return (num);
}

struct vm_page *
alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	struct uvm_constraint_range *constraint = &no_constraint;
	struct pglist mlist;

	if (gfp_mask & M_CANFAIL)
		flags |= UVM_PLA_FAILOK;
	if (gfp_mask & M_ZERO)
		flags |= UVM_PLA_ZERO;
	if (gfp_mask & __GFP_DMA32)
		constraint = &dma_constraint;

	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
		return NULL;
	return TAILQ_FIRST(&mlist);
}

void
__free_pages(struct vm_page *page, unsigned int order)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < (1 << order); i++)
		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
	uvm_pglistfree(&mlist);
}

void
__pagevec_release(struct pagevec *pvec)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < pvec->nr; i++)
		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
	uvm_pglistfree(&mlist);
	pagevec_reinit(pvec);
}

void *
kmap(struct vm_page *pg)
{
	vaddr_t va;

#if defined (__HAVE_PMAP_DIRECT)
	va = pmap_map_direct(pg);
#else
	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
#endif
	return (void *)va;
}

void
kunmap_va(void *addr)
{
	vaddr_t va = (vaddr_t)addr;

#if defined (__HAVE_PMAP_DIRECT)
	pmap_unmap_direct(va);
#else
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
#endif
}

void *
vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
    pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
	if (va == 0)
		return NULL;
	for (i = 0; i < npages; i++) {
		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

void
vunmap(void *addr, size_t size)
{
	vaddr_t va = (vaddr_t)addr;

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size);
}

void
print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
    int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
{
	const uint8_t *cbuf = buf;
	int i;

	for (i = 0; i < len; i++) {
		if ((i % rowsize) == 0)
			printf("%s", prefix_str);
		printf("%02x", cbuf[i]);
		if ((i % rowsize) == (rowsize - 1))
			printf("\n");
		else
			printf(" ");
	}
}

void *
memchr_inv(const void *s, int c, size_t n)
{
	if (n != 0) {
		const unsigned char *p = s;

		do {
			if (*p++ != (unsigned char)c)
				return ((void *)(p - 1));
		} while (--n != 0);
	}
	return (NULL);
}

int
panic_cmp(struct rb_node *a, struct rb_node *b)
{
	panic(__func__);
}

#undef RB_ROOT
#define RB_ROOT(head)	(head)->rbh_root

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

/*
 * This is a fairly minimal implementation of the Linux "idr" API.  It
 * probably isn't very efficient, and definitely isn't RCU safe.  The
 * pre-load buffer is global instead of per-cpu; we rely on the kernel
 * lock to make this work.  We do randomize our IDs in order to make
 * them harder to guess.
 */

int idr_cmp(struct idr_entry *, struct idr_entry *);
SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);

struct pool idr_pool;
struct idr_entry *idr_entry_cache;

void
idr_init(struct idr *idr)
{
	SPLAY_INIT(&idr->tree);
}

void
idr_destroy(struct idr *idr)
{
	struct idr_entry *id;

	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
		SPLAY_REMOVE(idr_tree, &idr->tree, id);
		pool_put(&idr_pool, id);
	}
}

void
idr_preload(unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache == NULL)
		idr_entry_cache = pool_get(&idr_pool, flags);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	struct idr_entry *id;
	int begin;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache) {
		id = idr_entry_cache;
		idr_entry_cache = NULL;
	} else {
		id = pool_get(&idr_pool, flags);
		if (id == NULL)
			return -ENOMEM;
	}

	if (end <= 0)
		end = INT_MAX;

#ifdef notyet
	id->id = begin = start + arc4random_uniform(end - start);
#else
	id->id = begin = start;
#endif
	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
		if (id->id == end)
			id->id = start;
		else
			id->id++;
		if (id->id == begin) {
			pool_put(&idr_pool, id);
			return -ENOSPC;
		}
	}
	id->ptr = ptr;
	return id->id;
}

void *
idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
	struct idr_entry find, *res;
	void *old;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return ERR_PTR(-ENOENT);
	old = res->ptr;
	res->ptr = ptr;
	return old;
}

void *
idr_remove(struct idr *idr, unsigned long id)
{
	struct idr_entry find, *res;
	void *ptr = NULL;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res) {
		SPLAY_REMOVE(idr_tree, &idr->tree, res);
		ptr = res->ptr;
		pool_put(&idr_pool, res);
	}
	return ptr;
}

void *
idr_find(struct idr *idr, unsigned long id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return NULL;
	return res->ptr;
}

void *
idr_get_next(struct idr *idr, int *id)
{
	struct idr_entry *res;

	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
		if (res->id >= *id) {
			*id = res->id;
			return res->ptr;
		}
	}

	return NULL;
}

int
idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
{
	struct idr_entry *id;
	int ret;

	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
		ret = func(id->id, id->ptr, data);
		if (ret)
			return ret;
	}

	return 0;
}

int
idr_cmp(struct idr_entry *a, struct idr_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}
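
/*
 * Usage sketch (illustration only, not referenced by any driver here):
 * how DRM code typically drives this idr shim.  The "obj" pointer and
 * the handle range below are assumptions made up for the example; the
 * return conventions match idr_alloc()/idr_find()/idr_remove() above.
 * Callers are expected to hold the kernel lock, as idr_alloc() asserts.
 *
 *	struct idr handles;
 *	int handle;
 *
 *	idr_init(&handles);
 *	handle = idr_alloc(&handles, obj, 1, 0, GFP_KERNEL);
 *	if (handle < 0)
 *		return handle;		// -ENOMEM or -ENOSPC
 *	KASSERT(idr_find(&handles, handle) == obj);
 *	idr_remove(&handles, handle);
 *	idr_destroy(&handles);
 */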

SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);

void
ida_init(struct ida *ida)
{
	idr_init(&ida->idr);
}

void
ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t gfp_mask)
{
	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
}

void
ida_simple_remove(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

int
xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
struct pool xa_pool;
SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);

void
xa_init_flags(struct xarray *xa, gfp_t flags)
{
	static int initialized;

	if (!initialized) {
		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_TTY, 0,
		    "xapl", NULL);
		initialized = 1;
	}
	SPLAY_INIT(&xa->xa_tree);
}

void
xa_destroy(struct xarray *xa)
{
	struct xarray_entry *id;

	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
		pool_put(&xa_pool, id);
	}
}

int
xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
{
	struct xarray_entry *xid;
	int flags = (gfp & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
	int begin;

	xid = pool_get(&xa_pool, flags);
	if (xid == NULL)
		return -ENOMEM;

	if (limit <= 0)
		limit = INT_MAX;

	xid->id = begin = start;

	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
		if (xid->id == limit)
			xid->id = start;
		else
			xid->id++;
		if (xid->id == begin) {
			pool_put(&xa_pool, xid);
			return -EBUSY;
		}
	}
	xid->ptr = entry;
	*id = xid->id;
	return 0;
}

void *
xa_erase(struct xarray *xa, unsigned long index)
{
	struct xarray_entry find, *res;
	void *ptr = NULL;

	find.id = index;
	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
	if (res) {
		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
		ptr = res->ptr;
		pool_put(&xa_pool, res);
	}
	return ptr;
}

void *
xa_load(struct xarray *xa, unsigned long index)
{
	struct xarray_entry find, *res;

	find.id = index;
	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
	if (res == NULL)
		return NULL;
	return res->ptr;
}

void *
xa_get_next(struct xarray *xa, unsigned long *index)
{
	struct xarray_entry *res;

	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
		if (res->id >= *index) {
			*index = res->id;
			return res->ptr;
		}
	}

	return NULL;
}

int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
	    M_DRM, gfp_mask);
	if (table->sgl == NULL)
		return -ENOMEM;
	table->nents = table->orig_nents = nents;
	return 0;
}

void
sg_free_table(struct sg_table *table)
{
	free(table->sgl, M_DRM,
	    table->orig_nents * sizeof(struct scatterlist));
	table->sgl = NULL;
}

size_t
sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen)
{
	panic("%s", __func__);
}

int
i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	void *cmd = NULL;
	int cmdlen = 0;
	int err, ret = 0;
	int op;

	iic_acquire_bus(&adap->ic, 0);

	while (num > 2) {
		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
		    msgs->buf, msgs->len, 0);
		if (err) {
			ret = -err;
			goto fail;
		}
		msgs++;
		num--;
		ret++;
	}

	if (num > 1) {
		cmd = msgs->buf;
		cmdlen = msgs->len;
		msgs++;
		num--;
		ret++;
	}

	op = (msgs->flags & I2C_M_RD) ?
	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
	    msgs->buf, msgs->len, 0);
	if (err) {
		ret = -err;
		goto fail;
	}
	msgs++;
	ret++;

fail:
	iic_release_bus(&adap->ic, 0);

	return ret;
}

int
i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	int ret;

	if (adap->lock_ops)
		adap->lock_ops->lock_bus(adap, 0);

	if (adap->algo)
		ret = adap->algo->master_xfer(adap, msgs, num);
	else
		ret = i2c_master_xfer(adap, msgs, num);

	if (adap->lock_ops)
		adap->lock_ops->unlock_bus(adap, 0);

	return ret;
}

int
i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct i2c_algo_bit_data *algo = adap->algo_data;
	struct i2c_adapter bb;

	memset(&bb, 0, sizeof(bb));
	bb.ic = algo->ic;
	bb.retries = adap->retries;
	return i2c_master_xfer(&bb, msgs, num);
}

uint32_t
i2c_bb_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

struct i2c_algorithm i2c_bit_algo = {
	.master_xfer = i2c_bb_master_xfer,
	.functionality = i2c_bb_functionality
};

int
i2c_bit_add_bus(struct i2c_adapter *adap)
{
	adap->algo = &i2c_bit_algo;
	adap->retries = 3;

	return 0;
}

#if defined(__amd64__) || defined(__i386__)

/*
 * This is a minimal implementation of the Linux vga_get/vga_put
 * interface.  In all likelihood, it will only work for inteldrm(4) as
 * it assumes that if there is another active VGA device in the
 * system, it is sitting behind a PCI bridge.
 */
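
/*
 * Usage sketch (illustration only): the expected call pattern around
 * legacy VGA access, matching the Linux vgaarb API this emulates.  The
 * VGA_RSRC_LEGACY_IO flag is the usual Linux argument; this shim accepts
 * the rsrc parameter purely for API compatibility and does not use it.
 *
 *	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 *	// ... touch legacy VGA I/O or memory ranges ...
 *	vga_put(pdev, VGA_RSRC_LEGACY_IO);
 *
 * vga_get_uninterruptible() walks the host bus and clears
 * PPB_BC_VGA_ENABLE on the first bridge found with it set;
 * vga_put() sets it again.
 */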

extern int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);

pcitag_t vga_bridge_tag;
int vga_bridge_disabled;

int
vga_disable_bridge(struct pci_attach_args *pa)
{
	pcireg_t bhlc, bc;

	if (pa->pa_domain != 0)
		return 0;

	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return 0;

	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
	if ((bc & PPB_BC_VGA_ENABLE) == 0)
		return 0;
	bc &= ~PPB_BC_VGA_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_tag = pa->pa_tag;
	vga_bridge_disabled = 1;

	return 1;
}

void
vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
{
	KASSERT(pdev->pci->sc_bridgetag == NULL);
	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
}

void
vga_put(struct pci_dev *pdev, int rsrc)
{
	pcireg_t bc;

	if (!vga_bridge_disabled)
		return;

	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
	bc |= PPB_BC_VGA_ENABLE;
	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_disabled = 0;
}

#endif

/*
 * ACPI types and interfaces.
 */

#ifdef __HAVE_ACPI
#include "acpi.h"
#endif

#if NACPI > 0

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/amltypes.h>
#include <dev/acpi/dsdt.h>

acpi_status
acpi_get_table(const char *sig, int instance,
    struct acpi_table_header **hdr)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpi_q *entry;

	KASSERT(instance == 1);

	if (sc == NULL)
		return AE_NOT_FOUND;

	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
			*hdr = entry->q_table;
			return 0;
		}
	}

	return AE_NOT_FOUND;
}

acpi_status
acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
{
	node = aml_searchname(node, name);
	if (node == NULL)
		return AE_NOT_FOUND;

	*rnode = node;
	return 0;
}

acpi_status
acpi_get_name(acpi_handle node, int type, struct acpi_buffer *buffer)
{
	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
	KASSERT(type == ACPI_FULL_PATHNAME);
	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
	return 0;
}

acpi_status
acpi_evaluate_object(acpi_handle node, const char *name,
    struct acpi_object_list *params, struct acpi_buffer *result)
{
	struct aml_value args[4], res;
	union acpi_object *obj;
	uint8_t *data;
	int i;

	KASSERT(params->count <= nitems(args));

	for (i = 0; i < params->count; i++) {
		args[i].type = params->pointer[i].type;
		switch (args[i].type) {
		case AML_OBJTYPE_INTEGER:
			args[i].v_integer = params->pointer[i].integer.value;
			break;
		case AML_OBJTYPE_BUFFER:
			args[i].length = params->pointer[i].buffer.length;
			args[i].v_buffer = params->pointer[i].buffer.pointer;
			break;
		default:
			printf("%s: arg type 0x%02x", __func__, args[i].type);
			return AE_BAD_PARAMETER;
		}
	}

	if (name) {
		node = aml_searchname(node, name);
		if (node == NULL)
			return AE_NOT_FOUND;
	}
	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
		aml_freevalue(&res);
		return AE_ERROR;
	}

	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);

	result->length = sizeof(union acpi_object);
	switch (res.type) {
	case AML_OBJTYPE_BUFFER:
		result->length += res.length;
		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
		obj = (union acpi_object *)result->pointer;
		data = (uint8_t *)(obj + 1);
		obj->type = res.type;
		obj->buffer.length = res.length;
		obj->buffer.pointer = data;
		memcpy(data, res.v_buffer, res.length);
		break;
	default:
		printf("%s: return type 0x%02x", __func__, res.type);
		aml_freevalue(&res);
		return AE_ERROR;
	}

	aml_freevalue(&res);
	return 0;
}

SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);

int
drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
{
	struct acpi_bus_event event;
	struct notifier_block *nb;

	event.device_class = ACPI_VIDEO_CLASS;
	event.type = notify;

	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
		nb->notifier_call(nb, 0, &event);
	return 0;
}

int
register_acpi_notifier(struct notifier_block *nb)
{
	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
	return 0;
}

int
unregister_acpi_notifier(struct notifier_block *nb)
{
	struct notifier_block *tmp;

	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
		if (tmp == nb) {
			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
			    notifier_block, link);
			return 0;
		}
	}

	return -ENOENT;
}

const char *
acpi_format_exception(acpi_status status)
{
	switch (status) {
	case AE_NOT_FOUND:
		return "not found";
	case AE_BAD_PARAMETER:
		return "bad parameter";
	default:
		return "unknown";
	}
}

#endif

void
backlight_do_update_status(void *arg)
{
	backlight_update_status(arg);
}

struct backlight_device *
backlight_device_register(const char *name, void *kdev, void *data,
    const struct backlight_ops *ops, struct backlight_properties *props)
{
	struct backlight_device *bd;

	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
	bd->ops = ops;
	bd->props = *props;
	bd->data = data;

	task_set(&bd->task, backlight_do_update_status, bd);

	return bd;
}

void
backlight_device_unregister(struct backlight_device *bd)
{
	free(bd, M_DRM, sizeof(*bd));
}

void
backlight_schedule_update_status(struct backlight_device *bd)
{
	task_add(systq, &bd->task);
}

inline int
backlight_enable(struct backlight_device *bd)
{
	if (bd == NULL)
		return 0;

	bd->props.power = FB_BLANK_UNBLANK;

	return bd->ops->update_status(bd);
}

inline int
backlight_disable(struct backlight_device *bd)
{
	if (bd == NULL)
		return 0;

	bd->props.power = FB_BLANK_POWERDOWN;

	return bd->ops->update_status(bd);
}

void
drm_sysfs_hotplug_event(struct drm_device *dev)
{
	KNOTE(&dev->note, NOTE_CHANGE);
}

static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);

uint64_t
dma_fence_context_alloc(unsigned int num)
{
	return atomic64_add_return(num, &drm_fence_context_count) - num;
}

struct default_wait_cb {
	struct dma_fence_cb base;
	struct proc *proc;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
	    container_of(cb, struct default_wait_cb, base);
	wake_up_process(wait->proc);
}

long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	long ret = timeout ? timeout : 1;
	unsigned long end;
	int err;
	struct default_wait_cb cb;
	bool was_set;

	KASSERT(timeout <= INT_MAX);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return ret;

	mtx_enter(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			goto out;
		}
	}

	if (timeout == 0) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.proc = curproc;
	list_add(&cb.base.node, &fence->cb_list);

	end = jiffies + timeout;
	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			break;
		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
		    "dmafence", ret);
		if (err == EINTR || err == ERESTART) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
out:
	mtx_leave(fence->lock);

	return ret;
}

static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}

long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
    bool intr, long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	long ret = timeout;
	unsigned long end;
	int i, err;

	KASSERT(timeout <= INT_MAX);

	if (timeout == 0) {
		for (i = 0; i < count; i++) {
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}
		}
		return 0;
	}

	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
	if (cb == NULL)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct dma_fence *fence = fences[i];
		cb[i].proc = curproc;
		if (dma_fence_add_callback(fence, &cb[i].base,
		    dma_fence_default_wait_cb)) {
			if (idx)
				*idx = i;
			goto cb_cleanup;
		}
	}

	end = jiffies + timeout;
	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
		if (dma_fence_test_signaled_any(fences, count, idx))
			break;
		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
		if (err == EINTR || err == ERESTART) {
			ret = -ERESTARTSYS;
			break;
		}
	}

cb_cleanup:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);
	free(cb, M_DRM, count * sizeof(*cb));
	return ret;
}

static struct dma_fence dma_fence_stub;
static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);

static const char *
dma_fence_stub_get_name(struct dma_fence *fence)
{
	return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};

struct dma_fence *
dma_fence_get_stub(void)
{
	mtx_enter(&dma_fence_stub_mtx);
	if (dma_fence_stub.ops == NULL) {
		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
		    &dma_fence_stub_mtx, 0, 0);
		dma_fence_signal_locked(&dma_fence_stub);
	}
	mtx_leave(&dma_fence_stub_mtx);

	return dma_fence_get(&dma_fence_stub);
}

static const char *
dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}

static const char *
dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void
irq_dma_fence_array_work(struct irq_work *wrk)
{
	struct dma_fence_array *dfa = container_of(wrk, typeof(*dfa), work);

	dma_fence_signal(&dfa->base);
	dma_fence_put(&dfa->base);
}

static void
dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
	    container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *dfa = array_cb->array;

	if (atomic_dec_and_test(&dfa->num_pending))
		irq_work_queue(&dfa->work);
	else
		dma_fence_put(&dfa->base);
}

static bool
dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);
	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
	int i;

	for (i = 0; i < dfa->num_fences; ++i) {
		cb[i].array = dfa;
		dma_fence_get(&dfa->base);
		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
		    dma_fence_array_cb_func)) {
			dma_fence_put(&dfa->base);
			if (atomic_dec_and_test(&dfa->num_pending))
				return false;
		}
	}

	return true;
}

static bool dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);

	return atomic_read(&dfa->num_pending) <= 0;
}

static void dma_fence_array_release(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);
	int i;

	for (i = 0; i < dfa->num_fences; ++i)
		dma_fence_put(dfa->fences[i]);

	free(dfa->fences, M_DRM, 0);
	dma_fence_free(fence);
}

struct dma_fence_array *
dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
    unsigned seqno, bool signal_on_any)
{
	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
	    (num_fences * sizeof(struct dma_fence_array_cb)),
	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
	if (dfa == NULL)
		return NULL;

	mtx_init(&dfa->lock, IPL_TTY);
	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
	    context, seqno);
	init_irq_work(&dfa->work, irq_dma_fence_array_work);

	dfa->num_fences = num_fences;
	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
	dfa->fences = fences;

	return dfa;
}

const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
};

int
dmabuf_read(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
dmabuf_write(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

int
dmabuf_poll(struct file *fp, int events, struct proc *p)
{
	return (0);
}

int
dmabuf_kqfilter(struct file *fp, struct knote *kn)
{
	return (EINVAL);
}

int
dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;

	memset(st, 0, sizeof(*st));
	st->st_size = dmabuf->size;
	st->st_mode = S_IFIFO;	/* XXX */
	return (0);
}

int
dmabuf_close(struct file *fp, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;

	fp->f_data = NULL;
	KERNEL_LOCK();
	dmabuf->ops->release(dmabuf);
	KERNEL_UNLOCK();
	free(dmabuf, M_DRM, sizeof(struct dma_buf));
	return (0);
}

int
dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;
	off_t newoff;

	if (*offset != 0)
		return (EINVAL);

	switch (whence) {
	case SEEK_SET:
		newoff = 0;
		break;
	case SEEK_END:
		newoff = dmabuf->size;
		break;
	default:
		return (EINVAL);
	}
	mtx_enter(&fp->f_mtx);
	fp->f_offset = newoff;
	mtx_leave(&fp->f_mtx);
	*offset = newoff;
	return (0);
}

const struct fileops dmabufops = {
	.fo_read	= dmabuf_read,
	.fo_write	= dmabuf_write,
	.fo_ioctl	= dmabuf_ioctl,
	.fo_poll	= dmabuf_poll,
	.fo_kqfilter	= dmabuf_kqfilter,
	.fo_stat	= dmabuf_stat,
	.fo_close	= dmabuf_close,
	.fo_seek	= dmabuf_seek,
};

struct dma_buf *
dma_buf_export(const struct dma_buf_export_info *info)
{
	struct proc *p = curproc;
	struct dma_buf *dmabuf;
	struct file *fp;

	fp = fnew(p);
	if (fp == NULL)
		return ERR_PTR(-ENFILE);
	fp->f_type = DTYPE_DMABUF;
	fp->f_ops = &dmabufops;
	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
	dmabuf->priv = info->priv;
	dmabuf->ops = info->ops;
	dmabuf->size = info->size;
	dmabuf->file = fp;
	fp->f_data = dmabuf;
	INIT_LIST_HEAD(&dmabuf->attachments);
	return dmabuf;
}

struct dma_buf *
dma_buf_get(int fd)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return ERR_PTR(-EBADF);

	if (fp->f_type != DTYPE_DMABUF) {
		FRELE(fp, p);
		return ERR_PTR(-EINVAL);
	}

	return fp->f_data;
}

void
dma_buf_put(struct dma_buf *dmabuf)
{
	KASSERT(dmabuf);
	KASSERT(dmabuf->file);

	FRELE(dmabuf->file, curproc);
}

int
dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp = dmabuf->file;
	int fd, cloexec, error;

	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;

	fdplock(fdp);
restart:
	if ((error = fdalloc(p, 0, &fd)) != 0) {
		if (error == ENOSPC) {
			fdexpand(p);
			goto restart;
		}
		fdpunlock(fdp);
		return -error;
	}

	fdinsert(fdp, fd, cloexec, fp);
	fdpunlock(fdp);

	return fd;
}

void
get_dma_buf(struct dma_buf *dmabuf)
{
	FREF(dmabuf->file);
}

enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc = pdev->pc;
	pcitag_t tag = pdev->tag;
	int pos;
	pcireg_t xcap, lnkcap = 0, lnkcap2 = 0;
	pcireg_t id;
	enum pci_bus_speed cap = PCI_SPEED_UNKNOWN;
	int bus, device, function;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return PCI_SPEED_UNKNOWN;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* we've been informed VIA and ServerWorks (RCC) don't make the cut */
	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
		return PCI_SPEED_UNKNOWN;

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);

	lnkcap &= 0x0f;
	lnkcap2 &= 0xfe;

	if (lnkcap2) { /* PCIE GEN 3.0 */
		if (lnkcap2 & 0x02)
			cap = PCIE_SPEED_2_5GT;
		if (lnkcap2 & 0x04)
			cap = PCIE_SPEED_5_0GT;
		if (lnkcap2 & 0x08)
			cap = PCIE_SPEED_8_0GT;
		if (lnkcap2 & 0x10)
			cap = PCIE_SPEED_16_0GT;
	} else {
		if (lnkcap & 0x01)
			cap = PCIE_SPEED_2_5GT;
		if (lnkcap & 0x02)
			cap = PCIE_SPEED_5_0GT;
	}

	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
	    lnkcap2);
	return cap;
}

enum pcie_link_width
pcie_get_width_cap(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc = pdev->pc;
	pcitag_t tag = pdev->tag;
	int pos;
	pcireg_t lnkcap = 0;
	pcireg_t id;
	int bus, device, function;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return PCIE_LNK_WIDTH_UNKNOWN;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	pci_decompose_tag(pc, tag, &bus, &device, &function);

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);

	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);

	if (lnkcap)
		return (lnkcap & 0x3f0) >> 4;
	return PCIE_LNK_WIDTH_UNKNOWN;
}

int
autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
    int sync, void *key)
{
	wakeup(wqe);
	if (wqe->proc)
		wake_up_process(wqe->proc);
	list_del_init(&wqe->entry);
	return 0;
}

static wait_queue_head_t bit_waitq;
wait_queue_head_t var_waitq;
struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);

int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
	int err;

	if (!test_bit(bit, word))
		return 0;

	mtx_enter(&wait_bit_mtx);
	while (test_bit(bit, word)) {
		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
		    INFSLP);
		if (err) {
			mtx_leave(&wait_bit_mtx);
			return 1;
		}
	}
	mtx_leave(&wait_bit_mtx);
	return 0;
}

int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
{
	int err;

	if (!test_bit(bit, word))
		return 0;

	mtx_enter(&wait_bit_mtx);
	while (test_bit(bit, word)) {
		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
		if (err) {
			mtx_leave(&wait_bit_mtx);
			return 1;
		}
	}
	mtx_leave(&wait_bit_mtx);
	return 0;
}

void
wake_up_bit(void *word, int bit)
{
	mtx_enter(&wait_bit_mtx);
	wakeup(word);
	mtx_leave(&wait_bit_mtx);
}

void
clear_and_wake_up_bit(int bit, void *word)
{
	clear_bit(bit, word);
	wake_up_bit(word, bit);
}

wait_queue_head_t *
bit_waitqueue(void *word, int bit)
{
	/* XXX hash table of wait queues? */
	return &bit_waitq;
}

struct workqueue_struct *system_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_long_wq;
struct taskq *taskletq;

void
drm_linux_init(void)
{
	system_wq = (struct workqueue_struct *)
	    taskq_create("drmwq", 4, IPL_HIGH, 0);
	system_highpri_wq = (struct workqueue_struct *)
	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
	system_unbound_wq = (struct workqueue_struct *)
	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
	system_long_wq = (struct workqueue_struct *)
	    taskq_create("drmlwq", 4, IPL_HIGH, 0);

	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);

	init_waitqueue_head(&bit_waitq);
	init_waitqueue_head(&var_waitq);

	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
	    "idrpl", NULL);
}

void
drm_linux_exit(void)
{
	pool_destroy(&idr_pool);

	taskq_destroy(taskletq);

	taskq_destroy((struct taskq *)system_long_wq);
	taskq_destroy((struct taskq *)system_unbound_wq);
	taskq_destroy((struct taskq *)system_highpri_wq);
	taskq_destroy((struct taskq *)system_wq);
}

#define PCIE_ECAP_RESIZE_BAR	0x15
#define RBCAP0			0x04
#define RBCTRL0			0x08
#define RBCTRL_BARINDEX_MASK	0x07
#define RBCTRL_BARSIZE_MASK	0x1f00
#define RBCTRL_BARSIZE_SHIFT	8

/* size in MB is 1 << nsize, e.g. nsize 8 requests a 256 MB BAR */
int
pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
{
	pcireg_t reg;
	uint32_t offset, capid;

	KASSERT(bar == 0);

	offset = PCI_PCIE_ECAP;

	/* search PCI Express Extended Capabilities */
	do {
		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
		capid = PCI_PCIE_ECAP_ID(reg);
		if (capid == PCIE_ECAP_RESIZE_BAR)
			break;
		offset = PCI_PCIE_ECAP_NEXT(reg);
	} while (capid != 0);

	if (capid == 0) {
		printf("%s: could not find resize bar cap!\n", __func__);
		return -ENOTSUP;
	}

	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);

	if ((reg & (1 << (nsize + 4))) == 0) {
		printf("%s size not supported\n", __func__);
		return -ENOTSUP;
	}

	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
		printf("%s BAR index not 0\n", __func__);
		return -EINVAL;
	}

	reg &= ~RBCTRL_BARSIZE_MASK;
	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;

	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);

	return 0;
}

TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);

int
register_shrinker(struct shrinker *shrinker)
{
	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
	return 0;
}

void
unregister_shrinker(struct shrinker *shrinker)
{
	TAILQ_REMOVE(&shrinkers, shrinker, next);
}

void
drmbackoff(long npages)
{
	struct shrink_control sc;
	struct shrinker *shrinker;
	u_long ret;

	shrinker = TAILQ_FIRST(&shrinkers);
	while (shrinker && npages > 0) {
		sc.nr_to_scan = npages;
		ret = shrinker->scan_objects(shrinker, &sc);
		npages -= ret;
		shrinker = TAILQ_NEXT(shrinker, next);
	}
}

void *
bitmap_zalloc(u_int n, gfp_t flags)
{
	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
}

void
bitmap_free(void *p)
{
	kfree(p);
}

int
atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
{
	if (atomic_add_unless(v, -1, 1))
		return 0;

	rw_enter_write(lock);
	if (atomic_dec_return(v) == 0)
		return 1;
	rw_exit_write(lock);
	return 0;
}

int
printk(const char *fmt, ...)
{
	int ret, level;
	va_list ap;

	if (fmt != NULL && *fmt == '\001') {
		level = fmt[1];
#ifndef DRMDEBUG
		if (level >= KERN_INFO[1] && level <= '9')
			return 0;
#endif
		fmt += 2;
	}

	va_start(ap, fmt);
	ret = vprintf(fmt, ap);
	va_end(ap);

	return ret;
}

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

struct interval_tree_node *
interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
    unsigned long last)
{
	struct interval_tree_node *node;
	struct rb_node *rb;

	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
		node = rb_entry(rb, typeof(*node), rb);
		if (LAST(node) >= start && START(node) <= last)
			return node;
	}
	return NULL;
}

void
interval_tree_remove(struct interval_tree_node *node,
    struct rb_root_cached *root)
{
	rb_erase_cached(&node->rb, root);
}

void
interval_tree_insert(struct interval_tree_node *node,
    struct rb_root_cached *root)
{
	struct rb_node **iter = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct interval_tree_node *iter_node;

	while (*iter) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct interval_tree_node, rb);

		if (node->start < iter_node->start)
			iter = &(*iter)->rb_left;
		else
			iter = &(*iter)->rb_right;
	}

	rb_link_node(&node->rb, parent, iter);
	rb_insert_color_cached(&node->rb, root, false);
}