/*	$OpenBSD: drm_linux.c,v 1.79 2021/04/11 15:30:51 kettenis Exp $	*/
/*
 * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
 * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/event.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/fcntl.h>

#include <dev/pci/ppbreg.h>

#include <linux/dma-buf.h>
#include <linux/mod_devicetable.h>
#include <linux/acpi.h>
#include <linux/pagevec.h>
#include <linux/dma-fence-array.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/scatterlist.h>
#include <linux/i2c.h>
#include <linux/pci.h>
#include <linux/notifier.h>
#include <linux/backlight.h>
#include <linux/shrinker.h>
#include <linux/fb.h>
#include <linux/xarray.h>
#include <linux/interval_tree.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>

#if defined(__amd64__) || defined(__i386__)
#include "bios.h"
#endif

void
tasklet_run(void *arg)
{
	struct tasklet_struct *ts = arg;

	clear_bit(TASKLET_STATE_SCHED, &ts->state);
	if (tasklet_trylock(ts)) {
		if (!atomic_read(&ts->count))
			ts->func(ts->data);
		tasklet_unlock(ts);
	}
}

/* 32 bit powerpc lacks 64 bit atomics */
#if defined(__powerpc__) && !defined(__powerpc64__)
struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
#endif

struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
volatile struct proc *sch_proc;
volatile void *sch_ident;
int sch_priority;

void
set_current_state(int state)
{
	if (sch_ident != curproc)
		mtx_enter(&sch_mtx);
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	sch_ident = sch_proc = curproc;
	sch_priority = state;
}

void
__set_current_state(int state)
{
	KASSERT(state == TASK_RUNNING);
	if (sch_ident == curproc) {
		MUTEX_ASSERT_LOCKED(&sch_mtx);
		sch_ident = NULL;
		mtx_leave(&sch_mtx);
	}
}

void
schedule(void)
{
	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

long
schedule_timeout(long timeout)
{
	struct sleep_state sls;
	unsigned long deadline;
	int wait, spl, timo = 0;

	MUTEX_ASSERT_LOCKED(&sch_mtx);
	KASSERT(!cold);

	if (timeout != MAX_SCHEDULE_TIMEOUT)
		timo = timeout;
	sleep_setup(&sls, sch_ident, sch_priority, "schto", timo);

	wait = (sch_proc == curproc && timeout > 0);

	spl = MUTEX_OLDIPL(&sch_mtx);
	MUTEX_OLDIPL(&sch_mtx) = splsched();
	mtx_leave(&sch_mtx);

	if (timeout != MAX_SCHEDULE_TIMEOUT)
		deadline = jiffies + timeout;
	sleep_finish(&sls, wait);
	if (timeout != MAX_SCHEDULE_TIMEOUT)
		timeout = deadline - jiffies;

	mtx_enter(&sch_mtx);
	MUTEX_OLDIPL(&sch_mtx) = spl;
	sch_ident = curproc;

	return timeout > 0 ? timeout : 0;
}

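/*
 * Illustrative only (not compiled in): a minimal sketch of the Linux-style
 * sleep pattern that set_current_state()/schedule_timeout() above emulate.
 * The flag being waited on and the timeout budget are hypothetical; ported
 * DRM code supplies its own condition and wake-up side.
 */
#if 0
static int
example_wait_for_flag(volatile int *flag)
{
	long timo = 2 * hz;	/* hypothetical two-second budget, in ticks */

	while (*flag == 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		/* re-check after sch_mtx is held, then sleep for what's left */
		if (*flag)
			break;
		timo = schedule_timeout(timo);
		if (timo == 0)
			break;			/* deadline passed */
	}
	__set_current_state(TASK_RUNNING);	/* drops sch_mtx if we hold it */
	return (*flag) ? 0 : -ETIMEDOUT;
}
#endif
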
long
schedule_timeout_uninterruptible(long timeout)
{
	tsleep(curproc, PWAIT, "schtou", timeout);
	return 0;
}

int
wake_up_process(struct proc *p)
{
	atomic_cas_ptr(&sch_proc, p, NULL);
	return wakeup_proc(p, NULL);
}

void
flush_workqueue(struct workqueue_struct *wq)
{
	if (cold)
		return;

	if (wq)
		taskq_barrier((struct taskq *)wq);
}

bool
flush_work(struct work_struct *work)
{
	if (cold)
		return false;

	if (work->tq)
		taskq_barrier(work->tq);
	return false;
}

bool
flush_delayed_work(struct delayed_work *dwork)
{
	bool ret = false;

	if (cold)
		return false;

	while (timeout_pending(&dwork->to)) {
		tsleep(dwork, PWAIT, "fldwto", 1);
		ret = true;
	}

	if (dwork->tq)
		taskq_barrier(dwork->tq);
	return ret;
}

struct kthread {
	int (*func)(void *);
	void *data;
	struct proc *proc;
	volatile u_int flags;
#define KTHREAD_SHOULDSTOP	0x0000001
#define KTHREAD_STOPPED		0x0000002
#define KTHREAD_SHOULDPARK	0x0000004
#define KTHREAD_PARKED		0x0000008
	LIST_ENTRY(kthread) next;
};

LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);

void
kthread_func(void *arg)
{
	struct kthread *thread = arg;
	int ret;

	ret = thread->func(thread->data);
	thread->flags |= KTHREAD_STOPPED;
	wakeup(thread);
	kthread_exit(ret);
}

struct proc *
kthread_run(int (*func)(void *), void *data, const char *name)
{
	struct kthread *thread;

	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
	thread->func = func;
	thread->data = data;
	thread->flags = 0;

	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
		free(thread, M_DRM, sizeof(*thread));
		return ERR_PTR(-ENOMEM);
	}

	LIST_INSERT_HEAD(&kthread_list, thread, next);
	return thread->proc;
}

struct kthread *
kthread_lookup(struct proc *p)
{
	struct kthread *thread;

	LIST_FOREACH(thread, &kthread_list, next) {
		if (thread->proc == p)
			break;
	}
	KASSERT(thread);

	return thread;
}

int
kthread_should_park(void)
{
	struct kthread *thread = kthread_lookup(curproc);
	return (thread->flags & KTHREAD_SHOULDPARK);
}

void
kthread_parkme(void)
{
	struct kthread *thread = kthread_lookup(curproc);

	while (thread->flags & KTHREAD_SHOULDPARK) {
		thread->flags |= KTHREAD_PARKED;
		wakeup(thread);
		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
		thread->flags &= ~KTHREAD_PARKED;
	}
}

void
kthread_park(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	while ((thread->flags & KTHREAD_PARKED) == 0) {
		thread->flags |= KTHREAD_SHOULDPARK;
		wake_up_process(thread->proc);
		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
	}
}

void
kthread_unpark(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	thread->flags &= ~KTHREAD_SHOULDPARK;
	wakeup(thread);
}

int
kthread_should_stop(void)
{
	struct kthread *thread = kthread_lookup(curproc);
	return (thread->flags & KTHREAD_SHOULDSTOP);
}

void
kthread_stop(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	while ((thread->flags & KTHREAD_STOPPED) == 0) {
		thread->flags |= KTHREAD_SHOULDSTOP;
		kthread_unpark(p);
		wake_up_process(thread->proc);
		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
	}
	LIST_REMOVE(thread, next);
	free(thread, M_DRM, sizeof(*thread));
}

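/*
 * Illustrative only (not compiled in): the worker-loop shape the kthread_*()
 * shims above expect.  "example_worker" and its softc are hypothetical; the
 * real users are the drm scheduler and driver helper threads.
 */
#if 0
struct example_softc {
	int sc_dying;
};

static int
example_worker(void *arg)
{
	struct example_softc *sc = arg;

	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* sleeps until kthread_unpark() */

		/* ... do one unit of work, or sleep waiting for it ... */
	}
	return 0;	/* return value is handed to kthread_exit() */
}

static void
example_start(struct example_softc *sc)
{
	struct proc *p;

	p = kthread_run(example_worker, sc, "exwork");
	if (IS_ERR(p))
		return;		/* kthread_create() failed */
	/* ... later: kthread_park(p), kthread_unpark(p), kthread_stop(p) ... */
}
#endif
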
#if NBIOS > 0
extern char smbios_board_vendor[];
extern char smbios_board_prod[];
extern char smbios_board_serial[];
#endif

bool
dmi_match(int slot, const char *str)
{
	switch (slot) {
	case DMI_SYS_VENDOR:
		if (hw_vendor != NULL &&
		    !strcmp(hw_vendor, str))
			return true;
		break;
	case DMI_PRODUCT_NAME:
		if (hw_prod != NULL &&
		    !strcmp(hw_prod, str))
			return true;
		break;
	case DMI_PRODUCT_VERSION:
		if (hw_ver != NULL &&
		    !strcmp(hw_ver, str))
			return true;
		break;
#if NBIOS > 0
	case DMI_BOARD_VENDOR:
		if (strcmp(smbios_board_vendor, str) == 0)
			return true;
		break;
	case DMI_BOARD_NAME:
		if (strcmp(smbios_board_prod, str) == 0)
			return true;
		break;
	case DMI_BOARD_SERIAL:
		if (strcmp(smbios_board_serial, str) == 0)
			return true;
		break;
#else
	case DMI_BOARD_VENDOR:
		if (hw_vendor != NULL &&
		    !strcmp(hw_vendor, str))
			return true;
		break;
	case DMI_BOARD_NAME:
		if (hw_prod != NULL &&
		    !strcmp(hw_prod, str))
			return true;
		break;
#endif
	case DMI_NONE:
	default:
		return false;
	}

	return false;
}

static bool
dmi_found(const struct dmi_system_id *dsi)
{
	int i, slot;

	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		if (slot == DMI_NONE)
			break;
		if (!dmi_match(slot, dsi->matches[i].substr))
			return false;
	}

	return true;
}

const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;

	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
		if (dmi_found(dsi))
			return dsi;
	}

	return NULL;
}

#if NBIOS > 0
extern char smbios_bios_date[];
#endif

const char *
dmi_get_system_info(int slot)
{
	WARN_ON(slot != DMI_BIOS_DATE);
#if NBIOS > 0
	if (slot == DMI_BIOS_DATE)
		return smbios_bios_date;
#endif
	return NULL;
}

int
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	int num = 0;

	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
		if (dmi_found(dsi)) {
			num++;
			if (dsi->callback && dsi->callback(dsi))
				break;
		}
	}
	return (num);
}

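/*
 * Illustrative only (not compiled in): a hypothetical quirk table of the
 * kind drivers feed to dmi_check_system()/dmi_first_match() above.  The
 * vendor/product strings and the callback are made up; the terminating
 * empty entry (matches[0].slot == 0) is what stops the table walk.
 */
#if 0
static int
example_dmi_callback(const struct dmi_system_id *id)
{
	printf("applying example quirk\n");
	return 1;	/* non-zero stops dmi_check_system() early */
}

static const struct dmi_system_id example_quirks[] = {
	{
		.callback = example_dmi_callback,
		.matches = {
			{ .slot = DMI_SYS_VENDOR, .substr = "Example Corp" },
			{ .slot = DMI_PRODUCT_NAME, .substr = "Example Book" },
		},
	},
	{ /* sentinel */ }
};

/* dmi_check_system(example_quirks) returns the number of matching entries */
#endif
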
struct vm_page *
alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	struct uvm_constraint_range *constraint = &no_constraint;
	struct pglist mlist;

	if (gfp_mask & M_CANFAIL)
		flags |= UVM_PLA_FAILOK;
	if (gfp_mask & M_ZERO)
		flags |= UVM_PLA_ZERO;
	if (gfp_mask & __GFP_DMA32)
		constraint = &dma_constraint;

	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
		return NULL;
	return TAILQ_FIRST(&mlist);
}

void
__free_pages(struct vm_page *page, unsigned int order)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < (1 << order); i++)
		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
	uvm_pglistfree(&mlist);
}

void
__pagevec_release(struct pagevec *pvec)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < pvec->nr; i++)
		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
	uvm_pglistfree(&mlist);
	pagevec_reinit(pvec);
}

void *
kmap(struct vm_page *pg)
{
	vaddr_t va;

#if defined (__HAVE_PMAP_DIRECT)
	va = pmap_map_direct(pg);
#else
	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
#endif
	return (void *)va;
}

void
kunmap_va(void *addr)
{
	vaddr_t va = (vaddr_t)addr;

#if defined (__HAVE_PMAP_DIRECT)
	pmap_unmap_direct(va);
#else
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
#endif
}

void *
vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
    pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
	if (va == 0)
		return NULL;
	for (i = 0; i < npages; i++) {
		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

void
vunmap(void *addr, size_t size)
{
	vaddr_t va = (vaddr_t)addr;

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size);
}

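/*
 * Illustrative only (not compiled in): a minimal sketch of how the page and
 * mapping helpers above combine for a caller that needs a temporary
 * contiguous kernel mapping of a few pages.  GFP_KERNEL and PAGE_KERNEL are
 * assumed to come from the Linux compat headers (PAGE_KERNEL is assumed to
 * be 0 here, since vmap() ORs it into the physical address).
 */
#if 0
static void
example_vmap_usage(void)
{
	struct vm_page *pages[4];
	void *va;
	int i;

	for (i = 0; i < 4; i++) {
		pages[i] = alloc_pages(GFP_KERNEL | M_ZERO, 0);
		if (pages[i] == NULL)
			goto out;
	}

	va = vmap(pages, 4, 0, PAGE_KERNEL);
	if (va != NULL) {
		memset(va, 0xa5, 4 * PAGE_SIZE);	/* scribble on the mapping */
		vunmap(va, 4 * PAGE_SIZE);
	}

out:
	for (i = 0; i < 4 && pages[i] != NULL; i++)
		__free_pages(pages[i], 0);
}
#endif
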
void
print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
    int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
{
	const uint8_t *cbuf = buf;
	int i;

	for (i = 0; i < len; i++) {
		if ((i % rowsize) == 0)
			printf("%s", prefix_str);
		printf("%02x", cbuf[i]);
		if ((i % rowsize) == (rowsize - 1))
			printf("\n");
		else
			printf(" ");
	}
}

void *
memchr_inv(const void *s, int c, size_t n)
{
	if (n != 0) {
		const unsigned char *p = s;

		do {
			if (*p++ != (unsigned char)c)
				return ((void *)(p - 1));
		} while (--n != 0);
	}
	return (NULL);
}

int
panic_cmp(struct rb_node *a, struct rb_node *b)
{
	panic(__func__);
}

#undef RB_ROOT
#define RB_ROOT(head) (head)->rbh_root

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

/*
 * This is a fairly minimal implementation of the Linux "idr" API.  It
 * probably isn't very efficient, and definitely isn't RCU safe.  The
 * pre-load buffer is global instead of per-cpu; we rely on the kernel
 * lock to make this work.  We do randomize our IDs in order to make
 * them harder to guess.
 */

int idr_cmp(struct idr_entry *, struct idr_entry *);
SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);

struct pool idr_pool;
struct idr_entry *idr_entry_cache;

void
idr_init(struct idr *idr)
{
	SPLAY_INIT(&idr->tree);
}

void
idr_destroy(struct idr *idr)
{
	struct idr_entry *id;

	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
		SPLAY_REMOVE(idr_tree, &idr->tree, id);
		pool_put(&idr_pool, id);
	}
}

void
idr_preload(unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache == NULL)
		idr_entry_cache = pool_get(&idr_pool, flags);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	struct idr_entry *id;
	int begin;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache) {
		id = idr_entry_cache;
		idr_entry_cache = NULL;
	} else {
		id = pool_get(&idr_pool, flags);
		if (id == NULL)
			return -ENOMEM;
	}

	if (end <= 0)
		end = INT_MAX;

#ifdef notyet
	id->id = begin = start + arc4random_uniform(end - start);
#else
	id->id = begin = start;
#endif
	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
		if (id->id == end)
			id->id = start;
		else
			id->id++;
		if (id->id == begin) {
			pool_put(&idr_pool, id);
			return -ENOSPC;
		}
	}
	id->ptr = ptr;
	return id->id;
}

void *
idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
	struct idr_entry find, *res;
	void *old;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return ERR_PTR(-ENOENT);
	old = res->ptr;
	res->ptr = ptr;
	return old;
}

void *
idr_remove(struct idr *idr, unsigned long id)
{
	struct idr_entry find, *res;
	void *ptr = NULL;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res) {
		SPLAY_REMOVE(idr_tree, &idr->tree, res);
		ptr = res->ptr;
		pool_put(&idr_pool, res);
	}
	return ptr;
}

void *
idr_find(struct idr *idr, unsigned long id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return NULL;
	return res->ptr;
}

void *
idr_get_next(struct idr *idr, int *id)
{
	struct idr_entry *res;

	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
		if (res->id >= *id) {
			*id = res->id;
			return res->ptr;
		}
	}

	return NULL;
}

int
idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
{
	struct idr_entry *id;
	int ret;

	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
		ret = func(id->id, id->ptr, data);
		if (ret)
			return ret;
	}

	return 0;
}

int
idr_cmp(struct idr_entry *a, struct idr_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);

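/*
 * Illustrative only (not compiled in): the allocate/lookup/remove cycle the
 * idr shim above supports, mirroring how GEM handle tables use it.  The
 * object pointer is a stand-in; idr_preload_end() is the Linux-side bookend
 * and is assumed to be provided (as a no-op) by the compat header.
 */
#if 0
static void
example_idr_usage(void *obj)
{
	struct idr example_idr;
	int handle;

	idr_init(&example_idr);

	idr_preload(GFP_KERNEL);	/* stock the global pre-load slot */
	handle = idr_alloc(&example_idr, obj, 1, 0, GFP_NOWAIT);
	idr_preload_end();
	if (handle < 0) {
		idr_destroy(&example_idr);
		return;			/* -ENOMEM or -ENOSPC */
	}

	KASSERT(idr_find(&example_idr, handle) == obj);

	idr_remove(&example_idr, handle);
	idr_destroy(&example_idr);
}
#endif
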
void
ida_init(struct ida *ida)
{
	idr_init(&ida->idr);
}

void
ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t gfp_mask)
{
	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
}

void
ida_simple_remove(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

int
xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
struct pool xa_pool;
SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);

void
xa_init_flags(struct xarray *xa, gfp_t flags)
{
	static int initialized;

	if (!initialized) {
		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_TTY, 0,
		    "xapl", NULL);
		initialized = 1;
	}
	SPLAY_INIT(&xa->xa_tree);
}

void
xa_destroy(struct xarray *xa)
{
	struct xarray_entry *id;

	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
		pool_put(&xa_pool, id);
	}
}

int
xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
{
	struct xarray_entry *xid;
	int flags = (gfp & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
	int begin;

	xid = pool_get(&xa_pool, flags);
	if (xid == NULL)
		return -ENOMEM;

	if (limit <= 0)
		limit = INT_MAX;

	xid->id = begin = start;

	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
		if (xid->id == limit)
			xid->id = start;
		else
			xid->id++;
		if (xid->id == begin) {
			pool_put(&xa_pool, xid);
			return -EBUSY;
		}
	}
	xid->ptr = entry;
	*id = xid->id;
	return 0;
}

void *
xa_erase(struct xarray *xa, unsigned long index)
{
	struct xarray_entry find, *res;
	void *ptr = NULL;

	find.id = index;
	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
	if (res) {
		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
		ptr = res->ptr;
		pool_put(&xa_pool, res);
	}
	return ptr;
}

void *
xa_load(struct xarray *xa, unsigned long index)
{
	struct xarray_entry find, *res;

	find.id = index;
	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
	if (res == NULL)
		return NULL;
	return res->ptr;
}

void *
xa_get_next(struct xarray *xa, unsigned long *index)
{
	struct xarray_entry *res;

	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
		if (res->id >= *index) {
			*index = res->id;
			return res->ptr;
		}
	}

	return NULL;
}

int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
	    M_DRM, gfp_mask);
	if (table->sgl == NULL)
		return -ENOMEM;
	table->nents = table->orig_nents = nents;
	return 0;
}

void
sg_free_table(struct sg_table *table)
{
	free(table->sgl, M_DRM,
	    table->orig_nents * sizeof(struct scatterlist));
	table->sgl = NULL;
}

size_t
sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen)
{
	panic("%s", __func__);
}

int
i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	void *cmd = NULL;
	int cmdlen = 0;
	int err, ret = 0;
	int op;

	iic_acquire_bus(&adap->ic, 0);

	while (num > 2) {
		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
		    msgs->buf, msgs->len, 0);
		if (err) {
			ret = -err;
			goto fail;
		}
		msgs++;
		num--;
		ret++;
	}

	if (num > 1) {
		cmd = msgs->buf;
		cmdlen = msgs->len;
		msgs++;
		num--;
		ret++;
	}

	op = (msgs->flags & I2C_M_RD) ?
	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
	    msgs->buf, msgs->len, 0);
	if (err) {
		ret = -err;
		goto fail;
	}
	msgs++;
	ret++;

fail:
	iic_release_bus(&adap->ic, 0);

	return ret;
}

int
i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	int ret;

	if (adap->lock_ops)
		adap->lock_ops->lock_bus(adap, 0);

	if (adap->algo)
		ret = adap->algo->master_xfer(adap, msgs, num);
	else
		ret = i2c_master_xfer(adap, msgs, num);

	if (adap->lock_ops)
		adap->lock_ops->unlock_bus(adap, 0);

	return ret;
}

int
i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct i2c_algo_bit_data *algo = adap->algo_data;
	struct i2c_adapter bb;

	memset(&bb, 0, sizeof(bb));
	bb.ic = algo->ic;
	bb.retries = adap->retries;
	return i2c_master_xfer(&bb, msgs, num);
}

uint32_t
i2c_bb_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

struct i2c_algorithm i2c_bit_algo = {
	.master_xfer = i2c_bb_master_xfer,
	.functionality = i2c_bb_functionality
};

int
i2c_bit_add_bus(struct i2c_adapter *adap)
{
	adap->algo = &i2c_bit_algo;
	adap->retries = 3;

	return 0;
}

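/*
 * Illustrative only (not compiled in): the classic write-then-read message
 * pair that i2c_transfer()/i2c_master_xfer() above handle by turning the
 * first message into the command phase of an iic_exec() with stop.  The
 * slave address and register offset are made up.
 */
#if 0
static int
example_i2c_read_reg(struct i2c_adapter *adap, uint8_t reg, uint8_t *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret;

	ret = i2c_transfer(adap, msgs, nitems(msgs));
	return (ret == nitems(msgs)) ? 0 : -EIO;
}
#endif
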
#if defined(__amd64__) || defined(__i386__)

/*
 * This is a minimal implementation of the Linux vga_get/vga_put
 * interface.  In all likelihood, it will only work for inteldrm(4) as
 * it assumes that if there is another active VGA device in the
 * system, it is sitting behind a PCI bridge.
 */

extern int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);

pcitag_t vga_bridge_tag;
int vga_bridge_disabled;

int
vga_disable_bridge(struct pci_attach_args *pa)
{
	pcireg_t bhlc, bc;

	if (pa->pa_domain != 0)
		return 0;

	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return 0;

	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
	if ((bc & PPB_BC_VGA_ENABLE) == 0)
		return 0;
	bc &= ~PPB_BC_VGA_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_tag = pa->pa_tag;
	vga_bridge_disabled = 1;

	return 1;
}

void
vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
{
	KASSERT(pdev->pci->sc_bridgetag == NULL);
	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
}

void
vga_put(struct pci_dev *pdev, int rsrc)
{
	pcireg_t bc;

	if (!vga_bridge_disabled)
		return;

	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
	bc |= PPB_BC_VGA_ENABLE;
	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_disabled = 0;
}

#endif

/*
 * ACPI types and interfaces.
 */

#ifdef __HAVE_ACPI
#include "acpi.h"
#endif

#if NACPI > 0

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/amltypes.h>
#include <dev/acpi/dsdt.h>

acpi_status
acpi_get_table(const char *sig, int instance,
    struct acpi_table_header **hdr)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpi_q *entry;

	KASSERT(instance == 1);

	if (sc == NULL)
		return AE_NOT_FOUND;

	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
			*hdr = entry->q_table;
			return 0;
		}
	}

	return AE_NOT_FOUND;
}

acpi_status
acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
{
	node = aml_searchname(node, name);
	if (node == NULL)
		return AE_NOT_FOUND;

	*rnode = node;
	return 0;
}

acpi_status
acpi_get_name(acpi_handle node, int type, struct acpi_buffer *buffer)
{
	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
	KASSERT(type == ACPI_FULL_PATHNAME);
	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
	return 0;
}

acpi_status
acpi_evaluate_object(acpi_handle node, const char *name,
    struct acpi_object_list *params, struct acpi_buffer *result)
{
	struct aml_value args[4], res;
	union acpi_object *obj;
	uint8_t *data;
	int i;

	KASSERT(params->count <= nitems(args));

	for (i = 0; i < params->count; i++) {
		args[i].type = params->pointer[i].type;
		switch (args[i].type) {
		case AML_OBJTYPE_INTEGER:
			args[i].v_integer = params->pointer[i].integer.value;
			break;
		case AML_OBJTYPE_BUFFER:
			args[i].length = params->pointer[i].buffer.length;
			args[i].v_buffer = params->pointer[i].buffer.pointer;
			break;
		default:
			printf("%s: arg type 0x%02x", __func__, args[i].type);
			return AE_BAD_PARAMETER;
		}
	}

	if (name) {
		node = aml_searchname(node, name);
		if (node == NULL)
			return AE_NOT_FOUND;
	}
	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
		aml_freevalue(&res);
		return AE_ERROR;
	}

	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);

	result->length = sizeof(union acpi_object);
	switch (res.type) {
	case AML_OBJTYPE_BUFFER:
		result->length += res.length;
		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
		obj = (union acpi_object *)result->pointer;
		data = (uint8_t *)(obj + 1);
		obj->type = res.type;
		obj->buffer.length = res.length;
		obj->buffer.pointer = data;
		memcpy(data, res.v_buffer, res.length);
		break;
	default:
		printf("%s: return type 0x%02x", __func__, res.type);
		aml_freevalue(&res);
		return AE_ERROR;
	}

	aml_freevalue(&res);
	return 0;
}

SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);

int
drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
{
	struct acpi_bus_event event;
	struct notifier_block *nb;

	event.device_class = ACPI_VIDEO_CLASS;
	event.type = notify;

	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
		nb->notifier_call(nb, 0, &event);
	return 0;
}

int
register_acpi_notifier(struct notifier_block *nb)
{
	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
	return 0;
}

int
unregister_acpi_notifier(struct notifier_block *nb)
{
	struct notifier_block *tmp;

	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
		if (tmp == nb) {
			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
			    notifier_block, link);
			return 0;
		}
	}

	return -ENOENT;
}

const char *
acpi_format_exception(acpi_status status)
{
	switch (status) {
	case AE_NOT_FOUND:
		return "not found";
	case AE_BAD_PARAMETER:
		return "bad parameter";
	default:
		return "unknown";
	}
}

#endif

void
backlight_do_update_status(void *arg)
{
	backlight_update_status(arg);
}

struct backlight_device *
backlight_device_register(const char *name, void *kdev, void *data,
    const struct backlight_ops *ops, struct backlight_properties *props)
{
	struct backlight_device *bd;

	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
	bd->ops = ops;
	bd->props = *props;
	bd->data = data;

	task_set(&bd->task, backlight_do_update_status, bd);

	return bd;
}

void
backlight_device_unregister(struct backlight_device *bd)
{
	free(bd, M_DRM, sizeof(*bd));
}

void
backlight_schedule_update_status(struct backlight_device *bd)
{
	task_add(systq, &bd->task);
}

inline int
backlight_enable(struct backlight_device *bd)
{
	if (bd == NULL)
		return 0;

	bd->props.power = FB_BLANK_UNBLANK;

	return bd->ops->update_status(bd);
}

inline int
backlight_disable(struct backlight_device *bd)
{
	if (bd == NULL)
		return 0;

	bd->props.power = FB_BLANK_POWERDOWN;

	return bd->ops->update_status(bd);
}

void
drm_sysfs_hotplug_event(struct drm_device *dev)
{
	KNOTE(&dev->note, NOTE_CHANGE);
}

static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);

uint64_t
dma_fence_context_alloc(unsigned int num)
{
	return atomic64_add_return(num, &drm_fence_context_count) - num;
}

struct default_wait_cb {
	struct dma_fence_cb base;
	struct proc *proc;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
	    container_of(cb, struct default_wait_cb, base);
	wake_up_process(wait->proc);
}

long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	long ret = timeout ? timeout : 1;
	unsigned long end;
	int err;
	struct default_wait_cb cb;
	bool was_set;

	KASSERT(timeout <= INT_MAX);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return ret;

	mtx_enter(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			goto out;
		}
	}

	if (timeout == 0) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.proc = curproc;
	list_add(&cb.base.node, &fence->cb_list);

	end = jiffies + timeout;
	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			break;
		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
		    "dmafence", ret);
		if (err == EINTR || err == ERESTART) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
out:
	mtx_leave(fence->lock);

	return ret;
}

static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}

long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
    bool intr, long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	long ret = timeout;
	unsigned long end;
	int i, err;

	KASSERT(timeout <= INT_MAX);

	if (timeout == 0) {
		for (i = 0; i < count; i++) {
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}
		}
		return 0;
	}

	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
	if (cb == NULL)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct dma_fence *fence = fences[i];
		cb[i].proc = curproc;
		if (dma_fence_add_callback(fence, &cb[i].base,
		    dma_fence_default_wait_cb)) {
			if (idx)
				*idx = i;
			goto cb_cleanup;
		}
	}

	end = jiffies + timeout;
	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
		if (dma_fence_test_signaled_any(fences, count, idx))
			break;
		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
		if (err == EINTR || err == ERESTART) {
			ret = -ERESTARTSYS;
			break;
		}
	}

cb_cleanup:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);
	free(cb, M_DRM, count * sizeof(*cb));
	return ret;
}

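/*
 * Illustrative only (not compiled in): waiting on whichever of several
 * fences signals first, using dma_fence_wait_any_timeout() above.  The
 * fence array is assumed to hold references owned by the caller; the
 * five-second budget is arbitrary.
 */
#if 0
static int
example_wait_any(struct dma_fence **fences, uint32_t count)
{
	uint32_t first;
	long ret;

	ret = dma_fence_wait_any_timeout(fences, count, true, 5 * hz, &first);
	if (ret == -ERESTARTSYS)
		return -EINTR;		/* interrupted by a signal */
	if (ret <= 0)
		return -ETIMEDOUT;	/* nothing signaled in time */
	/* fences[first] signaled; "ret" is the remaining timeout in ticks */
	return 0;
}
#endif
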
static struct dma_fence dma_fence_stub;
static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);

static const char *
dma_fence_stub_get_name(struct dma_fence *fence)
{
	return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};

struct dma_fence *
dma_fence_get_stub(void)
{
	mtx_enter(&dma_fence_stub_mtx);
	if (dma_fence_stub.ops == NULL) {
		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
		    &dma_fence_stub_mtx, 0, 0);
		dma_fence_signal_locked(&dma_fence_stub);
	}
	mtx_leave(&dma_fence_stub_mtx);

	return dma_fence_get(&dma_fence_stub);
}

static const char *
dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}

static const char *
dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void
irq_dma_fence_array_work(struct irq_work *wrk)
{
	struct dma_fence_array *dfa = container_of(wrk, typeof(*dfa), work);

	dma_fence_signal(&dfa->base);
	dma_fence_put(&dfa->base);
}

static void
dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
	    container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *dfa = array_cb->array;

	if (atomic_dec_and_test(&dfa->num_pending))
		irq_work_queue(&dfa->work);
	else
		dma_fence_put(&dfa->base);
}

static bool
dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);
	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
	int i;

	for (i = 0; i < dfa->num_fences; ++i) {
		cb[i].array = dfa;
		dma_fence_get(&dfa->base);
		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
		    dma_fence_array_cb_func)) {
			dma_fence_put(&dfa->base);
			if (atomic_dec_and_test(&dfa->num_pending))
				return false;
		}
	}

	return true;
}

static bool
dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);

	return atomic_read(&dfa->num_pending) <= 0;
}

static void
dma_fence_array_release(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);
	int i;

	for (i = 0; i < dfa->num_fences; ++i)
		dma_fence_put(dfa->fences[i]);

	free(dfa->fences, M_DRM, 0);
	dma_fence_free(fence);
}

struct dma_fence_array *
dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
    unsigned seqno, bool signal_on_any)
{
	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
	    (num_fences * sizeof(struct dma_fence_array_cb)),
	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
	if (dfa == NULL)
		return NULL;

	mtx_init(&dfa->lock, IPL_TTY);
	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
	    context, seqno);
	init_irq_work(&dfa->work, irq_dma_fence_array_work);

	dfa->num_fences = num_fences;
	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
	dfa->fences = fences;

	return dfa;
}

const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
};

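/*
 * Illustrative only (not compiled in): collapsing several fences into one
 * with dma_fence_array_create() above.  On success the array takes
 * ownership of the malloc'd fence pointer array and of the individual
 * fence references (see dma_fence_array_release()).
 */
#if 0
static struct dma_fence *
example_merge_fences(struct dma_fence *a, struct dma_fence *b)
{
	struct dma_fence **fences;
	struct dma_fence_array *dfa;

	fences = mallocarray(2, sizeof(*fences), M_DRM, M_WAITOK);
	fences[0] = dma_fence_get(a);
	fences[1] = dma_fence_get(b);

	/* signal_on_any == false: the array signals once both fences have */
	dfa = dma_fence_array_create(2, fences, dma_fence_context_alloc(1),
	    1, false);
	if (dfa == NULL) {
		dma_fence_put(a);
		dma_fence_put(b);
		free(fences, M_DRM, 2 * sizeof(*fences));
		return NULL;
	}
	return &dfa->base;
}
#endif
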
int
dmabuf_read(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
dmabuf_write(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

int
dmabuf_poll(struct file *fp, int events, struct proc *p)
{
	return (0);
}

int
dmabuf_kqfilter(struct file *fp, struct knote *kn)
{
	return (EINVAL);
}

int
dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;

	memset(st, 0, sizeof(*st));
	st->st_size = dmabuf->size;
	st->st_mode = S_IFIFO;	/* XXX */
	return (0);
}

int
dmabuf_close(struct file *fp, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;

	fp->f_data = NULL;
	KERNEL_LOCK();
	dmabuf->ops->release(dmabuf);
	KERNEL_UNLOCK();
	free(dmabuf, M_DRM, sizeof(struct dma_buf));
	return (0);
}

int
dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;
	off_t newoff;

	if (*offset != 0)
		return (EINVAL);

	switch (whence) {
	case SEEK_SET:
		newoff = 0;
		break;
	case SEEK_END:
		newoff = dmabuf->size;
		break;
	default:
		return (EINVAL);
	}
	mtx_enter(&fp->f_mtx);
	fp->f_offset = newoff;
	mtx_leave(&fp->f_mtx);
	*offset = newoff;
	return (0);
}

const struct fileops dmabufops = {
	.fo_read = dmabuf_read,
	.fo_write = dmabuf_write,
	.fo_ioctl = dmabuf_ioctl,
	.fo_poll = dmabuf_poll,
	.fo_kqfilter = dmabuf_kqfilter,
	.fo_stat = dmabuf_stat,
	.fo_close = dmabuf_close,
	.fo_seek = dmabuf_seek,
};

struct dma_buf *
dma_buf_export(const struct dma_buf_export_info *info)
{
	struct proc *p = curproc;
	struct dma_buf *dmabuf;
	struct file *fp;

	fp = fnew(p);
	if (fp == NULL)
		return ERR_PTR(-ENFILE);
	fp->f_type = DTYPE_DMABUF;
	fp->f_ops = &dmabufops;
	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
	dmabuf->priv = info->priv;
	dmabuf->ops = info->ops;
	dmabuf->size = info->size;
	dmabuf->file = fp;
	fp->f_data = dmabuf;
	INIT_LIST_HEAD(&dmabuf->attachments);
	return dmabuf;
}

struct dma_buf *
dma_buf_get(int fd)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return ERR_PTR(-EBADF);

	if (fp->f_type != DTYPE_DMABUF) {
		FRELE(fp, p);
		return ERR_PTR(-EINVAL);
	}

	return fp->f_data;
}

void
dma_buf_put(struct dma_buf *dmabuf)
{
	KASSERT(dmabuf);
	KASSERT(dmabuf->file);

	FRELE(dmabuf->file, curproc);
}

int
dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp = dmabuf->file;
	int fd, cloexec, error;

	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;

	fdplock(fdp);
restart:
	if ((error = fdalloc(p, 0, &fd)) != 0) {
		if (error == ENOSPC) {
			fdexpand(p);
			goto restart;
		}
		fdpunlock(fdp);
		return -error;
	}

	fdinsert(fdp, fd, cloexec, fp);
	fdpunlock(fdp);

	return fd;
}

void
get_dma_buf(struct dma_buf *dmabuf)
{
	FREF(dmabuf->file);
}

enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc;
	pcitag_t tag;
	int pos;
	pcireg_t xcap, lnkcap = 0, lnkcap2 = 0;
	pcireg_t id;
	enum pci_bus_speed cap = PCI_SPEED_UNKNOWN;
	int bus, device, function;

	if (pdev == NULL)
		return PCI_SPEED_UNKNOWN;

	pc = pdev->pc;
	tag = pdev->tag;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return PCI_SPEED_UNKNOWN;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* we've been informed VIA and ServerWorks don't make the cut */
	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
		return PCI_SPEED_UNKNOWN;

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);

	lnkcap &= 0x0f;
	lnkcap2 &= 0xfe;

	if (lnkcap2) { /* PCIE GEN 3.0 */
		if (lnkcap2 & 0x02)
			cap = PCIE_SPEED_2_5GT;
		if (lnkcap2 & 0x04)
			cap = PCIE_SPEED_5_0GT;
		if (lnkcap2 & 0x08)
			cap = PCIE_SPEED_8_0GT;
		if (lnkcap2 & 0x10)
			cap = PCIE_SPEED_16_0GT;
	} else {
		if (lnkcap & 0x01)
			cap = PCIE_SPEED_2_5GT;
		if (lnkcap & 0x02)
			cap = PCIE_SPEED_5_0GT;
	}

	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
	    lnkcap2);
	return cap;
}

enum pcie_link_width
pcie_get_width_cap(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc = pdev->pc;
	pcitag_t tag = pdev->tag;
	int pos;
	pcireg_t lnkcap = 0;
	pcireg_t id;
	int bus, device, function;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return PCIE_LNK_WIDTH_UNKNOWN;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	pci_decompose_tag(pc, tag, &bus, &device, &function);

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);

	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);

	if (lnkcap)
		return (lnkcap & 0x3f0) >> 4;
	return PCIE_LNK_WIDTH_UNKNOWN;
}

int
autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
    int sync, void *key)
{
	wakeup(wqe);
	if (wqe->proc)
		wake_up_process(wqe->proc);
	list_del_init(&wqe->entry);
	return 0;
}

static wait_queue_head_t bit_waitq;
wait_queue_head_t var_waitq;
struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);

int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
	int err;

	if (!test_bit(bit, word))
		return 0;

	mtx_enter(&wait_bit_mtx);
	while (test_bit(bit, word)) {
		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
		    INFSLP);
		if (err) {
			mtx_leave(&wait_bit_mtx);
			return 1;
		}
	}
	mtx_leave(&wait_bit_mtx);
	return 0;
}

int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
{
	int err;

	if (!test_bit(bit, word))
		return 0;

	mtx_enter(&wait_bit_mtx);
	while (test_bit(bit, word)) {
		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
		if (err) {
			mtx_leave(&wait_bit_mtx);
			return 1;
		}
	}
	mtx_leave(&wait_bit_mtx);
	return 0;
}

void
wake_up_bit(void *word, int bit)
{
	mtx_enter(&wait_bit_mtx);
	wakeup(word);
	mtx_leave(&wait_bit_mtx);
}

void
clear_and_wake_up_bit(int bit, void *word)
{
	clear_bit(bit, word);
	wake_up_bit(word, bit);
}

wait_queue_head_t *
bit_waitqueue(void *word, int bit)
{
	/* XXX hash table of wait queues? */
	return &bit_waitq;
}

struct workqueue_struct *system_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_long_wq;
struct taskq *taskletq;

void
drm_linux_init(void)
{
	system_wq = (struct workqueue_struct *)
	    taskq_create("drmwq", 4, IPL_HIGH, 0);
	system_highpri_wq = (struct workqueue_struct *)
	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
	system_unbound_wq = (struct workqueue_struct *)
	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
	system_long_wq = (struct workqueue_struct *)
	    taskq_create("drmlwq", 4, IPL_HIGH, 0);

	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);

	init_waitqueue_head(&bit_waitq);
	init_waitqueue_head(&var_waitq);

	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
	    "idrpl", NULL);
}

void
drm_linux_exit(void)
{
	pool_destroy(&idr_pool);

	taskq_destroy(taskletq);

	taskq_destroy((struct taskq *)system_long_wq);
	taskq_destroy((struct taskq *)system_unbound_wq);
	taskq_destroy((struct taskq *)system_highpri_wq);
	taskq_destroy((struct taskq *)system_wq);
}

#define PCIE_ECAP_RESIZE_BAR	0x15
#define RBCAP0			0x04
#define RBCTRL0			0x08
#define RBCTRL_BARINDEX_MASK	0x07
#define RBCTRL_BARSIZE_MASK	0x1f00
#define RBCTRL_BARSIZE_SHIFT	8

/* size in MB is 1 << nsize */
int
pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
{
	pcireg_t reg;
	uint32_t offset, capid;

	KASSERT(bar == 0);

	offset = PCI_PCIE_ECAP;

	/* search PCI Express Extended Capabilities */
	do {
		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
		capid = PCI_PCIE_ECAP_ID(reg);
		if (capid == PCIE_ECAP_RESIZE_BAR)
			break;
		offset = PCI_PCIE_ECAP_NEXT(reg);
	} while (capid != 0);

	if (capid == 0) {
		printf("%s: could not find resize bar cap!\n", __func__);
		return -ENOTSUP;
	}

	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);

	if ((reg & (1 << (nsize + 4))) == 0) {
		printf("%s size not supported\n", __func__);
		return -ENOTSUP;
	}

	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
		printf("%s BAR index not 0\n", __func__);
		return -EINVAL;
	}

	reg &= ~RBCTRL_BARSIZE_MASK;
	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;

	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);

	return 0;
}

TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);

int
register_shrinker(struct shrinker *shrinker)
{
	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
	return 0;
}

void
unregister_shrinker(struct shrinker *shrinker)
{
	TAILQ_REMOVE(&shrinkers, shrinker, next);
}

void
drmbackoff(long npages)
{
	struct shrink_control sc;
	struct shrinker *shrinker;
	u_long ret;

	shrinker = TAILQ_FIRST(&shrinkers);
	while (shrinker && npages > 0) {
		sc.nr_to_scan = npages;
		ret = shrinker->scan_objects(shrinker, &sc);
		npages -= ret;
		shrinker = TAILQ_NEXT(shrinker, next);
	}
}

void *
bitmap_zalloc(u_int n, gfp_t flags)
{
	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
}

void
bitmap_free(void *p)
{
	kfree(p);
}

int
atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
{
	if (atomic_add_unless(v, -1, 1))
		return 0;

	rw_enter_write(lock);
	if (atomic_dec_return(v) == 0)
		return 1;
	rw_exit_write(lock);
	return 0;
}

int
printk(const char *fmt, ...)
{
	int ret, level;
	va_list ap;

	if (fmt != NULL && *fmt == '\001') {
		level = fmt[1];
#ifndef DRMDEBUG
		if (level >= KERN_INFO[1] && level <= '9')
			return 0;
#endif
		fmt += 2;
	}

	va_start(ap, fmt);
	ret = vprintf(fmt, ap);
	va_end(ap);

	return ret;
}

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

struct interval_tree_node *
interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
    unsigned long last)
{
	struct interval_tree_node *node;
	struct rb_node *rb;

	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
		node = rb_entry(rb, typeof(*node), rb);
		if (LAST(node) >= start && START(node) <= last)
			return node;
	}
	return NULL;
}

void
interval_tree_remove(struct interval_tree_node *node,
    struct rb_root_cached *root)
{
	rb_erase_cached(&node->rb, root);
}

void
interval_tree_insert(struct interval_tree_node *node,
    struct rb_root_cached *root)
{
	struct rb_node **iter = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct interval_tree_node *iter_node;

	while (*iter) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct interval_tree_node, rb);

		if (node->start < iter_node->start)
			iter = &(*iter)->rb_left;
		else
			iter = &(*iter)->rb_right;
	}

	rb_link_node(&node->rb, parent, iter);
	rb_insert_color_cached(&node->rb, root, false);
}