1 /*	$OpenBSD: drm_linux.c,v 1.84 2021/08/11 16:14:00 sthen Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30 
31 #include <dev/pci/ppbreg.h>
32 
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/dma-fence-chain.h>
39 #include <linux/interrupt.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/scatterlist.h>
43 #include <linux/i2c.h>
44 #include <linux/pci.h>
45 #include <linux/notifier.h>
46 #include <linux/backlight.h>
47 #include <linux/shrinker.h>
48 #include <linux/fb.h>
49 #include <linux/xarray.h>
50 #include <linux/interval_tree.h>
51 #include <linux/kthread.h>
52 
53 #include <drm/drm_device.h>
54 #include <drm/drm_print.h>
55 
56 #if defined(__amd64__) || defined(__i386__)
57 #include "bios.h"
58 #endif
59 
60 void
61 tasklet_run(void *arg)
62 {
63 	struct tasklet_struct *ts = arg;
64 
65 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
66 	if (tasklet_trylock(ts)) {
67 		if (!atomic_read(&ts->count))
68 			ts->func(ts->data);
69 		tasklet_unlock(ts);
70 	}
71 }
72 
73 /* 32 bit powerpc lacks 64 bit atomics */
74 #if defined(__powerpc__) && !defined(__powerpc64__)
75 struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
76 #endif
77 
78 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
79 volatile struct proc *sch_proc;
80 volatile void *sch_ident;
81 int sch_priority;
82 
83 void
84 set_current_state(int state)
85 {
86 	if (sch_ident != curproc)
87 		mtx_enter(&sch_mtx);
88 	MUTEX_ASSERT_LOCKED(&sch_mtx);
89 	sch_ident = sch_proc = curproc;
90 	sch_priority = state;
91 }
92 
93 void
94 __set_current_state(int state)
95 {
96 	KASSERT(state == TASK_RUNNING);
97 	if (sch_ident == curproc) {
98 		MUTEX_ASSERT_LOCKED(&sch_mtx);
99 		sch_ident = NULL;
100 		mtx_leave(&sch_mtx);
101 	}
102 }
103 
104 void
105 schedule(void)
106 {
107 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
108 }
109 
110 long
111 schedule_timeout(long timeout)
112 {
113 	struct sleep_state sls;
114 	unsigned long deadline;
115 	int wait, spl, timo = 0;
116 
117 	MUTEX_ASSERT_LOCKED(&sch_mtx);
118 	KASSERT(!cold);
119 
120 	if (timeout != MAX_SCHEDULE_TIMEOUT)
121 		timo = timeout;
122 	sleep_setup(&sls, sch_ident, sch_priority, "schto", timo);
123 
124 	wait = (sch_proc == curproc && timeout > 0);
125 
126 	spl = MUTEX_OLDIPL(&sch_mtx);
127 	MUTEX_OLDIPL(&sch_mtx) = splsched();
128 	mtx_leave(&sch_mtx);
129 
130 	if (timeout != MAX_SCHEDULE_TIMEOUT)
131 		deadline = jiffies + timeout;
132 	sleep_finish(&sls, wait);
133 	if (timeout != MAX_SCHEDULE_TIMEOUT)
134 		timeout = deadline - jiffies;
135 
136 	mtx_enter(&sch_mtx);
137 	MUTEX_OLDIPL(&sch_mtx) = spl;
138 	sch_ident = curproc;
139 
140 	return timeout > 0 ? timeout : 0;
141 }
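
/*
 * A minimal sketch of the Linux-style wait loop the helpers above emulate;
 * "done" is a hypothetical condition owned by the caller:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (done)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The waking side sets the condition and calls wake_up_process() on the
 * sleeping proc.
 */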
142 
143 long
144 schedule_timeout_uninterruptible(long timeout)
145 {
146 	tsleep(curproc, PWAIT, "schtou", timeout);
147 	return 0;
148 }
149 
150 int
151 wake_up_process(struct proc *p)
152 {
153 	atomic_cas_ptr(&sch_proc, p, NULL);
154 	return wakeup_proc(p, NULL);
155 }
156 
157 void
158 flush_workqueue(struct workqueue_struct *wq)
159 {
160 	if (cold)
161 		return;
162 
163 	if (wq)
164 		taskq_barrier((struct taskq *)wq);
165 }
166 
167 bool
168 flush_work(struct work_struct *work)
169 {
170 	if (cold)
171 		return false;
172 
173 	if (work->tq)
174 		taskq_barrier(work->tq);
175 	return false;
176 }
177 
178 bool
179 flush_delayed_work(struct delayed_work *dwork)
180 {
181 	bool ret = false;
182 
183 	if (cold)
184 		return false;
185 
186 	while (timeout_pending(&dwork->to)) {
187 		tsleep(dwork, PWAIT, "fldwto", 1);
188 		ret = true;
189 	}
190 
191 	if (dwork->tq)
192 		taskq_barrier(dwork->tq);
193 	return ret;
194 }
195 
196 struct kthread {
197 	int (*func)(void *);
198 	void *data;
199 	struct proc *proc;
200 	volatile u_int flags;
201 #define KTHREAD_SHOULDSTOP	0x0000001
202 #define KTHREAD_STOPPED		0x0000002
203 #define KTHREAD_SHOULDPARK	0x0000004
204 #define KTHREAD_PARKED		0x0000008
205 	LIST_ENTRY(kthread) next;
206 };
207 
208 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
209 
210 void
211 kthread_func(void *arg)
212 {
213 	struct kthread *thread = arg;
214 	int ret;
215 
216 	ret = thread->func(thread->data);
217 	thread->flags |= KTHREAD_STOPPED;
218 	wakeup(thread);
219 	kthread_exit(ret);
220 }
221 
222 struct proc *
223 kthread_run(int (*func)(void *), void *data, const char *name)
224 {
225 	struct kthread *thread;
226 
227 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
228 	thread->func = func;
229 	thread->data = data;
230 	thread->flags = 0;
231 
232 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
233 		free(thread, M_DRM, sizeof(*thread));
234 		return ERR_PTR(-ENOMEM);
235 	}
236 
237 	LIST_INSERT_HEAD(&kthread_list, thread, next);
238 	return thread->proc;
239 }
240 
241 struct kthread_worker *
242 kthread_create_worker(unsigned int flags, const char *fmt, ...)
243 {
244 	char name[MAXCOMLEN+1];
245 	va_list ap;
246 
247 	struct kthread_worker *w = malloc(sizeof(*w), M_DRM, M_WAITOK);
248 	va_start(ap, fmt);
249 	vsnprintf(name, sizeof(name), fmt, ap);
250 	va_end(ap);
251 	w->tq = taskq_create(name, 1, IPL_HIGH, 0);
252 
253 	return w;
254 }
255 
256 void
257 kthread_destroy_worker(struct kthread_worker *worker)
258 {
259 	taskq_destroy(worker->tq);
260 	free(worker, M_DRM, sizeof(*worker));
261 
262 }
263 
264 void
265 kthread_init_work(struct kthread_work *work, void (*func)(struct kthread_work *))
266 {
267 	work->tq = NULL;
268 	task_set(&work->task, (void (*)(void *))func, work);
269 }
270 
271 bool
272 kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
273 {
274 	work->tq = worker->tq;
275 	return task_add(work->tq, &work->task);
276 }
277 
278 bool
279 kthread_cancel_work_sync(struct kthread_work *work)
280 {
281 	return task_del(work->tq, &work->task);
282 }
283 
284 void
285 kthread_flush_work(struct kthread_work *work)
286 {
287 	if (cold)
288 		return;
289 
290 	if (work->tq)
291 		taskq_barrier(work->tq);
292 }
293 
294 void
295 kthread_flush_worker(struct kthread_worker *worker)
296 {
297 	if (cold)
298 		return;
299 
300 	if (worker->tq)
301 		taskq_barrier(worker->tq);
302 }
303 
304 struct kthread *
305 kthread_lookup(struct proc *p)
306 {
307 	struct kthread *thread;
308 
309 	LIST_FOREACH(thread, &kthread_list, next) {
310 		if (thread->proc == p)
311 			break;
312 	}
313 	KASSERT(thread);
314 
315 	return thread;
316 }
317 
318 int
319 kthread_should_park(void)
320 {
321 	struct kthread *thread = kthread_lookup(curproc);
322 	return (thread->flags & KTHREAD_SHOULDPARK);
323 }
324 
325 void
326 kthread_parkme(void)
327 {
328 	struct kthread *thread = kthread_lookup(curproc);
329 
330 	while (thread->flags & KTHREAD_SHOULDPARK) {
331 		thread->flags |= KTHREAD_PARKED;
332 		wakeup(thread);
333 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
334 		thread->flags &= ~KTHREAD_PARKED;
335 	}
336 }
337 
338 void
339 kthread_park(struct proc *p)
340 {
341 	struct kthread *thread = kthread_lookup(p);
342 
343 	while ((thread->flags & KTHREAD_PARKED) == 0) {
344 		thread->flags |= KTHREAD_SHOULDPARK;
345 		wake_up_process(thread->proc);
346 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
347 	}
348 }
349 
350 void
351 kthread_unpark(struct proc *p)
352 {
353 	struct kthread *thread = kthread_lookup(p);
354 
355 	thread->flags &= ~KTHREAD_SHOULDPARK;
356 	wakeup(thread);
357 }
358 
359 int
360 kthread_should_stop(void)
361 {
362 	struct kthread *thread = kthread_lookup(curproc);
363 	return (thread->flags & KTHREAD_SHOULDSTOP);
364 }
365 
366 void
367 kthread_stop(struct proc *p)
368 {
369 	struct kthread *thread = kthread_lookup(p);
370 
371 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
372 		thread->flags |= KTHREAD_SHOULDSTOP;
373 		kthread_unpark(p);
374 		wake_up_process(thread->proc);
375 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
376 	}
377 	LIST_REMOVE(thread, next);
378 	free(thread, M_DRM, sizeof(*thread));
379 }
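
/*
 * A minimal sketch of the thread body shape this park/stop machinery
 * expects; "example_thread" and its argument are hypothetical:
 *
 *	int
 *	example_thread(void *arg)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			(do one unit of work, then sleep until woken)
 *		}
 *		return 0;
 *	}
 *
 * Such a thread is started with kthread_run(example_thread, arg, "name")
 * and torn down from another context with kthread_park(), kthread_unpark()
 * and finally kthread_stop().
 */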
380 
381 #if NBIOS > 0
382 extern char smbios_board_vendor[];
383 extern char smbios_board_prod[];
384 extern char smbios_board_serial[];
385 #endif
386 
387 bool
388 dmi_match(int slot, const char *str)
389 {
390 	switch (slot) {
391 	case DMI_SYS_VENDOR:
392 		if (hw_vendor != NULL &&
393 		    !strcmp(hw_vendor, str))
394 			return true;
395 		break;
396 	case DMI_PRODUCT_NAME:
397 		if (hw_prod != NULL &&
398 		    !strcmp(hw_prod, str))
399 			return true;
400 		break;
401 	case DMI_PRODUCT_VERSION:
402 		if (hw_ver != NULL &&
403 		    !strcmp(hw_ver, str))
404 			return true;
405 		break;
406 #if NBIOS > 0
407 	case DMI_BOARD_VENDOR:
408 		if (strcmp(smbios_board_vendor, str) == 0)
409 			return true;
410 		break;
411 	case DMI_BOARD_NAME:
412 		if (strcmp(smbios_board_prod, str) == 0)
413 			return true;
414 		break;
415 	case DMI_BOARD_SERIAL:
416 		if (strcmp(smbios_board_serial, str) == 0)
417 			return true;
418 		break;
419 #else
420 	case DMI_BOARD_VENDOR:
421 		if (hw_vendor != NULL &&
422 		    !strcmp(hw_vendor, str))
423 			return true;
424 		break;
425 	case DMI_BOARD_NAME:
426 		if (hw_prod != NULL &&
427 		    !strcmp(hw_prod, str))
428 			return true;
429 		break;
430 #endif
431 	case DMI_NONE:
432 	default:
433 		return false;
434 	}
435 
436 	return false;
437 }
438 
439 static bool
440 dmi_found(const struct dmi_system_id *dsi)
441 {
442 	int i, slot;
443 
444 	for (i = 0; i < nitems(dsi->matches); i++) {
445 		slot = dsi->matches[i].slot;
446 		if (slot == DMI_NONE)
447 			break;
448 		if (!dmi_match(slot, dsi->matches[i].substr))
449 			return false;
450 	}
451 
452 	return true;
453 }
454 
455 const struct dmi_system_id *
456 dmi_first_match(const struct dmi_system_id *sysid)
457 {
458 	const struct dmi_system_id *dsi;
459 
460 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
461 		if (dmi_found(dsi))
462 			return dsi;
463 	}
464 
465 	return NULL;
466 }
467 
468 #if NBIOS > 0
469 extern char smbios_bios_date[];
470 #endif
471 
472 const char *
473 dmi_get_system_info(int slot)
474 {
475 	WARN_ON(slot != DMI_BIOS_DATE);
476 #if NBIOS > 0
477 	if (slot == DMI_BIOS_DATE)
478 		return smbios_bios_date;
479 #endif
480 	return NULL;
481 }
482 
483 int
484 dmi_check_system(const struct dmi_system_id *sysid)
485 {
486 	const struct dmi_system_id *dsi;
487 	int num = 0;
488 
489 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
490 		if (dmi_found(dsi)) {
491 			num++;
492 			if (dsi->callback && dsi->callback(dsi))
493 				break;
494 		}
495 	}
496 	return (num);
497 }
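
/*
 * A minimal sketch of the quirk tables these DMI helpers are fed; the
 * entries and example_callback below are made up for illustration:
 *
 *	static const struct dmi_system_id example_quirks[] = {
 *		{
 *			.callback = example_callback,
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
 *				DMI_MATCH(DMI_PRODUCT_NAME, "Example 13"),
 *			},
 *		},
 *		{ }
 *	};
 *
 * dmi_check_system(example_quirks) then returns the number of entries
 * whose match slots all compare equal, running each callback as it goes.
 */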
498 
499 struct vm_page *
500 alloc_pages(unsigned int gfp_mask, unsigned int order)
501 {
502 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
503 	struct uvm_constraint_range *constraint = &no_constraint;
504 	struct pglist mlist;
505 
506 	if (gfp_mask & M_CANFAIL)
507 		flags |= UVM_PLA_FAILOK;
508 	if (gfp_mask & M_ZERO)
509 		flags |= UVM_PLA_ZERO;
510 	if (gfp_mask & __GFP_DMA32)
511 		constraint = &dma_constraint;
512 
513 	TAILQ_INIT(&mlist);
514 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
515 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
516 		return NULL;
517 	return TAILQ_FIRST(&mlist);
518 }
519 
520 void
521 __free_pages(struct vm_page *page, unsigned int order)
522 {
523 	struct pglist mlist;
524 	int i;
525 
526 	TAILQ_INIT(&mlist);
527 	for (i = 0; i < (1 << order); i++)
528 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
529 	uvm_pglistfree(&mlist);
530 }
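
/*
 * A minimal sketch of the allocation pairing, assuming the usual compat
 * gfp flags; "order" selects 1 << order physically contiguous pages:
 *
 *	struct vm_page *page;
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 *	if (page != NULL) {
 *		(use the pages)
 *		__free_pages(page, order);
 *	}
 */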
531 
532 void
533 __pagevec_release(struct pagevec *pvec)
534 {
535 	struct pglist mlist;
536 	int i;
537 
538 	TAILQ_INIT(&mlist);
539 	for (i = 0; i < pvec->nr; i++)
540 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
541 	uvm_pglistfree(&mlist);
542 	pagevec_reinit(pvec);
543 }
544 
545 void *
546 kmap(struct vm_page *pg)
547 {
548 	vaddr_t va;
549 
550 #if defined (__HAVE_PMAP_DIRECT)
551 	va = pmap_map_direct(pg);
552 #else
553 	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
554 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
555 	pmap_update(pmap_kernel());
556 #endif
557 	return (void *)va;
558 }
559 
560 void
561 kunmap_va(void *addr)
562 {
563 	vaddr_t va = (vaddr_t)addr;
564 
565 #if defined (__HAVE_PMAP_DIRECT)
566 	pmap_unmap_direct(va);
567 #else
568 	pmap_kremove(va, PAGE_SIZE);
569 	pmap_update(pmap_kernel());
570 	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
571 #endif
572 }
573 
574 vaddr_t kmap_atomic_va;
575 int kmap_atomic_inuse;
576 
577 void *
578 kmap_atomic_prot(struct vm_page *pg, pgprot_t prot)
579 {
580 	KASSERT(!kmap_atomic_inuse);
581 
582 	kmap_atomic_inuse = 1;
583 	pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot,
584 	    PROT_READ | PROT_WRITE);
585 	return (void *)kmap_atomic_va;
586 }
587 
588 void
589 kunmap_atomic(void *addr)
590 {
591 	KASSERT(kmap_atomic_inuse);
592 
593 	pmap_kremove(kmap_atomic_va, PAGE_SIZE);
594 	kmap_atomic_inuse = 0;
595 }
596 
597 void *
598 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
599      pgprot_t prot)
600 {
601 	vaddr_t va;
602 	paddr_t pa;
603 	int i;
604 
605 	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
606 	if (va == 0)
607 		return NULL;
608 	for (i = 0; i < npages; i++) {
609 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
610 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
611 		    PROT_READ | PROT_WRITE,
612 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
613 		pmap_update(pmap_kernel());
614 	}
615 
616 	return (void *)va;
617 }
618 
619 void
620 vunmap(void *addr, size_t size)
621 {
622 	vaddr_t va = (vaddr_t)addr;
623 
624 	pmap_remove(pmap_kernel(), va, va + size);
625 	pmap_update(pmap_kernel());
626 	uvm_km_free(kernel_map, va, size);
627 }
628 
629 bool
630 is_vmalloc_addr(const void *p)
631 {
632 	vaddr_t min, max, addr;
633 
634 	min = vm_map_min(kernel_map);
635 	max = vm_map_max(kernel_map);
636 	addr = (vaddr_t)p;
637 
638 	if (addr >= min && addr <= max)
639 		return true;
640 	else
641 		return false;
642 }
643 
644 void
645 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
646     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
647 {
648 	const uint8_t *cbuf = buf;
649 	int i;
650 
651 	for (i = 0; i < len; i++) {
652 		if ((i % rowsize) == 0)
653 			printf("%s", prefix_str);
654 		printf("%02x", cbuf[i]);
655 		if ((i % rowsize) == (rowsize - 1))
656 			printf("\n");
657 		else
658 			printf(" ");
659 	}
660 }
661 
662 void *
663 memchr_inv(const void *s, int c, size_t n)
664 {
665 	if (n != 0) {
666 		const unsigned char *p = s;
667 
668 		do {
669 			if (*p++ != (unsigned char)c)
670 				return ((void *)(p - 1));
671 		} while (--n != 0);
672 	}
673 	return (NULL);
674 }
675 
676 int
677 panic_cmp(struct rb_node *a, struct rb_node *b)
678 {
679 	panic(__func__);
680 }
681 
682 #undef RB_ROOT
683 #define RB_ROOT(head)	(head)->rbh_root
684 
685 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
686 
687 /*
688  * This is a fairly minimal implementation of the Linux "idr" API.  It
689  * probably isn't very efficient, and definitely isn't RCU safe.  The
690  * pre-load buffer is global instead of per-cpu; we rely on the kernel
691  * lock to make this work.  We do randomize our IDs in order to make
692  * them harder to guess.
693  */
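
/*
 * A minimal usage sketch under those constraints; "some_idr" and "obj"
 * stand in for caller state:
 *
 *	int handle;
 *
 *	handle = idr_alloc(&some_idr, obj, 1, 0, GFP_KERNEL);
 *	if (handle >= 0) {
 *		KASSERT(idr_find(&some_idr, handle) == obj);
 *		...
 *		idr_remove(&some_idr, handle);
 *	}
 */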
694 
695 int idr_cmp(struct idr_entry *, struct idr_entry *);
696 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
697 
698 struct pool idr_pool;
699 struct idr_entry *idr_entry_cache;
700 
701 void
702 idr_init(struct idr *idr)
703 {
704 	SPLAY_INIT(&idr->tree);
705 }
706 
707 void
708 idr_destroy(struct idr *idr)
709 {
710 	struct idr_entry *id;
711 
712 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
713 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
714 		pool_put(&idr_pool, id);
715 	}
716 }
717 
718 void
719 idr_preload(unsigned int gfp_mask)
720 {
721 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
722 
723 	KERNEL_ASSERT_LOCKED();
724 
725 	if (idr_entry_cache == NULL)
726 		idr_entry_cache = pool_get(&idr_pool, flags);
727 }
728 
729 int
730 idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
731 {
732 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
733 	struct idr_entry *id;
734 	int begin;
735 
736 	KERNEL_ASSERT_LOCKED();
737 
738 	if (idr_entry_cache) {
739 		id = idr_entry_cache;
740 		idr_entry_cache = NULL;
741 	} else {
742 		id = pool_get(&idr_pool, flags);
743 		if (id == NULL)
744 			return -ENOMEM;
745 	}
746 
747 	if (end <= 0)
748 		end = INT_MAX;
749 
750 #ifdef notyet
751 	id->id = begin = start + arc4random_uniform(end - start);
752 #else
753 	id->id = begin = start;
754 #endif
755 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
756 		if (id->id == end)
757 			id->id = start;
758 		else
759 			id->id++;
760 		if (id->id == begin) {
761 			pool_put(&idr_pool, id);
762 			return -ENOSPC;
763 		}
764 	}
765 	id->ptr = ptr;
766 	return id->id;
767 }
768 
769 void *
770 idr_replace(struct idr *idr, void *ptr, unsigned long id)
771 {
772 	struct idr_entry find, *res;
773 	void *old;
774 
775 	find.id = id;
776 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
777 	if (res == NULL)
778 		return ERR_PTR(-ENOENT);
779 	old = res->ptr;
780 	res->ptr = ptr;
781 	return old;
782 }
783 
784 void *
785 idr_remove(struct idr *idr, unsigned long id)
786 {
787 	struct idr_entry find, *res;
788 	void *ptr = NULL;
789 
790 	find.id = id;
791 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
792 	if (res) {
793 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
794 		ptr = res->ptr;
795 		pool_put(&idr_pool, res);
796 	}
797 	return ptr;
798 }
799 
800 void *
801 idr_find(struct idr *idr, unsigned long id)
802 {
803 	struct idr_entry find, *res;
804 
805 	find.id = id;
806 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
807 	if (res == NULL)
808 		return NULL;
809 	return res->ptr;
810 }
811 
812 void *
813 idr_get_next(struct idr *idr, int *id)
814 {
815 	struct idr_entry *res;
816 
817 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
818 		if (res->id >= *id) {
819 			*id = res->id;
820 			return res->ptr;
821 		}
822 	}
823 
824 	return NULL;
825 }
826 
827 int
828 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
829 {
830 	struct idr_entry *id;
831 	int ret;
832 
833 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
834 		ret = func(id->id, id->ptr, data);
835 		if (ret)
836 			return ret;
837 	}
838 
839 	return 0;
840 }
841 
842 int
843 idr_cmp(struct idr_entry *a, struct idr_entry *b)
844 {
845 	return (a->id < b->id ? -1 : a->id > b->id);
846 }
847 
848 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
849 
850 void
851 ida_init(struct ida *ida)
852 {
853 	idr_init(&ida->idr);
854 }
855 
856 void
857 ida_destroy(struct ida *ida)
858 {
859 	idr_destroy(&ida->idr);
860 }
861 
862 int
863 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
864     gfp_t gfp_mask)
865 {
866 	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
867 }
868 
869 void
870 ida_simple_remove(struct ida *ida, unsigned int id)
871 {
872 	idr_remove(&ida->idr, id);
873 }
874 
875 int
876 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
877 {
878 	return (a->id < b->id ? -1 : a->id > b->id);
879 }
880 
881 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
882 struct pool xa_pool;
883 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
884 
885 void
886 xa_init_flags(struct xarray *xa, gfp_t flags)
887 {
888 	static int initialized;
889 
890 	if (!initialized) {
891 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_TTY, 0,
892 		    "xapl", NULL);
893 		initialized = 1;
894 	}
895 	SPLAY_INIT(&xa->xa_tree);
	xa->xa_flags = flags;	/* xa_alloc() consults XA_FLAGS_ALLOC1 */
896 }
897 
898 void
899 xa_destroy(struct xarray *xa)
900 {
901 	struct xarray_entry *id;
902 
903 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
904 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
905 		pool_put(&xa_pool, id);
906 	}
907 }
908 
909 int
910 xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
911 {
912 	struct xarray_entry *xid;
913 	int flags = (gfp & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
914 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
915 	int begin;
916 
917 	xid = pool_get(&xa_pool, flags);
918 	if (xid == NULL)
919 		return -ENOMEM;
920 
921 	if (limit <= 0)
922 		limit = INT_MAX;
923 
924 	xid->id = begin = start;
925 
926 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
927 		if (xid->id == limit)
928 			xid->id = start;
929 		else
930 			xid->id++;
931 		if (xid->id == begin) {
932 			pool_put(&xa_pool, xid);
933 			return -EBUSY;
934 		}
935 	}
936 	xid->ptr = entry;
937 	*id = xid->id;
938 	return 0;
939 }
940 
941 void *
942 xa_erase(struct xarray *xa, unsigned long index)
943 {
944 	struct xarray_entry find, *res;
945 	void *ptr = NULL;
946 
947 	find.id = index;
948 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
949 	if (res) {
950 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
951 		ptr = res->ptr;
952 		pool_put(&xa_pool, res);
953 	}
954 	return ptr;
955 }
956 
957 void *
958 xa_load(struct xarray *xa, unsigned long index)
959 {
960 	struct xarray_entry find, *res;
961 
962 	find.id = index;
963 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
964 	if (res == NULL)
965 		return NULL;
966 	return res->ptr;
967 }
968 
969 void *
970 xa_get_next(struct xarray *xa, unsigned long *index)
971 {
972 	struct xarray_entry *res;
973 
974 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
975 		if (res->id >= *index) {
976 			*index = res->id;
977 			return res->ptr;
978 		}
979 	}
980 
981 	return NULL;
982 }
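
/*
 * A minimal usage sketch of this xarray subset; "xa" and "obj" stand in
 * for caller state, and a limit of 0 means "no upper bound" here:
 *
 *	u32 id;
 *
 *	xa_init_flags(&xa, XA_FLAGS_ALLOC1);
 *	if (xa_alloc(&xa, &id, obj, 0, GFP_KERNEL) == 0) {
 *		KASSERT(xa_load(&xa, id) == obj);
 *		xa_erase(&xa, id);
 *	}
 *	xa_destroy(&xa);
 */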
983 
984 int
985 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
986 {
987 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
988 	    M_DRM, gfp_mask | M_ZERO);
989 	if (table->sgl == NULL)
990 		return -ENOMEM;
991 	table->nents = table->orig_nents = nents;
992 	sg_mark_end(&table->sgl[nents - 1]);
993 	return 0;
994 }
995 
996 void
997 sg_free_table(struct sg_table *table)
998 {
999 	free(table->sgl, M_DRM,
1000 	    table->orig_nents * sizeof(struct scatterlist));
1001 	table->orig_nents = 0;
1002 	table->sgl = NULL;
1003 }
1004 
1005 size_t
1006 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
1007     const void *buf, size_t buflen)
1008 {
1009 	panic("%s", __func__);
1010 }
1011 
1012 int
1013 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1014 {
1015 	void *cmd = NULL;
1016 	int cmdlen = 0;
1017 	int err, ret = 0;
1018 	int op;
1019 
1020 	iic_acquire_bus(&adap->ic, 0);
1021 
1022 	while (num > 2) {
1023 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
1024 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
1025 		    msgs->buf, msgs->len, 0);
1026 		if (err) {
1027 			ret = -err;
1028 			goto fail;
1029 		}
1030 		msgs++;
1031 		num--;
1032 		ret++;
1033 	}
1034 
1035 	if (num > 1) {
1036 		cmd = msgs->buf;
1037 		cmdlen = msgs->len;
1038 		msgs++;
1039 		num--;
1040 		ret++;
1041 	}
1042 
1043 	op = (msgs->flags & I2C_M_RD) ?
1044 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
1045 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
1046 	    msgs->buf, msgs->len, 0);
1047 	if (err) {
1048 		ret = -err;
1049 		goto fail;
1050 	}
1051 	msgs++;
1052 	ret++;
1053 
1054 fail:
1055 	iic_release_bus(&adap->ic, 0);
1056 
1057 	return ret;
1058 }
1059 
1060 int
1061 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1062 {
1063 	int ret, retries;
1064 
1065 	if (adap->lock_ops)
1066 		adap->lock_ops->lock_bus(adap, 0);
1067 
1068 	retries = adap->retries;
1069 retry:
1070 	if (adap->algo)
1071 		ret = adap->algo->master_xfer(adap, msgs, num);
1072 	else
1073 		ret = i2c_master_xfer(adap, msgs, num);
1074 	if (ret == -EAGAIN && retries > 0) {
1075 		retries--;
1076 		goto retry;
1077 	}
1078 
1079 	if (adap->lock_ops)
1080 		adap->lock_ops->unlock_bus(adap, 0);
1081 
1082 	return ret;
1083 }
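
/*
 * A minimal sketch of the message layout i2c_transfer() consumes, here a
 * register read expressed as a write of the register address followed by
 * a read; the addresses and values are made up:
 *
 *	uint8_t reg = 0x10, val;
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
 *	};
 *
 *	i2c_transfer(adap, msgs, 2);
 *
 * With two messages, i2c_master_xfer() above turns the first into the
 * command phase and issues the second as a single I2C_OP_READ_WITH_STOP.
 */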
1084 
1085 int
1086 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1087 {
1088 	struct i2c_algo_bit_data *algo = adap->algo_data;
1089 	struct i2c_adapter bb;
1090 
1091 	memset(&bb, 0, sizeof(bb));
1092 	bb.ic = algo->ic;
1093 	bb.retries = adap->retries;
1094 	return i2c_master_xfer(&bb, msgs, num);
1095 }
1096 
1097 uint32_t
1098 i2c_bb_functionality(struct i2c_adapter *adap)
1099 {
1100 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1101 }
1102 
1103 struct i2c_algorithm i2c_bit_algo = {
1104 	.master_xfer = i2c_bb_master_xfer,
1105 	.functionality = i2c_bb_functionality
1106 };
1107 
1108 int
1109 i2c_bit_add_bus(struct i2c_adapter *adap)
1110 {
1111 	adap->algo = &i2c_bit_algo;
1112 	adap->retries = 3;
1113 
1114 	return 0;
1115 }
1116 
1117 #if defined(__amd64__) || defined(__i386__)
1118 
1119 /*
1120  * This is a minimal implementation of the Linux vga_get/vga_put
1121  * interface.  In all likelihood, it will only work for inteldrm(4) as
1122  * it assumes that if there is another active VGA device in the
1123  * system, it is sitting behind a PCI bridge.
1124  */
1125 
1126 extern int pci_enumerate_bus(struct pci_softc *,
1127     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1128 
1129 pcitag_t vga_bridge_tag;
1130 int vga_bridge_disabled;
1131 
1132 int
1133 vga_disable_bridge(struct pci_attach_args *pa)
1134 {
1135 	pcireg_t bhlc, bc;
1136 
1137 	if (pa->pa_domain != 0)
1138 		return 0;
1139 
1140 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1141 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1142 		return 0;
1143 
1144 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1145 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1146 		return 0;
1147 	bc &= ~PPB_BC_VGA_ENABLE;
1148 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1149 
1150 	vga_bridge_tag = pa->pa_tag;
1151 	vga_bridge_disabled = 1;
1152 
1153 	return 1;
1154 }
1155 
1156 void
1157 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1158 {
1159 	KASSERT(pdev->pci->sc_bridgetag == NULL);
1160 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1161 }
1162 
1163 void
1164 vga_put(struct pci_dev *pdev, int rsrc)
1165 {
1166 	pcireg_t bc;
1167 
1168 	if (!vga_bridge_disabled)
1169 		return;
1170 
1171 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1172 	bc |= PPB_BC_VGA_ENABLE;
1173 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1174 
1175 	vga_bridge_disabled = 0;
1176 }
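
/*
 * A minimal sketch of the intended calling pattern, bracketing accesses
 * that need legacy VGA decode; VGA_RSRC_LEGACY_IO is the Linux vgaarb
 * resource name, used here purely for illustration:
 *
 *	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 *	(touch legacy VGA resources)
 *	vga_put(pdev, VGA_RSRC_LEGACY_IO);
 *
 * The rsrc argument is ignored by this implementation; only the bridge's
 * VGA enable bit is toggled.
 */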
1177 
1178 #endif
1179 
1180 /*
1181  * ACPI types and interfaces.
1182  */
1183 
1184 #ifdef __HAVE_ACPI
1185 #include "acpi.h"
1186 #endif
1187 
1188 #if NACPI > 0
1189 
1190 #include <dev/acpi/acpireg.h>
1191 #include <dev/acpi/acpivar.h>
1192 #include <dev/acpi/amltypes.h>
1193 #include <dev/acpi/dsdt.h>
1194 
1195 acpi_status
1196 acpi_get_table(const char *sig, int instance,
1197     struct acpi_table_header **hdr)
1198 {
1199 	struct acpi_softc *sc = acpi_softc;
1200 	struct acpi_q *entry;
1201 
1202 	KASSERT(instance == 1);
1203 
1204 	if (sc == NULL)
1205 		return AE_NOT_FOUND;
1206 
1207 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1208 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1209 			*hdr = entry->q_table;
1210 			return 0;
1211 		}
1212 	}
1213 
1214 	return AE_NOT_FOUND;
1215 }
1216 
1217 acpi_status
1218 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1219 {
1220 	node = aml_searchname(node, name);
1221 	if (node == NULL)
1222 		return AE_NOT_FOUND;
1223 
1224 	*rnode = node;
1225 	return 0;
1226 }
1227 
1228 acpi_status
1229 acpi_get_name(acpi_handle node, int type,  struct acpi_buffer *buffer)
1230 {
1231 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1232 	KASSERT(type == ACPI_FULL_PATHNAME);
1233 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1234 	return 0;
1235 }
1236 
1237 acpi_status
1238 acpi_evaluate_object(acpi_handle node, const char *name,
1239     struct acpi_object_list *params, struct acpi_buffer *result)
1240 {
1241 	struct aml_value args[4], res;
1242 	union acpi_object *obj;
1243 	uint8_t *data;
1244 	int i;
1245 
1246 	KASSERT(params->count <= nitems(args));
1247 
1248 	for (i = 0; i < params->count; i++) {
1249 		args[i].type = params->pointer[i].type;
1250 		switch (args[i].type) {
1251 		case AML_OBJTYPE_INTEGER:
1252 			args[i].v_integer = params->pointer[i].integer.value;
1253 			break;
1254 		case AML_OBJTYPE_BUFFER:
1255 			args[i].length = params->pointer[i].buffer.length;
1256 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1257 			break;
1258 		default:
1259 			printf("%s: arg type 0x%02x\n", __func__, args[i].type);
1260 			return AE_BAD_PARAMETER;
1261 		}
1262 	}
1263 
1264 	if (name) {
1265 		node = aml_searchname(node, name);
1266 		if (node == NULL)
1267 			return AE_NOT_FOUND;
1268 	}
1269 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1270 		aml_freevalue(&res);
1271 		return AE_ERROR;
1272 	}
1273 
1274 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1275 
1276 	result->length = sizeof(union acpi_object);
1277 	switch (res.type) {
1278 	case AML_OBJTYPE_BUFFER:
1279 		result->length += res.length;
1280 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1281 		obj = (union acpi_object *)result->pointer;
1282 		data = (uint8_t *)(obj + 1);
1283 		obj->type = res.type;
1284 		obj->buffer.length = res.length;
1285 		obj->buffer.pointer = data;
1286 		memcpy(data, res.v_buffer, res.length);
1287 		break;
1288 	default:
1289 		printf("%s: return type 0x%02x\n", __func__, res.type);
1290 		aml_freevalue(&res);
1291 		return AE_ERROR;
1292 	}
1293 
1294 	aml_freevalue(&res);
1295 	return 0;
1296 }
1297 
1298 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1299 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1300 
1301 int
1302 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1303 {
1304 	struct acpi_bus_event event;
1305 	struct notifier_block *nb;
1306 
1307 	event.device_class = ACPI_VIDEO_CLASS;
1308 	event.type = notify;
1309 
1310 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1311 		nb->notifier_call(nb, 0, &event);
1312 	return 0;
1313 }
1314 
1315 int
1316 register_acpi_notifier(struct notifier_block *nb)
1317 {
1318 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1319 	return 0;
1320 }
1321 
1322 int
1323 unregister_acpi_notifier(struct notifier_block *nb)
1324 {
1325 	struct notifier_block *tmp;
1326 
1327 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1328 		if (tmp == nb) {
1329 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1330 			    notifier_block, link);
1331 			return 0;
1332 		}
1333 	}
1334 
1335 	return -ENOENT;
1336 }
1337 
1338 const char *
1339 acpi_format_exception(acpi_status status)
1340 {
1341 	switch (status) {
1342 	case AE_NOT_FOUND:
1343 		return "not found";
1344 	case AE_BAD_PARAMETER:
1345 		return "bad parameter";
1346 	default:
1347 		return "unknown";
1348 	}
1349 }
1350 
1351 #endif
1352 
1353 void
1354 backlight_do_update_status(void *arg)
1355 {
1356 	backlight_update_status(arg);
1357 }
1358 
1359 struct backlight_device *
1360 backlight_device_register(const char *name, void *kdev, void *data,
1361     const struct backlight_ops *ops, struct backlight_properties *props)
1362 {
1363 	struct backlight_device *bd;
1364 
1365 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1366 	bd->ops = ops;
1367 	bd->props = *props;
1368 	bd->data = data;
1369 
1370 	task_set(&bd->task, backlight_do_update_status, bd);
1371 
1372 	return bd;
1373 }
1374 
1375 void
1376 backlight_device_unregister(struct backlight_device *bd)
1377 {
1378 	free(bd, M_DRM, sizeof(*bd));
1379 }
1380 
1381 void
1382 backlight_schedule_update_status(struct backlight_device *bd)
1383 {
1384 	task_add(systq, &bd->task);
1385 }
1386 
1387 inline int
1388 backlight_enable(struct backlight_device *bd)
1389 {
1390 	if (bd == NULL)
1391 		return 0;
1392 
1393 	bd->props.power = FB_BLANK_UNBLANK;
1394 
1395 	return bd->ops->update_status(bd);
1396 }
1397 
1398 inline int
1399 backlight_disable(struct backlight_device *bd)
1400 {
1401 	if (bd == NULL)
1402 		return 0;
1403 
1404 	bd->props.power = FB_BLANK_POWERDOWN;
1405 
1406 	return bd->ops->update_status(bd);
1407 }
1408 
1409 void
1410 drm_sysfs_hotplug_event(struct drm_device *dev)
1411 {
1412 	KNOTE(&dev->note, NOTE_CHANGE);
1413 }
1414 
1415 struct dma_fence *
1416 dma_fence_get(struct dma_fence *fence)
1417 {
1418 	if (fence)
1419 		kref_get(&fence->refcount);
1420 	return fence;
1421 }
1422 
1423 struct dma_fence *
1424 dma_fence_get_rcu(struct dma_fence *fence)
1425 {
1426 	if (fence)
1427 		kref_get(&fence->refcount);
1428 	return fence;
1429 }
1430 
1431 struct dma_fence *
1432 dma_fence_get_rcu_safe(struct dma_fence **dfp)
1433 {
1434 	struct dma_fence *fence;
1435 	if (dfp == NULL)
1436 		return NULL;
1437 	fence = *dfp;
1438 	if (fence)
1439 		kref_get(&fence->refcount);
1440 	return fence;
1441 }
1442 
1443 void
1444 dma_fence_release(struct kref *ref)
1445 {
1446 	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
1447 	if (fence->ops && fence->ops->release)
1448 		fence->ops->release(fence);
1449 	else
1450 		free(fence, M_DRM, 0);
1451 }
1452 
1453 void
1454 dma_fence_put(struct dma_fence *fence)
1455 {
1456 	if (fence)
1457 		kref_put(&fence->refcount, dma_fence_release);
1458 }
1459 
1460 int
1461 dma_fence_signal_locked(struct dma_fence *fence)
1462 {
1463 	struct dma_fence_cb *cur, *tmp;
1464 	struct list_head cb_list;
1465 
1466 	if (fence == NULL)
1467 		return -EINVAL;
1468 
1469 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1470 		return -EINVAL;
1471 
1472 	list_replace(&fence->cb_list, &cb_list);
1473 
1474 	fence->timestamp = ktime_get();
1475 	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
1476 
1477 	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
1478 		INIT_LIST_HEAD(&cur->node);
1479 		cur->func(fence, cur);
1480 	}
1481 
1482 	return 0;
1483 }
1484 
1485 int
1486 dma_fence_signal(struct dma_fence *fence)
1487 {
1488 	int r;
1489 
1490 	if (fence == NULL)
1491 		return -EINVAL;
1492 
1493 	mtx_enter(fence->lock);
1494 	r = dma_fence_signal_locked(fence);
1495 	mtx_leave(fence->lock);
1496 
1497 	return r;
1498 }
1499 
1500 bool
1501 dma_fence_is_signaled(struct dma_fence *fence)
1502 {
1503 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1504 		return true;
1505 
1506 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1507 		dma_fence_signal(fence);
1508 		return true;
1509 	}
1510 
1511 	return false;
1512 }
1513 
1514 bool
1515 dma_fence_is_signaled_locked(struct dma_fence *fence)
1516 {
1517 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1518 		return true;
1519 
1520 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1521 		dma_fence_signal_locked(fence);
1522 		return true;
1523 	}
1524 
1525 	return false;
1526 }
1527 
1528 long
1529 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
1530 {
1531 	if (timeout < 0)
1532 		return -EINVAL;
1533 
1534 	if (fence->ops->wait)
1535 		return fence->ops->wait(fence, intr, timeout);
1536 	else
1537 		return dma_fence_default_wait(fence, intr, timeout);
1538 }
1539 
1540 long
1541 dma_fence_wait(struct dma_fence *fence, bool intr)
1542 {
1543 	long ret;
1544 
1545 	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
1546 	if (ret < 0)
1547 		return ret;
1548 
1549 	return 0;
1550 }
1551 
1552 void
1553 dma_fence_enable_sw_signaling(struct dma_fence *fence)
1554 {
1555 	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
1556 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
1557 	    fence->ops->enable_signaling) {
1558 		mtx_enter(fence->lock);
1559 		if (!fence->ops->enable_signaling(fence))
1560 			dma_fence_signal_locked(fence);
1561 		mtx_leave(fence->lock);
1562 	}
1563 }
1564 
1565 void
1566 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
1567     struct mutex *lock, uint64_t context, uint64_t seqno)
1568 {
1569 	fence->ops = ops;
1570 	fence->lock = lock;
1571 	fence->context = context;
1572 	fence->seqno = seqno;
1573 	fence->flags = 0;
1574 	fence->error = 0;
1575 	kref_init(&fence->refcount);
1576 	INIT_LIST_HEAD(&fence->cb_list);
1577 }
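
/*
 * A minimal sketch of the fence lifecycle built from the routines above;
 * "example_ops" and "example_mtx" are placeholders for driver-owned state:
 *
 *	struct dma_fence *f = malloc(sizeof(*f), M_DRM, M_WAITOK | M_ZERO);
 *
 *	dma_fence_init(f, &example_ops, &example_mtx,
 *	    dma_fence_context_alloc(1), 1);
 *	(hand out references with dma_fence_get(), wait with dma_fence_wait())
 *	dma_fence_signal(f);
 *	dma_fence_put(f);
 */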
1578 
1579 int
1580 dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
1581     dma_fence_func_t func)
1582 {
1583 	int ret = 0;
1584 	bool was_set;
1585 
1586 	if (WARN_ON(!fence || !func))
1587 		return -EINVAL;
1588 
1589 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1590 		INIT_LIST_HEAD(&cb->node);
1591 		return -ENOENT;
1592 	}
1593 
1594 	mtx_enter(fence->lock);
1595 
1596 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
1597 
1598 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1599 		ret = -ENOENT;
1600 	else if (!was_set && fence->ops->enable_signaling) {
1601 		if (!fence->ops->enable_signaling(fence)) {
1602 			dma_fence_signal_locked(fence);
1603 			ret = -ENOENT;
1604 		}
1605 	}
1606 
1607 	if (!ret) {
1608 		cb->func = func;
1609 		list_add_tail(&cb->node, &fence->cb_list);
1610 	} else
1611 		INIT_LIST_HEAD(&cb->node);
1612 	mtx_leave(fence->lock);
1613 
1614 	return ret;
1615 }
1616 
1617 bool
1618 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
1619 {
1620 	bool ret;
1621 
1622 	mtx_enter(fence->lock);
1623 
1624 	ret = !list_empty(&cb->node);
1625 	if (ret)
1626 		list_del_init(&cb->node);
1627 
1628 	mtx_leave(fence->lock);
1629 
1630 	return ret;
1631 }
1632 
1633 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1634 
1635 uint64_t
1636 dma_fence_context_alloc(unsigned int num)
1637 {
1638 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1639 }
1640 
1641 struct default_wait_cb {
1642 	struct dma_fence_cb base;
1643 	struct proc *proc;
1644 };
1645 
1646 static void
1647 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1648 {
1649 	struct default_wait_cb *wait =
1650 	    container_of(cb, struct default_wait_cb, base);
1651 	wake_up_process(wait->proc);
1652 }
1653 
1654 long
1655 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1656 {
1657 	long ret = timeout ? timeout : 1;
1658 	unsigned long end;
1659 	int err;
1660 	struct default_wait_cb cb;
1661 	bool was_set;
1662 
1663 	KASSERT(timeout <= INT_MAX);
1664 
1665 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1666 		return ret;
1667 
1668 	mtx_enter(fence->lock);
1669 
1670 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1671 	    &fence->flags);
1672 
1673 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1674 		goto out;
1675 
1676 	if (!was_set && fence->ops->enable_signaling) {
1677 		if (!fence->ops->enable_signaling(fence)) {
1678 			dma_fence_signal_locked(fence);
1679 			goto out;
1680 		}
1681 	}
1682 
1683 	if (timeout == 0) {
1684 		ret = 0;
1685 		goto out;
1686 	}
1687 
1688 	cb.base.func = dma_fence_default_wait_cb;
1689 	cb.proc = curproc;
1690 	list_add(&cb.base.node, &fence->cb_list);
1691 
1692 	end = jiffies + timeout;
1693 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1694 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1695 			break;
1696 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
1697 		    "dmafence", ret);
1698 		if (err == EINTR || err == ERESTART) {
1699 			ret = -ERESTARTSYS;
1700 			break;
1701 		}
1702 	}
1703 
1704 	if (!list_empty(&cb.base.node))
1705 		list_del(&cb.base.node);
1706 out:
1707 	mtx_leave(fence->lock);
1708 
1709 	return ret;
1710 }
1711 
1712 static bool
1713 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1714     uint32_t *idx)
1715 {
1716 	int i;
1717 
1718 	for (i = 0; i < count; ++i) {
1719 		struct dma_fence *fence = fences[i];
1720 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1721 			if (idx)
1722 				*idx = i;
1723 			return true;
1724 		}
1725 	}
1726 	return false;
1727 }
1728 
1729 long
1730 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
1731     bool intr, long timeout, uint32_t *idx)
1732 {
1733 	struct default_wait_cb *cb;
1734 	long ret = timeout;
1735 	unsigned long end;
1736 	int i, err;
1737 
1738 	KASSERT(timeout <= INT_MAX);
1739 
1740 	if (timeout == 0) {
1741 		for (i = 0; i < count; i++) {
1742 			if (dma_fence_is_signaled(fences[i])) {
1743 				if (idx)
1744 					*idx = i;
1745 				return 1;
1746 			}
1747 		}
1748 		return 0;
1749 	}
1750 
1751 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1752 	if (cb == NULL)
1753 		return -ENOMEM;
1754 
1755 	for (i = 0; i < count; i++) {
1756 		struct dma_fence *fence = fences[i];
1757 		cb[i].proc = curproc;
1758 		if (dma_fence_add_callback(fence, &cb[i].base,
1759 		    dma_fence_default_wait_cb)) {
1760 			if (idx)
1761 				*idx = i;
1762 			goto cb_cleanup;
1763 		}
1764 	}
1765 
1766 	end = jiffies + timeout;
1767 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1768 		if (dma_fence_test_signaled_any(fences, count, idx))
1769 			break;
1770 		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
1771 		if (err == EINTR || err == ERESTART) {
1772 			ret = -ERESTARTSYS;
1773 			break;
1774 		}
1775 	}
1776 
1777 cb_cleanup:
1778 	while (i-- > 0)
1779 		dma_fence_remove_callback(fences[i], &cb[i].base);
1780 	free(cb, M_DRM, count * sizeof(*cb));
1781 	return ret;
1782 }
1783 
1784 static struct dma_fence dma_fence_stub;
1785 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
1786 
1787 static const char *
1788 dma_fence_stub_get_name(struct dma_fence *fence)
1789 {
1790 	return "stub";
1791 }
1792 
1793 static const struct dma_fence_ops dma_fence_stub_ops = {
1794 	.get_driver_name = dma_fence_stub_get_name,
1795 	.get_timeline_name = dma_fence_stub_get_name,
1796 };
1797 
1798 struct dma_fence *
1799 dma_fence_get_stub(void)
1800 {
1801 	mtx_enter(&dma_fence_stub_mtx);
1802 	if (dma_fence_stub.ops == NULL) {
1803 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
1804 		    &dma_fence_stub_mtx, 0, 0);
1805 		dma_fence_signal_locked(&dma_fence_stub);
1806 	}
1807 	mtx_leave(&dma_fence_stub_mtx);
1808 
1809 	return dma_fence_get(&dma_fence_stub);
1810 }
1811 
1812 static const char *
1813 dma_fence_array_get_driver_name(struct dma_fence *fence)
1814 {
1815 	return "dma_fence_array";
1816 }
1817 
1818 static const char *
1819 dma_fence_array_get_timeline_name(struct dma_fence *fence)
1820 {
1821 	return "unbound";
1822 }
1823 
1824 static void
1825 irq_dma_fence_array_work(struct irq_work *wrk)
1826 {
1827 	struct dma_fence_array *dfa = container_of(wrk, typeof(*dfa), work);
1828 
1829 	dma_fence_signal(&dfa->base);
1830 	dma_fence_put(&dfa->base);
1831 }
1832 
1833 static void
1834 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
1835 {
1836 	struct dma_fence_array_cb *array_cb =
1837 	    container_of(cb, struct dma_fence_array_cb, cb);
1838 	struct dma_fence_array *dfa = array_cb->array;
1839 
1840 	if (atomic_dec_and_test(&dfa->num_pending))
1841 		irq_work_queue(&dfa->work);
1842 	else
1843 		dma_fence_put(&dfa->base);
1844 }
1845 
1846 static bool
1847 dma_fence_array_enable_signaling(struct dma_fence *fence)
1848 {
1849 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1850 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
1851 	int i;
1852 
1853 	for (i = 0; i < dfa->num_fences; ++i) {
1854 		cb[i].array = dfa;
1855 		dma_fence_get(&dfa->base);
1856 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
1857 		    dma_fence_array_cb_func)) {
1858 			dma_fence_put(&dfa->base);
1859 			if (atomic_dec_and_test(&dfa->num_pending))
1860 				return false;
1861 		}
1862 	}
1863 
1864 	return true;
1865 }
1866 
1867 static bool
1868 dma_fence_array_signaled(struct dma_fence *fence)
1869 {
1870 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1871 
1872 	return atomic_read(&dfa->num_pending) <= 0;
1873 }
1874 
1875 static void
1876 dma_fence_array_release(struct dma_fence *fence)
1877 {
1878 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1879 	int i;
1880 
1881 	for (i = 0; i < dfa->num_fences; ++i)
1882 		dma_fence_put(dfa->fences[i]);
1883 
1884 	free(dfa->fences, M_DRM, 0);
1885 	dma_fence_free(fence);
1886 }
1887 
1888 struct dma_fence_array *
1889 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
1890     unsigned seqno, bool signal_on_any)
1891 {
1892 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
1893 	    (num_fences * sizeof(struct dma_fence_array_cb)),
1894 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1895 	if (dfa == NULL)
1896 		return NULL;
1897 
1898 	mtx_init(&dfa->lock, IPL_TTY);
1899 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
1900 	    context, seqno);
1901 	init_irq_work(&dfa->work, irq_dma_fence_array_work);
1902 
1903 	dfa->num_fences = num_fences;
1904 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
1905 	dfa->fences = fences;
1906 
1907 	return dfa;
1908 }
1909 
1910 const struct dma_fence_ops dma_fence_array_ops = {
1911 	.get_driver_name = dma_fence_array_get_driver_name,
1912 	.get_timeline_name = dma_fence_array_get_timeline_name,
1913 	.enable_signaling = dma_fence_array_enable_signaling,
1914 	.signaled = dma_fence_array_signaled,
1915 	.release = dma_fence_array_release,
1916 };
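
/*
 * A minimal sketch of assembling a fence array; the fences pointer must be
 * an M_DRM allocation because dma_fence_array_release() frees it:
 *
 *	struct dma_fence **fences;
 *	struct dma_fence_array *dfa;
 *
 *	fences = mallocarray(n, sizeof(*fences), M_DRM, M_WAITOK);
 *	(store n fence references in fences[])
 *	dfa = dma_fence_array_create(n, fences,
 *	    dma_fence_context_alloc(1), 1, false);
 *	if (dfa != NULL)
 *		dma_fence_wait(&dfa->base, false);
 */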
1917 
1918 int
1919 dma_fence_chain_find_seqno(struct dma_fence **df, uint64_t seqno)
1920 {
1921 	if (seqno == 0)
1922 		return 0;
1923 	STUB();
1924 	return -ENOSYS;
1925 }
1926 
1927 void
1928 dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev,
1929     struct dma_fence *fence, uint64_t seqno)
1930 {
1931 	struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
1932 	uint64_t context;
1933 
1934 	chain->fence = fence;
1935 	chain->prev = prev;
1936 	mtx_init(&chain->lock, IPL_TTY);
1937 
1938 	if (prev_chain && seqno > prev->seqno) {
1939 		chain->prev_seqno = prev->seqno;
1940 		context = prev->context;
1941 	} else {
1942 		chain->prev_seqno = 0;
1943 		context = dma_fence_context_alloc(1);
1944 	}
1945 
1946 	dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock,
1947 	    context, seqno);
1948 }
1949 
1950 static const char *
1951 dma_fence_chain_get_driver_name(struct dma_fence *fence)
1952 {
1953 	return "dma_fence_chain";
1954 }
1955 
1956 static const char *
1957 dma_fence_chain_get_timeline_name(struct dma_fence *fence)
1958 {
1959 	return "unbound";
1960 }
1961 
1962 static bool
1963 dma_fence_chain_enable_signaling(struct dma_fence *fence)
1964 {
1965 	STUB();
1966 	return false;
1967 }
1968 
1969 static bool
1970 dma_fence_chain_signaled(struct dma_fence *fence)
1971 {
1972 	STUB();
1973 	return false;
1974 }
1975 
1976 static void
1977 dma_fence_chain_release(struct dma_fence *fence)
1978 {
1979 	STUB();
1980 }
1981 
1982 struct dma_fence *
1983 dma_fence_chain_next(struct dma_fence *fence)
1984 {
1985 	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
1986 
1987 	if (chain == NULL) {
1988 		dma_fence_put(fence);
1989 		return NULL;
1990 	}
1991 
1992 	STUB();
1993 	dma_fence_put(fence);
1994 	return NULL;
1995 }
1996 
1997 const struct dma_fence_ops dma_fence_chain_ops = {
1998 	.get_driver_name = dma_fence_chain_get_driver_name,
1999 	.get_timeline_name = dma_fence_chain_get_timeline_name,
2000 	.enable_signaling = dma_fence_chain_enable_signaling,
2001 	.signaled = dma_fence_chain_signaled,
2002 	.release = dma_fence_chain_release,
2003 };
2004 
2005 int
2006 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
2007 {
2008 	return (ENXIO);
2009 }
2010 
2011 int
2012 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
2013 {
2014 	return (ENXIO);
2015 }
2016 
2017 int
2018 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2019 {
2020 	return (ENOTTY);
2021 }
2022 
2023 int
2024 dmabuf_poll(struct file *fp, int events, struct proc *p)
2025 {
2026 	return (0);
2027 }
2028 
2029 int
2030 dmabuf_kqfilter(struct file *fp, struct knote *kn)
2031 {
2032 	return (EINVAL);
2033 }
2034 
2035 int
2036 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
2037 {
2038 	struct dma_buf *dmabuf = fp->f_data;
2039 
2040 	memset(st, 0, sizeof(*st));
2041 	st->st_size = dmabuf->size;
2042 	st->st_mode = S_IFIFO;	/* XXX */
2043 	return (0);
2044 }
2045 
2046 int
2047 dmabuf_close(struct file *fp, struct proc *p)
2048 {
2049 	struct dma_buf *dmabuf = fp->f_data;
2050 
2051 	fp->f_data = NULL;
2052 	KERNEL_LOCK();
2053 	dmabuf->ops->release(dmabuf);
2054 	KERNEL_UNLOCK();
2055 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
2056 	return (0);
2057 }
2058 
2059 int
2060 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2061 {
2062 	struct dma_buf *dmabuf = fp->f_data;
2063 	off_t newoff;
2064 
2065 	if (*offset != 0)
2066 		return (EINVAL);
2067 
2068 	switch (whence) {
2069 	case SEEK_SET:
2070 		newoff = 0;
2071 		break;
2072 	case SEEK_END:
2073 		newoff = dmabuf->size;
2074 		break;
2075 	default:
2076 		return (EINVAL);
2077 	}
2078 	mtx_enter(&fp->f_mtx);
2079 	fp->f_offset = newoff;
2080 	mtx_leave(&fp->f_mtx);
2081 	*offset = newoff;
2082 	return (0);
2083 }
2084 
2085 const struct fileops dmabufops = {
2086 	.fo_read	= dmabuf_read,
2087 	.fo_write	= dmabuf_write,
2088 	.fo_ioctl	= dmabuf_ioctl,
2089 	.fo_poll	= dmabuf_poll,
2090 	.fo_kqfilter	= dmabuf_kqfilter,
2091 	.fo_stat	= dmabuf_stat,
2092 	.fo_close	= dmabuf_close,
2093 	.fo_seek	= dmabuf_seek,
2094 };
2095 
2096 struct dma_buf *
2097 dma_buf_export(const struct dma_buf_export_info *info)
2098 {
2099 	struct proc *p = curproc;
2100 	struct dma_buf *dmabuf;
2101 	struct file *fp;
2102 
2103 	fp = fnew(p);
2104 	if (fp == NULL)
2105 		return ERR_PTR(-ENFILE);
2106 	fp->f_type = DTYPE_DMABUF;
2107 	fp->f_ops = &dmabufops;
2108 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
2109 	dmabuf->priv = info->priv;
2110 	dmabuf->ops = info->ops;
2111 	dmabuf->size = info->size;
2112 	dmabuf->file = fp;
2113 	fp->f_data = dmabuf;
2114 	INIT_LIST_HEAD(&dmabuf->attachments);
2115 	return dmabuf;
2116 }
2117 
2118 struct dma_buf *
2119 dma_buf_get(int fd)
2120 {
2121 	struct proc *p = curproc;
2122 	struct filedesc *fdp = p->p_fd;
2123 	struct file *fp;
2124 
2125 	if ((fp = fd_getfile(fdp, fd)) == NULL)
2126 		return ERR_PTR(-EBADF);
2127 
2128 	if (fp->f_type != DTYPE_DMABUF) {
2129 		FRELE(fp, p);
2130 		return ERR_PTR(-EINVAL);
2131 	}
2132 
2133 	return fp->f_data;
2134 }
2135 
2136 void
2137 dma_buf_put(struct dma_buf *dmabuf)
2138 {
2139 	KASSERT(dmabuf);
2140 	KASSERT(dmabuf->file);
2141 
2142 	FRELE(dmabuf->file, curproc);
2143 }
2144 
2145 int
2146 dma_buf_fd(struct dma_buf *dmabuf, int flags)
2147 {
2148 	struct proc *p = curproc;
2149 	struct filedesc *fdp = p->p_fd;
2150 	struct file *fp = dmabuf->file;
2151 	int fd, cloexec, error;
2152 
2153 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
2154 
2155 	fdplock(fdp);
2156 restart:
2157 	if ((error = fdalloc(p, 0, &fd)) != 0) {
2158 		if (error == ENOSPC) {
2159 			fdexpand(p);
2160 			goto restart;
2161 		}
2162 		fdpunlock(fdp);
2163 		return -error;
2164 	}
2165 
2166 	fdinsert(fdp, fd, cloexec, fp);
2167 	fdpunlock(fdp);
2168 
2169 	return fd;
2170 }
2171 
2172 void
2173 get_dma_buf(struct dma_buf *dmabuf)
2174 {
2175 	FREF(dmabuf->file);
2176 }
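
/*
 * A minimal sketch of the export/import flow behind these file hooks;
 * "example_dmabuf_ops", "obj" and "size" are placeholders:
 *
 *	struct dma_buf_export_info info = {
 *		.ops = &example_dmabuf_ops,
 *		.size = size,
 *		.priv = obj,
 *	};
 *	struct dma_buf *dmabuf = dma_buf_export(&info);
 *	int fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *
 * The importing side turns the descriptor back into a buffer with
 * dma_buf_get(fd) and drops that reference again with dma_buf_put().
 */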
2177 
2178 enum pci_bus_speed
2179 pcie_get_speed_cap(struct pci_dev *pdev)
2180 {
2181 	pci_chipset_tag_t	pc;
2182 	pcitag_t		tag;
2183 	int			pos;
2184 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
2185 	pcireg_t		id;
2186 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
2187 	int			bus, device, function;
2188 
2189 	if (pdev == NULL)
2190 		return PCI_SPEED_UNKNOWN;
2191 
2192 	pc = pdev->pc;
2193 	tag = pdev->tag;
2194 
2195 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2196 	    &pos, NULL))
2197 		return PCI_SPEED_UNKNOWN;
2198 
2199 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2200 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2201 
2202 	/* we've been informed that VIA and ServerWorks don't make the cut */
2203 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
2204 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
2205 		return PCI_SPEED_UNKNOWN;
2206 
2207 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2208 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
2209 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
2210 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
2211 
2212 	lnkcap &= 0x0f;
2213 	lnkcap2 &= 0xfe;
2214 
2215 	if (lnkcap2) { /* PCIE GEN 3.0 */
2216 		if (lnkcap2 & 0x02)
2217 			cap = PCIE_SPEED_2_5GT;
2218 		if (lnkcap2 & 0x04)
2219 			cap = PCIE_SPEED_5_0GT;
2220 		if (lnkcap2 & 0x08)
2221 			cap = PCIE_SPEED_8_0GT;
2222 		if (lnkcap2 & 0x10)
2223 			cap = PCIE_SPEED_16_0GT;
2224 	} else {
2225 		if (lnkcap & 0x01)
2226 			cap = PCIE_SPEED_2_5GT;
2227 		if (lnkcap & 0x02)
2228 			cap = PCIE_SPEED_5_0GT;
2229 	}
2230 
2231 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
2232 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
2233 	    lnkcap2);
2234 	return cap;
2235 }
2236 
2237 enum pcie_link_width
2238 pcie_get_width_cap(struct pci_dev *pdev)
2239 {
2240 	pci_chipset_tag_t	pc = pdev->pc;
2241 	pcitag_t		tag = pdev->tag;
2242 	int			pos;
2243 	pcireg_t		lnkcap = 0;
2244 	pcireg_t		id;
2245 	int			bus, device, function;
2246 
2247 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2248 	    &pos, NULL))
2249 		return PCIE_LNK_WIDTH_UNKNOWN;
2250 
2251 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2252 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2253 
2254 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2255 
2256 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
2257 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
2258 
2259 	if (lnkcap)
2260 		return (lnkcap & 0x3f0) >> 4;
2261 	return PCIE_LNK_WIDTH_UNKNOWN;
2262 }
2263 
2264 int
2265 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
2266     int sync, void *key)
2267 {
2268 	wakeup(wqe);
2269 	if (wqe->private)
2270 		wake_up_process(wqe->private);
2271 	list_del_init(&wqe->entry);
2272 	return 0;
2273 }
2274 
2275 static wait_queue_head_t bit_waitq;
2276 wait_queue_head_t var_waitq;
2277 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
2278 
2279 int
2280 wait_on_bit(unsigned long *word, int bit, unsigned mode)
2281 {
2282 	int err;
2283 
2284 	if (!test_bit(bit, word))
2285 		return 0;
2286 
2287 	mtx_enter(&wait_bit_mtx);
2288 	while (test_bit(bit, word)) {
2289 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
2290 		    INFSLP);
2291 		if (err) {
2292 			mtx_leave(&wait_bit_mtx);
2293 			return 1;
2294 		}
2295 	}
2296 	mtx_leave(&wait_bit_mtx);
2297 	return 0;
2298 }
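
/*
 * A minimal sketch of the bit-wait pairing; "flags" and EXAMPLE_BUSY_BIT
 * are hypothetical caller state:
 *
 *	(waiter)
 *	wait_on_bit(&flags, EXAMPLE_BUSY_BIT, TASK_UNINTERRUPTIBLE);
 *
 *	(owner, once the work is finished)
 *	clear_and_wake_up_bit(EXAMPLE_BUSY_BIT, &flags);
 */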
2299 
2300 int
2301 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
2302 {
2303 	int err;
2304 
2305 	if (!test_bit(bit, word))
2306 		return 0;
2307 
2308 	mtx_enter(&wait_bit_mtx);
2309 	while (test_bit(bit, word)) {
2310 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
2311 		if (err) {
2312 			mtx_leave(&wait_bit_mtx);
2313 			return 1;
2314 		}
2315 	}
2316 	mtx_leave(&wait_bit_mtx);
2317 	return 0;
2318 }
2319 
2320 void
2321 wake_up_bit(void *word, int bit)
2322 {
2323 	mtx_enter(&wait_bit_mtx);
2324 	wakeup(word);
2325 	mtx_leave(&wait_bit_mtx);
2326 }
2327 
2328 void
2329 clear_and_wake_up_bit(int bit, void *word)
2330 {
2331 	clear_bit(bit, word);
2332 	wake_up_bit(word, bit);
2333 }
2334 
2335 wait_queue_head_t *
2336 bit_waitqueue(void *word, int bit)
2337 {
2338 	/* XXX hash table of wait queues? */
2339 	return &bit_waitq;
2340 }
2341 
2342 wait_queue_head_t *
2343 __var_waitqueue(void *p)
2344 {
2345 	/* XXX hash table of wait queues? */
2346 	return &bit_waitq;
2347 }
2348 
2349 struct workqueue_struct *system_wq;
2350 struct workqueue_struct *system_highpri_wq;
2351 struct workqueue_struct *system_unbound_wq;
2352 struct workqueue_struct *system_long_wq;
2353 struct taskq *taskletq;
2354 
2355 void
2356 drm_linux_init(void)
2357 {
2358 	system_wq = (struct workqueue_struct *)
2359 	    taskq_create("drmwq", 4, IPL_HIGH, 0);
2360 	system_highpri_wq = (struct workqueue_struct *)
2361 	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
2362 	system_unbound_wq = (struct workqueue_struct *)
2363 	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
2364 	system_long_wq = (struct workqueue_struct *)
2365 	    taskq_create("drmlwq", 4, IPL_HIGH, 0);
2366 
2367 	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
2368 
2369 	init_waitqueue_head(&bit_waitq);
2370 	init_waitqueue_head(&var_waitq);
2371 
2372 	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
2373 	    "idrpl", NULL);
2374 
2375 	kmap_atomic_va =
2376 	    (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
2377 }
2378 
2379 void
2380 drm_linux_exit(void)
2381 {
2382 	pool_destroy(&idr_pool);
2383 
2384 	taskq_destroy(taskletq);
2385 
2386 	taskq_destroy((struct taskq *)system_long_wq);
2387 	taskq_destroy((struct taskq *)system_unbound_wq);
2388 	taskq_destroy((struct taskq *)system_highpri_wq);
2389 	taskq_destroy((struct taskq *)system_wq);
2390 }
2391 
2392 #define PCIE_ECAP_RESIZE_BAR	0x15
2393 #define RBCAP0			0x04
2394 #define RBCTRL0			0x08
2395 #define RBCTRL_BARINDEX_MASK	0x07
2396 #define RBCTRL_BARSIZE_MASK	0x1f00
2397 #define RBCTRL_BARSIZE_SHIFT	8
2398 
2399 /* size in MB is 1 << nsize, e.g. nsize = 8 requests a 256 MB BAR */
2400 int
2401 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
2402 {
2403 	pcireg_t	reg;
2404 	uint32_t	offset, capid;
2405 
2406 	KASSERT(bar == 0);
2407 
2408 	offset = PCI_PCIE_ECAP;
2409 
2410 	/* search PCI Express Extended Capabilities */
2411 	do {
2412 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
2413 		capid = PCI_PCIE_ECAP_ID(reg);
2414 		if (capid == PCIE_ECAP_RESIZE_BAR)
2415 			break;
2416 		offset = PCI_PCIE_ECAP_NEXT(reg);
2417 	} while (capid != 0);
2418 
2419 	if (capid == 0) {
2420 		printf("%s: could not find resize bar cap!\n", __func__);
2421 		return -ENOTSUP;
2422 	}
2423 
2424 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
2425 
2426 	if ((reg & (1 << (nsize + 4))) == 0) {
2427 		printf("%s size not supported\n", __func__);
2428 		return -ENOTSUP;
2429 	}
2430 
2431 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2432 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2433 		printf("%s BAR index not 0\n", __func__);
2434 		return -EINVAL;
2435 	}
2436 
2437 	reg &= ~RBCTRL_BARSIZE_MASK;
2438 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2439 
2440 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2441 
2442 	return 0;
2443 }
2444 
2445 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2446 
2447 int
2448 register_shrinker(struct shrinker *shrinker)
2449 {
2450 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2451 	return 0;
2452 }
2453 
2454 void
2455 unregister_shrinker(struct shrinker *shrinker)
2456 {
2457 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2458 }
2459 
2460 void
2461 drmbackoff(long npages)
2462 {
2463 	struct shrink_control sc;
2464 	struct shrinker *shrinker;
2465 	u_long ret;
2466 
2467 	shrinker = TAILQ_FIRST(&shrinkers);
2468 	while (shrinker && npages > 0) {
2469 		sc.nr_to_scan = npages;
2470 		ret = shrinker->scan_objects(shrinker, &sc);
2471 		npages -= ret;
2472 		shrinker = TAILQ_NEXT(shrinker, next);
2473 	}
2474 }
2475 
2476 void *
2477 bitmap_zalloc(u_int n, gfp_t flags)
2478 {
2479 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2480 }
2481 
2482 void
2483 bitmap_free(void *p)
2484 {
2485 	kfree(p);
2486 }
2487 
2488 int
2489 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2490 {
2491 	if (atomic_add_unless(v, -1, 1))
2492 		return 0;
2493 
2494 	rw_enter_write(lock);
2495 	if (atomic_dec_return(v) == 0)
2496 		return 1;
2497 	rw_exit_write(lock);
2498 	return 0;
2499 }
2500 
2501 int
2502 printk(const char *fmt, ...)
2503 {
2504 	int ret, level;
2505 	va_list ap;
2506 
2507 	if (fmt != NULL && *fmt == '\001') {
2508 		level = fmt[1];
2509 #ifndef DRMDEBUG
2510 		if (level >= KERN_INFO[1] && level <= '9')
2511 			return 0;
2512 #endif
2513 		fmt += 2;
2514 	}
2515 
2516 	va_start(ap, fmt);
2517 	ret = vprintf(fmt, ap);
2518 	va_end(ap);
2519 
2520 	return ret;
2521 }
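
/*
 * A minimal sketch of how callers pass a level, relying on the KERN_*
 * prefixes expanding to "\001" followed by a level digit:
 *
 *	printk(KERN_WARNING "example: lost interrupt\n");
 *
 * Without DRMDEBUG, messages at KERN_INFO level and less severe are
 * suppressed by the check above.
 */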
2522 
2523 #define START(node) ((node)->start)
2524 #define LAST(node) ((node)->last)
2525 
2526 struct interval_tree_node *
2527 interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
2528     unsigned long last)
2529 {
2530 	struct interval_tree_node *node;
2531 	struct rb_node *rb;
2532 
2533 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
2534 		node = rb_entry(rb, typeof(*node), rb);
2535 		if (LAST(node) >= start && START(node) <= last)
2536 			return node;
2537 	}
2538 	return NULL;
2539 }
2540 
2541 void
2542 interval_tree_remove(struct interval_tree_node *node,
2543     struct rb_root_cached *root)
2544 {
2545 	rb_erase_cached(&node->rb, root);
2546 }
2547 
2548 void
2549 interval_tree_insert(struct interval_tree_node *node,
2550     struct rb_root_cached *root)
2551 {
2552 	struct rb_node **iter = &root->rb_root.rb_node;
2553 	struct rb_node *parent = NULL;
2554 	struct interval_tree_node *iter_node;
2555 
2556 	while (*iter) {
2557 		parent = *iter;
2558 		iter_node = rb_entry(*iter, struct interval_tree_node, rb);
2559 
2560 		if (node->start < iter_node->start)
2561 			iter = &(*iter)->rb_left;
2562 		else
2563 			iter = &(*iter)->rb_right;
2564 	}
2565 
2566 	rb_link_node(&node->rb, parent, iter);
2567 	rb_insert_color_cached(&node->rb, root, false);
2568 }
2569