xref: /openbsd-src/sys/dev/pci/drm/drm_linux.c (revision fc405d53b73a2d73393cb97f684863d17b583e38)
1 /*	$OpenBSD: drm_linux.c,v 1.97 2023/03/15 08:24:56 jsg Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30 
31 #include <dev/pci/ppbreg.h>
32 
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/dma-fence-chain.h>
39 #include <linux/interrupt.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/scatterlist.h>
43 #include <linux/i2c.h>
44 #include <linux/pci.h>
45 #include <linux/notifier.h>
46 #include <linux/backlight.h>
47 #include <linux/shrinker.h>
48 #include <linux/fb.h>
49 #include <linux/xarray.h>
50 #include <linux/interval_tree.h>
51 #include <linux/kthread.h>
52 #include <linux/processor.h>
53 #include <linux/sync_file.h>
54 
55 #include <drm/drm_device.h>
56 #include <drm/drm_connector.h>
57 #include <drm/drm_print.h>
58 
59 #if defined(__amd64__) || defined(__i386__)
60 #include "bios.h"
61 #endif
62 
63 /* allowed to sleep */
64 void
65 tasklet_unlock_wait(struct tasklet_struct *ts)
66 {
67 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
68 		cpu_relax();
69 }
70 
71 /* must not sleep */
72 void
73 tasklet_unlock_spin_wait(struct tasklet_struct *ts)
74 {
75 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
76 		cpu_relax();
77 }
78 
79 void
80 tasklet_run(void *arg)
81 {
82 	struct tasklet_struct *ts = arg;
83 
84 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
85 	if (tasklet_trylock(ts)) {
86 		if (!atomic_read(&ts->count)) {
87 			if (ts->use_callback)
88 				ts->callback(ts);
89 			else
90 				ts->func(ts->data);
91 		}
92 		tasklet_unlock(ts);
93 	}
94 }
95 
96 /* 32 bit powerpc lacks 64 bit atomics */
97 #if defined(__powerpc__) && !defined(__powerpc64__)
98 struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
99 #endif
100 
101 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
102 volatile struct proc *sch_proc;
103 volatile void *sch_ident;
104 int sch_priority;
105 
106 void
107 set_current_state(int state)
108 {
109 	if (sch_ident != curproc)
110 		mtx_enter(&sch_mtx);
111 	MUTEX_ASSERT_LOCKED(&sch_mtx);
112 	sch_ident = sch_proc = curproc;
113 	sch_priority = state;
114 }
115 
116 void
117 __set_current_state(int state)
118 {
119 	KASSERT(state == TASK_RUNNING);
120 	if (sch_ident == curproc) {
121 		MUTEX_ASSERT_LOCKED(&sch_mtx);
122 		sch_ident = NULL;
123 		mtx_leave(&sch_mtx);
124 	}
125 }
126 
127 void
128 schedule(void)
129 {
130 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
131 }
132 
133 long
134 schedule_timeout(long timeout)
135 {
136 	struct sleep_state sls;
137 	unsigned long deadline;
138 	int wait, spl, timo = 0;
139 
140 	MUTEX_ASSERT_LOCKED(&sch_mtx);
141 	KASSERT(!cold);
142 
143 	if (timeout != MAX_SCHEDULE_TIMEOUT)
144 		timo = timeout;
145 	sleep_setup(&sls, sch_ident, sch_priority, "schto", timo);
146 
147 	wait = (sch_proc == curproc && timeout > 0);
148 
149 	spl = MUTEX_OLDIPL(&sch_mtx);
150 	MUTEX_OLDIPL(&sch_mtx) = splsched();
151 	mtx_leave(&sch_mtx);
152 
153 	if (timeout != MAX_SCHEDULE_TIMEOUT)
154 		deadline = jiffies + timeout;
155 	sleep_finish(&sls, wait);
156 	if (timeout != MAX_SCHEDULE_TIMEOUT)
157 		timeout = deadline - jiffies;
158 
159 	mtx_enter(&sch_mtx);
160 	MUTEX_OLDIPL(&sch_mtx) = spl;
161 	sch_ident = curproc;
162 
163 	return timeout > 0 ? timeout : 0;
164 }
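
/*
 * Editor's note (not part of the upstream file): the helpers above map
 * the raw Linux wait pattern (set_current_state(), schedule_timeout(),
 * wake_up_process()) onto sleep_setup()/sleep_finish().  A minimal
 * sketch of that pattern follows; real callers normally use the
 * wait_event*() macros from the compat headers, and TASK_UNINTERRUPTIBLE
 * is assumed to come from those headers as well.
 */
#if 0
static int
example_wait_for_flag(volatile int *flag)
{
	long timeout = 2 * hz;

	while (*flag == 0 && timeout > 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (*flag) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		/* another context sets *flag and calls wake_up_process() */
		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);
	return (*flag != 0) ? 0 : -ETIMEDOUT;
}
#endif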
165 
166 long
167 schedule_timeout_uninterruptible(long timeout)
168 {
169 	tsleep(curproc, PWAIT, "schtou", timeout);
170 	return 0;
171 }
172 
173 int
174 wake_up_process(struct proc *p)
175 {
176 	atomic_cas_ptr(&sch_proc, p, NULL);
177 	return wakeup_proc(p, NULL);
178 }
179 
180 void
181 flush_workqueue(struct workqueue_struct *wq)
182 {
183 	if (cold)
184 		return;
185 
186 	if (wq)
187 		taskq_barrier((struct taskq *)wq);
188 }
189 
190 bool
191 flush_work(struct work_struct *work)
192 {
193 	if (cold)
194 		return false;
195 
196 	if (work->tq)
197 		taskq_barrier(work->tq);
198 	return false;
199 }
200 
201 bool
202 flush_delayed_work(struct delayed_work *dwork)
203 {
204 	bool ret = false;
205 
206 	if (cold)
207 		return false;
208 
209 	while (timeout_pending(&dwork->to)) {
210 		tsleep(dwork, PWAIT, "fldwto", 1);
211 		ret = true;
212 	}
213 
214 	if (dwork->tq)
215 		taskq_barrier(dwork->tq);
216 	return ret;
217 }
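
/*
 * Editor's note (not part of the upstream file): work_struct and
 * delayed_work are backed by a task (plus a timeout for the delayed
 * variant), and the flush functions above reduce to taskq_barrier();
 * flush_delayed_work() waits for a pending timeout to fire instead of
 * cancelling it.  A minimal teardown sketch, assuming the usual
 * INIT_*_WORK()/queue_*_work() helpers from the compat headers were
 * used to schedule the work:
 */
#if 0
static void
example_work_teardown(struct delayed_work *dwork, struct workqueue_struct *wq)
{
	/* let a pending timeout fire, then wait for the task to finish */
	flush_delayed_work(dwork);

	/* drain everything still queued on the underlying taskq */
	flush_workqueue(wq);
}
#endif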
218 
219 struct kthread {
220 	int (*func)(void *);
221 	void *data;
222 	struct proc *proc;
223 	volatile u_int flags;
224 #define KTHREAD_SHOULDSTOP	0x0000001
225 #define KTHREAD_STOPPED		0x0000002
226 #define KTHREAD_SHOULDPARK	0x0000004
227 #define KTHREAD_PARKED		0x0000008
228 	LIST_ENTRY(kthread) next;
229 };
230 
231 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
232 
233 void
234 kthread_func(void *arg)
235 {
236 	struct kthread *thread = arg;
237 	int ret;
238 
239 	ret = thread->func(thread->data);
240 	thread->flags |= KTHREAD_STOPPED;
241 	wakeup(thread);
242 	kthread_exit(ret);
243 }
244 
245 struct proc *
246 kthread_run(int (*func)(void *), void *data, const char *name)
247 {
248 	struct kthread *thread;
249 
250 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
251 	thread->func = func;
252 	thread->data = data;
253 	thread->flags = 0;
254 
255 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
256 		free(thread, M_DRM, sizeof(*thread));
257 		return ERR_PTR(-ENOMEM);
258 	}
259 
260 	LIST_INSERT_HEAD(&kthread_list, thread, next);
261 	return thread->proc;
262 }
263 
264 struct kthread_worker *
265 kthread_create_worker(unsigned int flags, const char *fmt, ...)
266 {
267 	char name[MAXCOMLEN+1];
268 	va_list ap;
269 
270 	struct kthread_worker *w = malloc(sizeof(*w), M_DRM, M_WAITOK);
271 	va_start(ap, fmt);
272 	vsnprintf(name, sizeof(name), fmt, ap);
273 	va_end(ap);
274 	w->tq = taskq_create(name, 1, IPL_HIGH, 0);
275 
276 	return w;
277 }
278 
279 void
280 kthread_destroy_worker(struct kthread_worker *worker)
281 {
282 	taskq_destroy(worker->tq);
283 	free(worker, M_DRM, sizeof(*worker));
284 
285 }
286 
287 void
288 kthread_init_work(struct kthread_work *work, void (*func)(struct kthread_work *))
289 {
290 	work->tq = NULL;
291 	task_set(&work->task, (void (*)(void *))func, work);
292 }
293 
294 bool
295 kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
296 {
297 	work->tq = worker->tq;
298 	return task_add(work->tq, &work->task);
299 }
300 
301 bool
302 kthread_cancel_work_sync(struct kthread_work *work)
303 {
304 	return task_del(work->tq, &work->task);
305 }
306 
307 void
308 kthread_flush_work(struct kthread_work *work)
309 {
310 	if (cold)
311 		return;
312 
313 	if (work->tq)
314 		taskq_barrier(work->tq);
315 }
316 
317 void
318 kthread_flush_worker(struct kthread_worker *worker)
319 {
320 	if (cold)
321 		return;
322 
323 	if (worker->tq)
324 		taskq_barrier(worker->tq);
325 }
326 
327 struct kthread *
328 kthread_lookup(struct proc *p)
329 {
330 	struct kthread *thread;
331 
332 	LIST_FOREACH(thread, &kthread_list, next) {
333 		if (thread->proc == p)
334 			break;
335 	}
336 	KASSERT(thread);
337 
338 	return thread;
339 }
340 
341 int
342 kthread_should_park(void)
343 {
344 	struct kthread *thread = kthread_lookup(curproc);
345 	return (thread->flags & KTHREAD_SHOULDPARK);
346 }
347 
348 void
349 kthread_parkme(void)
350 {
351 	struct kthread *thread = kthread_lookup(curproc);
352 
353 	while (thread->flags & KTHREAD_SHOULDPARK) {
354 		thread->flags |= KTHREAD_PARKED;
355 		wakeup(thread);
356 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
357 		thread->flags &= ~KTHREAD_PARKED;
358 	}
359 }
360 
361 void
362 kthread_park(struct proc *p)
363 {
364 	struct kthread *thread = kthread_lookup(p);
365 
366 	while ((thread->flags & KTHREAD_PARKED) == 0) {
367 		thread->flags |= KTHREAD_SHOULDPARK;
368 		wake_up_process(thread->proc);
369 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
370 	}
371 }
372 
373 void
374 kthread_unpark(struct proc *p)
375 {
376 	struct kthread *thread = kthread_lookup(p);
377 
378 	thread->flags &= ~KTHREAD_SHOULDPARK;
379 	wakeup(thread);
380 }
381 
382 int
383 kthread_should_stop(void)
384 {
385 	struct kthread *thread = kthread_lookup(curproc);
386 	return (thread->flags & KTHREAD_SHOULDSTOP);
387 }
388 
389 void
390 kthread_stop(struct proc *p)
391 {
392 	struct kthread *thread = kthread_lookup(p);
393 
394 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
395 		thread->flags |= KTHREAD_SHOULDSTOP;
396 		kthread_unpark(p);
397 		wake_up_process(thread->proc);
398 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
399 	}
400 	LIST_REMOVE(thread, next);
401 	free(thread, M_DRM, sizeof(*thread));
402 }
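
/*
 * Editor's note (not part of the upstream file): a sketch of the thread
 * body and controller pattern the park/stop helpers above implement.
 */
#if 0
static int
example_thread(void *arg)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();
		/* do one unit of work, then sleep until woken */
	}
	return 0;
}

static void
example_thread_lifecycle(void *arg)
{
	struct proc *p;

	p = kthread_run(example_thread, arg, "exthread");
	if (IS_ERR(p))
		return;

	kthread_park(p);	/* wait until the thread has parked itself */
	kthread_unpark(p);	/* let it continue */
	kthread_stop(p);	/* ask it to exit and wait for it */
}
#endif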
403 
404 #if NBIOS > 0
405 extern char smbios_board_vendor[];
406 extern char smbios_board_prod[];
407 extern char smbios_board_serial[];
408 #endif
409 
410 bool
411 dmi_match(int slot, const char *str)
412 {
413 	switch (slot) {
414 	case DMI_SYS_VENDOR:
415 		if (hw_vendor != NULL &&
416 		    !strcmp(hw_vendor, str))
417 			return true;
418 		break;
419 	case DMI_PRODUCT_NAME:
420 		if (hw_prod != NULL &&
421 		    !strcmp(hw_prod, str))
422 			return true;
423 		break;
424 	case DMI_PRODUCT_VERSION:
425 		if (hw_ver != NULL &&
426 		    !strcmp(hw_ver, str))
427 			return true;
428 		break;
429 #if NBIOS > 0
430 	case DMI_BOARD_VENDOR:
431 		if (strcmp(smbios_board_vendor, str) == 0)
432 			return true;
433 		break;
434 	case DMI_BOARD_NAME:
435 		if (strcmp(smbios_board_prod, str) == 0)
436 			return true;
437 		break;
438 	case DMI_BOARD_SERIAL:
439 		if (strcmp(smbios_board_serial, str) == 0)
440 			return true;
441 		break;
442 #else
443 	case DMI_BOARD_VENDOR:
444 		if (hw_vendor != NULL &&
445 		    !strcmp(hw_vendor, str))
446 			return true;
447 		break;
448 	case DMI_BOARD_NAME:
449 		if (hw_prod != NULL &&
450 		    !strcmp(hw_prod, str))
451 			return true;
452 		break;
453 #endif
454 	case DMI_NONE:
455 	default:
456 		return false;
457 	}
458 
459 	return false;
460 }
461 
462 static bool
463 dmi_found(const struct dmi_system_id *dsi)
464 {
465 	int i, slot;
466 
467 	for (i = 0; i < nitems(dsi->matches); i++) {
468 		slot = dsi->matches[i].slot;
469 		if (slot == DMI_NONE)
470 			break;
471 		if (!dmi_match(slot, dsi->matches[i].substr))
472 			return false;
473 	}
474 
475 	return true;
476 }
477 
478 const struct dmi_system_id *
479 dmi_first_match(const struct dmi_system_id *sysid)
480 {
481 	const struct dmi_system_id *dsi;
482 
483 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
484 		if (dmi_found(dsi))
485 			return dsi;
486 	}
487 
488 	return NULL;
489 }
490 
491 #if NBIOS > 0
492 extern char smbios_bios_date[];
493 extern char smbios_bios_version[];
494 #endif
495 
496 const char *
497 dmi_get_system_info(int slot)
498 {
499 #if NBIOS > 0
500 	switch (slot) {
501 	case DMI_BIOS_DATE:
502 		return smbios_bios_date;
503 	case DMI_BIOS_VERSION:
504 		return smbios_bios_version;
505 	default:
506 		printf("%s slot %d not handled\n", __func__, slot);
507 	}
508 #endif
509 	return NULL;
510 }
511 
512 int
513 dmi_check_system(const struct dmi_system_id *sysid)
514 {
515 	const struct dmi_system_id *dsi;
516 	int num = 0;
517 
518 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
519 		if (dmi_found(dsi)) {
520 			num++;
521 			if (dsi->callback && dsi->callback(dsi))
522 				break;
523 		}
524 	}
525 	return (num);
526 }
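
/*
 * Editor's note (not part of the upstream file): a sketch of the quirk
 * tables dmi_check_system() is fed.  Linux code normally builds the
 * matches[] array with the DMI_MATCH() macro from the compat headers;
 * explicit .slot/.substr initializers are used here because those are
 * the fields dmi_found() above actually inspects.
 */
#if 0
static int
example_quirk_cb(const struct dmi_system_id *dsi)
{
	printf("applying example DMI quirk\n");
	return 1;	/* nonzero stops the scan */
}

static const struct dmi_system_id example_quirks[] = {
	{
		.callback = example_quirk_cb,
		.matches = {
			{ .slot = DMI_BOARD_VENDOR, .substr = "Example Vendor" },
			{ .slot = DMI_BOARD_NAME, .substr = "Example Board" },
		},
	},
	{ }	/* terminator: matches[0].slot == 0 */
};

/* during attach: dmi_check_system(example_quirks); */
#endif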
527 
528 struct vm_page *
529 alloc_pages(unsigned int gfp_mask, unsigned int order)
530 {
531 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
532 	struct uvm_constraint_range *constraint = &no_constraint;
533 	struct pglist mlist;
534 
535 	if (gfp_mask & M_CANFAIL)
536 		flags |= UVM_PLA_FAILOK;
537 	if (gfp_mask & M_ZERO)
538 		flags |= UVM_PLA_ZERO;
539 	if (gfp_mask & __GFP_DMA32)
540 		constraint = &dma_constraint;
541 
542 	TAILQ_INIT(&mlist);
543 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
544 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
545 		return NULL;
546 	return TAILQ_FIRST(&mlist);
547 }
548 
549 void
550 __free_pages(struct vm_page *page, unsigned int order)
551 {
552 	struct pglist mlist;
553 	int i;
554 
555 	TAILQ_INIT(&mlist);
556 	for (i = 0; i < (1 << order); i++)
557 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
558 	uvm_pglistfree(&mlist);
559 }
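
/*
 * Editor's note (not part of the upstream file): alloc_pages() hands back
 * the first vm_page of a physically contiguous run of 1 << order pages
 * from UVM.  A sketch using the malloc(9)/GFP flag bits the function
 * above actually tests:
 */
#if 0
static void
example_page_run(void)
{
	struct vm_page *pg;

	/* zeroed, physically contiguous, 4 pages, below 4GB */
	pg = alloc_pages(M_WAITOK | M_CANFAIL | M_ZERO | __GFP_DMA32, 2);
	if (pg == NULL)
		return;
	/* VM_PAGE_TO_PHYS(pg) is the base of the run */
	__free_pages(pg, 2);
}
#endif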
560 
561 void
562 __pagevec_release(struct pagevec *pvec)
563 {
564 	struct pglist mlist;
565 	int i;
566 
567 	TAILQ_INIT(&mlist);
568 	for (i = 0; i < pvec->nr; i++)
569 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
570 	uvm_pglistfree(&mlist);
571 	pagevec_reinit(pvec);
572 }
573 
574 static struct kmem_va_mode kv_physwait = {
575 	.kv_map = &phys_map,
576 	.kv_wait = 1,
577 };
578 
579 void *
580 kmap(struct vm_page *pg)
581 {
582 	vaddr_t va;
583 
584 #if defined (__HAVE_PMAP_DIRECT)
585 	va = pmap_map_direct(pg);
586 #else
587 	va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
588 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
589 	pmap_update(pmap_kernel());
590 #endif
591 	return (void *)va;
592 }
593 
594 void
595 kunmap_va(void *addr)
596 {
597 	vaddr_t va = (vaddr_t)addr;
598 
599 #if defined (__HAVE_PMAP_DIRECT)
600 	pmap_unmap_direct(va);
601 #else
602 	pmap_kremove(va, PAGE_SIZE);
603 	pmap_update(pmap_kernel());
604 	km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
605 #endif
606 }
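
/*
 * Editor's note (not part of the upstream file): kmap()/kunmap_va() give
 * a temporary kernel mapping of a single page, via the direct map where
 * available and pmap_kenter_pa() otherwise.  Sketch:
 */
#if 0
static void
example_zero_page(struct vm_page *pg)
{
	void *va;

	va = kmap(pg);
	memset(va, 0, PAGE_SIZE);
	kunmap_va(va);
}
#endif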
607 
608 vaddr_t kmap_atomic_va;
609 int kmap_atomic_inuse;
610 
611 void *
612 kmap_atomic_prot(struct vm_page *pg, pgprot_t prot)
613 {
614 	KASSERT(!kmap_atomic_inuse);
615 
616 	kmap_atomic_inuse = 1;
617 	pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot,
618 	    PROT_READ | PROT_WRITE);
619 	return (void *)kmap_atomic_va;
620 }
621 
622 void
623 kunmap_atomic(void *addr)
624 {
625 	KASSERT(kmap_atomic_inuse);
626 
627 	pmap_kremove(kmap_atomic_va, PAGE_SIZE);
628 	kmap_atomic_inuse = 0;
629 }
630 
631 void *
632 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
633      pgprot_t prot)
634 {
635 	vaddr_t va;
636 	paddr_t pa;
637 	int i;
638 
639 	va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
640 	    &kd_nowait);
641 	if (va == 0)
642 		return NULL;
643 	for (i = 0; i < npages; i++) {
644 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
645 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
646 		    PROT_READ | PROT_WRITE,
647 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
648 		pmap_update(pmap_kernel());
649 	}
650 
651 	return (void *)va;
652 }
653 
654 void
655 vunmap(void *addr, size_t size)
656 {
657 	vaddr_t va = (vaddr_t)addr;
658 
659 	pmap_remove(pmap_kernel(), va, va + size);
660 	pmap_update(pmap_kernel());
661 	km_free((void *)va, size, &kv_any, &kp_none);
662 }
663 
664 bool
665 is_vmalloc_addr(const void *p)
666 {
667 	vaddr_t min, max, addr;
668 
669 	min = vm_map_min(kernel_map);
670 	max = vm_map_max(kernel_map);
671 	addr = (vaddr_t)p;
672 
673 	if (addr >= min && addr <= max)
674 		return true;
675 	else
676 		return false;
677 }
678 
679 void
680 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
681     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
682 {
683 	const uint8_t *cbuf = buf;
684 	int i;
685 
686 	for (i = 0; i < len; i++) {
687 		if ((i % rowsize) == 0)
688 			printf("%s", prefix_str);
689 		printf("%02x", cbuf[i]);
690 		if ((i % rowsize) == (rowsize - 1))
691 			printf("\n");
692 		else
693 			printf(" ");
694 	}
695 }
696 
697 void *
698 memchr_inv(const void *s, int c, size_t n)
699 {
700 	if (n != 0) {
701 		const unsigned char *p = s;
702 
703 		do {
704 			if (*p++ != (unsigned char)c)
705 				return ((void *)(p - 1));
706 		} while (--n != 0);
707 	}
708 	return (NULL);
709 }
710 
711 int
712 panic_cmp(struct rb_node *a, struct rb_node *b)
713 {
714 	panic(__func__);
715 }
716 
717 #undef RB_ROOT
718 #define RB_ROOT(head)	(head)->rbh_root
719 
720 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
721 
722 /*
723  * This is a fairly minimal implementation of the Linux "idr" API.  It
724  * probably isn't very efficient, and definitely isn't RCU safe.  The
725  * pre-load buffer is global instead of per-cpu; we rely on the kernel
726  * lock to make this work.  Randomizing IDs to make them harder to
727  * guess is prepared below but is currently disabled ("#ifdef notyet").
728  */
729 
730 int idr_cmp(struct idr_entry *, struct idr_entry *);
731 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
732 
733 struct pool idr_pool;
734 struct idr_entry *idr_entry_cache;
735 
736 void
737 idr_init(struct idr *idr)
738 {
739 	SPLAY_INIT(&idr->tree);
740 }
741 
742 void
743 idr_destroy(struct idr *idr)
744 {
745 	struct idr_entry *id;
746 
747 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
748 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
749 		pool_put(&idr_pool, id);
750 	}
751 }
752 
753 void
754 idr_preload(unsigned int gfp_mask)
755 {
756 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
757 
758 	KERNEL_ASSERT_LOCKED();
759 
760 	if (idr_entry_cache == NULL)
761 		idr_entry_cache = pool_get(&idr_pool, flags);
762 }
763 
764 int
765 idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
766 {
767 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
768 	struct idr_entry *id;
769 	int begin;
770 
771 	KERNEL_ASSERT_LOCKED();
772 
773 	if (idr_entry_cache) {
774 		id = idr_entry_cache;
775 		idr_entry_cache = NULL;
776 	} else {
777 		id = pool_get(&idr_pool, flags);
778 		if (id == NULL)
779 			return -ENOMEM;
780 	}
781 
782 	if (end <= 0)
783 		end = INT_MAX;
784 
785 #ifdef notyet
786 	id->id = begin = start + arc4random_uniform(end - start);
787 #else
788 	id->id = begin = start;
789 #endif
790 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
791 		if (id->id == end)
792 			id->id = start;
793 		else
794 			id->id++;
795 		if (id->id == begin) {
796 			pool_put(&idr_pool, id);
797 			return -ENOSPC;
798 		}
799 	}
800 	id->ptr = ptr;
801 	return id->id;
802 }
803 
804 void *
805 idr_replace(struct idr *idr, void *ptr, unsigned long id)
806 {
807 	struct idr_entry find, *res;
808 	void *old;
809 
810 	find.id = id;
811 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
812 	if (res == NULL)
813 		return ERR_PTR(-ENOENT);
814 	old = res->ptr;
815 	res->ptr = ptr;
816 	return old;
817 }
818 
819 void *
820 idr_remove(struct idr *idr, unsigned long id)
821 {
822 	struct idr_entry find, *res;
823 	void *ptr = NULL;
824 
825 	find.id = id;
826 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
827 	if (res) {
828 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
829 		ptr = res->ptr;
830 		pool_put(&idr_pool, res);
831 	}
832 	return ptr;
833 }
834 
835 void *
836 idr_find(struct idr *idr, unsigned long id)
837 {
838 	struct idr_entry find, *res;
839 
840 	find.id = id;
841 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
842 	if (res == NULL)
843 		return NULL;
844 	return res->ptr;
845 }
846 
847 void *
848 idr_get_next(struct idr *idr, int *id)
849 {
850 	struct idr_entry *res;
851 
852 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
853 		if (res->id >= *id) {
854 			*id = res->id;
855 			return res->ptr;
856 		}
857 	}
858 
859 	return NULL;
860 }
861 
862 int
863 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
864 {
865 	struct idr_entry *id;
866 	int ret;
867 
868 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
869 		ret = func(id->id, id->ptr, data);
870 		if (ret)
871 			return ret;
872 	}
873 
874 	return 0;
875 }
876 
877 int
878 idr_cmp(struct idr_entry *a, struct idr_entry *b)
879 {
880 	return (a->id < b->id ? -1 : a->id > b->id);
881 }
882 
883 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
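
/*
 * Editor's note (not part of the upstream file): a sketch of the usual
 * allocate/look up/remove cycle on this idr implementation.  GFP_KERNEL
 * comes from the compat headers; idr_preload() above may be called first
 * to pre-allocate the entry while sleeping is still allowed.
 */
#if 0
static void
example_idr_cycle(void *object)
{
	struct idr handles;
	int handle;

	idr_init(&handles);

	handle = idr_alloc(&handles, object, 1, 0, GFP_KERNEL);
	if (handle >= 0) {
		KASSERT(idr_find(&handles, handle) == object);
		idr_remove(&handles, handle);
	}

	idr_destroy(&handles);
}
#endif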
884 
885 void
886 ida_init(struct ida *ida)
887 {
888 	idr_init(&ida->idr);
889 }
890 
891 void
892 ida_destroy(struct ida *ida)
893 {
894 	idr_destroy(&ida->idr);
895 }
896 
897 int
898 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
899     gfp_t gfp_mask)
900 {
901 	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
902 }
903 
904 void
905 ida_simple_remove(struct ida *ida, unsigned int id)
906 {
907 	idr_remove(&ida->idr, id);
908 }
909 
910 int
911 ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
912 {
913 	return idr_alloc(&ida->idr, NULL, min, INT_MAX, gfp);
914 }
915 
916 int
917 ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
918 {
919 	return idr_alloc(&ida->idr, NULL, 0, max - 1, gfp);
920 }
921 
922 void
923 ida_free(struct ida *ida, unsigned int id)
924 {
925 	idr_remove(&ida->idr, id);
926 }
927 
928 int
929 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
930 {
931 	return (a->id < b->id ? -1 : a->id > b->id);
932 }
933 
934 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
935 struct pool xa_pool;
936 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
937 
938 void
939 xa_init_flags(struct xarray *xa, gfp_t flags)
940 {
941 	static int initialized;
942 
943 	if (!initialized) {
944 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_NONE, 0,
945 		    "xapl", NULL);
946 		initialized = 1;
947 	}
948 	SPLAY_INIT(&xa->xa_tree);
949 	if (flags & XA_FLAGS_LOCK_IRQ)
950 		mtx_init(&xa->xa_lock, IPL_TTY);
951 	else
952 		mtx_init(&xa->xa_lock, IPL_NONE);
953 }
954 
955 void
956 xa_destroy(struct xarray *xa)
957 {
958 	struct xarray_entry *id;
959 
960 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
961 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
962 		pool_put(&xa_pool, id);
963 	}
964 }
965 
966 /* Don't wrap ids. */
967 int
968 __xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
969 {
970 	struct xarray_entry *xid;
971 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
972 	int begin;
973 
974 	if (gfp & GFP_NOWAIT) {
975 		xid = pool_get(&xa_pool, PR_NOWAIT);
976 	} else {
977 		mtx_leave(&xa->xa_lock);
978 		xid = pool_get(&xa_pool, PR_WAITOK);
979 		mtx_enter(&xa->xa_lock);
980 	}
981 
982 	if (xid == NULL)
983 		return -ENOMEM;
984 
985 	if (limit <= 0)
986 		limit = INT_MAX;
987 
988 	xid->id = begin = start;
989 
990 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
991 		if (xid->id == limit)
992 			xid->id = start;
993 		else
994 			xid->id++;
995 		if (xid->id == begin) {
996 			pool_put(&xa_pool, xid);
997 			return -EBUSY;
998 		}
999 	}
1000 	xid->ptr = entry;
1001 	*id = xid->id;
1002 	return 0;
1003 }
1004 
1005 /*
1006  * Wrap IDs and store the next ID.
1007  * We walk the entire tree, so we don't special-case wrapping.
1008  * The only caller of this (i915_drm_client.c) doesn't use the next ID.
1009  */
1010 int
1011 __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, int limit, u32 *next,
1012     gfp_t gfp)
1013 {
1014 	int r = __xa_alloc(xa, id, entry, limit, gfp);
1015 	*next = *id + 1;
1016 	return r;
1017 }
1018 
1019 void *
1020 __xa_erase(struct xarray *xa, unsigned long index)
1021 {
1022 	struct xarray_entry find, *res;
1023 	void *ptr = NULL;
1024 
1025 	find.id = index;
1026 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1027 	if (res) {
1028 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
1029 		ptr = res->ptr;
1030 		pool_put(&xa_pool, res);
1031 	}
1032 	return ptr;
1033 }
1034 
1035 void *
1036 __xa_load(struct xarray *xa, unsigned long index)
1037 {
1038 	struct xarray_entry find, *res;
1039 
1040 	find.id = index;
1041 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1042 	if (res == NULL)
1043 		return NULL;
1044 	return res->ptr;
1045 }
1046 
1047 void *
1048 __xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1049 {
1050 	struct xarray_entry find, *res;
1051 	void *prev;
1052 
1053 	if (entry == NULL)
1054 		return __xa_erase(xa, index);
1055 
1056 	find.id = index;
1057 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1058 	if (res != NULL) {
1059 		/* index exists */
1060 		/* XXX multislot entry updates not implemented yet */
1061 		prev = res->ptr;
1062 		res->ptr = entry;
1063 		return prev;
1064 	}
1065 
1066 	/* index not found, add new */
1067 	if (gfp & GFP_NOWAIT) {
1068 		res = pool_get(&xa_pool, PR_NOWAIT);
1069 	} else {
1070 		mtx_leave(&xa->xa_lock);
1071 		res = pool_get(&xa_pool, PR_WAITOK);
1072 		mtx_enter(&xa->xa_lock);
1073 	}
1074 	if (res == NULL)
1075 		return XA_ERROR(-ENOMEM);
1076 	res->id = index;
1077 	res->ptr = entry;
1078 	if (SPLAY_INSERT(xarray_tree, &xa->xa_tree, res) != NULL)
1079 		return XA_ERROR(-EINVAL);
1080 	return NULL; /* no prev entry at index */
1081 }
1082 
1083 void *
1084 xa_get_next(struct xarray *xa, unsigned long *index)
1085 {
1086 	struct xarray_entry *res;
1087 
1088 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
1089 		if (res->id >= *index) {
1090 			*index = res->id;
1091 			return res->ptr;
1092 		}
1093 	}
1094 
1095 	return NULL;
1096 }
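
/*
 * Editor's note (not part of the upstream file): a sketch of ID
 * allocation with the locked __xa_*() variants above.  The xarray is
 * assumed to have been set up with xa_init_flags(); the unlocked xa_*()
 * wrappers and GFP_KERNEL come from the compat headers.
 */
#if 0
static int
example_xa_cycle(struct xarray *xa, void *object)
{
	u32 id;
	int error;

	mtx_enter(&xa->xa_lock);
	error = __xa_alloc(xa, &id, object, 0, GFP_KERNEL);
	if (error == 0) {
		KASSERT(__xa_load(xa, id) == object);
		__xa_erase(xa, id);
	}
	mtx_leave(&xa->xa_lock);

	return error;
}
#endif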
1097 
1098 int
1099 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
1100 {
1101 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
1102 	    M_DRM, gfp_mask | M_ZERO);
1103 	if (table->sgl == NULL)
1104 		return -ENOMEM;
1105 	table->nents = table->orig_nents = nents;
1106 	sg_mark_end(&table->sgl[nents - 1]);
1107 	return 0;
1108 }
1109 
1110 void
1111 sg_free_table(struct sg_table *table)
1112 {
1113 	free(table->sgl, M_DRM,
1114 	    table->orig_nents * sizeof(struct scatterlist));
1115 	table->orig_nents = 0;
1116 	table->sgl = NULL;
1117 }
1118 
1119 size_t
1120 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
1121     const void *buf, size_t buflen)
1122 {
1123 	panic("%s", __func__);
1124 }
1125 
1126 int
1127 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1128 {
1129 	void *cmd = NULL;
1130 	int cmdlen = 0;
1131 	int err, ret = 0;
1132 	int op;
1133 
1134 	iic_acquire_bus(&adap->ic, 0);
1135 
1136 	while (num > 2) {
1137 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
1138 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
1139 		    msgs->buf, msgs->len, 0);
1140 		if (err) {
1141 			ret = -err;
1142 			goto fail;
1143 		}
1144 		msgs++;
1145 		num--;
1146 		ret++;
1147 	}
1148 
1149 	if (num > 1) {
1150 		cmd = msgs->buf;
1151 		cmdlen = msgs->len;
1152 		msgs++;
1153 		num--;
1154 		ret++;
1155 	}
1156 
1157 	op = (msgs->flags & I2C_M_RD) ?
1158 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
1159 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
1160 	    msgs->buf, msgs->len, 0);
1161 	if (err) {
1162 		ret = -err;
1163 		goto fail;
1164 	}
1165 	msgs++;
1166 	ret++;
1167 
1168 fail:
1169 	iic_release_bus(&adap->ic, 0);
1170 
1171 	return ret;
1172 }
1173 
1174 int
1175 __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1176 {
1177 	int ret, retries;
1178 
1179 	retries = adap->retries;
1180 retry:
1181 	if (adap->algo)
1182 		ret = adap->algo->master_xfer(adap, msgs, num);
1183 	else
1184 		ret = i2c_master_xfer(adap, msgs, num);
1185 	if (ret == -EAGAIN && retries > 0) {
1186 		retries--;
1187 		goto retry;
1188 	}
1189 
1190 	return ret;
1191 }
1192 
1193 int
1194 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1195 {
1196 	int ret;
1197 
1198 	if (adap->lock_ops)
1199 		adap->lock_ops->lock_bus(adap, 0);
1200 
1201 	ret = __i2c_transfer(adap, msgs, num);
1202 
1203 	if (adap->lock_ops)
1204 		adap->lock_ops->unlock_bus(adap, 0);
1205 
1206 	return ret;
1207 }
1208 
1209 int
1210 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1211 {
1212 	struct i2c_algo_bit_data *algo = adap->algo_data;
1213 	struct i2c_adapter bb;
1214 
1215 	memset(&bb, 0, sizeof(bb));
1216 	bb.ic = algo->ic;
1217 	bb.retries = adap->retries;
1218 	return i2c_master_xfer(&bb, msgs, num);
1219 }
1220 
1221 uint32_t
1222 i2c_bb_functionality(struct i2c_adapter *adap)
1223 {
1224 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1225 }
1226 
1227 struct i2c_algorithm i2c_bit_algo = {
1228 	.master_xfer = i2c_bb_master_xfer,
1229 	.functionality = i2c_bb_functionality
1230 };
1231 
1232 int
1233 i2c_bit_add_bus(struct i2c_adapter *adap)
1234 {
1235 	adap->algo = &i2c_bit_algo;
1236 	adap->retries = 3;
1237 
1238 	return 0;
1239 }
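
/*
 * Editor's note (not part of the upstream file): the classic "write the
 * register address, then read" message pair, which i2c_master_xfer()
 * above folds into a single iic_exec(I2C_OP_READ_WITH_STOP, ...).
 */
#if 0
static int
example_i2c_read_reg(struct i2c_adapter *adap, uint8_t addr, uint8_t reg,
    uint8_t *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = addr, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret;

	ret = i2c_transfer(adap, msgs, nitems(msgs));
	return (ret == 2) ? 0 : -EIO;	/* both messages transferred? */
}
#endif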
1240 
1241 #if defined(__amd64__) || defined(__i386__)
1242 
1243 /*
1244  * This is a minimal implementation of the Linux vga_get/vga_put
1245  * interface.  In all likelihood, it will only work for inteldrm(4) as
1246  * it assumes that if there is another active VGA device in the
1247  * system, it is sitting behind a PCI bridge.
1248  */
1249 
1250 extern int pci_enumerate_bus(struct pci_softc *,
1251     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1252 
1253 pcitag_t vga_bridge_tag;
1254 int vga_bridge_disabled;
1255 
1256 int
1257 vga_disable_bridge(struct pci_attach_args *pa)
1258 {
1259 	pcireg_t bhlc, bc;
1260 
1261 	if (pa->pa_domain != 0)
1262 		return 0;
1263 
1264 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1265 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1266 		return 0;
1267 
1268 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1269 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1270 		return 0;
1271 	bc &= ~PPB_BC_VGA_ENABLE;
1272 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1273 
1274 	vga_bridge_tag = pa->pa_tag;
1275 	vga_bridge_disabled = 1;
1276 
1277 	return 1;
1278 }
1279 
1280 void
1281 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1282 {
1283 	KASSERT(pdev->pci->sc_bridgetag == NULL);
1284 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1285 }
1286 
1287 void
1288 vga_put(struct pci_dev *pdev, int rsrc)
1289 {
1290 	pcireg_t bc;
1291 
1292 	if (!vga_bridge_disabled)
1293 		return;
1294 
1295 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1296 	bc |= PPB_BC_VGA_ENABLE;
1297 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1298 
1299 	vga_bridge_disabled = 0;
1300 }
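
/*
 * Editor's note (not part of the upstream file): a sketch of how legacy
 * VGA access is bracketed, as inteldrm(4) does it.  The rsrc argument
 * (VGA_RSRC_LEGACY_IO etc. in Linux) is ignored by this implementation.
 */
#if 0
static void
example_poke_legacy_vga(struct pci_dev *pdev)
{
	vga_get_uninterruptible(pdev, 0);
	/* touch the 0x3c0-0x3df legacy VGA registers here */
	vga_put(pdev, 0);
}
#endif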
1301 
1302 #endif
1303 
1304 /*
1305  * ACPI types and interfaces.
1306  */
1307 
1308 #ifdef __HAVE_ACPI
1309 #include "acpi.h"
1310 #endif
1311 
1312 #if NACPI > 0
1313 
1314 #include <dev/acpi/acpireg.h>
1315 #include <dev/acpi/acpivar.h>
1316 #include <dev/acpi/amltypes.h>
1317 #include <dev/acpi/dsdt.h>
1318 
1319 acpi_status
1320 acpi_get_table(const char *sig, int instance,
1321     struct acpi_table_header **hdr)
1322 {
1323 	struct acpi_softc *sc = acpi_softc;
1324 	struct acpi_q *entry;
1325 
1326 	KASSERT(instance == 1);
1327 
1328 	if (sc == NULL)
1329 		return AE_NOT_FOUND;
1330 
1331 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1332 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1333 			*hdr = entry->q_table;
1334 			return 0;
1335 		}
1336 	}
1337 
1338 	return AE_NOT_FOUND;
1339 }
1340 
1341 void
1342 acpi_put_table(struct acpi_table_header *hdr)
1343 {
1344 }
1345 
1346 acpi_status
1347 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1348 {
1349 	node = aml_searchname(node, name);
1350 	if (node == NULL)
1351 		return AE_NOT_FOUND;
1352 
1353 	*rnode = node;
1354 	return 0;
1355 }
1356 
1357 acpi_status
1358 acpi_get_name(acpi_handle node, int type,  struct acpi_buffer *buffer)
1359 {
1360 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1361 	KASSERT(type == ACPI_FULL_PATHNAME);
1362 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1363 	return 0;
1364 }
1365 
1366 acpi_status
1367 acpi_evaluate_object(acpi_handle node, const char *name,
1368     struct acpi_object_list *params, struct acpi_buffer *result)
1369 {
1370 	struct aml_value args[4], res;
1371 	union acpi_object *obj;
1372 	uint8_t *data;
1373 	int i;
1374 
1375 	KASSERT(params->count <= nitems(args));
1376 
1377 	for (i = 0; i < params->count; i++) {
1378 		args[i].type = params->pointer[i].type;
1379 		switch (args[i].type) {
1380 		case AML_OBJTYPE_INTEGER:
1381 			args[i].v_integer = params->pointer[i].integer.value;
1382 			break;
1383 		case AML_OBJTYPE_BUFFER:
1384 			args[i].length = params->pointer[i].buffer.length;
1385 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1386 			break;
1387 		default:
1388 			printf("%s: arg type 0x%02x\n", __func__, args[i].type);
1389 			return AE_BAD_PARAMETER;
1390 		}
1391 	}
1392 
1393 	if (name) {
1394 		node = aml_searchname(node, name);
1395 		if (node == NULL)
1396 			return AE_NOT_FOUND;
1397 	}
1398 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1399 		aml_freevalue(&res);
1400 		return AE_ERROR;
1401 	}
1402 
1403 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1404 
1405 	result->length = sizeof(union acpi_object);
1406 	switch (res.type) {
1407 	case AML_OBJTYPE_BUFFER:
1408 		result->length += res.length;
1409 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1410 		obj = (union acpi_object *)result->pointer;
1411 		data = (uint8_t *)(obj + 1);
1412 		obj->type = res.type;
1413 		obj->buffer.length = res.length;
1414 		obj->buffer.pointer = data;
1415 		memcpy(data, res.v_buffer, res.length);
1416 		break;
1417 	default:
1418 		printf("%s: return type 0x%02x\n", __func__, res.type);
1419 		aml_freevalue(&res);
1420 		return AE_ERROR;
1421 	}
1422 
1423 	aml_freevalue(&res);
1424 	return 0;
1425 }
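
/*
 * Editor's note (not part of the upstream file): a sketch of evaluating a
 * method with one integer argument and a buffer result, which is the
 * shape acpi_evaluate_object() above supports.  The method name "EXMP"
 * is made up; AML_OBJTYPE_INTEGER is used directly because that is what
 * the argument conversion above checks for.
 */
#if 0
static int
example_acpi_eval(acpi_handle node)
{
	union acpi_object arg;
	struct acpi_object_list args;
	struct acpi_buffer result;
	union acpi_object *obj;

	arg.type = AML_OBJTYPE_INTEGER;
	arg.integer.value = 1;
	args.count = 1;
	args.pointer = &arg;
	result.length = ACPI_ALLOCATE_BUFFER;
	result.pointer = NULL;

	if (acpi_evaluate_object(node, "EXMP", &args, &result) != 0)
		return -ENODEV;

	obj = result.pointer;
	/* obj->buffer.length bytes of payload at obj->buffer.pointer */
	free(result.pointer, M_DRM, result.length);
	return 0;
}
#endif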
1426 
1427 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1428 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1429 
1430 int
1431 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1432 {
1433 	struct acpi_bus_event event;
1434 	struct notifier_block *nb;
1435 
1436 	event.device_class = ACPI_VIDEO_CLASS;
1437 	event.type = notify;
1438 
1439 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1440 		nb->notifier_call(nb, 0, &event);
1441 	return 0;
1442 }
1443 
1444 int
1445 register_acpi_notifier(struct notifier_block *nb)
1446 {
1447 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1448 	return 0;
1449 }
1450 
1451 int
1452 unregister_acpi_notifier(struct notifier_block *nb)
1453 {
1454 	struct notifier_block *tmp;
1455 
1456 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1457 		if (tmp == nb) {
1458 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1459 			    notifier_block, link);
1460 			return 0;
1461 		}
1462 	}
1463 
1464 	return -ENOENT;
1465 }
1466 
1467 const char *
1468 acpi_format_exception(acpi_status status)
1469 {
1470 	switch (status) {
1471 	case AE_NOT_FOUND:
1472 		return "not found";
1473 	case AE_BAD_PARAMETER:
1474 		return "bad parameter";
1475 	default:
1476 		return "unknown";
1477 	}
1478 }
1479 
1480 #endif
1481 
1482 void
1483 backlight_do_update_status(void *arg)
1484 {
1485 	backlight_update_status(arg);
1486 }
1487 
1488 struct backlight_device *
1489 backlight_device_register(const char *name, void *kdev, void *data,
1490     const struct backlight_ops *ops, struct backlight_properties *props)
1491 {
1492 	struct backlight_device *bd;
1493 
1494 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1495 	bd->ops = ops;
1496 	bd->props = *props;
1497 	bd->data = data;
1498 
1499 	task_set(&bd->task, backlight_do_update_status, bd);
1500 
1501 	return bd;
1502 }
1503 
1504 void
1505 backlight_device_unregister(struct backlight_device *bd)
1506 {
1507 	free(bd, M_DRM, sizeof(*bd));
1508 }
1509 
1510 struct backlight_device *
1511 devm_backlight_device_register(void *dev, const char *name, void *parent,
1512     void *data, const struct backlight_ops *bo,
1513     const struct backlight_properties *bp)
1514 {
1515 	STUB();
1516 	return NULL;
1517 }
1518 
1519 void
1520 backlight_schedule_update_status(struct backlight_device *bd)
1521 {
1522 	task_add(systq, &bd->task);
1523 }
1524 
1525 inline int
1526 backlight_enable(struct backlight_device *bd)
1527 {
1528 	if (bd == NULL)
1529 		return 0;
1530 
1531 	bd->props.power = FB_BLANK_UNBLANK;
1532 
1533 	return bd->ops->update_status(bd);
1534 }
1535 
1536 inline int
1537 backlight_disable(struct backlight_device *bd)
1538 {
1539 	if (bd == NULL)
1540 		return 0;
1541 
1542 	bd->props.power = FB_BLANK_POWERDOWN;
1543 
1544 	return bd->ops->update_status(bd);
1545 }
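
/*
 * Editor's note (not part of the upstream file): a sketch of registering
 * a backlight device and pushing a brightness change through the systq
 * task.  The props.brightness/max_brightness members are part of the
 * Linux API and assumed to be present in the compat headers.
 */
#if 0
static int
example_bl_update_status(struct backlight_device *bd)
{
	/* program the hardware from bd->props.brightness here */
	return 0;
}

static const struct backlight_ops example_bl_ops = {
	.update_status = example_bl_update_status,
};

static void
example_backlight(void *cookie)
{
	struct backlight_properties props = { .max_brightness = 255 };
	struct backlight_device *bd;

	bd = backlight_device_register("example", NULL, cookie,
	    &example_bl_ops, &props);
	bd->props.brightness = 128;
	backlight_schedule_update_status(bd);
}
#endif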
1546 
1547 void
1548 drm_sysfs_hotplug_event(struct drm_device *dev)
1549 {
1550 	knote_locked(&dev->note, NOTE_CHANGE);
1551 }
1552 
1553 void
1554 drm_sysfs_connector_hotplug_event(struct drm_connector *connector)
1555 {
1556 	knote_locked(&connector->dev->note, NOTE_CHANGE);
1557 }
1558 
1559 void
1560 drm_sysfs_connector_status_event(struct drm_connector *connector,
1561     struct drm_property *property)
1562 {
1563 	STUB();
1564 }
1565 
1566 struct dma_fence *
1567 dma_fence_get(struct dma_fence *fence)
1568 {
1569 	if (fence)
1570 		kref_get(&fence->refcount);
1571 	return fence;
1572 }
1573 
1574 struct dma_fence *
1575 dma_fence_get_rcu(struct dma_fence *fence)
1576 {
1577 	if (fence)
1578 		kref_get(&fence->refcount);
1579 	return fence;
1580 }
1581 
1582 struct dma_fence *
1583 dma_fence_get_rcu_safe(struct dma_fence **dfp)
1584 {
1585 	struct dma_fence *fence;
1586 	if (dfp == NULL)
1587 		return NULL;
1588 	fence = *dfp;
1589 	if (fence)
1590 		kref_get(&fence->refcount);
1591 	return fence;
1592 }
1593 
1594 void
1595 dma_fence_release(struct kref *ref)
1596 {
1597 	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
1598 	if (fence->ops && fence->ops->release)
1599 		fence->ops->release(fence);
1600 	else
1601 		free(fence, M_DRM, 0);
1602 }
1603 
1604 void
1605 dma_fence_put(struct dma_fence *fence)
1606 {
1607 	if (fence)
1608 		kref_put(&fence->refcount, dma_fence_release);
1609 }
1610 
1611 int
1612 dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp)
1613 {
1614 	struct dma_fence_cb *cur, *tmp;
1615 	struct list_head cb_list;
1616 
1617 	if (fence == NULL)
1618 		return -EINVAL;
1619 
1620 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1621 		return -EINVAL;
1622 
1623 	list_replace(&fence->cb_list, &cb_list);
1624 
1625 	fence->timestamp = timestamp;
1626 	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
1627 
1628 	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
1629 		INIT_LIST_HEAD(&cur->node);
1630 		cur->func(fence, cur);
1631 	}
1632 
1633 	return 0;
1634 }
1635 
1636 int
1637 dma_fence_signal(struct dma_fence *fence)
1638 {
1639 	int r;
1640 
1641 	if (fence == NULL)
1642 		return -EINVAL;
1643 
1644 	mtx_enter(fence->lock);
1645 	r = dma_fence_signal_timestamp_locked(fence, ktime_get());
1646 	mtx_leave(fence->lock);
1647 
1648 	return r;
1649 }
1650 
1651 int
1652 dma_fence_signal_locked(struct dma_fence *fence)
1653 {
1654 	if (fence == NULL)
1655 		return -EINVAL;
1656 
1657 	return dma_fence_signal_timestamp_locked(fence, ktime_get());
1658 }
1659 
1660 int
1661 dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
1662 {
1663 	int r;
1664 
1665 	if (fence == NULL)
1666 		return -EINVAL;
1667 
1668 	mtx_enter(fence->lock);
1669 	r = dma_fence_signal_timestamp_locked(fence, timestamp);
1670 	mtx_leave(fence->lock);
1671 
1672 	return r;
1673 }
1674 
1675 bool
1676 dma_fence_is_signaled(struct dma_fence *fence)
1677 {
1678 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1679 		return true;
1680 
1681 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1682 		dma_fence_signal(fence);
1683 		return true;
1684 	}
1685 
1686 	return false;
1687 }
1688 
1689 bool
1690 dma_fence_is_signaled_locked(struct dma_fence *fence)
1691 {
1692 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1693 		return true;
1694 
1695 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1696 		dma_fence_signal_locked(fence);
1697 		return true;
1698 	}
1699 
1700 	return false;
1701 }
1702 
1703 long
1704 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
1705 {
1706 	if (timeout < 0)
1707 		return -EINVAL;
1708 
1709 	if (fence->ops->wait)
1710 		return fence->ops->wait(fence, intr, timeout);
1711 	else
1712 		return dma_fence_default_wait(fence, intr, timeout);
1713 }
1714 
1715 long
1716 dma_fence_wait(struct dma_fence *fence, bool intr)
1717 {
1718 	long ret;
1719 
1720 	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
1721 	if (ret < 0)
1722 		return ret;
1723 
1724 	return 0;
1725 }
1726 
1727 void
1728 dma_fence_enable_sw_signaling(struct dma_fence *fence)
1729 {
1730 	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
1731 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
1732 	    fence->ops->enable_signaling) {
1733 		mtx_enter(fence->lock);
1734 		if (!fence->ops->enable_signaling(fence))
1735 			dma_fence_signal_locked(fence);
1736 		mtx_leave(fence->lock);
1737 	}
1738 }
1739 
1740 void
1741 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
1742     struct mutex *lock, uint64_t context, uint64_t seqno)
1743 {
1744 	fence->ops = ops;
1745 	fence->lock = lock;
1746 	fence->context = context;
1747 	fence->seqno = seqno;
1748 	fence->flags = 0;
1749 	fence->error = 0;
1750 	kref_init(&fence->refcount);
1751 	INIT_LIST_HEAD(&fence->cb_list);
1752 }
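
/*
 * Editor's note (not part of the upstream file): a sketch of the smallest
 * useful driver fence built on the helpers above.  Only the two name
 * hooks are mandatory; enable_signaling/signaled/release stay NULL, so
 * dma_fence_release() falls back to free(9).
 */
#if 0
static const char *
example_fence_name(struct dma_fence *fence)
{
	return "example";
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = example_fence_name,
	.get_timeline_name = example_fence_name,
};

static struct mutex example_fence_mtx = MUTEX_INITIALIZER(IPL_TTY);

static void
example_fence_cycle(void)
{
	struct dma_fence *f;

	f = malloc(sizeof(*f), M_DRM, M_WAITOK | M_ZERO);
	dma_fence_init(f, &example_fence_ops, &example_fence_mtx,
	    dma_fence_context_alloc(1), 1);

	/* producer side, e.g. from the interrupt handler */
	dma_fence_signal(f);

	/* consumer side */
	if (dma_fence_wait_timeout(f, false, 2 * hz) <= 0)
		printf("example fence timed out\n");

	dma_fence_put(f);
}
#endif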
1753 
1754 int
1755 dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
1756     dma_fence_func_t func)
1757 {
1758 	int ret = 0;
1759 	bool was_set;
1760 
1761 	if (WARN_ON(!fence || !func))
1762 		return -EINVAL;
1763 
1764 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1765 		INIT_LIST_HEAD(&cb->node);
1766 		return -ENOENT;
1767 	}
1768 
1769 	mtx_enter(fence->lock);
1770 
1771 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
1772 
1773 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1774 		ret = -ENOENT;
1775 	else if (!was_set && fence->ops->enable_signaling) {
1776 		if (!fence->ops->enable_signaling(fence)) {
1777 			dma_fence_signal_locked(fence);
1778 			ret = -ENOENT;
1779 		}
1780 	}
1781 
1782 	if (!ret) {
1783 		cb->func = func;
1784 		list_add_tail(&cb->node, &fence->cb_list);
1785 	} else
1786 		INIT_LIST_HEAD(&cb->node);
1787 	mtx_leave(fence->lock);
1788 
1789 	return ret;
1790 }
1791 
1792 bool
1793 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
1794 {
1795 	bool ret;
1796 
1797 	mtx_enter(fence->lock);
1798 
1799 	ret = !list_empty(&cb->node);
1800 	if (ret)
1801 		list_del_init(&cb->node);
1802 
1803 	mtx_leave(fence->lock);
1804 
1805 	return ret;
1806 }
1807 
1808 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1809 
1810 uint64_t
1811 dma_fence_context_alloc(unsigned int num)
1812 {
1813 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1814 }
1815 
1816 struct default_wait_cb {
1817 	struct dma_fence_cb base;
1818 	struct proc *proc;
1819 };
1820 
1821 static void
1822 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1823 {
1824 	struct default_wait_cb *wait =
1825 	    container_of(cb, struct default_wait_cb, base);
1826 	wake_up_process(wait->proc);
1827 }
1828 
1829 long
1830 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1831 {
1832 	long ret = timeout ? timeout : 1;
1833 	unsigned long end;
1834 	int err;
1835 	struct default_wait_cb cb;
1836 	bool was_set;
1837 
1838 	KASSERT(timeout <= INT_MAX);
1839 
1840 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1841 		return ret;
1842 
1843 	mtx_enter(fence->lock);
1844 
1845 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1846 	    &fence->flags);
1847 
1848 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1849 		goto out;
1850 
1851 	if (!was_set && fence->ops->enable_signaling) {
1852 		if (!fence->ops->enable_signaling(fence)) {
1853 			dma_fence_signal_locked(fence);
1854 			goto out;
1855 		}
1856 	}
1857 
1858 	if (timeout == 0) {
1859 		ret = 0;
1860 		goto out;
1861 	}
1862 
1863 	cb.base.func = dma_fence_default_wait_cb;
1864 	cb.proc = curproc;
1865 	list_add(&cb.base.node, &fence->cb_list);
1866 
1867 	end = jiffies + timeout;
1868 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1869 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1870 			break;
1871 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
1872 		    "dmafence", ret);
1873 		if (err == EINTR || err == ERESTART) {
1874 			ret = -ERESTARTSYS;
1875 			break;
1876 		}
1877 	}
1878 
1879 	if (!list_empty(&cb.base.node))
1880 		list_del(&cb.base.node);
1881 out:
1882 	mtx_leave(fence->lock);
1883 
1884 	return ret;
1885 }
1886 
1887 static bool
1888 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1889     uint32_t *idx)
1890 {
1891 	int i;
1892 
1893 	for (i = 0; i < count; ++i) {
1894 		struct dma_fence *fence = fences[i];
1895 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1896 			if (idx)
1897 				*idx = i;
1898 			return true;
1899 		}
1900 	}
1901 	return false;
1902 }
1903 
1904 long
1905 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
1906     bool intr, long timeout, uint32_t *idx)
1907 {
1908 	struct default_wait_cb *cb;
1909 	long ret = timeout;
1910 	unsigned long end;
1911 	int i, err;
1912 
1913 	KASSERT(timeout <= INT_MAX);
1914 
1915 	if (timeout == 0) {
1916 		for (i = 0; i < count; i++) {
1917 			if (dma_fence_is_signaled(fences[i])) {
1918 				if (idx)
1919 					*idx = i;
1920 				return 1;
1921 			}
1922 		}
1923 		return 0;
1924 	}
1925 
1926 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1927 	if (cb == NULL)
1928 		return -ENOMEM;
1929 
1930 	for (i = 0; i < count; i++) {
1931 		struct dma_fence *fence = fences[i];
1932 		cb[i].proc = curproc;
1933 		if (dma_fence_add_callback(fence, &cb[i].base,
1934 		    dma_fence_default_wait_cb)) {
1935 			if (idx)
1936 				*idx = i;
1937 			goto cb_cleanup;
1938 		}
1939 	}
1940 
1941 	end = jiffies + timeout;
1942 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1943 		if (dma_fence_test_signaled_any(fences, count, idx))
1944 			break;
1945 		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
1946 		if (err == EINTR || err == ERESTART) {
1947 			ret = -ERESTARTSYS;
1948 			break;
1949 		}
1950 	}
1951 
1952 cb_cleanup:
1953 	while (i-- > 0)
1954 		dma_fence_remove_callback(fences[i], &cb[i].base);
1955 	free(cb, M_DRM, count * sizeof(*cb));
1956 	return ret;
1957 }
1958 
1959 static struct dma_fence dma_fence_stub;
1960 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
1961 
1962 static const char *
1963 dma_fence_stub_get_name(struct dma_fence *fence)
1964 {
1965 	return "stub";
1966 }
1967 
1968 static const struct dma_fence_ops dma_fence_stub_ops = {
1969 	.get_driver_name = dma_fence_stub_get_name,
1970 	.get_timeline_name = dma_fence_stub_get_name,
1971 };
1972 
1973 struct dma_fence *
1974 dma_fence_get_stub(void)
1975 {
1976 	mtx_enter(&dma_fence_stub_mtx);
1977 	if (dma_fence_stub.ops == NULL) {
1978 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
1979 		    &dma_fence_stub_mtx, 0, 0);
1980 		dma_fence_signal_locked(&dma_fence_stub);
1981 	}
1982 	mtx_leave(&dma_fence_stub_mtx);
1983 
1984 	return dma_fence_get(&dma_fence_stub);
1985 }
1986 
1987 struct dma_fence *
1988 dma_fence_allocate_private_stub(void)
1989 {
1990 	struct dma_fence *f = malloc(sizeof(*f), M_DRM,
1991 	    M_ZERO | M_WAITOK | M_CANFAIL);
1992 	if (f == NULL)
1993 		return ERR_PTR(-ENOMEM);
1994 	dma_fence_init(f, &dma_fence_stub_ops, &dma_fence_stub_mtx, 0, 0);
1995 	dma_fence_signal(f);
1996 	return f;
1997 }
1998 
1999 static const char *
2000 dma_fence_array_get_driver_name(struct dma_fence *fence)
2001 {
2002 	return "dma_fence_array";
2003 }
2004 
2005 static const char *
2006 dma_fence_array_get_timeline_name(struct dma_fence *fence)
2007 {
2008 	return "unbound";
2009 }
2010 
2011 static void
2012 irq_dma_fence_array_work(void *arg)
2013 {
2014 	struct dma_fence_array *dfa = (struct dma_fence_array *)arg;
2015 	dma_fence_signal(&dfa->base);
2016 	dma_fence_put(&dfa->base);
2017 }
2018 
2019 static void
2020 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
2021 {
2022 	struct dma_fence_array_cb *array_cb =
2023 	    container_of(cb, struct dma_fence_array_cb, cb);
2024 	struct dma_fence_array *dfa = array_cb->array;
2025 
2026 	if (atomic_dec_and_test(&dfa->num_pending))
2027 		timeout_add(&dfa->to, 1);
2028 	else
2029 		dma_fence_put(&dfa->base);
2030 }
2031 
2032 static bool
2033 dma_fence_array_enable_signaling(struct dma_fence *fence)
2034 {
2035 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2036 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
2037 	int i;
2038 
2039 	for (i = 0; i < dfa->num_fences; ++i) {
2040 		cb[i].array = dfa;
2041 		dma_fence_get(&dfa->base);
2042 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
2043 		    dma_fence_array_cb_func)) {
2044 			dma_fence_put(&dfa->base);
2045 			if (atomic_dec_and_test(&dfa->num_pending))
2046 				return false;
2047 		}
2048 	}
2049 
2050 	return true;
2051 }
2052 
2053 static bool
2054 dma_fence_array_signaled(struct dma_fence *fence)
2055 {
2056 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2057 
2058 	return atomic_read(&dfa->num_pending) <= 0;
2059 }
2060 
2061 static void
2062 dma_fence_array_release(struct dma_fence *fence)
2063 {
2064 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2065 	int i;
2066 
2067 	for (i = 0; i < dfa->num_fences; ++i)
2068 		dma_fence_put(dfa->fences[i]);
2069 
2070 	free(dfa->fences, M_DRM, 0);
2071 	dma_fence_free(fence);
2072 }
2073 
2074 struct dma_fence_array *
2075 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
2076     unsigned seqno, bool signal_on_any)
2077 {
2078 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
2079 	    (num_fences * sizeof(struct dma_fence_array_cb)),
2080 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
2081 	if (dfa == NULL)
2082 		return NULL;
2083 
2084 	mtx_init(&dfa->lock, IPL_TTY);
2085 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
2086 	    context, seqno);
2087 	timeout_set(&dfa->to, irq_dma_fence_array_work, dfa);
2088 
2089 	dfa->num_fences = num_fences;
2090 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
2091 	dfa->fences = fences;
2092 
2093 	return dfa;
2094 }
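
/*
 * Editor's note (not part of the upstream file): a sketch of merging two
 * fences.  dma_fence_array_create() takes ownership of both the fences[]
 * allocation (released with M_DRM in dma_fence_array_release() above)
 * and the fence references stored in it.
 */
#if 0
static struct dma_fence *
example_fence_merge(struct dma_fence *a, struct dma_fence *b)
{
	struct dma_fence **fences;
	struct dma_fence_array *dfa;

	fences = mallocarray(2, sizeof(*fences), M_DRM, M_WAITOK);
	fences[0] = dma_fence_get(a);
	fences[1] = dma_fence_get(b);

	dfa = dma_fence_array_create(2, fences, dma_fence_context_alloc(1),
	    1, false);
	if (dfa == NULL) {
		dma_fence_put(a);
		dma_fence_put(b);
		free(fences, M_DRM, 2 * sizeof(*fences));
		return NULL;
	}

	/* signals once both a and b have signaled */
	return &dfa->base;
}
#endif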
2095 
2096 struct dma_fence *
2097 dma_fence_array_first(struct dma_fence *f)
2098 {
2099 	struct dma_fence_array *dfa;
2100 
2101 	if (f == NULL)
2102 		return NULL;
2103 
2104 	if ((dfa = to_dma_fence_array(f)) == NULL)
2105 		return f;
2106 
2107 	if (dfa->num_fences > 0)
2108 		return dfa->fences[0];
2109 
2110 	return NULL;
2111 }
2112 
2113 struct dma_fence *
2114 dma_fence_array_next(struct dma_fence *f, unsigned int i)
2115 {
2116 	struct dma_fence_array *dfa;
2117 
2118 	if (f == NULL)
2119 		return NULL;
2120 
2121 	if ((dfa = to_dma_fence_array(f)) == NULL)
2122 		return NULL;
2123 
2124 	if (i < dfa->num_fences)
2125 		return dfa->fences[i];
2126 
2127 	return NULL;
2128 }
2129 
2130 const struct dma_fence_ops dma_fence_array_ops = {
2131 	.get_driver_name = dma_fence_array_get_driver_name,
2132 	.get_timeline_name = dma_fence_array_get_timeline_name,
2133 	.enable_signaling = dma_fence_array_enable_signaling,
2134 	.signaled = dma_fence_array_signaled,
2135 	.release = dma_fence_array_release,
2136 };
2137 
2138 int
2139 dma_fence_chain_find_seqno(struct dma_fence **df, uint64_t seqno)
2140 {
2141 	struct dma_fence_chain *chain;
2142 	struct dma_fence *fence;
2143 
2144 	if (seqno == 0)
2145 		return 0;
2146 
2147 	if ((chain = to_dma_fence_chain(*df)) == NULL)
2148 		return -EINVAL;
2149 
2150 	fence = &chain->base;
2151 	if (fence->seqno < seqno)
2152 		return -EINVAL;
2153 
2154 	dma_fence_chain_for_each(*df, fence) {
2155 		if ((*df)->context != fence->context)
2156 			break;
2157 
2158 		chain = to_dma_fence_chain(*df);
2159 		if (chain->prev_seqno < seqno)
2160 			break;
2161 	}
2162 	dma_fence_put(fence);
2163 
2164 	return 0;
2165 }
2166 
2167 void
2168 dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev,
2169     struct dma_fence *fence, uint64_t seqno)
2170 {
2171 	uint64_t context;
2172 
2173 	chain->fence = fence;
2174 	chain->prev = prev;
2175 	mtx_init(&chain->lock, IPL_TTY);
2176 
2177 	/* if prev is a chain */
2178 	if (to_dma_fence_chain(prev) != NULL) {
2179 		if (__dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
2180 			chain->prev_seqno = prev->seqno;
2181 			context = prev->context;
2182 		} else {
2183 			chain->prev_seqno = 0;
2184 			context = dma_fence_context_alloc(1);
2185 			seqno = prev->seqno;
2186 		}
2187 	} else {
2188 		chain->prev_seqno = 0;
2189 		context = dma_fence_context_alloc(1);
2190 	}
2191 
2192 	dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock,
2193 	    context, seqno);
2194 }
2195 
2196 static const char *
2197 dma_fence_chain_get_driver_name(struct dma_fence *fence)
2198 {
2199 	return "dma_fence_chain";
2200 }
2201 
2202 static const char *
2203 dma_fence_chain_get_timeline_name(struct dma_fence *fence)
2204 {
2205 	return "unbound";
2206 }
2207 
2208 static bool dma_fence_chain_enable_signaling(struct dma_fence *);
2209 
2210 static void
2211 dma_fence_chain_timo(void *arg)
2212 {
2213 	struct dma_fence_chain *chain = (struct dma_fence_chain *)arg;
2214 
2215 	if (dma_fence_chain_enable_signaling(&chain->base) == false)
2216 		dma_fence_signal(&chain->base);
2217 	dma_fence_put(&chain->base);
2218 }
2219 
2220 static void
2221 dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
2222 {
2223 	struct dma_fence_chain *chain =
2224 	    container_of(cb, struct dma_fence_chain, cb);
2225 	timeout_set(&chain->to, dma_fence_chain_timo, chain);
2226 	timeout_add(&chain->to, 1);
2227 	dma_fence_put(f);
2228 }
2229 
2230 static bool
2231 dma_fence_chain_enable_signaling(struct dma_fence *fence)
2232 {
2233 	struct dma_fence_chain *chain, *h;
2234 	struct dma_fence *f;
2235 
2236 	h = to_dma_fence_chain(fence);
2237 	dma_fence_get(&h->base);
2238 	dma_fence_chain_for_each(fence, &h->base) {
2239 		chain = to_dma_fence_chain(fence);
2240 		if (chain == NULL)
2241 			f = fence;
2242 		else
2243 			f = chain->fence;
2244 
2245 		dma_fence_get(f);
2246 		if (!dma_fence_add_callback(f, &h->cb, dma_fence_chain_cb)) {
2247 			dma_fence_put(fence);
2248 			return true;
2249 		}
2250 		dma_fence_put(f);
2251 	}
2252 	dma_fence_put(&h->base);
2253 	return false;
2254 }
2255 
2256 static bool
2257 dma_fence_chain_signaled(struct dma_fence *fence)
2258 {
2259 	struct dma_fence_chain *chain;
2260 	struct dma_fence *f;
2261 
2262 	dma_fence_chain_for_each(fence, fence) {
2263 		chain = to_dma_fence_chain(fence);
2264 		if (chain == NULL)
2265 			f = fence;
2266 		else
2267 			f = chain->fence;
2268 
2269 		if (dma_fence_is_signaled(f) == false) {
2270 			dma_fence_put(fence);
2271 			return false;
2272 		}
2273 	}
2274 	return true;
2275 }
2276 
2277 static void
2278 dma_fence_chain_release(struct dma_fence *fence)
2279 {
2280 	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
2281 	struct dma_fence_chain *prev_chain;
2282 	struct dma_fence *prev;
2283 
2284 	for (prev = chain->prev; prev != NULL; prev = chain->prev) {
2285 		if (kref_read(&prev->refcount) > 1)
2286 			break;
2287 		if ((prev_chain = to_dma_fence_chain(prev)) == NULL)
2288 			break;
2289 		chain->prev = prev_chain->prev;
2290 		prev_chain->prev = NULL;
2291 		dma_fence_put(prev);
2292 	}
2293 	dma_fence_put(prev);
2294 	dma_fence_put(chain->fence);
2295 	dma_fence_free(fence);
2296 }
2297 
2298 struct dma_fence *
2299 dma_fence_chain_walk(struct dma_fence *fence)
2300 {
2301 	struct dma_fence_chain *chain = to_dma_fence_chain(fence), *prev_chain;
2302 	struct dma_fence *prev, *new_prev, *tmp;
2303 
2304 	if (chain == NULL) {
2305 		dma_fence_put(fence);
2306 		return NULL;
2307 	}
2308 
2309 	while ((prev = dma_fence_get(chain->prev)) != NULL) {
2310 		prev_chain = to_dma_fence_chain(prev);
2311 		if (prev_chain != NULL) {
2312 			if (!dma_fence_is_signaled(prev_chain->fence))
2313 				break;
2314 			new_prev = dma_fence_get(prev_chain->prev);
2315 		} else {
2316 			if (!dma_fence_is_signaled(prev))
2317 				break;
2318 			new_prev = NULL;
2319 		}
2320 		tmp = atomic_cas_ptr(&chain->prev, prev, new_prev);
2321 		dma_fence_put(tmp == prev ? prev : new_prev);
2322 		dma_fence_put(prev);
2323 	}
2324 
2325 	dma_fence_put(fence);
2326 	return prev;
2327 }
2328 
2329 const struct dma_fence_ops dma_fence_chain_ops = {
2330 	.get_driver_name = dma_fence_chain_get_driver_name,
2331 	.get_timeline_name = dma_fence_chain_get_timeline_name,
2332 	.enable_signaling = dma_fence_chain_enable_signaling,
2333 	.signaled = dma_fence_chain_signaled,
2334 	.release = dma_fence_chain_release,
2335 	.use_64bit_seqno = true,
2336 };
2337 
2338 bool
2339 dma_fence_is_container(struct dma_fence *fence)
2340 {
2341 	return (fence->ops == &dma_fence_chain_ops) ||
2342 	    (fence->ops == &dma_fence_array_ops);
2343 }
2344 
2345 int
2346 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
2347 {
2348 	return (ENXIO);
2349 }
2350 
2351 int
2352 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
2353 {
2354 	return (ENXIO);
2355 }
2356 
2357 int
2358 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2359 {
2360 	return (ENOTTY);
2361 }
2362 
2363 int
2364 dmabuf_kqfilter(struct file *fp, struct knote *kn)
2365 {
2366 	return (EINVAL);
2367 }
2368 
2369 int
2370 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
2371 {
2372 	struct dma_buf *dmabuf = fp->f_data;
2373 
2374 	memset(st, 0, sizeof(*st));
2375 	st->st_size = dmabuf->size;
2376 	st->st_mode = S_IFIFO;	/* XXX */
2377 	return (0);
2378 }
2379 
2380 int
2381 dmabuf_close(struct file *fp, struct proc *p)
2382 {
2383 	struct dma_buf *dmabuf = fp->f_data;
2384 
2385 	fp->f_data = NULL;
2386 	KERNEL_LOCK();
2387 	dmabuf->ops->release(dmabuf);
2388 	KERNEL_UNLOCK();
2389 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
2390 	return (0);
2391 }
2392 
2393 int
2394 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2395 {
2396 	struct dma_buf *dmabuf = fp->f_data;
2397 	off_t newoff;
2398 
2399 	if (*offset != 0)
2400 		return (EINVAL);
2401 
2402 	switch (whence) {
2403 	case SEEK_SET:
2404 		newoff = 0;
2405 		break;
2406 	case SEEK_END:
2407 		newoff = dmabuf->size;
2408 		break;
2409 	default:
2410 		return (EINVAL);
2411 	}
2412 	mtx_enter(&fp->f_mtx);
2413 	fp->f_offset = newoff;
2414 	mtx_leave(&fp->f_mtx);
2415 	*offset = newoff;
2416 	return (0);
2417 }
2418 
2419 const struct fileops dmabufops = {
2420 	.fo_read	= dmabuf_read,
2421 	.fo_write	= dmabuf_write,
2422 	.fo_ioctl	= dmabuf_ioctl,
2423 	.fo_kqfilter	= dmabuf_kqfilter,
2424 	.fo_stat	= dmabuf_stat,
2425 	.fo_close	= dmabuf_close,
2426 	.fo_seek	= dmabuf_seek,
2427 };
2428 
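/*
 * Allocate a struct dma_buf for the exporter and back it with a file
 * using dmabufops; dma_buf_fd() later installs that file in the
 * caller's descriptor table.
 */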
2429 struct dma_buf *
2430 dma_buf_export(const struct dma_buf_export_info *info)
2431 {
2432 	struct proc *p = curproc;
2433 	struct dma_buf *dmabuf;
2434 	struct file *fp;
2435 
2436 	fp = fnew(p);
2437 	if (fp == NULL)
2438 		return ERR_PTR(-ENFILE);
2439 	fp->f_type = DTYPE_DMABUF;
2440 	fp->f_ops = &dmabufops;
2441 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
2442 	dmabuf->priv = info->priv;
2443 	dmabuf->ops = info->ops;
2444 	dmabuf->size = info->size;
2445 	dmabuf->file = fp;
2446 	fp->f_data = dmabuf;
2447 	INIT_LIST_HEAD(&dmabuf->attachments);
2448 	return dmabuf;
2449 }
2450 
2451 struct dma_buf *
2452 dma_buf_get(int fd)
2453 {
2454 	struct proc *p = curproc;
2455 	struct filedesc *fdp = p->p_fd;
2456 	struct file *fp;
2457 
2458 	if ((fp = fd_getfile(fdp, fd)) == NULL)
2459 		return ERR_PTR(-EBADF);
2460 
2461 	if (fp->f_type != DTYPE_DMABUF) {
2462 		FRELE(fp, p);
2463 		return ERR_PTR(-EINVAL);
2464 	}
2465 
2466 	return fp->f_data;
2467 }
2468 
2469 void
2470 dma_buf_put(struct dma_buf *dmabuf)
2471 {
2472 	KASSERT(dmabuf);
2473 	KASSERT(dmabuf->file);
2474 
2475 	FRELE(dmabuf->file, curproc);
2476 }
2477 
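/*
 * Allocate a descriptor for the dma-buf's backing file, growing the
 * table and retrying on ENOSPC.  Returns the fd or a negative errno.
 */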
2478 int
2479 dma_buf_fd(struct dma_buf *dmabuf, int flags)
2480 {
2481 	struct proc *p = curproc;
2482 	struct filedesc *fdp = p->p_fd;
2483 	struct file *fp = dmabuf->file;
2484 	int fd, cloexec, error;
2485 
2486 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
2487 
2488 	fdplock(fdp);
2489 restart:
2490 	if ((error = fdalloc(p, 0, &fd)) != 0) {
2491 		if (error == ENOSPC) {
2492 			fdexpand(p);
2493 			goto restart;
2494 		}
2495 		fdpunlock(fdp);
2496 		return -error;
2497 	}
2498 
2499 	fdinsert(fdp, fd, cloexec, fp);
2500 	fdpunlock(fdp);
2501 
2502 	return fd;
2503 }
2504 
2505 void
2506 get_dma_buf(struct dma_buf *dmabuf)
2507 {
2508 	FREF(dmabuf->file);
2509 }
2510 
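/*
 * Report the fastest link speed the device advertises: Link
 * Capabilities 2 when the PCIe capability version is >= 2, otherwise
 * Link Capabilities.
 */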
2511 enum pci_bus_speed
2512 pcie_get_speed_cap(struct pci_dev *pdev)
2513 {
2514 	pci_chipset_tag_t	pc;
2515 	pcitag_t		tag;
2516 	int			pos;
2517 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
2518 	pcireg_t		id;
2519 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
2520 	int			bus, device, function;
2521 
2522 	if (pdev == NULL)
2523 		return PCI_SPEED_UNKNOWN;
2524 
2525 	pc = pdev->pc;
2526 	tag = pdev->tag;
2527 
2528 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2529 	    &pos, NULL))
2530 		return PCI_SPEED_UNKNOWN;
2531 
2532 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2533 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2534 
2535 	/* we've been informed VIA and ServerWorks don't make the cut */
2536 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
2537 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
2538 		return PCI_SPEED_UNKNOWN;
2539 
2540 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2541 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
2542 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
2543 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
2544 
2545 	lnkcap &= 0x0f;
2546 	lnkcap2 &= 0xfe;
2547 
2548 	if (lnkcap2) { /* PCIE GEN 3.0 */
2549 		if (lnkcap2 & 0x02)
2550 			cap = PCIE_SPEED_2_5GT;
2551 		if (lnkcap2 & 0x04)
2552 			cap = PCIE_SPEED_5_0GT;
2553 		if (lnkcap2 & 0x08)
2554 			cap = PCIE_SPEED_8_0GT;
2555 		if (lnkcap2 & 0x10)
2556 			cap = PCIE_SPEED_16_0GT;
2557 		if (lnkcap2 & 0x20)
2558 			cap = PCIE_SPEED_32_0GT;
2559 		if (lnkcap2 & 0x40)
2560 			cap = PCIE_SPEED_64_0GT;
2561 	} else {
2562 		if (lnkcap & 0x01)
2563 			cap = PCIE_SPEED_2_5GT;
2564 		if (lnkcap & 0x02)
2565 			cap = PCIE_SPEED_5_0GT;
2566 	}
2567 
2568 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
2569 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
2570 	    lnkcap2);
2571 	return cap;
2572 }
2573 
2574 enum pcie_link_width
2575 pcie_get_width_cap(struct pci_dev *pdev)
2576 {
2577 	pci_chipset_tag_t	pc = pdev->pc;
2578 	pcitag_t		tag = pdev->tag;
2579 	int			pos;
2580 	pcireg_t		lnkcap = 0;
2581 	pcireg_t		id;
2582 	int			bus, device, function;
2583 
2584 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2585 	    &pos, NULL))
2586 		return PCIE_LNK_WIDTH_UNKNOWN;
2587 
2588 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2589 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2590 
2591 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2592 
2593 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
2594 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
2595 
2596 	if (lnkcap)
2597 		return (lnkcap & 0x3f0) >> 4;
2598 	return PCIE_LNK_WIDTH_UNKNOWN;
2599 }
2600 
2601 bool
2602 pcie_aspm_enabled(struct pci_dev *pdev)
2603 {
2604 	pci_chipset_tag_t	pc = pdev->pc;
2605 	pcitag_t		tag = pdev->tag;
2606 	int			pos;
2607 	pcireg_t		lcsr;
2608 
2609 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2610 	    &pos, NULL))
2611 		return false;
2612 
2613 	lcsr = pci_conf_read(pc, tag, pos + PCI_PCIE_LCSR);
2614 	if ((lcsr & (PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1)) != 0)
2615 		return true;
2616 
2617 	return false;
2618 }
2619 
2620 int
2621 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
2622     int sync, void *key)
2623 {
2624 	wakeup(wqe);
2625 	if (wqe->private)
2626 		wake_up_process(wqe->private);
2627 	list_del_init(&wqe->entry);
2628 	return 0;
2629 }
2630 
2631 static wait_queue_head_t bit_waitq;
2632 wait_queue_head_t var_waitq;
2633 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
2634 
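/*
 * Sleep on the word's address until the bit is clear; wake_up_bit()
 * on the same word wakes all sleepers.  Returns non-zero if the sleep
 * was interrupted.
 */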
2635 int
2636 wait_on_bit(unsigned long *word, int bit, unsigned mode)
2637 {
2638 	int err;
2639 
2640 	if (!test_bit(bit, word))
2641 		return 0;
2642 
2643 	mtx_enter(&wait_bit_mtx);
2644 	while (test_bit(bit, word)) {
2645 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
2646 		    INFSLP);
2647 		if (err) {
2648 			mtx_leave(&wait_bit_mtx);
2649 			return 1;
2650 		}
2651 	}
2652 	mtx_leave(&wait_bit_mtx);
2653 	return 0;
2654 }
2655 
2656 int
2657 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
2658 {
2659 	int err;
2660 
2661 	if (!test_bit(bit, word))
2662 		return 0;
2663 
2664 	mtx_enter(&wait_bit_mtx);
2665 	while (test_bit(bit, word)) {
2666 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
2667 		if (err) {
2668 			mtx_leave(&wait_bit_mtx);
2669 			return 1;
2670 		}
2671 	}
2672 	mtx_leave(&wait_bit_mtx);
2673 	return 0;
2674 }
2675 
2676 void
2677 wake_up_bit(void *word, int bit)
2678 {
2679 	mtx_enter(&wait_bit_mtx);
2680 	wakeup(word);
2681 	mtx_leave(&wait_bit_mtx);
2682 }
2683 
2684 void
2685 clear_and_wake_up_bit(int bit, void *word)
2686 {
2687 	clear_bit(bit, word);
2688 	wake_up_bit(word, bit);
2689 }
2690 
2691 wait_queue_head_t *
2692 bit_waitqueue(void *word, int bit)
2693 {
2694 	/* XXX hash table of wait queues? */
2695 	return &bit_waitq;
2696 }
2697 
2698 wait_queue_head_t *
2699 __var_waitqueue(void *p)
2700 {
2701 	/* XXX hash table of wait queues? */
2702 	return &bit_waitq;
2703 }
2704 
2705 struct workqueue_struct *system_wq;
2706 struct workqueue_struct *system_highpri_wq;
2707 struct workqueue_struct *system_unbound_wq;
2708 struct workqueue_struct *system_long_wq;
2709 struct taskq *taskletq;
2710 
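/*
 * Create the taskqs standing in for the Linux system workqueues and
 * the tasklet queue, and set up the wait queues, idr pool and
 * kmap_atomic scratch mapping.
 */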
2711 void
2712 drm_linux_init(void)
2713 {
2714 	system_wq = (struct workqueue_struct *)
2715 	    taskq_create("drmwq", 4, IPL_HIGH, 0);
2716 	system_highpri_wq = (struct workqueue_struct *)
2717 	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
2718 	system_unbound_wq = (struct workqueue_struct *)
2719 	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
2720 	system_long_wq = (struct workqueue_struct *)
2721 	    taskq_create("drmlwq", 4, IPL_HIGH, 0);
2722 
2723 	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
2724 
2725 	init_waitqueue_head(&bit_waitq);
2726 	init_waitqueue_head(&var_waitq);
2727 
2728 	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
2729 	    "idrpl", NULL);
2730 
2731 	kmap_atomic_va =
2732 	    (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
2733 }
2734 
2735 void
2736 drm_linux_exit(void)
2737 {
2738 	pool_destroy(&idr_pool);
2739 
2740 	taskq_destroy(taskletq);
2741 
2742 	taskq_destroy((struct taskq *)system_long_wq);
2743 	taskq_destroy((struct taskq *)system_unbound_wq);
2744 	taskq_destroy((struct taskq *)system_highpri_wq);
2745 	taskq_destroy((struct taskq *)system_wq);
2746 }
2747 
2748 #define PCIE_ECAP_RESIZE_BAR	0x15
2749 #define RBCAP0			0x04
2750 #define RBCTRL0			0x08
2751 #define RBCTRL_BARINDEX_MASK	0x07
2752 #define RBCTRL_BARSIZE_MASK	0x1f00
2753 #define RBCTRL_BARSIZE_SHIFT	8
2754 
2755 /* size in MB is 1 << nsize */
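/* e.g. nsize == 8 requests a 256MB BAR (1 << 8 MB) */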
2756 int
2757 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
2758 {
2759 	pcireg_t	reg;
2760 	uint32_t	offset, capid;
2761 
2762 	KASSERT(bar == 0);
2763 
2764 	offset = PCI_PCIE_ECAP;
2765 
2766 	/* search PCI Express Extended Capabilities */
2767 	do {
2768 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
2769 		capid = PCI_PCIE_ECAP_ID(reg);
2770 		if (capid == PCIE_ECAP_RESIZE_BAR)
2771 			break;
2772 		offset = PCI_PCIE_ECAP_NEXT(reg);
2773 	} while (capid != 0);
2774 
2775 	if (capid == 0) {
2776 		printf("%s: could not find resize bar cap!\n", __func__);
2777 		return -ENOTSUP;
2778 	}
2779 
2780 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
2781 
2782 	if ((reg & (1 << (nsize + 4))) == 0) {
2783 		printf("%s size not supported\n", __func__);
2784 		return -ENOTSUP;
2785 	}
2786 
2787 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2788 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2789 		printf("%s BAR index not 0\n", __func__);
2790 		return -EINVAL;
2791 	}
2792 
2793 	reg &= ~RBCTRL_BARSIZE_MASK;
2794 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2795 
2796 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2797 
2798 	return 0;
2799 }
2800 
2801 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2802 
2803 int
2804 register_shrinker(struct shrinker *shrinker, const char *format, ...)
2805 {
2806 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2807 	return 0;
2808 }
2809 
2810 void
2811 unregister_shrinker(struct shrinker *shrinker)
2812 {
2813 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2814 }
2815 
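/*
 * Ask each registered shrinker in turn to scan back pages until
 * npages have been reclaimed or the list is exhausted.
 */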
2816 void
2817 drmbackoff(long npages)
2818 {
2819 	struct shrink_control sc;
2820 	struct shrinker *shrinker;
2821 	u_long ret;
2822 
2823 	shrinker = TAILQ_FIRST(&shrinkers);
2824 	while (shrinker && npages > 0) {
2825 		sc.nr_to_scan = npages;
2826 		ret = shrinker->scan_objects(shrinker, &sc);
2827 		npages -= ret;
2828 		shrinker = TAILQ_NEXT(shrinker, next);
2829 	}
2830 }
2831 
2832 void *
2833 bitmap_zalloc(u_int n, gfp_t flags)
2834 {
2835 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2836 }
2837 
2838 void
2839 bitmap_free(void *p)
2840 {
2841 	kfree(p);
2842 }
2843 
2844 int
2845 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2846 {
2847 	if (atomic_add_unless(v, -1, 1))
2848 		return 0;
2849 
2850 	rw_enter_write(lock);
2851 	if (atomic_dec_return(v) == 0)
2852 		return 1;
2853 	rw_exit_write(lock);
2854 	return 0;
2855 }
2856 
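/*
 * Linux printk() levels arrive as a '\001' (SOH) byte followed by the
 * level digit; unless DRMDEBUG is defined, KERN_INFO and less severe
 * messages are dropped before formatting.
 */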
2857 int
2858 printk(const char *fmt, ...)
2859 {
2860 	int ret, level;
2861 	va_list ap;
2862 
2863 	if (fmt != NULL && *fmt == '\001') {
2864 		level = fmt[1];
2865 #ifndef DRMDEBUG
2866 		if (level >= KERN_INFO[1] && level <= '9')
2867 			return 0;
2868 #endif
2869 		fmt += 2;
2870 	}
2871 
2872 	va_start(ap, fmt);
2873 	ret = vprintf(fmt, ap);
2874 	va_end(ap);
2875 
2876 	return ret;
2877 }
2878 
2879 #define START(node) ((node)->start)
2880 #define LAST(node) ((node)->last)
2881 
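/*
 * Minimal interval tree: nodes sit in an rbtree ordered by start and
 * lookups do a linear scan, so insertion below does not maintain an
 * augmented subtree-last value.
 */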
2882 struct interval_tree_node *
2883 interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
2884     unsigned long last)
2885 {
2886 	struct interval_tree_node *node;
2887 	struct rb_node *rb;
2888 
2889 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
2890 		node = rb_entry(rb, typeof(*node), rb);
2891 		if (LAST(node) >= start && START(node) <= last)
2892 			return node;
2893 	}
2894 	return NULL;
2895 }
2896 
2897 void
2898 interval_tree_remove(struct interval_tree_node *node,
2899     struct rb_root_cached *root)
2900 {
2901 	rb_erase_cached(&node->rb, root);
2902 }
2903 
2904 void
2905 interval_tree_insert(struct interval_tree_node *node,
2906     struct rb_root_cached *root)
2907 {
2908 	struct rb_node **iter = &root->rb_root.rb_node;
2909 	struct rb_node *parent = NULL;
2910 	struct interval_tree_node *iter_node;
2911 
2912 	while (*iter) {
2913 		parent = *iter;
2914 		iter_node = rb_entry(*iter, struct interval_tree_node, rb);
2915 
2916 		if (node->start < iter_node->start)
2917 			iter = &(*iter)->rb_left;
2918 		else
2919 			iter = &(*iter)->rb_right;
2920 	}
2921 
2922 	rb_link_node(&node->rb, parent, iter);
2923 	rb_insert_color_cached(&node->rb, root, false);
2924 }
2925 
2926 int
2927 syncfile_read(struct file *fp, struct uio *uio, int fflags)
2928 {
2929 	return ENXIO;
2930 }
2931 
2932 int
2933 syncfile_write(struct file *fp, struct uio *uio, int fflags)
2934 {
2935 	return ENXIO;
2936 }
2937 
2938 int
2939 syncfile_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2940 {
2941 	return ENOTTY;
2942 }
2943 
2944 int
2945 syncfile_kqfilter(struct file *fp, struct knote *kn)
2946 {
2947 	return EINVAL;
2948 }
2949 
2950 int
2951 syncfile_stat(struct file *fp, struct stat *st, struct proc *p)
2952 {
2953 	memset(st, 0, sizeof(*st));
2954 	st->st_mode = S_IFIFO;	/* XXX */
2955 	return 0;
2956 }
2957 
2958 int
2959 syncfile_close(struct file *fp, struct proc *p)
2960 {
2961 	struct sync_file *sf = fp->f_data;
2962 
2963 	dma_fence_put(sf->fence);
2964 	fp->f_data = NULL;
2965 	free(sf, M_DRM, sizeof(struct sync_file));
2966 	return 0;
2967 }
2968 
2969 int
2970 syncfile_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2971 {
2972 	off_t newoff;
2973 
2974 	if (*offset != 0)
2975 		return EINVAL;
2976 
2977 	switch (whence) {
2978 	case SEEK_SET:
2979 		newoff = 0;
2980 		break;
2981 	case SEEK_END:
2982 		newoff = 0;
2983 		break;
2984 	default:
2985 		return EINVAL;
2986 	}
2987 	mtx_enter(&fp->f_mtx);
2988 	fp->f_offset = newoff;
2989 	mtx_leave(&fp->f_mtx);
2990 	*offset = newoff;
2991 	return 0;
2992 }
2993 
2994 const struct fileops syncfileops = {
2995 	.fo_read	= syncfile_read,
2996 	.fo_write	= syncfile_write,
2997 	.fo_ioctl	= syncfile_ioctl,
2998 	.fo_kqfilter	= syncfile_kqfilter,
2999 	.fo_stat	= syncfile_stat,
3000 	.fo_close	= syncfile_close,
3001 	.fo_seek	= syncfile_seek,
3002 };
3003 
3004 void
3005 fd_install(int fd, struct file *fp)
3006 {
3007 	struct proc *p = curproc;
3008 	struct filedesc *fdp = p->p_fd;
3009 
3010 	if (fp->f_type != DTYPE_SYNC)
3011 		return;
3012 
3013 	fdplock(fdp);
3014 	/* all callers use get_unused_fd_flags(O_CLOEXEC) */
3015 	fdinsert(fdp, fd, UF_EXCLOSE, fp);
3016 	fdpunlock(fdp);
3017 }
3018 
3019 void
3020 fput(struct file *fp)
3021 {
3022 	if (fp->f_type != DTYPE_SYNC)
3023 		return;
3024 
3025 	FRELE(fp, curproc);
3026 }
3027 
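/*
 * Reserve a descriptor slot without installing a file; fd_install()
 * binds the file later.  Only O_CLOEXEC callers are supported.
 */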
3028 int
3029 get_unused_fd_flags(unsigned int flags)
3030 {
3031 	struct proc *p = curproc;
3032 	struct filedesc *fdp = p->p_fd;
3033 	int error, fd;
3034 
3035 	KASSERT((flags & O_CLOEXEC) != 0);
3036 
3037 	fdplock(fdp);
3038 retryalloc:
3039 	if ((error = fdalloc(p, 0, &fd)) != 0) {
3040 		if (error == ENOSPC) {
3041 			fdexpand(p);
3042 			goto retryalloc;
3043 		}
3044 		fdpunlock(fdp);
3045 		return -1;
3046 	}
3047 	fdpunlock(fdp);
3048 
3049 	return fd;
3050 }
3051 
3052 void
3053 put_unused_fd(int fd)
3054 {
3055 	struct filedesc *fdp = curproc->p_fd;
3056 
3057 	fdplock(fdp);
3058 	fdremove(fdp, fd);
3059 	fdpunlock(fdp);
3060 }
3061 
3062 struct dma_fence *
3063 sync_file_get_fence(int fd)
3064 {
3065 	struct proc *p = curproc;
3066 	struct filedesc *fdp = p->p_fd;
3067 	struct file *fp;
3068 	struct sync_file *sf;
3069 	struct dma_fence *f;
3070 
3071 	if ((fp = fd_getfile(fdp, fd)) == NULL)
3072 		return NULL;
3073 
3074 	if (fp->f_type != DTYPE_SYNC) {
3075 		FRELE(fp, p);
3076 		return NULL;
3077 	}
3078 	sf = fp->f_data;
3079 	f = dma_fence_get(sf->fence);
3080 	FRELE(sf->file, p);
3081 	return f;
3082 }
3083 
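/*
 * Wrap a reference to the fence in a sync_file backed by a file using
 * syncfileops; pair with get_unused_fd_flags() and fd_install() to
 * hand it to userland.
 */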
3084 struct sync_file *
3085 sync_file_create(struct dma_fence *fence)
3086 {
3087 	struct proc *p = curproc;
3088 	struct sync_file *sf;
3089 	struct file *fp;
3090 
3091 	fp = fnew(p);
3092 	if (fp == NULL)
3093 		return NULL;
3094 	fp->f_type = DTYPE_SYNC;
3095 	fp->f_ops = &syncfileops;
3096 	sf = malloc(sizeof(struct sync_file), M_DRM, M_WAITOK | M_ZERO);
3097 	sf->file = fp;
3098 	sf->fence = dma_fence_get(fence);
3099 	fp->f_data = sf;
3100 	return sf;
3101 }
3102 
3103 bool
3104 drm_firmware_drivers_only(void)
3105 {
3106 	return false;
3107 }
3108