1 /*	$OpenBSD: drm_linux.c,v 1.99 2023/06/28 08:23:25 claudio Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30 
31 #include <dev/pci/ppbreg.h>
32 
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/dma-fence-chain.h>
39 #include <linux/interrupt.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/scatterlist.h>
43 #include <linux/i2c.h>
44 #include <linux/pci.h>
45 #include <linux/notifier.h>
46 #include <linux/backlight.h>
47 #include <linux/shrinker.h>
48 #include <linux/fb.h>
49 #include <linux/xarray.h>
50 #include <linux/interval_tree.h>
51 #include <linux/kthread.h>
52 #include <linux/processor.h>
53 #include <linux/sync_file.h>
54 
55 #include <drm/drm_device.h>
56 #include <drm/drm_connector.h>
57 #include <drm/drm_print.h>
58 
59 #if defined(__amd64__) || defined(__i386__)
60 #include "bios.h"
61 #endif
62 
63 /* allowed to sleep */
64 void
65 tasklet_unlock_wait(struct tasklet_struct *ts)
66 {
67 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
68 		cpu_relax();
69 }
70 
71 /* must not sleep */
72 void
73 tasklet_unlock_spin_wait(struct tasklet_struct *ts)
74 {
75 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
76 		cpu_relax();
77 }
78 
79 void
80 tasklet_run(void *arg)
81 {
82 	struct tasklet_struct *ts = arg;
83 
84 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
85 	if (tasklet_trylock(ts)) {
86 		if (!atomic_read(&ts->count)) {
87 			if (ts->use_callback)
88 				ts->callback(ts);
89 			else
90 				ts->func(ts->data);
91 		}
92 		tasklet_unlock(ts);
93 	}
94 }
95 
96 /* 32 bit powerpc lacks 64 bit atomics */
97 #if defined(__powerpc__) && !defined(__powerpc64__)
98 struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
99 #endif
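
/*
 * Minimal sketch (not code from this file) of how the Linux-style 64 bit
 * atomics can be emulated with atomic64_mtx on 32 bit powerpc; the real
 * helpers live in the Linux compat headers and the name below is made up
 * purely for illustration.
 *
 *	static inline int64_t
 *	example_atomic64_add_return(int64_t n, volatile int64_t *p)
 *	{
 *		int64_t v;
 *
 *		mtx_enter(&atomic64_mtx);
 *		*p += n;
 *		v = *p;
 *		mtx_leave(&atomic64_mtx);
 *		return v;
 *	}
 */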
100 
101 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
102 volatile struct proc *sch_proc;
103 volatile void *sch_ident;
104 int sch_priority;
105 
106 void
107 set_current_state(int state)
108 {
109 	if (sch_ident != curproc)
110 		mtx_enter(&sch_mtx);
111 	MUTEX_ASSERT_LOCKED(&sch_mtx);
112 	sch_ident = sch_proc = curproc;
113 	sch_priority = state;
114 }
115 
116 void
117 __set_current_state(int state)
118 {
119 	KASSERT(state == TASK_RUNNING);
120 	if (sch_ident == curproc) {
121 		MUTEX_ASSERT_LOCKED(&sch_mtx);
122 		sch_ident = NULL;
123 		mtx_leave(&sch_mtx);
124 	}
125 }
126 
127 void
128 schedule(void)
129 {
130 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
131 }
132 
133 long
134 schedule_timeout(long timeout)
135 {
136 	struct sleep_state sls;
137 	unsigned long deadline;
138 	int wait, spl, prio, timo = 0;
139 
140 	MUTEX_ASSERT_LOCKED(&sch_mtx);
141 	KASSERT(!cold);
142 
143 	if (timeout != MAX_SCHEDULE_TIMEOUT)
144 		timo = timeout;
145 	prio = sch_priority;
146 	sleep_setup(&sls, sch_ident, prio, "schto");
147 
148 	wait = (sch_proc == curproc && timeout > 0);
149 
150 	spl = MUTEX_OLDIPL(&sch_mtx);
151 	MUTEX_OLDIPL(&sch_mtx) = splsched();
152 	mtx_leave(&sch_mtx);
153 
154 	if (timeout != MAX_SCHEDULE_TIMEOUT)
155 		deadline = jiffies + timeout;
156 	sleep_finish(&sls, prio, timo, wait);
157 	if (timeout != MAX_SCHEDULE_TIMEOUT)
158 		timeout = deadline - jiffies;
159 
160 	mtx_enter(&sch_mtx);
161 	MUTEX_OLDIPL(&sch_mtx) = spl;
162 	sch_ident = curproc;
163 
164 	return timeout > 0 ? timeout : 0;
165 }
166 
167 long
168 schedule_timeout_uninterruptible(long timeout)
169 {
170 	tsleep(curproc, PWAIT, "schtou", timeout);
171 	return 0;
172 }
173 
174 int
175 wake_up_process(struct proc *p)
176 {
177 	int s, rv;
178 
179 	SCHED_LOCK(s);
180 	atomic_cas_ptr(&sch_proc, p, NULL);
181 	rv = wakeup_proc(p, NULL, 0);
182 	SCHED_UNLOCK(s);
183 	return rv;
184 }
185 
186 void
187 flush_workqueue(struct workqueue_struct *wq)
188 {
189 	if (cold)
190 		return;
191 
192 	if (wq)
193 		taskq_barrier((struct taskq *)wq);
194 }
195 
196 bool
197 flush_work(struct work_struct *work)
198 {
199 	if (cold)
200 		return false;
201 
202 	if (work->tq)
203 		taskq_barrier(work->tq);
204 	return false;
205 }
206 
207 bool
208 flush_delayed_work(struct delayed_work *dwork)
209 {
210 	bool ret = false;
211 
212 	if (cold)
213 		return false;
214 
215 	while (timeout_pending(&dwork->to)) {
216 		tsleep(dwork, PWAIT, "fldwto", 1);
217 		ret = true;
218 	}
219 
220 	if (dwork->tq)
221 		taskq_barrier(dwork->tq);
222 	return ret;
223 }
224 
225 struct kthread {
226 	int (*func)(void *);
227 	void *data;
228 	struct proc *proc;
229 	volatile u_int flags;
230 #define KTHREAD_SHOULDSTOP	0x0000001
231 #define KTHREAD_STOPPED		0x0000002
232 #define KTHREAD_SHOULDPARK	0x0000004
233 #define KTHREAD_PARKED		0x0000008
234 	LIST_ENTRY(kthread) next;
235 };
236 
237 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
238 
239 void
240 kthread_func(void *arg)
241 {
242 	struct kthread *thread = arg;
243 	int ret;
244 
245 	ret = thread->func(thread->data);
246 	thread->flags |= KTHREAD_STOPPED;
247 	wakeup(thread);
248 	kthread_exit(ret);
249 }
250 
251 struct proc *
252 kthread_run(int (*func)(void *), void *data, const char *name)
253 {
254 	struct kthread *thread;
255 
256 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
257 	thread->func = func;
258 	thread->data = data;
259 	thread->flags = 0;
260 
261 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
262 		free(thread, M_DRM, sizeof(*thread));
263 		return ERR_PTR(-ENOMEM);
264 	}
265 
266 	LIST_INSERT_HEAD(&kthread_list, thread, next);
267 	return thread->proc;
268 }
269 
270 struct kthread_worker *
271 kthread_create_worker(unsigned int flags, const char *fmt, ...)
272 {
273 	char name[MAXCOMLEN+1];
274 	va_list ap;
275 
276 	struct kthread_worker *w = malloc(sizeof(*w), M_DRM, M_WAITOK);
277 	va_start(ap, fmt);
278 	vsnprintf(name, sizeof(name), fmt, ap);
279 	va_end(ap);
280 	w->tq = taskq_create(name, 1, IPL_HIGH, 0);
281 
282 	return w;
283 }
284 
285 void
286 kthread_destroy_worker(struct kthread_worker *worker)
287 {
288 	taskq_destroy(worker->tq);
289 	free(worker, M_DRM, sizeof(*worker));
291 }
292 
293 void
294 kthread_init_work(struct kthread_work *work, void (*func)(struct kthread_work *))
295 {
296 	work->tq = NULL;
297 	task_set(&work->task, (void (*)(void *))func, work);
298 }
299 
300 bool
301 kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
302 {
303 	work->tq = worker->tq;
304 	return task_add(work->tq, &work->task);
305 }
306 
307 bool
308 kthread_cancel_work_sync(struct kthread_work *work)
309 {
310 	return task_del(work->tq, &work->task);
311 }
312 
313 void
314 kthread_flush_work(struct kthread_work *work)
315 {
316 	if (cold)
317 		return;
318 
319 	if (work->tq)
320 		taskq_barrier(work->tq);
321 }
322 
323 void
324 kthread_flush_worker(struct kthread_worker *worker)
325 {
326 	if (cold)
327 		return;
328 
329 	if (worker->tq)
330 		taskq_barrier(worker->tq);
331 }
332 
333 struct kthread *
334 kthread_lookup(struct proc *p)
335 {
336 	struct kthread *thread;
337 
338 	LIST_FOREACH(thread, &kthread_list, next) {
339 		if (thread->proc == p)
340 			break;
341 	}
342 	KASSERT(thread);
343 
344 	return thread;
345 }
346 
347 int
348 kthread_should_park(void)
349 {
350 	struct kthread *thread = kthread_lookup(curproc);
351 	return (thread->flags & KTHREAD_SHOULDPARK);
352 }
353 
354 void
355 kthread_parkme(void)
356 {
357 	struct kthread *thread = kthread_lookup(curproc);
358 
359 	while (thread->flags & KTHREAD_SHOULDPARK) {
360 		thread->flags |= KTHREAD_PARKED;
361 		wakeup(thread);
362 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
363 		thread->flags &= ~KTHREAD_PARKED;
364 	}
365 }
366 
367 void
368 kthread_park(struct proc *p)
369 {
370 	struct kthread *thread = kthread_lookup(p);
371 
372 	while ((thread->flags & KTHREAD_PARKED) == 0) {
373 		thread->flags |= KTHREAD_SHOULDPARK;
374 		wake_up_process(thread->proc);
375 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
376 	}
377 }
378 
379 void
380 kthread_unpark(struct proc *p)
381 {
382 	struct kthread *thread = kthread_lookup(p);
383 
384 	thread->flags &= ~KTHREAD_SHOULDPARK;
385 	wakeup(thread);
386 }
387 
388 int
389 kthread_should_stop(void)
390 {
391 	struct kthread *thread = kthread_lookup(curproc);
392 	return (thread->flags & KTHREAD_SHOULDSTOP);
393 }
394 
395 void
396 kthread_stop(struct proc *p)
397 {
398 	struct kthread *thread = kthread_lookup(p);
399 
400 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
401 		thread->flags |= KTHREAD_SHOULDSTOP;
402 		kthread_unpark(p);
403 		wake_up_process(thread->proc);
404 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
405 	}
406 	LIST_REMOVE(thread, next);
407 	free(thread, M_DRM, sizeof(*thread));
408 }
409 
410 #if NBIOS > 0
411 extern char smbios_board_vendor[];
412 extern char smbios_board_prod[];
413 extern char smbios_board_serial[];
414 #endif
415 
416 bool
417 dmi_match(int slot, const char *str)
418 {
419 	switch (slot) {
420 	case DMI_SYS_VENDOR:
421 		if (hw_vendor != NULL &&
422 		    !strcmp(hw_vendor, str))
423 			return true;
424 		break;
425 	case DMI_PRODUCT_NAME:
426 		if (hw_prod != NULL &&
427 		    !strcmp(hw_prod, str))
428 			return true;
429 		break;
430 	case DMI_PRODUCT_VERSION:
431 		if (hw_ver != NULL &&
432 		    !strcmp(hw_ver, str))
433 			return true;
434 		break;
435 #if NBIOS > 0
436 	case DMI_BOARD_VENDOR:
437 		if (strcmp(smbios_board_vendor, str) == 0)
438 			return true;
439 		break;
440 	case DMI_BOARD_NAME:
441 		if (strcmp(smbios_board_prod, str) == 0)
442 			return true;
443 		break;
444 	case DMI_BOARD_SERIAL:
445 		if (strcmp(smbios_board_serial, str) == 0)
446 			return true;
447 		break;
448 #else
449 	case DMI_BOARD_VENDOR:
450 		if (hw_vendor != NULL &&
451 		    !strcmp(hw_vendor, str))
452 			return true;
453 		break;
454 	case DMI_BOARD_NAME:
455 		if (hw_prod != NULL &&
456 		    !strcmp(hw_prod, str))
457 			return true;
458 		break;
459 #endif
460 	case DMI_NONE:
461 	default:
462 		return false;
463 	}
464 
465 	return false;
466 }
467 
468 static bool
469 dmi_found(const struct dmi_system_id *dsi)
470 {
471 	int i, slot;
472 
473 	for (i = 0; i < nitems(dsi->matches); i++) {
474 		slot = dsi->matches[i].slot;
475 		if (slot == DMI_NONE)
476 			break;
477 		if (!dmi_match(slot, dsi->matches[i].substr))
478 			return false;
479 	}
480 
481 	return true;
482 }
483 
484 const struct dmi_system_id *
485 dmi_first_match(const struct dmi_system_id *sysid)
486 {
487 	const struct dmi_system_id *dsi;
488 
489 	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
490 		if (dmi_found(dsi))
491 			return dsi;
492 	}
493 
494 	return NULL;
495 }
496 
497 #if NBIOS > 0
498 extern char smbios_bios_date[];
499 extern char smbios_bios_version[];
500 #endif
501 
502 const char *
503 dmi_get_system_info(int slot)
504 {
505 #if NBIOS > 0
506 	switch (slot) {
507 	case DMI_BIOS_DATE:
508 		return smbios_bios_date;
509 	case DMI_BIOS_VERSION:
510 		return smbios_bios_version;
511 	default:
512 		printf("%s slot %d not handled\n", __func__, slot);
513 	}
514 #endif
515 	return NULL;
516 }
517 
518 int
519 dmi_check_system(const struct dmi_system_id *sysid)
520 {
521 	const struct dmi_system_id *dsi;
522 	int num = 0;
523 
524 	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
525 		if (dmi_found(dsi)) {
526 			num++;
527 			if (dsi->callback && dsi->callback(dsi))
528 				break;
529 		}
530 	}
531 	return (num);
532 }
533 
534 struct vm_page *
535 alloc_pages(unsigned int gfp_mask, unsigned int order)
536 {
537 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
538 	struct uvm_constraint_range *constraint = &no_constraint;
539 	struct pglist mlist;
540 
541 	if (gfp_mask & M_CANFAIL)
542 		flags |= UVM_PLA_FAILOK;
543 	if (gfp_mask & M_ZERO)
544 		flags |= UVM_PLA_ZERO;
545 	if (gfp_mask & __GFP_DMA32)
546 		constraint = &dma_constraint;
547 
548 	TAILQ_INIT(&mlist);
549 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
550 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
551 		return NULL;
552 	return TAILQ_FIRST(&mlist);
553 }
554 
555 void
556 __free_pages(struct vm_page *page, unsigned int order)
557 {
558 	struct pglist mlist;
559 	int i;
560 
561 	TAILQ_INIT(&mlist);
562 	for (i = 0; i < (1 << order); i++)
563 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
564 	uvm_pglistfree(&mlist);
565 }
566 
567 void
568 __pagevec_release(struct pagevec *pvec)
569 {
570 	struct pglist mlist;
571 	int i;
572 
573 	TAILQ_INIT(&mlist);
574 	for (i = 0; i < pvec->nr; i++)
575 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
576 	uvm_pglistfree(&mlist);
577 	pagevec_reinit(pvec);
578 }
579 
580 static struct kmem_va_mode kv_physwait = {
581 	.kv_map = &phys_map,
582 	.kv_wait = 1,
583 };
584 
585 void *
586 kmap(struct vm_page *pg)
587 {
588 	vaddr_t va;
589 
590 #if defined (__HAVE_PMAP_DIRECT)
591 	va = pmap_map_direct(pg);
592 #else
593 	va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
594 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
595 	pmap_update(pmap_kernel());
596 #endif
597 	return (void *)va;
598 }
599 
600 void
601 kunmap_va(void *addr)
602 {
603 	vaddr_t va = (vaddr_t)addr;
604 
605 #if defined (__HAVE_PMAP_DIRECT)
606 	pmap_unmap_direct(va);
607 #else
608 	pmap_kremove(va, PAGE_SIZE);
609 	pmap_update(pmap_kernel());
610 	km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
611 #endif
612 }
613 
614 vaddr_t kmap_atomic_va;
615 int kmap_atomic_inuse;
616 
617 void *
618 kmap_atomic_prot(struct vm_page *pg, pgprot_t prot)
619 {
620 	KASSERT(!kmap_atomic_inuse);
621 
622 	kmap_atomic_inuse = 1;
623 	pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot,
624 	    PROT_READ | PROT_WRITE);
625 	return (void *)kmap_atomic_va;
626 }
627 
628 void
629 kunmap_atomic(void *addr)
630 {
631 	KASSERT(kmap_atomic_inuse);
632 
633 	pmap_kremove(kmap_atomic_va, PAGE_SIZE);
634 	kmap_atomic_inuse = 0;
635 }
636 
637 void *
638 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
639      pgprot_t prot)
640 {
641 	vaddr_t va;
642 	paddr_t pa;
643 	int i;
644 
645 	va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
646 	    &kd_nowait);
647 	if (va == 0)
648 		return NULL;
649 	for (i = 0; i < npages; i++) {
650 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
651 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
652 		    PROT_READ | PROT_WRITE,
653 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
654 		pmap_update(pmap_kernel());
655 	}
656 
657 	return (void *)va;
658 }
659 
660 void
661 vunmap(void *addr, size_t size)
662 {
663 	vaddr_t va = (vaddr_t)addr;
664 
665 	pmap_remove(pmap_kernel(), va, va + size);
666 	pmap_update(pmap_kernel());
667 	km_free((void *)va, size, &kv_any, &kp_none);
668 }
669 
670 bool
671 is_vmalloc_addr(const void *p)
672 {
673 	vaddr_t min, max, addr;
674 
675 	min = vm_map_min(kernel_map);
676 	max = vm_map_max(kernel_map);
677 	addr = (vaddr_t)p;
678 
679 	if (addr >= min && addr <= max)
680 		return true;
681 	else
682 		return false;
683 }
684 
685 void
686 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
687     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
688 {
689 	const uint8_t *cbuf = buf;
690 	int i;
691 
692 	for (i = 0; i < len; i++) {
693 		if ((i % rowsize) == 0)
694 			printf("%s", prefix_str);
695 		printf("%02x", cbuf[i]);
696 		if ((i % rowsize) == (rowsize - 1))
697 			printf("\n");
698 		else
699 			printf(" ");
700 	}
701 }
702 
703 void *
704 memchr_inv(const void *s, int c, size_t n)
705 {
706 	if (n != 0) {
707 		const unsigned char *p = s;
708 
709 		do {
710 			if (*p++ != (unsigned char)c)
711 				return ((void *)(p - 1));
712 		} while (--n != 0);
713 	}
714 	return (NULL);
715 }
716 
717 int
718 panic_cmp(struct rb_node *a, struct rb_node *b)
719 {
720 	panic(__func__);
721 }
722 
723 #undef RB_ROOT
724 #define RB_ROOT(head)	(head)->rbh_root
725 
726 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
727 
728 /*
729  * This is a fairly minimal implementation of the Linux "idr" API.  It
730  * probably isn't very efficient, and definitely isn't RCU safe.  The
731  * pre-load buffer is global instead of per-cpu; we rely on the kernel
732  * lock to make this work.  ID randomization, meant to make the IDs
733  * harder to guess, is present but currently disabled ("notyet" below).
734  */
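
/*
 * Illustrative usage sketch (hypothetical caller; "p" stands for any
 * object pointer and GFP_KERNEL for a may-sleep gfp value): a consumer
 * initializes the idr once, optionally pre-loads an entry, allocates an
 * id for a pointer and later looks it up or removes it.
 *
 *	struct idr example_idr;
 *	int id;
 *
 *	idr_init(&example_idr);
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(&example_idr, p, 1, 0, GFP_KERNEL);
 *	if (id >= 0) {
 *		KASSERT(idr_find(&example_idr, id) == p);
 *		idr_remove(&example_idr, id);
 *	}
 *	idr_destroy(&example_idr);
 */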
735 
736 int idr_cmp(struct idr_entry *, struct idr_entry *);
737 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
738 
739 struct pool idr_pool;
740 struct idr_entry *idr_entry_cache;
741 
742 void
743 idr_init(struct idr *idr)
744 {
745 	SPLAY_INIT(&idr->tree);
746 }
747 
748 void
749 idr_destroy(struct idr *idr)
750 {
751 	struct idr_entry *id;
752 
753 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
754 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
755 		pool_put(&idr_pool, id);
756 	}
757 }
758 
759 void
760 idr_preload(unsigned int gfp_mask)
761 {
762 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
763 
764 	KERNEL_ASSERT_LOCKED();
765 
766 	if (idr_entry_cache == NULL)
767 		idr_entry_cache = pool_get(&idr_pool, flags);
768 }
769 
770 int
771 idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
772 {
773 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
774 	struct idr_entry *id;
775 	int begin;
776 
777 	KERNEL_ASSERT_LOCKED();
778 
779 	if (idr_entry_cache) {
780 		id = idr_entry_cache;
781 		idr_entry_cache = NULL;
782 	} else {
783 		id = pool_get(&idr_pool, flags);
784 		if (id == NULL)
785 			return -ENOMEM;
786 	}
787 
788 	if (end <= 0)
789 		end = INT_MAX;
790 
791 #ifdef notyet
792 	id->id = begin = start + arc4random_uniform(end - start);
793 #else
794 	id->id = begin = start;
795 #endif
796 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
797 		if (id->id == end)
798 			id->id = start;
799 		else
800 			id->id++;
801 		if (id->id == begin) {
802 			pool_put(&idr_pool, id);
803 			return -ENOSPC;
804 		}
805 	}
806 	id->ptr = ptr;
807 	return id->id;
808 }
809 
810 void *
811 idr_replace(struct idr *idr, void *ptr, unsigned long id)
812 {
813 	struct idr_entry find, *res;
814 	void *old;
815 
816 	find.id = id;
817 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
818 	if (res == NULL)
819 		return ERR_PTR(-ENOENT);
820 	old = res->ptr;
821 	res->ptr = ptr;
822 	return old;
823 }
824 
825 void *
826 idr_remove(struct idr *idr, unsigned long id)
827 {
828 	struct idr_entry find, *res;
829 	void *ptr = NULL;
830 
831 	find.id = id;
832 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
833 	if (res) {
834 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
835 		ptr = res->ptr;
836 		pool_put(&idr_pool, res);
837 	}
838 	return ptr;
839 }
840 
841 void *
842 idr_find(struct idr *idr, unsigned long id)
843 {
844 	struct idr_entry find, *res;
845 
846 	find.id = id;
847 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
848 	if (res == NULL)
849 		return NULL;
850 	return res->ptr;
851 }
852 
853 void *
854 idr_get_next(struct idr *idr, int *id)
855 {
856 	struct idr_entry *res;
857 
858 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
859 		if (res->id >= *id) {
860 			*id = res->id;
861 			return res->ptr;
862 		}
863 	}
864 
865 	return NULL;
866 }
867 
868 int
869 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
870 {
871 	struct idr_entry *id;
872 	int ret;
873 
874 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
875 		ret = func(id->id, id->ptr, data);
876 		if (ret)
877 			return ret;
878 	}
879 
880 	return 0;
881 }
882 
883 int
884 idr_cmp(struct idr_entry *a, struct idr_entry *b)
885 {
886 	return (a->id < b->id ? -1 : a->id > b->id);
887 }
888 
889 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
890 
891 void
892 ida_init(struct ida *ida)
893 {
894 	idr_init(&ida->idr);
895 }
896 
897 void
898 ida_destroy(struct ida *ida)
899 {
900 	idr_destroy(&ida->idr);
901 }
902 
903 int
904 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
905     gfp_t gfp_mask)
906 {
907 	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
908 }
909 
910 void
911 ida_simple_remove(struct ida *ida, unsigned int id)
912 {
913 	idr_remove(&ida->idr, id);
914 }
915 
916 int
917 ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
918 {
919 	return idr_alloc(&ida->idr, NULL, min, INT_MAX, gfp);
920 }
921 
922 int
923 ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
924 {
925 	return idr_alloc(&ida->idr, NULL, 0, max - 1, gfp);
926 }
927 
928 void
929 ida_free(struct ida *ida, unsigned int id)
930 {
931 	idr_remove(&ida->idr, id);
932 }
933 
934 int
935 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
936 {
937 	return (a->id < b->id ? -1 : a->id > b->id);
938 }
939 
940 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
941 struct pool xa_pool;
942 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
943 
944 void
945 xa_init_flags(struct xarray *xa, gfp_t flags)
946 {
947 	static int initialized;
948 
949 	if (!initialized) {
950 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_NONE, 0,
951 		    "xapl", NULL);
952 		initialized = 1;
953 	}
954 	SPLAY_INIT(&xa->xa_tree);
	/* remember the flags so __xa_alloc() can honour XA_FLAGS_ALLOC1 */
	xa->xa_flags = flags;
955 	if (flags & XA_FLAGS_LOCK_IRQ)
956 		mtx_init(&xa->xa_lock, IPL_TTY);
957 	else
958 		mtx_init(&xa->xa_lock, IPL_NONE);
959 }
960 
961 void
962 xa_destroy(struct xarray *xa)
963 {
964 	struct xarray_entry *id;
965 
966 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
967 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
968 		pool_put(&xa_pool, id);
969 	}
970 }
971 
972 /* Don't wrap ids. */
973 int
974 __xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
975 {
976 	struct xarray_entry *xid;
977 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
978 	int begin;
979 
980 	if (gfp & GFP_NOWAIT) {
981 		xid = pool_get(&xa_pool, PR_NOWAIT);
982 	} else {
983 		mtx_leave(&xa->xa_lock);
984 		xid = pool_get(&xa_pool, PR_WAITOK);
985 		mtx_enter(&xa->xa_lock);
986 	}
987 
988 	if (xid == NULL)
989 		return -ENOMEM;
990 
991 	if (limit <= 0)
992 		limit = INT_MAX;
993 
994 	xid->id = begin = start;
995 
996 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
997 		if (xid->id == limit)
998 			xid->id = start;
999 		else
1000 			xid->id++;
1001 		if (xid->id == begin) {
1002 			pool_put(&xa_pool, xid);
1003 			return -EBUSY;
1004 		}
1005 	}
1006 	xid->ptr = entry;
1007 	*id = xid->id;
1008 	return 0;
1009 }
1010 
1011 /*
1012  * Wrap ids and store the next id.
1013  * We walk the entire tree, so there is no need to special-case wrapping.
1014  * The only caller of this (i915_drm_client.c) doesn't use the next id.
1015  */
1016 int
1017 __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, int limit, u32 *next,
1018     gfp_t gfp)
1019 {
1020 	int r = __xa_alloc(xa, id, entry, limit, gfp);
1021 	*next = *id + 1;
1022 	return r;
1023 }
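
/*
 * Illustrative usage sketch (hypothetical caller; "entry" stands for any
 * pointer and GFP_KERNEL for a may-sleep gfp value): the __xa_* variants
 * above expect the xarray mutex to be held, so a caller brackets the
 * calls with the lock and addresses the entry by the returned id.
 *
 *	struct xarray example_xa;
 *	u32 id;
 *	int error;
 *
 *	xa_init_flags(&example_xa, XA_FLAGS_ALLOC1);
 *	mtx_enter(&example_xa.xa_lock);
 *	error = __xa_alloc(&example_xa, &id, entry, 0, GFP_KERNEL);
 *	if (error == 0) {
 *		KASSERT(__xa_load(&example_xa, id) == entry);
 *		__xa_erase(&example_xa, id);
 *	}
 *	mtx_leave(&example_xa.xa_lock);
 *	xa_destroy(&example_xa);
 */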
1024 
1025 void *
1026 __xa_erase(struct xarray *xa, unsigned long index)
1027 {
1028 	struct xarray_entry find, *res;
1029 	void *ptr = NULL;
1030 
1031 	find.id = index;
1032 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1033 	if (res) {
1034 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
1035 		ptr = res->ptr;
1036 		pool_put(&xa_pool, res);
1037 	}
1038 	return ptr;
1039 }
1040 
1041 void *
1042 __xa_load(struct xarray *xa, unsigned long index)
1043 {
1044 	struct xarray_entry find, *res;
1045 
1046 	find.id = index;
1047 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1048 	if (res == NULL)
1049 		return NULL;
1050 	return res->ptr;
1051 }
1052 
1053 void *
1054 __xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1055 {
1056 	struct xarray_entry find, *res;
1057 	void *prev;
1058 
1059 	if (entry == NULL)
1060 		return __xa_erase(xa, index);
1061 
1062 	find.id = index;
1063 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1064 	if (res != NULL) {
1065 		/* index exists */
1066 		/* XXX multislot entry updates not implemented yet */
1067 		prev = res->ptr;
1068 		res->ptr = entry;
1069 		return prev;
1070 	}
1071 
1072 	/* index not found, add new */
1073 	if (gfp & GFP_NOWAIT) {
1074 		res = pool_get(&xa_pool, PR_NOWAIT);
1075 	} else {
1076 		mtx_leave(&xa->xa_lock);
1077 		res = pool_get(&xa_pool, PR_WAITOK);
1078 		mtx_enter(&xa->xa_lock);
1079 	}
1080 	if (res == NULL)
1081 		return XA_ERROR(-ENOMEM);
1082 	res->id = index;
1083 	res->ptr = entry;
1084 	if (SPLAY_INSERT(xarray_tree, &xa->xa_tree, res) != NULL)
1085 		return XA_ERROR(-EINVAL);
1086 	return NULL; /* no prev entry at index */
1087 }
1088 
1089 void *
1090 xa_get_next(struct xarray *xa, unsigned long *index)
1091 {
1092 	struct xarray_entry *res;
1093 
1094 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
1095 		if (res->id >= *index) {
1096 			*index = res->id;
1097 			return res->ptr;
1098 		}
1099 	}
1100 
1101 	return NULL;
1102 }
1103 
1104 int
1105 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
1106 {
1107 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
1108 	    M_DRM, gfp_mask | M_ZERO);
1109 	if (table->sgl == NULL)
1110 		return -ENOMEM;
1111 	table->nents = table->orig_nents = nents;
1112 	sg_mark_end(&table->sgl[nents - 1]);
1113 	return 0;
1114 }
1115 
1116 void
1117 sg_free_table(struct sg_table *table)
1118 {
1119 	free(table->sgl, M_DRM,
1120 	    table->orig_nents * sizeof(struct scatterlist));
1121 	table->orig_nents = 0;
1122 	table->sgl = NULL;
1123 }
1124 
1125 size_t
1126 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
1127     const void *buf, size_t buflen)
1128 {
1129 	panic("%s", __func__);
1130 }
1131 
1132 int
1133 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1134 {
1135 	void *cmd = NULL;
1136 	int cmdlen = 0;
1137 	int err, ret = 0;
1138 	int op;
1139 
1140 	iic_acquire_bus(&adap->ic, 0);
1141 
1142 	while (num > 2) {
1143 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
1144 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
1145 		    msgs->buf, msgs->len, 0);
1146 		if (err) {
1147 			ret = -err;
1148 			goto fail;
1149 		}
1150 		msgs++;
1151 		num--;
1152 		ret++;
1153 	}
1154 
1155 	if (num > 1) {
1156 		cmd = msgs->buf;
1157 		cmdlen = msgs->len;
1158 		msgs++;
1159 		num--;
1160 		ret++;
1161 	}
1162 
1163 	op = (msgs->flags & I2C_M_RD) ?
1164 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
1165 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
1166 	    msgs->buf, msgs->len, 0);
1167 	if (err) {
1168 		ret = -err;
1169 		goto fail;
1170 	}
1171 	msgs++;
1172 	ret++;
1173 
1174 fail:
1175 	iic_release_bus(&adap->ic, 0);
1176 
1177 	return ret;
1178 }
1179 
1180 int
1181 __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1182 {
1183 	int ret, retries;
1184 
1185 	retries = adap->retries;
1186 retry:
1187 	if (adap->algo)
1188 		ret = adap->algo->master_xfer(adap, msgs, num);
1189 	else
1190 		ret = i2c_master_xfer(adap, msgs, num);
1191 	if (ret == -EAGAIN && retries > 0) {
1192 		retries--;
1193 		goto retry;
1194 	}
1195 
1196 	return ret;
1197 }
1198 
1199 int
1200 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1201 {
1202 	int ret;
1203 
1204 	if (adap->lock_ops)
1205 		adap->lock_ops->lock_bus(adap, 0);
1206 
1207 	ret = __i2c_transfer(adap, msgs, num);
1208 
1209 	if (adap->lock_ops)
1210 		adap->lock_ops->unlock_bus(adap, 0);
1211 
1212 	return ret;
1213 }
1214 
1215 int
1216 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1217 {
1218 	struct i2c_algo_bit_data *algo = adap->algo_data;
1219 	struct i2c_adapter bb;
1220 
1221 	memset(&bb, 0, sizeof(bb));
1222 	bb.ic = algo->ic;
1223 	bb.retries = adap->retries;
1224 	return i2c_master_xfer(&bb, msgs, num);
1225 }
1226 
1227 uint32_t
1228 i2c_bb_functionality(struct i2c_adapter *adap)
1229 {
1230 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1231 }
1232 
1233 struct i2c_algorithm i2c_bit_algo = {
1234 	.master_xfer = i2c_bb_master_xfer,
1235 	.functionality = i2c_bb_functionality
1236 };
1237 
1238 int
1239 i2c_bit_add_bus(struct i2c_adapter *adap)
1240 {
1241 	adap->algo = &i2c_bit_algo;
1242 	adap->retries = 3;
1243 
1244 	return 0;
1245 }
1246 
1247 #if defined(__amd64__) || defined(__i386__)
1248 
1249 /*
1250  * This is a minimal implementation of the Linux vga_get/vga_put
1251  * interface.  In all likelihood, it will only work for inteldrm(4) as
1252  * it assumes that if there is another active VGA device in the
1253  * system, it is sitting behind a PCI bridge.
1254  */
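
/*
 * Illustrative call pattern (hypothetical; the constant follows the
 * Linux vgaarb API and the rsrc argument is ignored by this
 * implementation):
 *
 *	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 *	...touch resources shared with the other VGA device...
 *	vga_put(pdev, VGA_RSRC_LEGACY_IO);
 *
 * The get disables VGA forwarding on the bridge in front of the other
 * VGA device; the put re-enables it.
 */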
1255 
1256 extern int pci_enumerate_bus(struct pci_softc *,
1257     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1258 
1259 pcitag_t vga_bridge_tag;
1260 int vga_bridge_disabled;
1261 
1262 int
1263 vga_disable_bridge(struct pci_attach_args *pa)
1264 {
1265 	pcireg_t bhlc, bc;
1266 
1267 	if (pa->pa_domain != 0)
1268 		return 0;
1269 
1270 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1271 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1272 		return 0;
1273 
1274 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1275 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1276 		return 0;
1277 	bc &= ~PPB_BC_VGA_ENABLE;
1278 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1279 
1280 	vga_bridge_tag = pa->pa_tag;
1281 	vga_bridge_disabled = 1;
1282 
1283 	return 1;
1284 }
1285 
1286 void
1287 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1288 {
1289 	KASSERT(pdev->pci->sc_bridgetag == NULL);
1290 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1291 }
1292 
1293 void
1294 vga_put(struct pci_dev *pdev, int rsrc)
1295 {
1296 	pcireg_t bc;
1297 
1298 	if (!vga_bridge_disabled)
1299 		return;
1300 
1301 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1302 	bc |= PPB_BC_VGA_ENABLE;
1303 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1304 
1305 	vga_bridge_disabled = 0;
1306 }
1307 
1308 #endif
1309 
1310 /*
1311  * ACPI types and interfaces.
1312  */
1313 
1314 #ifdef __HAVE_ACPI
1315 #include "acpi.h"
1316 #endif
1317 
1318 #if NACPI > 0
1319 
1320 #include <dev/acpi/acpireg.h>
1321 #include <dev/acpi/acpivar.h>
1322 #include <dev/acpi/amltypes.h>
1323 #include <dev/acpi/dsdt.h>
1324 
1325 acpi_status
1326 acpi_get_table(const char *sig, int instance,
1327     struct acpi_table_header **hdr)
1328 {
1329 	struct acpi_softc *sc = acpi_softc;
1330 	struct acpi_q *entry;
1331 
1332 	KASSERT(instance == 1);
1333 
1334 	if (sc == NULL)
1335 		return AE_NOT_FOUND;
1336 
1337 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1338 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1339 			*hdr = entry->q_table;
1340 			return 0;
1341 		}
1342 	}
1343 
1344 	return AE_NOT_FOUND;
1345 }
1346 
1347 void
1348 acpi_put_table(struct acpi_table_header *hdr)
1349 {
1350 }
1351 
1352 acpi_status
1353 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1354 {
1355 	node = aml_searchname(node, name);
1356 	if (node == NULL)
1357 		return AE_NOT_FOUND;
1358 
1359 	*rnode = node;
1360 	return 0;
1361 }
1362 
1363 acpi_status
1364 acpi_get_name(acpi_handle node, int type, struct acpi_buffer *buffer)
1365 {
1366 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1367 	KASSERT(type == ACPI_FULL_PATHNAME);
1368 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1369 	return 0;
1370 }
1371 
1372 acpi_status
1373 acpi_evaluate_object(acpi_handle node, const char *name,
1374     struct acpi_object_list *params, struct acpi_buffer *result)
1375 {
1376 	struct aml_value args[4], res;
1377 	union acpi_object *obj;
1378 	uint8_t *data;
1379 	int i;
1380 
1381 	KASSERT(params->count <= nitems(args));
1382 
1383 	for (i = 0; i < params->count; i++) {
1384 		args[i].type = params->pointer[i].type;
1385 		switch (args[i].type) {
1386 		case AML_OBJTYPE_INTEGER:
1387 			args[i].v_integer = params->pointer[i].integer.value;
1388 			break;
1389 		case AML_OBJTYPE_BUFFER:
1390 			args[i].length = params->pointer[i].buffer.length;
1391 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1392 			break;
1393 		default:
1394 			printf("%s: arg type 0x%02x\n", __func__, args[i].type);
1395 			return AE_BAD_PARAMETER;
1396 		}
1397 	}
1398 
1399 	if (name) {
1400 		node = aml_searchname(node, name);
1401 		if (node == NULL)
1402 			return AE_NOT_FOUND;
1403 	}
1404 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1405 		aml_freevalue(&res);
1406 		return AE_ERROR;
1407 	}
1408 
1409 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1410 
1411 	result->length = sizeof(union acpi_object);
1412 	switch (res.type) {
1413 	case AML_OBJTYPE_BUFFER:
1414 		result->length += res.length;
1415 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1416 		obj = (union acpi_object *)result->pointer;
1417 		data = (uint8_t *)(obj + 1);
1418 		obj->type = res.type;
1419 		obj->buffer.length = res.length;
1420 		obj->buffer.pointer = data;
1421 		memcpy(data, res.v_buffer, res.length);
1422 		break;
1423 	default:
1424 		printf("%s: return type 0x%02x\n", __func__, res.type);
1425 		aml_freevalue(&res);
1426 		return AE_ERROR;
1427 	}
1428 
1429 	aml_freevalue(&res);
1430 	return 0;
1431 }
1432 
1433 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1434 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1435 
1436 int
1437 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1438 {
1439 	struct acpi_bus_event event;
1440 	struct notifier_block *nb;
1441 
1442 	event.device_class = ACPI_VIDEO_CLASS;
1443 	event.type = notify;
1444 
1445 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1446 		nb->notifier_call(nb, 0, &event);
1447 	return 0;
1448 }
1449 
1450 int
1451 register_acpi_notifier(struct notifier_block *nb)
1452 {
1453 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1454 	return 0;
1455 }
1456 
1457 int
1458 unregister_acpi_notifier(struct notifier_block *nb)
1459 {
1460 	struct notifier_block *tmp;
1461 
1462 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1463 		if (tmp == nb) {
1464 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1465 			    notifier_block, link);
1466 			return 0;
1467 		}
1468 	}
1469 
1470 	return -ENOENT;
1471 }
1472 
1473 const char *
1474 acpi_format_exception(acpi_status status)
1475 {
1476 	switch (status) {
1477 	case AE_NOT_FOUND:
1478 		return "not found";
1479 	case AE_BAD_PARAMETER:
1480 		return "bad parameter";
1481 	default:
1482 		return "unknown";
1483 	}
1484 }
1485 
1486 #endif
1487 
1488 void
1489 backlight_do_update_status(void *arg)
1490 {
1491 	backlight_update_status(arg);
1492 }
1493 
1494 struct backlight_device *
1495 backlight_device_register(const char *name, void *kdev, void *data,
1496     const struct backlight_ops *ops, struct backlight_properties *props)
1497 {
1498 	struct backlight_device *bd;
1499 
1500 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1501 	bd->ops = ops;
1502 	bd->props = *props;
1503 	bd->data = data;
1504 
1505 	task_set(&bd->task, backlight_do_update_status, bd);
1506 
1507 	return bd;
1508 }
1509 
1510 void
1511 backlight_device_unregister(struct backlight_device *bd)
1512 {
1513 	free(bd, M_DRM, sizeof(*bd));
1514 }
1515 
1516 struct backlight_device *
1517 devm_backlight_device_register(void *dev, const char *name, void *parent,
1518     void *data, const struct backlight_ops *bo,
1519     const struct backlight_properties *bp)
1520 {
1521 	STUB();
1522 	return NULL;
1523 }
1524 
1525 void
1526 backlight_schedule_update_status(struct backlight_device *bd)
1527 {
1528 	task_add(systq, &bd->task);
1529 }
1530 
1531 inline int
1532 backlight_enable(struct backlight_device *bd)
1533 {
1534 	if (bd == NULL)
1535 		return 0;
1536 
1537 	bd->props.power = FB_BLANK_UNBLANK;
1538 
1539 	return bd->ops->update_status(bd);
1540 }
1541 
1542 inline int
1543 backlight_disable(struct backlight_device *bd)
1544 {
1545 	if (bd == NULL)
1546 		return 0;
1547 
1548 	bd->props.power = FB_BLANK_POWERDOWN;
1549 
1550 	return bd->ops->update_status(bd);
1551 }
1552 
1553 void
1554 drm_sysfs_hotplug_event(struct drm_device *dev)
1555 {
1556 	knote_locked(&dev->note, NOTE_CHANGE);
1557 }
1558 
1559 void
1560 drm_sysfs_connector_hotplug_event(struct drm_connector *connector)
1561 {
1562 	knote_locked(&connector->dev->note, NOTE_CHANGE);
1563 }
1564 
1565 void
1566 drm_sysfs_connector_status_event(struct drm_connector *connector,
1567     struct drm_property *property)
1568 {
1569 	STUB();
1570 }
1571 
1572 struct dma_fence *
1573 dma_fence_get(struct dma_fence *fence)
1574 {
1575 	if (fence)
1576 		kref_get(&fence->refcount);
1577 	return fence;
1578 }
1579 
1580 struct dma_fence *
1581 dma_fence_get_rcu(struct dma_fence *fence)
1582 {
1583 	if (fence)
1584 		kref_get(&fence->refcount);
1585 	return fence;
1586 }
1587 
1588 struct dma_fence *
1589 dma_fence_get_rcu_safe(struct dma_fence **dfp)
1590 {
1591 	struct dma_fence *fence;
1592 	if (dfp == NULL)
1593 		return NULL;
1594 	fence = *dfp;
1595 	if (fence)
1596 		kref_get(&fence->refcount);
1597 	return fence;
1598 }
1599 
1600 void
1601 dma_fence_release(struct kref *ref)
1602 {
1603 	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
1604 	if (fence->ops && fence->ops->release)
1605 		fence->ops->release(fence);
1606 	else
1607 		free(fence, M_DRM, 0);
1608 }
1609 
1610 void
1611 dma_fence_put(struct dma_fence *fence)
1612 {
1613 	if (fence)
1614 		kref_put(&fence->refcount, dma_fence_release);
1615 }
1616 
1617 int
1618 dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp)
1619 {
1620 	struct dma_fence_cb *cur, *tmp;
1621 	struct list_head cb_list;
1622 
1623 	if (fence == NULL)
1624 		return -EINVAL;
1625 
1626 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1627 		return -EINVAL;
1628 
1629 	list_replace(&fence->cb_list, &cb_list);
1630 
1631 	fence->timestamp = timestamp;
1632 	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
1633 
1634 	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
1635 		INIT_LIST_HEAD(&cur->node);
1636 		cur->func(fence, cur);
1637 	}
1638 
1639 	return 0;
1640 }
1641 
1642 int
1643 dma_fence_signal(struct dma_fence *fence)
1644 {
1645 	int r;
1646 
1647 	if (fence == NULL)
1648 		return -EINVAL;
1649 
1650 	mtx_enter(fence->lock);
1651 	r = dma_fence_signal_timestamp_locked(fence, ktime_get());
1652 	mtx_leave(fence->lock);
1653 
1654 	return r;
1655 }
1656 
1657 int
1658 dma_fence_signal_locked(struct dma_fence *fence)
1659 {
1660 	if (fence == NULL)
1661 		return -EINVAL;
1662 
1663 	return dma_fence_signal_timestamp_locked(fence, ktime_get());
1664 }
1665 
1666 int
1667 dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
1668 {
1669 	int r;
1670 
1671 	if (fence == NULL)
1672 		return -EINVAL;
1673 
1674 	mtx_enter(fence->lock);
1675 	r = dma_fence_signal_timestamp_locked(fence, timestamp);
1676 	mtx_leave(fence->lock);
1677 
1678 	return r;
1679 }
1680 
1681 bool
1682 dma_fence_is_signaled(struct dma_fence *fence)
1683 {
1684 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1685 		return true;
1686 
1687 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1688 		dma_fence_signal(fence);
1689 		return true;
1690 	}
1691 
1692 	return false;
1693 }
1694 
1695 bool
1696 dma_fence_is_signaled_locked(struct dma_fence *fence)
1697 {
1698 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1699 		return true;
1700 
1701 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1702 		dma_fence_signal_locked(fence);
1703 		return true;
1704 	}
1705 
1706 	return false;
1707 }
1708 
1709 long
1710 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
1711 {
1712 	if (timeout < 0)
1713 		return -EINVAL;
1714 
1715 	if (fence->ops->wait)
1716 		return fence->ops->wait(fence, intr, timeout);
1717 	else
1718 		return dma_fence_default_wait(fence, intr, timeout);
1719 }
1720 
1721 long
1722 dma_fence_wait(struct dma_fence *fence, bool intr)
1723 {
1724 	long ret;
1725 
1726 	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
1727 	if (ret < 0)
1728 		return ret;
1729 
1730 	return 0;
1731 }
1732 
1733 void
1734 dma_fence_enable_sw_signaling(struct dma_fence *fence)
1735 {
1736 	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
1737 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
1738 	    fence->ops->enable_signaling) {
1739 		mtx_enter(fence->lock);
1740 		if (!fence->ops->enable_signaling(fence))
1741 			dma_fence_signal_locked(fence);
1742 		mtx_leave(fence->lock);
1743 	}
1744 }
1745 
1746 void
1747 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
1748     struct mutex *lock, uint64_t context, uint64_t seqno)
1749 {
1750 	fence->ops = ops;
1751 	fence->lock = lock;
1752 	fence->context = context;
1753 	fence->seqno = seqno;
1754 	fence->flags = 0;
1755 	fence->error = 0;
1756 	kref_init(&fence->refcount);
1757 	INIT_LIST_HEAD(&fence->cb_list);
1758 }
1759 
1760 int
1761 dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
1762     dma_fence_func_t func)
1763 {
1764 	int ret = 0;
1765 	bool was_set;
1766 
1767 	if (WARN_ON(!fence || !func))
1768 		return -EINVAL;
1769 
1770 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1771 		INIT_LIST_HEAD(&cb->node);
1772 		return -ENOENT;
1773 	}
1774 
1775 	mtx_enter(fence->lock);
1776 
1777 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
1778 
1779 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1780 		ret = -ENOENT;
1781 	else if (!was_set && fence->ops->enable_signaling) {
1782 		if (!fence->ops->enable_signaling(fence)) {
1783 			dma_fence_signal_locked(fence);
1784 			ret = -ENOENT;
1785 		}
1786 	}
1787 
1788 	if (!ret) {
1789 		cb->func = func;
1790 		list_add_tail(&cb->node, &fence->cb_list);
1791 	} else
1792 		INIT_LIST_HEAD(&cb->node);
1793 	mtx_leave(fence->lock);
1794 
1795 	return ret;
1796 }
1797 
1798 bool
1799 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
1800 {
1801 	bool ret;
1802 
1803 	mtx_enter(fence->lock);
1804 
1805 	ret = !list_empty(&cb->node);
1806 	if (ret)
1807 		list_del_init(&cb->node);
1808 
1809 	mtx_leave(fence->lock);
1810 
1811 	return ret;
1812 }
1813 
1814 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1815 
1816 uint64_t
1817 dma_fence_context_alloc(unsigned int num)
1818 {
1819 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1820 }
1821 
1822 struct default_wait_cb {
1823 	struct dma_fence_cb base;
1824 	struct proc *proc;
1825 };
1826 
1827 static void
1828 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1829 {
1830 	struct default_wait_cb *wait =
1831 	    container_of(cb, struct default_wait_cb, base);
1832 	wake_up_process(wait->proc);
1833 }
1834 
1835 long
1836 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1837 {
1838 	long ret = timeout ? timeout : 1;
1839 	unsigned long end;
1840 	int err;
1841 	struct default_wait_cb cb;
1842 	bool was_set;
1843 
1844 	KASSERT(timeout <= INT_MAX);
1845 
1846 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1847 		return ret;
1848 
1849 	mtx_enter(fence->lock);
1850 
1851 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1852 	    &fence->flags);
1853 
1854 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1855 		goto out;
1856 
1857 	if (!was_set && fence->ops->enable_signaling) {
1858 		if (!fence->ops->enable_signaling(fence)) {
1859 			dma_fence_signal_locked(fence);
1860 			goto out;
1861 		}
1862 	}
1863 
1864 	if (timeout == 0) {
1865 		ret = 0;
1866 		goto out;
1867 	}
1868 
1869 	cb.base.func = dma_fence_default_wait_cb;
1870 	cb.proc = curproc;
1871 	list_add(&cb.base.node, &fence->cb_list);
1872 
1873 	end = jiffies + timeout;
1874 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1875 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1876 			break;
1877 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
1878 		    "dmafence", ret);
1879 		if (err == EINTR || err == ERESTART) {
1880 			ret = -ERESTARTSYS;
1881 			break;
1882 		}
1883 	}
1884 
1885 	if (!list_empty(&cb.base.node))
1886 		list_del(&cb.base.node);
1887 out:
1888 	mtx_leave(fence->lock);
1889 
1890 	return ret;
1891 }
1892 
1893 static bool
1894 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1895     uint32_t *idx)
1896 {
1897 	int i;
1898 
1899 	for (i = 0; i < count; ++i) {
1900 		struct dma_fence *fence = fences[i];
1901 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1902 			if (idx)
1903 				*idx = i;
1904 			return true;
1905 		}
1906 	}
1907 	return false;
1908 }
1909 
1910 long
1911 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
1912     bool intr, long timeout, uint32_t *idx)
1913 {
1914 	struct default_wait_cb *cb;
1915 	long ret = timeout;
1916 	unsigned long end;
1917 	int i, err;
1918 
1919 	KASSERT(timeout <= INT_MAX);
1920 
1921 	if (timeout == 0) {
1922 		for (i = 0; i < count; i++) {
1923 			if (dma_fence_is_signaled(fences[i])) {
1924 				if (idx)
1925 					*idx = i;
1926 				return 1;
1927 			}
1928 		}
1929 		return 0;
1930 	}
1931 
1932 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1933 	if (cb == NULL)
1934 		return -ENOMEM;
1935 
1936 	for (i = 0; i < count; i++) {
1937 		struct dma_fence *fence = fences[i];
1938 		cb[i].proc = curproc;
1939 		if (dma_fence_add_callback(fence, &cb[i].base,
1940 		    dma_fence_default_wait_cb)) {
1941 			if (idx)
1942 				*idx = i;
1943 			goto cb_cleanup;
1944 		}
1945 	}
1946 
1947 	end = jiffies + timeout;
1948 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1949 		if (dma_fence_test_signaled_any(fences, count, idx))
1950 			break;
1951 		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
1952 		if (err == EINTR || err == ERESTART) {
1953 			ret = -ERESTARTSYS;
1954 			break;
1955 		}
1956 	}
1957 
1958 cb_cleanup:
1959 	while (i-- > 0)
1960 		dma_fence_remove_callback(fences[i], &cb[i].base);
1961 	free(cb, M_DRM, count * sizeof(*cb));
1962 	return ret;
1963 }
1964 
1965 static struct dma_fence dma_fence_stub;
1966 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
1967 
1968 static const char *
1969 dma_fence_stub_get_name(struct dma_fence *fence)
1970 {
1971 	return "stub";
1972 }
1973 
1974 static const struct dma_fence_ops dma_fence_stub_ops = {
1975 	.get_driver_name = dma_fence_stub_get_name,
1976 	.get_timeline_name = dma_fence_stub_get_name,
1977 };
1978 
1979 struct dma_fence *
1980 dma_fence_get_stub(void)
1981 {
1982 	mtx_enter(&dma_fence_stub_mtx);
1983 	if (dma_fence_stub.ops == NULL) {
1984 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
1985 		    &dma_fence_stub_mtx, 0, 0);
1986 		dma_fence_signal_locked(&dma_fence_stub);
1987 	}
1988 	mtx_leave(&dma_fence_stub_mtx);
1989 
1990 	return dma_fence_get(&dma_fence_stub);
1991 }
1992 
1993 struct dma_fence *
1994 dma_fence_allocate_private_stub(void)
1995 {
1996 	struct dma_fence *f = malloc(sizeof(*f), M_DRM,
1997 	    M_ZERO | M_WAITOK | M_CANFAIL);
1998 	if (f == NULL)
1999 		return ERR_PTR(-ENOMEM);
2000 	dma_fence_init(f, &dma_fence_stub_ops, &dma_fence_stub_mtx, 0, 0);
2001 	dma_fence_signal(f);
2002 	return f;
2003 }
2004 
2005 static const char *
2006 dma_fence_array_get_driver_name(struct dma_fence *fence)
2007 {
2008 	return "dma_fence_array";
2009 }
2010 
2011 static const char *
2012 dma_fence_array_get_timeline_name(struct dma_fence *fence)
2013 {
2014 	return "unbound";
2015 }
2016 
2017 static void
2018 irq_dma_fence_array_work(void *arg)
2019 {
2020 	struct dma_fence_array *dfa = (struct dma_fence_array *)arg;
2021 	dma_fence_signal(&dfa->base);
2022 	dma_fence_put(&dfa->base);
2023 }
2024 
2025 static void
2026 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
2027 {
2028 	struct dma_fence_array_cb *array_cb =
2029 	    container_of(cb, struct dma_fence_array_cb, cb);
2030 	struct dma_fence_array *dfa = array_cb->array;
2031 
2032 	if (atomic_dec_and_test(&dfa->num_pending))
2033 		timeout_add(&dfa->to, 1);
2034 	else
2035 		dma_fence_put(&dfa->base);
2036 }
2037 
2038 static bool
2039 dma_fence_array_enable_signaling(struct dma_fence *fence)
2040 {
2041 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2042 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
2043 	int i;
2044 
2045 	for (i = 0; i < dfa->num_fences; ++i) {
2046 		cb[i].array = dfa;
2047 		dma_fence_get(&dfa->base);
2048 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
2049 		    dma_fence_array_cb_func)) {
2050 			dma_fence_put(&dfa->base);
2051 			if (atomic_dec_and_test(&dfa->num_pending))
2052 				return false;
2053 		}
2054 	}
2055 
2056 	return true;
2057 }
2058 
2059 static bool
2060 dma_fence_array_signaled(struct dma_fence *fence)
2061 {
2062 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2063 
2064 	return atomic_read(&dfa->num_pending) <= 0;
2065 }
2066 
2067 static void
2068 dma_fence_array_release(struct dma_fence *fence)
2069 {
2070 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2071 	int i;
2072 
2073 	for (i = 0; i < dfa->num_fences; ++i)
2074 		dma_fence_put(dfa->fences[i]);
2075 
2076 	free(dfa->fences, M_DRM, 0);
2077 	dma_fence_free(fence);
2078 }
2079 
2080 struct dma_fence_array *
2081 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
2082     unsigned seqno, bool signal_on_any)
2083 {
2084 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
2085 	    (num_fences * sizeof(struct dma_fence_array_cb)),
2086 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
2087 	if (dfa == NULL)
2088 		return NULL;
2089 
2090 	mtx_init(&dfa->lock, IPL_TTY);
2091 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
2092 	    context, seqno);
2093 	timeout_set(&dfa->to, irq_dma_fence_array_work, dfa);
2094 
2095 	dfa->num_fences = num_fences;
2096 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
2097 	dfa->fences = fences;
2098 
2099 	return dfa;
2100 }
2101 
2102 struct dma_fence *
2103 dma_fence_array_first(struct dma_fence *f)
2104 {
2105 	struct dma_fence_array *dfa;
2106 
2107 	if (f == NULL)
2108 		return NULL;
2109 
2110 	if ((dfa = to_dma_fence_array(f)) == NULL)
2111 		return f;
2112 
2113 	if (dfa->num_fences > 0)
2114 		return dfa->fences[0];
2115 
2116 	return NULL;
2117 }
2118 
2119 struct dma_fence *
2120 dma_fence_array_next(struct dma_fence *f, unsigned int i)
2121 {
2122 	struct dma_fence_array *dfa;
2123 
2124 	if (f == NULL)
2125 		return NULL;
2126 
2127 	if ((dfa = to_dma_fence_array(f)) == NULL)
2128 		return NULL;
2129 
2130 	if (i < dfa->num_fences)
2131 		return dfa->fences[i];
2132 
2133 	return NULL;
2134 }
2135 
2136 const struct dma_fence_ops dma_fence_array_ops = {
2137 	.get_driver_name = dma_fence_array_get_driver_name,
2138 	.get_timeline_name = dma_fence_array_get_timeline_name,
2139 	.enable_signaling = dma_fence_array_enable_signaling,
2140 	.signaled = dma_fence_array_signaled,
2141 	.release = dma_fence_array_release,
2142 };
2143 
2144 int
2145 dma_fence_chain_find_seqno(struct dma_fence **df, uint64_t seqno)
2146 {
2147 	struct dma_fence_chain *chain;
2148 	struct dma_fence *fence;
2149 
2150 	if (seqno == 0)
2151 		return 0;
2152 
2153 	if ((chain = to_dma_fence_chain(*df)) == NULL)
2154 		return -EINVAL;
2155 
2156 	fence = &chain->base;
2157 	if (fence->seqno < seqno)
2158 		return -EINVAL;
2159 
2160 	dma_fence_chain_for_each(*df, fence) {
2161 		if ((*df)->context != fence->context)
2162 			break;
2163 
2164 		chain = to_dma_fence_chain(*df);
2165 		if (chain->prev_seqno < seqno)
2166 			break;
2167 	}
2168 	dma_fence_put(fence);
2169 
2170 	return 0;
2171 }
2172 
2173 void
2174 dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev,
2175     struct dma_fence *fence, uint64_t seqno)
2176 {
2177 	uint64_t context;
2178 
2179 	chain->fence = fence;
2180 	chain->prev = prev;
2181 	mtx_init(&chain->lock, IPL_TTY);
2182 
2183 	/* if prev is a chain */
2184 	if (to_dma_fence_chain(prev) != NULL) {
2185 		if (__dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
2186 			chain->prev_seqno = prev->seqno;
2187 			context = prev->context;
2188 		} else {
2189 			chain->prev_seqno = 0;
2190 			context = dma_fence_context_alloc(1);
2191 			seqno = prev->seqno;
2192 		}
2193 	} else {
2194 		chain->prev_seqno = 0;
2195 		context = dma_fence_context_alloc(1);
2196 	}
2197 
2198 	dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock,
2199 	    context, seqno);
2200 }
2201 
2202 static const char *
2203 dma_fence_chain_get_driver_name(struct dma_fence *fence)
2204 {
2205 	return "dma_fence_chain";
2206 }
2207 
2208 static const char *
2209 dma_fence_chain_get_timeline_name(struct dma_fence *fence)
2210 {
2211 	return "unbound";
2212 }
2213 
2214 static bool dma_fence_chain_enable_signaling(struct dma_fence *);
2215 
2216 static void
2217 dma_fence_chain_timo(void *arg)
2218 {
2219 	struct dma_fence_chain *chain = (struct dma_fence_chain *)arg;
2220 
2221 	if (dma_fence_chain_enable_signaling(&chain->base) == false)
2222 		dma_fence_signal(&chain->base);
2223 	dma_fence_put(&chain->base);
2224 }
2225 
2226 static void
2227 dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
2228 {
2229 	struct dma_fence_chain *chain =
2230 	    container_of(cb, struct dma_fence_chain, cb);
2231 	timeout_set(&chain->to, dma_fence_chain_timo, chain);
2232 	timeout_add(&chain->to, 1);
2233 	dma_fence_put(f);
2234 }
2235 
2236 static bool
2237 dma_fence_chain_enable_signaling(struct dma_fence *fence)
2238 {
2239 	struct dma_fence_chain *chain, *h;
2240 	struct dma_fence *f;
2241 
2242 	h = to_dma_fence_chain(fence);
2243 	dma_fence_get(&h->base);
2244 	dma_fence_chain_for_each(fence, &h->base) {
2245 		chain = to_dma_fence_chain(fence);
2246 		if (chain == NULL)
2247 			f = fence;
2248 		else
2249 			f = chain->fence;
2250 
2251 		dma_fence_get(f);
2252 		if (!dma_fence_add_callback(f, &h->cb, dma_fence_chain_cb)) {
2253 			dma_fence_put(fence);
2254 			return true;
2255 		}
2256 		dma_fence_put(f);
2257 	}
2258 	dma_fence_put(&h->base);
2259 	return false;
2260 }
2261 
2262 static bool
2263 dma_fence_chain_signaled(struct dma_fence *fence)
2264 {
2265 	struct dma_fence_chain *chain;
2266 	struct dma_fence *f;
2267 
2268 	dma_fence_chain_for_each(fence, fence) {
2269 		chain = to_dma_fence_chain(fence);
2270 		if (chain == NULL)
2271 			f = fence;
2272 		else
2273 			f = chain->fence;
2274 
2275 		if (dma_fence_is_signaled(f) == false) {
2276 			dma_fence_put(fence);
2277 			return false;
2278 		}
2279 	}
2280 	return true;
2281 }
2282 
2283 static void
2284 dma_fence_chain_release(struct dma_fence *fence)
2285 {
2286 	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
2287 	struct dma_fence_chain *prev_chain;
2288 	struct dma_fence *prev;
2289 
2290 	for (prev = chain->prev; prev != NULL; prev = chain->prev) {
2291 		if (kref_read(&prev->refcount) > 1)
2292 			break;
2293 		if ((prev_chain = to_dma_fence_chain(prev)) == NULL)
2294 			break;
2295 		chain->prev = prev_chain->prev;
2296 		prev_chain->prev = NULL;
2297 		dma_fence_put(prev);
2298 	}
2299 	dma_fence_put(prev);
2300 	dma_fence_put(chain->fence);
2301 	dma_fence_free(fence);
2302 }
2303 
2304 struct dma_fence *
2305 dma_fence_chain_walk(struct dma_fence *fence)
2306 {
2307 	struct dma_fence_chain *chain = to_dma_fence_chain(fence), *prev_chain;
2308 	struct dma_fence *prev, *new_prev, *tmp;
2309 
2310 	if (chain == NULL) {
2311 		dma_fence_put(fence);
2312 		return NULL;
2313 	}
2314 
2315 	while ((prev = dma_fence_get(chain->prev)) != NULL) {
2316 		prev_chain = to_dma_fence_chain(prev);
2317 		if (prev_chain != NULL) {
2318 			if (!dma_fence_is_signaled(prev_chain->fence))
2319 				break;
2320 			new_prev = dma_fence_get(prev_chain->prev);
2321 		} else {
2322 			if (!dma_fence_is_signaled(prev))
2323 				break;
2324 			new_prev = NULL;
2325 		}
2326 		tmp = atomic_cas_ptr(&chain->prev, prev, new_prev);
2327 		dma_fence_put(tmp == prev ? prev : new_prev);
2328 		dma_fence_put(prev);
2329 	}
2330 
2331 	dma_fence_put(fence);
2332 	return prev;
2333 }
2334 
2335 const struct dma_fence_ops dma_fence_chain_ops = {
2336 	.get_driver_name = dma_fence_chain_get_driver_name,
2337 	.get_timeline_name = dma_fence_chain_get_timeline_name,
2338 	.enable_signaling = dma_fence_chain_enable_signaling,
2339 	.signaled = dma_fence_chain_signaled,
2340 	.release = dma_fence_chain_release,
2341 	.use_64bit_seqno = true,
2342 };
2343 
2344 bool
2345 dma_fence_is_container(struct dma_fence *fence)
2346 {
2347 	return (fence->ops == &dma_fence_chain_ops) ||
2348 	    (fence->ops == &dma_fence_array_ops);
2349 }
2350 
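/*
 * File operations backing exported dma-bufs.  Only stat, close and a
 * restricted seek (offset 0 with SEEK_SET or SEEK_END, which lets
 * userland discover the buffer size via lseek) are meaningful; read,
 * write, ioctl and kqfilter are rejected.
 */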
2351 int
2352 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
2353 {
2354 	return (ENXIO);
2355 }
2356 
2357 int
2358 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
2359 {
2360 	return (ENXIO);
2361 }
2362 
2363 int
2364 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2365 {
2366 	return (ENOTTY);
2367 }
2368 
2369 int
2370 dmabuf_kqfilter(struct file *fp, struct knote *kn)
2371 {
2372 	return (EINVAL);
2373 }
2374 
2375 int
2376 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
2377 {
2378 	struct dma_buf *dmabuf = fp->f_data;
2379 
2380 	memset(st, 0, sizeof(*st));
2381 	st->st_size = dmabuf->size;
2382 	st->st_mode = S_IFIFO;	/* XXX */
2383 	return (0);
2384 }
2385 
2386 int
2387 dmabuf_close(struct file *fp, struct proc *p)
2388 {
2389 	struct dma_buf *dmabuf = fp->f_data;
2390 
2391 	fp->f_data = NULL;
2392 	KERNEL_LOCK();
2393 	dmabuf->ops->release(dmabuf);
2394 	KERNEL_UNLOCK();
2395 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
2396 	return (0);
2397 }
2398 
2399 int
2400 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2401 {
2402 	struct dma_buf *dmabuf = fp->f_data;
2403 	off_t newoff;
2404 
2405 	if (*offset != 0)
2406 		return (EINVAL);
2407 
2408 	switch (whence) {
2409 	case SEEK_SET:
2410 		newoff = 0;
2411 		break;
2412 	case SEEK_END:
2413 		newoff = dmabuf->size;
2414 		break;
2415 	default:
2416 		return (EINVAL);
2417 	}
2418 	mtx_enter(&fp->f_mtx);
2419 	fp->f_offset = newoff;
2420 	mtx_leave(&fp->f_mtx);
2421 	*offset = newoff;
2422 	return (0);
2423 }
2424 
2425 const struct fileops dmabufops = {
2426 	.fo_read	= dmabuf_read,
2427 	.fo_write	= dmabuf_write,
2428 	.fo_ioctl	= dmabuf_ioctl,
2429 	.fo_kqfilter	= dmabuf_kqfilter,
2430 	.fo_stat	= dmabuf_stat,
2431 	.fo_close	= dmabuf_close,
2432 	.fo_seek	= dmabuf_seek,
2433 };
2434 
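/*
 * Wrap exporter-private data in a new dma-buf backed by a DTYPE_DMABUF
 * file; the descriptor itself is handed out separately by dma_buf_fd().
 * A minimal exporter sketch (the names below are hypothetical, only the
 * priv/ops/size fields read here are real):
 *
 *	struct dma_buf_export_info info = {
 *		.ops = &myexporter_dmabuf_ops,
 *		.size = obj_size,
 *		.priv = obj,
 *	};
 *	struct dma_buf *dmabuf = dma_buf_export(&info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 */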
2435 struct dma_buf *
2436 dma_buf_export(const struct dma_buf_export_info *info)
2437 {
2438 	struct proc *p = curproc;
2439 	struct dma_buf *dmabuf;
2440 	struct file *fp;
2441 
2442 	fp = fnew(p);
2443 	if (fp == NULL)
2444 		return ERR_PTR(-ENFILE);
2445 	fp->f_type = DTYPE_DMABUF;
2446 	fp->f_ops = &dmabufops;
2447 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
2448 	dmabuf->priv = info->priv;
2449 	dmabuf->ops = info->ops;
2450 	dmabuf->size = info->size;
2451 	dmabuf->file = fp;
2452 	fp->f_data = dmabuf;
2453 	INIT_LIST_HEAD(&dmabuf->attachments);
2454 	return dmabuf;
2455 }
2456 
2457 struct dma_buf *
2458 dma_buf_get(int fd)
2459 {
2460 	struct proc *p = curproc;
2461 	struct filedesc *fdp = p->p_fd;
2462 	struct file *fp;
2463 
2464 	if ((fp = fd_getfile(fdp, fd)) == NULL)
2465 		return ERR_PTR(-EBADF);
2466 
2467 	if (fp->f_type != DTYPE_DMABUF) {
2468 		FRELE(fp, p);
2469 		return ERR_PTR(-EINVAL);
2470 	}
2471 
2472 	return fp->f_data;
2473 }
2474 
2475 void
2476 dma_buf_put(struct dma_buf *dmabuf)
2477 {
2478 	KASSERT(dmabuf);
2479 	KASSERT(dmabuf->file);
2480 
2481 	FRELE(dmabuf->file, curproc);
2482 }
2483 
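/*
 * Install the dma-buf's backing file in the calling process's
 * descriptor table and return the new fd (or a negative errno);
 * O_CLOEXEC in flags marks the descriptor close-on-exec.
 */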
2484 int
2485 dma_buf_fd(struct dma_buf *dmabuf, int flags)
2486 {
2487 	struct proc *p = curproc;
2488 	struct filedesc *fdp = p->p_fd;
2489 	struct file *fp = dmabuf->file;
2490 	int fd, cloexec, error;
2491 
2492 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
2493 
2494 	fdplock(fdp);
2495 restart:
2496 	if ((error = fdalloc(p, 0, &fd)) != 0) {
2497 		if (error == ENOSPC) {
2498 			fdexpand(p);
2499 			goto restart;
2500 		}
2501 		fdpunlock(fdp);
2502 		return -error;
2503 	}
2504 
2505 	fdinsert(fdp, fd, cloexec, fp);
2506 	fdpunlock(fdp);
2507 
2508 	return fd;
2509 }
2510 
2511 void
2512 get_dma_buf(struct dma_buf *dmabuf)
2513 {
2514 	FREF(dmabuf->file);
2515 }
2516 
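/*
 * Report the fastest link speed the device advertises.  The Link
 * Capabilities 2 register (PCIe 3.0 and later) carries a supported
 * speeds vector; older devices only expose the maximum speed in the
 * low bits of Link Capabilities.
 */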
2517 enum pci_bus_speed
2518 pcie_get_speed_cap(struct pci_dev *pdev)
2519 {
2520 	pci_chipset_tag_t	pc;
2521 	pcitag_t		tag;
2522 	int			pos;
2523 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
2524 	pcireg_t		id;
2525 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
2526 	int			bus, device, function;
2527 
2528 	if (pdev == NULL)
2529 		return PCI_SPEED_UNKNOWN;
2530 
2531 	pc = pdev->pc;
2532 	tag = pdev->tag;
2533 
2534 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2535 	    &pos, NULL))
2536 		return PCI_SPEED_UNKNOWN;
2537 
2538 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2539 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2540 
2541 	/* we've been informed VIA and ServerWorks devices don't make the cut */
2542 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
2543 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
2544 		return PCI_SPEED_UNKNOWN;
2545 
2546 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2547 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
2548 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
2549 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
2550 
2551 	lnkcap &= 0x0f;
2552 	lnkcap2 &= 0xfe;
2553 
2554 	if (lnkcap2) { /* PCIE GEN 3.0 */
2555 		if (lnkcap2 & 0x02)
2556 			cap = PCIE_SPEED_2_5GT;
2557 		if (lnkcap2 & 0x04)
2558 			cap = PCIE_SPEED_5_0GT;
2559 		if (lnkcap2 & 0x08)
2560 			cap = PCIE_SPEED_8_0GT;
2561 		if (lnkcap2 & 0x10)
2562 			cap = PCIE_SPEED_16_0GT;
2563 		if (lnkcap2 & 0x20)
2564 			cap = PCIE_SPEED_32_0GT;
2565 		if (lnkcap2 & 0x40)
2566 			cap = PCIE_SPEED_64_0GT;
2567 	} else {
2568 		if (lnkcap & 0x01)
2569 			cap = PCIE_SPEED_2_5GT;
2570 		if (lnkcap & 0x02)
2571 			cap = PCIE_SPEED_5_0GT;
2572 	}
2573 
2574 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
2575 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
2576 	    lnkcap2);
2577 	return cap;
2578 }
2579 
2580 enum pcie_link_width
2581 pcie_get_width_cap(struct pci_dev *pdev)
2582 {
2583 	pci_chipset_tag_t	pc = pdev->pc;
2584 	pcitag_t		tag = pdev->tag;
2585 	int			pos;
2586 	pcireg_t		lnkcap = 0;
2587 	pcireg_t		id;
2588 	int			bus, device, function;
2589 
2590 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2591 	    &pos, NULL))
2592 		return PCIE_LNK_WIDTH_UNKNOWN;
2593 
2594 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2595 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2596 
2597 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2598 
2599 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
2600 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
2601 
2602 	if (lnkcap)
2603 		return (lnkcap & 0x3f0) >> 4;
2604 	return PCIE_LNK_WIDTH_UNKNOWN;
2605 }
2606 
2607 bool
2608 pcie_aspm_enabled(struct pci_dev *pdev)
2609 {
2610 	pci_chipset_tag_t	pc = pdev->pc;
2611 	pcitag_t		tag = pdev->tag;
2612 	int			pos;
2613 	pcireg_t		lcsr;
2614 
2615 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2616 	    &pos, NULL))
2617 		return false;
2618 
2619 	lcsr = pci_conf_read(pc, tag, pos + PCI_PCIE_LCSR);
2620 	if ((lcsr & (PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1)) != 0)
2621 		return true;
2622 
2623 	return false;
2624 }
2625 
2626 int
2627 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
2628     int sync, void *key)
2629 {
2630 	wakeup(wqe);
2631 	if (wqe->private)
2632 		wake_up_process(wqe->private);
2633 	list_del_init(&wqe->entry);
2634 	return 0;
2635 }
2636 
2637 static wait_queue_head_t bit_waitq;
2638 wait_queue_head_t var_waitq;
2639 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
2640 
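/*
 * Bit waiting: sleep on the address of the word until the bit clears;
 * wake_up_bit() wakes anyone sleeping on that word, with both sides
 * serializing on wait_bit_mtx.  Returns 0 once the bit is clear, 1 if
 * the sleep was interrupted (or, for the _timeout variant, timed out).
 */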
2641 int
2642 wait_on_bit(unsigned long *word, int bit, unsigned mode)
2643 {
2644 	int err;
2645 
2646 	if (!test_bit(bit, word))
2647 		return 0;
2648 
2649 	mtx_enter(&wait_bit_mtx);
2650 	while (test_bit(bit, word)) {
2651 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
2652 		    INFSLP);
2653 		if (err) {
2654 			mtx_leave(&wait_bit_mtx);
2655 			return 1;
2656 		}
2657 	}
2658 	mtx_leave(&wait_bit_mtx);
2659 	return 0;
2660 }
2661 
2662 int
2663 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
2664 {
2665 	int err;
2666 
2667 	if (!test_bit(bit, word))
2668 		return 0;
2669 
2670 	mtx_enter(&wait_bit_mtx);
2671 	while (test_bit(bit, word)) {
2672 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
2673 		if (err) {
2674 			mtx_leave(&wait_bit_mtx);
2675 			return 1;
2676 		}
2677 	}
2678 	mtx_leave(&wait_bit_mtx);
2679 	return 0;
2680 }
2681 
2682 void
2683 wake_up_bit(void *word, int bit)
2684 {
2685 	mtx_enter(&wait_bit_mtx);
2686 	wakeup(word);
2687 	mtx_leave(&wait_bit_mtx);
2688 }
2689 
2690 void
2691 clear_and_wake_up_bit(int bit, void *word)
2692 {
2693 	clear_bit(bit, word);
2694 	wake_up_bit(word, bit);
2695 }
2696 
2697 wait_queue_head_t *
2698 bit_waitqueue(void *word, int bit)
2699 {
2700 	/* XXX hash table of wait queues? */
2701 	return &bit_waitq;
2702 }
2703 
2704 wait_queue_head_t *
2705 __var_waitqueue(void *p)
2706 {
2707 	/* XXX hash table of wait queues? */
2708 	return &bit_waitq;
2709 }
2710 
2711 struct workqueue_struct *system_wq;
2712 struct workqueue_struct *system_highpri_wq;
2713 struct workqueue_struct *system_unbound_wq;
2714 struct workqueue_struct *system_long_wq;
2715 struct taskq *taskletq;
2716 
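/*
 * One-time setup for the Linux compatibility layer: the system_*
 * workqueues are backed by 4-thread IPL_HIGH taskqs, tasklets get a
 * dedicated single-threaded taskq, and the wait queues, idr pool and
 * kmap_atomic VA used elsewhere in this file are initialized here.
 */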
2717 void
2718 drm_linux_init(void)
2719 {
2720 	system_wq = (struct workqueue_struct *)
2721 	    taskq_create("drmwq", 4, IPL_HIGH, 0);
2722 	system_highpri_wq = (struct workqueue_struct *)
2723 	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
2724 	system_unbound_wq = (struct workqueue_struct *)
2725 	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
2726 	system_long_wq = (struct workqueue_struct *)
2727 	    taskq_create("drmlwq", 4, IPL_HIGH, 0);
2728 
2729 	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
2730 
2731 	init_waitqueue_head(&bit_waitq);
2732 	init_waitqueue_head(&var_waitq);
2733 
2734 	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
2735 	    "idrpl", NULL);
2736 
2737 	kmap_atomic_va =
2738 	    (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
2739 }
2740 
2741 void
2742 drm_linux_exit(void)
2743 {
2744 	pool_destroy(&idr_pool);
2745 
2746 	taskq_destroy(taskletq);
2747 
2748 	taskq_destroy((struct taskq *)system_long_wq);
2749 	taskq_destroy((struct taskq *)system_unbound_wq);
2750 	taskq_destroy((struct taskq *)system_highpri_wq);
2751 	taskq_destroy((struct taskq *)system_wq);
2752 }
2753 
2754 #define PCIE_ECAP_RESIZE_BAR	0x15
2755 #define RBCAP0			0x04
2756 #define RBCTRL0			0x08
2757 #define RBCTRL_BARINDEX_MASK	0x07
2758 #define RBCTRL_BARSIZE_MASK	0x1f00
2759 #define RBCTRL_BARSIZE_SHIFT	8
2760 
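/*
 * Resizable BAR: walk the PCIe extended capability list for the Resize
 * BAR capability, verify the requested size is advertised in RBCAP0,
 * and program it into RBCTRL0.  Only BAR 0 is supported here.
 */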
2761 /* size in MB is 1 << nsize */
2762 int
2763 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
2764 {
2765 	pcireg_t	reg;
2766 	uint32_t	offset, capid;
2767 
2768 	KASSERT(bar == 0);
2769 
2770 	offset = PCI_PCIE_ECAP;
2771 
2772 	/* search PCI Express Extended Capabilities */
2773 	do {
2774 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
2775 		capid = PCI_PCIE_ECAP_ID(reg);
2776 		if (capid == PCIE_ECAP_RESIZE_BAR)
2777 			break;
2778 		offset = PCI_PCIE_ECAP_NEXT(reg);
2779 	} while (capid != 0);
2780 
2781 	if (capid == 0) {
2782 		printf("%s: could not find resize bar cap!\n", __func__);
2783 		return -ENOTSUP;
2784 	}
2785 
2786 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
2787 
2788 	if ((reg & (1 << (nsize + 4))) == 0) {
2789 		printf("%s size not supported\n", __func__);
2790 		return -ENOTSUP;
2791 	}
2792 
2793 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2794 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2795 		printf("%s BAR index not 0\n", __func__);
2796 		return -EINVAL;
2797 	}
2798 
2799 	reg &= ~RBCTRL_BARSIZE_MASK;
2800 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2801 
2802 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2803 
2804 	return 0;
2805 }
2806 
2807 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2808 
2809 int
2810 register_shrinker(struct shrinker *shrinker, const char *format, ...)
2811 {
2812 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2813 	return 0;
2814 }
2815 
2816 void
2817 unregister_shrinker(struct shrinker *shrinker)
2818 {
2819 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2820 }
2821 
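/*
 * Memory pressure hook: ask each registered shrinker to scan and
 * release objects until roughly npages pages have been reclaimed or
 * the shrinker list is exhausted.
 */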
2822 void
2823 drmbackoff(long npages)
2824 {
2825 	struct shrink_control sc;
2826 	struct shrinker *shrinker;
2827 	u_long ret;
2828 
2829 	shrinker = TAILQ_FIRST(&shrinkers);
2830 	while (shrinker && npages > 0) {
2831 		sc.nr_to_scan = npages;
2832 		ret = shrinker->scan_objects(shrinker, &sc);
2833 		npages -= ret;
2834 		shrinker = TAILQ_NEXT(shrinker, next);
2835 	}
2836 }
2837 
2838 void *
2839 bitmap_zalloc(u_int n, gfp_t flags)
2840 {
2841 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2842 }
2843 
2844 void
2845 bitmap_free(void *p)
2846 {
2847 	kfree(p);
2848 }
2849 
2850 int
2851 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2852 {
2853 	if (atomic_add_unless(v, -1, 1))
2854 		return 0;
2855 
2856 	rw_enter_write(lock);
2857 	if (atomic_dec_return(v) == 0)
2858 		return 1;
2859 	rw_exit_write(lock);
2860 	return 0;
2861 }
2862 
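/*
 * Linux printk() levels arrive as a leading SOH ('\001') byte followed
 * by a digit.  Strip that prefix and, unless built with DRMDEBUG, drop
 * messages at KERN_INFO (6) or numerically higher (less severe) levels.
 */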
2863 int
2864 printk(const char *fmt, ...)
2865 {
2866 	int ret, level;
2867 	va_list ap;
2868 
2869 	if (fmt != NULL && *fmt == '\001') {
2870 		level = fmt[1];
2871 #ifndef DRMDEBUG
2872 		if (level >= KERN_INFO[1] && level <= '9')
2873 			return 0;
2874 #endif
2875 		fmt += 2;
2876 	}
2877 
2878 	va_start(ap, fmt);
2879 	ret = vprintf(fmt, ap);
2880 	va_end(ap);
2881 
2882 	return ret;
2883 }
2884 
2885 #define START(node) ((node)->start)
2886 #define LAST(node) ((node)->last)
2887 
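/*
 * Minimal interval tree on top of a cached rbtree.  Nodes are ordered
 * by start address only, so interval_tree_iter_first() does a linear
 * scan for an overlap instead of the augmented-subtree search the
 * Linux implementation uses.
 */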
2888 struct interval_tree_node *
2889 interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
2890     unsigned long last)
2891 {
2892 	struct interval_tree_node *node;
2893 	struct rb_node *rb;
2894 
2895 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
2896 		node = rb_entry(rb, typeof(*node), rb);
2897 		if (LAST(node) >= start && START(node) <= last)
2898 			return node;
2899 	}
2900 	return NULL;
2901 }
2902 
2903 void
2904 interval_tree_remove(struct interval_tree_node *node,
2905     struct rb_root_cached *root)
2906 {
2907 	rb_erase_cached(&node->rb, root);
2908 }
2909 
2910 void
2911 interval_tree_insert(struct interval_tree_node *node,
2912     struct rb_root_cached *root)
2913 {
2914 	struct rb_node **iter = &root->rb_root.rb_node;
2915 	struct rb_node *parent = NULL;
2916 	struct interval_tree_node *iter_node;
2917 
2918 	while (*iter) {
2919 		parent = *iter;
2920 		iter_node = rb_entry(*iter, struct interval_tree_node, rb);
2921 
2922 		if (node->start < iter_node->start)
2923 			iter = &(*iter)->rb_left;
2924 		else
2925 			iter = &(*iter)->rb_right;
2926 	}
2927 
2928 	rb_link_node(&node->rb, parent, iter);
2929 	rb_insert_color_cached(&node->rb, root, false);
2930 }
2931 
2932 int
2933 syncfile_read(struct file *fp, struct uio *uio, int fflags)
2934 {
2935 	return ENXIO;
2936 }
2937 
2938 int
2939 syncfile_write(struct file *fp, struct uio *uio, int fflags)
2940 {
2941 	return ENXIO;
2942 }
2943 
2944 int
2945 syncfile_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2946 {
2947 	return ENOTTY;
2948 }
2949 
2950 int
2951 syncfile_kqfilter(struct file *fp, struct knote *kn)
2952 {
2953 	return EINVAL;
2954 }
2955 
2956 int
2957 syncfile_stat(struct file *fp, struct stat *st, struct proc *p)
2958 {
2959 	memset(st, 0, sizeof(*st));
2960 	st->st_mode = S_IFIFO;	/* XXX */
2961 	return 0;
2962 }
2963 
2964 int
2965 syncfile_close(struct file *fp, struct proc *p)
2966 {
2967 	struct sync_file *sf = fp->f_data;
2968 
2969 	dma_fence_put(sf->fence);
2970 	fp->f_data = NULL;
2971 	free(sf, M_DRM, sizeof(struct sync_file));
2972 	return 0;
2973 }
2974 
2975 int
2976 syncfile_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2977 {
2978 	off_t newoff;
2979 
2980 	if (*offset != 0)
2981 		return EINVAL;
2982 
2983 	switch (whence) {
2984 	case SEEK_SET:
2985 		newoff = 0;
2986 		break;
2987 	case SEEK_END:
2988 		newoff = 0;
2989 		break;
2990 	default:
2991 		return EINVAL;
2992 	}
2993 	mtx_enter(&fp->f_mtx);
2994 	fp->f_offset = newoff;
2995 	mtx_leave(&fp->f_mtx);
2996 	*offset = newoff;
2997 	return 0;
2998 }
2999 
3000 const struct fileops syncfileops = {
3001 	.fo_read	= syncfile_read,
3002 	.fo_write	= syncfile_write,
3003 	.fo_ioctl	= syncfile_ioctl,
3004 	.fo_kqfilter	= syncfile_kqfilter,
3005 	.fo_stat	= syncfile_stat,
3006 	.fo_close	= syncfile_close,
3007 	.fo_seek	= syncfile_seek,
3008 };
3009 
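/*
 * Linux-style file descriptor helpers: get_unused_fd_flags() reserves
 * a descriptor, fd_install() publishes the file under it (always
 * close-on-exec here), and put_unused_fd() gives back a slot that was
 * never used.  fd_install() and fput() only act on DTYPE_SYNC files.
 */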
3010 void
3011 fd_install(int fd, struct file *fp)
3012 {
3013 	struct proc *p = curproc;
3014 	struct filedesc *fdp = p->p_fd;
3015 
3016 	if (fp->f_type != DTYPE_SYNC)
3017 		return;
3018 
3019 	fdplock(fdp);
3020 	/* all callers use get_unused_fd_flags(O_CLOEXEC) */
3021 	fdinsert(fdp, fd, UF_EXCLOSE, fp);
3022 	fdpunlock(fdp);
3023 }
3024 
3025 void
3026 fput(struct file *fp)
3027 {
3028 	if (fp->f_type != DTYPE_SYNC)
3029 		return;
3030 
3031 	FRELE(fp, curproc);
3032 }
3033 
3034 int
3035 get_unused_fd_flags(unsigned int flags)
3036 {
3037 	struct proc *p = curproc;
3038 	struct filedesc *fdp = p->p_fd;
3039 	int error, fd;
3040 
3041 	KASSERT((flags & O_CLOEXEC) != 0);
3042 
3043 	fdplock(fdp);
3044 retryalloc:
3045 	if ((error = fdalloc(p, 0, &fd)) != 0) {
3046 		if (error == ENOSPC) {
3047 			fdexpand(p);
3048 			goto retryalloc;
3049 		}
3050 		fdpunlock(fdp);
3051 		return -1;
3052 	}
3053 	fdpunlock(fdp);
3054 
3055 	return fd;
3056 }
3057 
3058 void
3059 put_unused_fd(int fd)
3060 {
3061 	struct filedesc *fdp = curproc->p_fd;
3062 
3063 	fdplock(fdp);
3064 	fdremove(fdp, fd);
3065 	fdpunlock(fdp);
3066 }
3067 
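/*
 * sync_file_get_fence() returns a referenced fence from a sync-file
 * descriptor; sync_file_create() below goes the other way, wrapping a
 * fence in a new DTYPE_SYNC file that callers typically export with
 * get_unused_fd_flags() and fd_install().
 */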
3068 struct dma_fence *
3069 sync_file_get_fence(int fd)
3070 {
3071 	struct proc *p = curproc;
3072 	struct filedesc *fdp = p->p_fd;
3073 	struct file *fp;
3074 	struct sync_file *sf;
3075 	struct dma_fence *f;
3076 
3077 	if ((fp = fd_getfile(fdp, fd)) == NULL)
3078 		return NULL;
3079 
3080 	if (fp->f_type != DTYPE_SYNC) {
3081 		FRELE(fp, p);
3082 		return NULL;
3083 	}
3084 	sf = fp->f_data;
3085 	f = dma_fence_get(sf->fence);
3086 	FRELE(sf->file, p);
3087 	return f;
3088 }
3089 
3090 struct sync_file *
3091 sync_file_create(struct dma_fence *fence)
3092 {
3093 	struct proc *p = curproc;
3094 	struct sync_file *sf;
3095 	struct file *fp;
3096 
3097 	fp = fnew(p);
3098 	if (fp == NULL)
3099 		return NULL;
3100 	fp->f_type = DTYPE_SYNC;
3101 	fp->f_ops = &syncfileops;
3102 	sf = malloc(sizeof(struct sync_file), M_DRM, M_WAITOK | M_ZERO);
3103 	sf->file = fp;
3104 	sf->fence = dma_fence_get(fence);
3105 	fp->f_data = sf;
3106 	return sf;
3107 }
3108 
3109 bool
3110 drm_firmware_drivers_only(void)
3111 {
3112 	return false;
3113 }
3114