xref: /openbsd-src/sys/dev/pci/drm/drm_linux.c (revision 8550894424f8a4aa4aafb6cd57229dd6ed7cd9dd)
1 /*	$OpenBSD: drm_linux.c,v 1.95 2023/01/01 01:34:34 jsg Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30 
31 #include <dev/pci/ppbreg.h>
32 
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/dma-fence-chain.h>
39 #include <linux/interrupt.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/scatterlist.h>
43 #include <linux/i2c.h>
44 #include <linux/pci.h>
45 #include <linux/notifier.h>
46 #include <linux/backlight.h>
47 #include <linux/shrinker.h>
48 #include <linux/fb.h>
49 #include <linux/xarray.h>
50 #include <linux/interval_tree.h>
51 #include <linux/kthread.h>
52 #include <linux/processor.h>
53 #include <linux/sync_file.h>
54 
55 #include <drm/drm_device.h>
56 #include <drm/drm_connector.h>
57 #include <drm/drm_print.h>
58 
59 #if defined(__amd64__) || defined(__i386__)
60 #include "bios.h"
61 #endif
62 
63 /* may be called from a context where sleeping is allowed; we just busy-wait */
64 void
65 tasklet_unlock_wait(struct tasklet_struct *ts)
66 {
67 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
68 		cpu_relax();
69 }
70 
71 /* must not sleep (may be called with locks held); busy-wait here as well */
72 void
73 tasklet_unlock_spin_wait(struct tasklet_struct *ts)
74 {
75 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
76 		cpu_relax();
77 }
78 
79 void
80 tasklet_run(void *arg)
81 {
82 	struct tasklet_struct *ts = arg;
83 
84 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
85 	if (tasklet_trylock(ts)) {
86 		if (!atomic_read(&ts->count)) {
87 			if (ts->use_callback)
88 				ts->callback(ts);
89 			else
90 				ts->func(ts->data);
91 		}
92 		tasklet_unlock(ts);
93 	}
94 }
95 
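/*
 * Illustrative sketch (assumed caller-side usage, not code from this
 * file): drivers bind a handler with tasklet_setup() and defer it with
 * tasklet_schedule(), both assumed to live in the Linux compat headers;
 * tasklet_run() above is what eventually invokes either the modern
 * callback or the legacy func/data pair.
 *
 *	static void
 *	my_deferred(struct tasklet_struct *ts)
 *	{
 *		struct my_softc *sc = container_of(ts, struct my_softc, sc_ts);
 *		...
 *	}
 *
 *	tasklet_setup(&sc->sc_ts, my_deferred);
 *	tasklet_schedule(&sc->sc_ts);
 */
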
96 /* 32 bit powerpc lacks 64 bit atomics */
97 #if defined(__powerpc__) && !defined(__powerpc64__)
98 struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
99 #endif
100 
101 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
102 volatile struct proc *sch_proc;
103 volatile void *sch_ident;
104 int sch_priority;
105 
106 void
107 set_current_state(int state)
108 {
109 	if (sch_ident != curproc)
110 		mtx_enter(&sch_mtx);
111 	MUTEX_ASSERT_LOCKED(&sch_mtx);
112 	sch_ident = sch_proc = curproc;
113 	sch_priority = state;
114 }
115 
116 void
117 __set_current_state(int state)
118 {
119 	KASSERT(state == TASK_RUNNING);
120 	if (sch_ident == curproc) {
121 		MUTEX_ASSERT_LOCKED(&sch_mtx);
122 		sch_ident = NULL;
123 		mtx_leave(&sch_mtx);
124 	}
125 }
126 
127 void
128 schedule(void)
129 {
130 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
131 }
132 
133 long
134 schedule_timeout(long timeout)
135 {
136 	struct sleep_state sls;
137 	unsigned long deadline;
138 	int wait, spl, timo = 0;
139 
140 	MUTEX_ASSERT_LOCKED(&sch_mtx);
141 	KASSERT(!cold);
142 
143 	if (timeout != MAX_SCHEDULE_TIMEOUT)
144 		timo = timeout;
145 	sleep_setup(&sls, sch_ident, sch_priority, "schto", timo);
146 
147 	wait = (sch_proc == curproc && timeout > 0);
148 
149 	spl = MUTEX_OLDIPL(&sch_mtx);
150 	MUTEX_OLDIPL(&sch_mtx) = splsched();
151 	mtx_leave(&sch_mtx);
152 
153 	if (timeout != MAX_SCHEDULE_TIMEOUT)
154 		deadline = jiffies + timeout;
155 	sleep_finish(&sls, wait);
156 	if (timeout != MAX_SCHEDULE_TIMEOUT)
157 		timeout = deadline - jiffies;
158 
159 	mtx_enter(&sch_mtx);
160 	MUTEX_OLDIPL(&sch_mtx) = spl;
161 	sch_ident = curproc;
162 
163 	return timeout > 0 ? timeout : 0;
164 }
165 
166 long
167 schedule_timeout_uninterruptible(long timeout)
168 {
169 	tsleep(curproc, PWAIT, "schtou", timeout);
170 	return 0;
171 }
172 
173 int
174 wake_up_process(struct proc *p)
175 {
176 	atomic_cas_ptr(&sch_proc, p, NULL);
177 	return wakeup_proc(p, NULL);
178 }
179 
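/*
 * The helpers above emulate the Linux scheduler calls used by drm wait
 * queues.  A sketch of the assumed caller-side pattern
 * (TASK_UNINTERRUPTIBLE/TASK_RUNNING come from the compat headers):
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	while (!condition && timeout > 0)
 *		timeout = schedule_timeout(timeout);
 *	__set_current_state(TASK_RUNNING);
 *
 * set_current_state() records curproc and takes sch_mtx,
 * schedule_timeout() sleeps on sch_ident and returns the jiffies left
 * (0 on timeout), and wake_up_process() from another thread clears
 * sch_proc so the sleeper wakes early.
 */
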
180 void
181 flush_workqueue(struct workqueue_struct *wq)
182 {
183 	if (cold)
184 		return;
185 
186 	if (wq)
187 		taskq_barrier((struct taskq *)wq);
188 }
189 
190 bool
191 flush_work(struct work_struct *work)
192 {
193 	if (cold)
194 		return false;
195 
196 	if (work->tq)
197 		taskq_barrier(work->tq);
198 	return false;
199 }
200 
201 bool
202 flush_delayed_work(struct delayed_work *dwork)
203 {
204 	bool ret = false;
205 
206 	if (cold)
207 		return false;
208 
209 	while (timeout_pending(&dwork->to)) {
210 		tsleep(dwork, PWAIT, "fldwto", 1);
211 		ret = true;
212 	}
213 
214 	if (dwork->tq)
215 		taskq_barrier(dwork->tq);
216 	return ret;
217 }
218 
219 struct kthread {
220 	int (*func)(void *);
221 	void *data;
222 	struct proc *proc;
223 	volatile u_int flags;
224 #define KTHREAD_SHOULDSTOP	0x0000001
225 #define KTHREAD_STOPPED		0x0000002
226 #define KTHREAD_SHOULDPARK	0x0000004
227 #define KTHREAD_PARKED		0x0000008
228 	LIST_ENTRY(kthread) next;
229 };
230 
231 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
232 
233 void
234 kthread_func(void *arg)
235 {
236 	struct kthread *thread = arg;
237 	int ret;
238 
239 	ret = thread->func(thread->data);
240 	thread->flags |= KTHREAD_STOPPED;
241 	wakeup(thread);
242 	kthread_exit(ret);
243 }
244 
245 struct proc *
246 kthread_run(int (*func)(void *), void *data, const char *name)
247 {
248 	struct kthread *thread;
249 
250 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
251 	thread->func = func;
252 	thread->data = data;
253 	thread->flags = 0;
254 
255 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
256 		free(thread, M_DRM, sizeof(*thread));
257 		return ERR_PTR(-ENOMEM);
258 	}
259 
260 	LIST_INSERT_HEAD(&kthread_list, thread, next);
261 	return thread->proc;
262 }
263 
264 struct kthread_worker *
265 kthread_create_worker(unsigned int flags, const char *fmt, ...)
266 {
267 	char name[MAXCOMLEN+1];
268 	va_list ap;
269 
270 	struct kthread_worker *w = malloc(sizeof(*w), M_DRM, M_WAITOK);
271 	va_start(ap, fmt);
272 	vsnprintf(name, sizeof(name), fmt, ap);
273 	va_end(ap);
274 	w->tq = taskq_create(name, 1, IPL_HIGH, 0);
275 
276 	return w;
277 }
278 
279 void
280 kthread_destroy_worker(struct kthread_worker *worker)
281 {
282 	taskq_destroy(worker->tq);
283 	free(worker, M_DRM, sizeof(*worker));
285 }
286 
287 void
288 kthread_init_work(struct kthread_work *work, void (*func)(struct kthread_work *))
289 {
290 	work->tq = NULL;
291 	task_set(&work->task, (void (*)(void *))func, work);
292 }
293 
294 bool
295 kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
296 {
297 	work->tq = worker->tq;
298 	return task_add(work->tq, &work->task);
299 }
300 
301 bool
302 kthread_cancel_work_sync(struct kthread_work *work)
303 {
304 	return task_del(work->tq, &work->task);
305 }
306 
307 void
308 kthread_flush_work(struct kthread_work *work)
309 {
310 	if (cold)
311 		return;
312 
313 	if (work->tq)
314 		taskq_barrier(work->tq);
315 }
316 
317 void
318 kthread_flush_worker(struct kthread_worker *worker)
319 {
320 	if (cold)
321 		return;
322 
323 	if (worker->tq)
324 		taskq_barrier(worker->tq);
325 }
326 
327 struct kthread *
328 kthread_lookup(struct proc *p)
329 {
330 	struct kthread *thread;
331 
332 	LIST_FOREACH(thread, &kthread_list, next) {
333 		if (thread->proc == p)
334 			break;
335 	}
336 	KASSERT(thread);
337 
338 	return thread;
339 }
340 
341 int
342 kthread_should_park(void)
343 {
344 	struct kthread *thread = kthread_lookup(curproc);
345 	return (thread->flags & KTHREAD_SHOULDPARK);
346 }
347 
348 void
349 kthread_parkme(void)
350 {
351 	struct kthread *thread = kthread_lookup(curproc);
352 
353 	while (thread->flags & KTHREAD_SHOULDPARK) {
354 		thread->flags |= KTHREAD_PARKED;
355 		wakeup(thread);
356 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
357 		thread->flags &= ~KTHREAD_PARKED;
358 	}
359 }
360 
361 void
362 kthread_park(struct proc *p)
363 {
364 	struct kthread *thread = kthread_lookup(p);
365 
366 	while ((thread->flags & KTHREAD_PARKED) == 0) {
367 		thread->flags |= KTHREAD_SHOULDPARK;
368 		wake_up_process(thread->proc);
369 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
370 	}
371 }
372 
373 void
374 kthread_unpark(struct proc *p)
375 {
376 	struct kthread *thread = kthread_lookup(p);
377 
378 	thread->flags &= ~KTHREAD_SHOULDPARK;
379 	wakeup(thread);
380 }
381 
382 int
383 kthread_should_stop(void)
384 {
385 	struct kthread *thread = kthread_lookup(curproc);
386 	return (thread->flags & KTHREAD_SHOULDSTOP);
387 }
388 
389 void
390 kthread_stop(struct proc *p)
391 {
392 	struct kthread *thread = kthread_lookup(p);
393 
394 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
395 		thread->flags |= KTHREAD_SHOULDSTOP;
396 		kthread_unpark(p);
397 		wake_up_process(thread->proc);
398 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
399 	}
400 	LIST_REMOVE(thread, next);
401 	free(thread, M_DRM, sizeof(*thread));
402 }
403 
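/*
 * Sketch of the kthread life cycle these wrappers provide (the thread
 * function and softc names are illustrative):
 *
 *	static int
 *	my_thread(void *arg)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			...
 *		}
 *		return 0;
 *	}
 *
 *	sc->sc_proc = kthread_run(my_thread, sc, "mythr");
 *	...
 *	kthread_park(sc->sc_proc);
 *	kthread_unpark(sc->sc_proc);
 *	kthread_stop(sc->sc_proc);
 *
 * kthread_park() waits until the thread reaches kthread_parkme();
 * kthread_stop() wakes it, waits for kthread_func() to mark it
 * stopped, and frees the bookkeeping structure.
 */
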
404 #if NBIOS > 0
405 extern char smbios_board_vendor[];
406 extern char smbios_board_prod[];
407 extern char smbios_board_serial[];
408 #endif
409 
410 bool
411 dmi_match(int slot, const char *str)
412 {
413 	switch (slot) {
414 	case DMI_SYS_VENDOR:
415 		if (hw_vendor != NULL &&
416 		    !strcmp(hw_vendor, str))
417 			return true;
418 		break;
419 	case DMI_PRODUCT_NAME:
420 		if (hw_prod != NULL &&
421 		    !strcmp(hw_prod, str))
422 			return true;
423 		break;
424 	case DMI_PRODUCT_VERSION:
425 		if (hw_ver != NULL &&
426 		    !strcmp(hw_ver, str))
427 			return true;
428 		break;
429 #if NBIOS > 0
430 	case DMI_BOARD_VENDOR:
431 		if (strcmp(smbios_board_vendor, str) == 0)
432 			return true;
433 		break;
434 	case DMI_BOARD_NAME:
435 		if (strcmp(smbios_board_prod, str) == 0)
436 			return true;
437 		break;
438 	case DMI_BOARD_SERIAL:
439 		if (strcmp(smbios_board_serial, str) == 0)
440 			return true;
441 		break;
442 #else
443 	case DMI_BOARD_VENDOR:
444 		if (hw_vendor != NULL &&
445 		    !strcmp(hw_vendor, str))
446 			return true;
447 		break;
448 	case DMI_BOARD_NAME:
449 		if (hw_prod != NULL &&
450 		    !strcmp(hw_prod, str))
451 			return true;
452 		break;
453 #endif
454 	case DMI_NONE:
455 	default:
456 		return false;
457 	}
458 
459 	return false;
460 }
461 
462 static bool
463 dmi_found(const struct dmi_system_id *dsi)
464 {
465 	int i, slot;
466 
467 	for (i = 0; i < nitems(dsi->matches); i++) {
468 		slot = dsi->matches[i].slot;
469 		if (slot == DMI_NONE)
470 			break;
471 		if (!dmi_match(slot, dsi->matches[i].substr))
472 			return false;
473 	}
474 
475 	return true;
476 }
477 
478 const struct dmi_system_id *
479 dmi_first_match(const struct dmi_system_id *sysid)
480 {
481 	const struct dmi_system_id *dsi;
482 
483 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
484 		if (dmi_found(dsi))
485 			return dsi;
486 	}
487 
488 	return NULL;
489 }
490 
491 #if NBIOS > 0
492 extern char smbios_bios_date[];
493 #endif
494 
495 const char *
496 dmi_get_system_info(int slot)
497 {
498 	WARN_ON(slot != DMI_BIOS_DATE);
499 #if NBIOS > 0
500 	if (slot == DMI_BIOS_DATE)
501 		return smbios_bios_date;
502 #endif
503 	return NULL;
504 }
505 
506 int
507 dmi_check_system(const struct dmi_system_id *sysid)
508 {
509 	const struct dmi_system_id *dsi;
510 	int num = 0;
511 
512 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
513 		if (dmi_found(dsi)) {
514 			num++;
515 			if (dsi->callback && dsi->callback(dsi))
516 				break;
517 		}
518 	}
519 	return (num);
520 }
521 
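/*
 * Sketch of the quirk-table layout dmi_check_system() and
 * dmi_first_match() walk; DMI_MATCH() and the .ident field are assumed
 * to come from the compat headers, and the values are made up:
 *
 *	static const struct dmi_system_id quirks[] = {
 *		{
 *			.callback = my_quirk_cb,
 *			.ident = "Some Laptop",
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "VENDOR"),
 *				DMI_MATCH(DMI_PRODUCT_NAME, "PRODUCT"),
 *			},
 *		},
 *		{ }
 *	};
 *
 *	dmi_check_system(quirks);
 *
 * The walk stops at the first entry whose matches[0].slot is zero, so
 * the table must end with an all-zero terminator.
 */
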
522 struct vm_page *
523 alloc_pages(unsigned int gfp_mask, unsigned int order)
524 {
525 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
526 	struct uvm_constraint_range *constraint = &no_constraint;
527 	struct pglist mlist;
528 
529 	if (gfp_mask & M_CANFAIL)
530 		flags |= UVM_PLA_FAILOK;
531 	if (gfp_mask & M_ZERO)
532 		flags |= UVM_PLA_ZERO;
533 	if (gfp_mask & __GFP_DMA32)
534 		constraint = &dma_constraint;
535 
536 	TAILQ_INIT(&mlist);
537 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
538 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
539 		return NULL;
540 	return TAILQ_FIRST(&mlist);
541 }
542 
543 void
544 __free_pages(struct vm_page *page, unsigned int order)
545 {
546 	struct pglist mlist;
547 	int i;
548 
549 	TAILQ_INIT(&mlist);
550 	for (i = 0; i < (1 << order); i++)
551 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
552 	uvm_pglistfree(&mlist);
553 }
554 
555 void
556 __pagevec_release(struct pagevec *pvec)
557 {
558 	struct pglist mlist;
559 	int i;
560 
561 	TAILQ_INIT(&mlist);
562 	for (i = 0; i < pvec->nr; i++)
563 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
564 	uvm_pglistfree(&mlist);
565 	pagevec_reinit(pvec);
566 }
567 
568 static struct kmem_va_mode kv_physwait = {
569 	.kv_map = &phys_map,
570 	.kv_wait = 1,
571 };
572 
573 void *
574 kmap(struct vm_page *pg)
575 {
576 	vaddr_t va;
577 
578 #if defined (__HAVE_PMAP_DIRECT)
579 	va = pmap_map_direct(pg);
580 #else
581 	va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
582 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
583 	pmap_update(pmap_kernel());
584 #endif
585 	return (void *)va;
586 }
587 
588 void
589 kunmap_va(void *addr)
590 {
591 	vaddr_t va = (vaddr_t)addr;
592 
593 #if defined (__HAVE_PMAP_DIRECT)
594 	pmap_unmap_direct(va);
595 #else
596 	pmap_kremove(va, PAGE_SIZE);
597 	pmap_update(pmap_kernel());
598 	km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
599 #endif
600 }
601 
602 vaddr_t kmap_atomic_va;
603 int kmap_atomic_inuse;
604 
605 void *
606 kmap_atomic_prot(struct vm_page *pg, pgprot_t prot)
607 {
608 	KASSERT(!kmap_atomic_inuse);
609 
610 	kmap_atomic_inuse = 1;
611 	pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot,
612 	    PROT_READ | PROT_WRITE);
613 	return (void *)kmap_atomic_va;
614 }
615 
616 void
617 kunmap_atomic(void *addr)
618 {
619 	KASSERT(kmap_atomic_inuse);
620 
621 	pmap_kremove(kmap_atomic_va, PAGE_SIZE);
622 	kmap_atomic_inuse = 0;
623 }
624 
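/*
 * Pairing sketch for the mapping helpers above: kmap()/kunmap_va() may
 * sleep and multiple mappings can coexist, while the single
 * kmap_atomic_va slot means only one kmap_atomic_prot() mapping may be
 * live at a time (enforced by the KASSERT).
 *
 *	void *va = kmap(pg);
 *	memcpy(va, src, PAGE_SIZE);
 *	kunmap_va(va);
 */
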
625 void *
626 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
627      pgprot_t prot)
628 {
629 	vaddr_t va;
630 	paddr_t pa;
631 	int i;
632 
633 	va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
634 	    &kd_nowait);
635 	if (va == 0)
636 		return NULL;
637 	for (i = 0; i < npages; i++) {
638 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
639 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
640 		    PROT_READ | PROT_WRITE,
641 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
642 		pmap_update(pmap_kernel());
643 	}
644 
645 	return (void *)va;
646 }
647 
648 void
649 vunmap(void *addr, size_t size)
650 {
651 	vaddr_t va = (vaddr_t)addr;
652 
653 	pmap_remove(pmap_kernel(), va, va + size);
654 	pmap_update(pmap_kernel());
655 	km_free((void *)va, size, &kv_any, &kp_none);
656 }
657 
658 bool
659 is_vmalloc_addr(const void *p)
660 {
661 	vaddr_t min, max, addr;
662 
663 	min = vm_map_min(kernel_map);
664 	max = vm_map_max(kernel_map);
665 	addr = (vaddr_t)p;
666 
667 	if (addr >= min && addr <= max)
668 		return true;
669 	else
670 		return false;
671 }
672 
673 void
674 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
675     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
676 {
677 	const uint8_t *cbuf = buf;
678 	int i;
679 
680 	for (i = 0; i < len; i++) {
681 		if ((i % rowsize) == 0)
682 			printf("%s", prefix_str);
683 		printf("%02x", cbuf[i]);
684 		if ((i % rowsize) == (rowsize - 1))
685 			printf("\n");
686 		else
687 			printf(" ");
688 	}
689 }
690 
691 void *
692 memchr_inv(const void *s, int c, size_t n)
693 {
694 	if (n != 0) {
695 		const unsigned char *p = s;
696 
697 		do {
698 			if (*p++ != (unsigned char)c)
699 				return ((void *)(p - 1));
700 		} while (--n != 0);
701 	}
702 	return (NULL);
703 }
704 
705 int
706 panic_cmp(struct rb_node *a, struct rb_node *b)
707 {
708 	panic(__func__);
709 }
710 
711 #undef RB_ROOT
712 #define RB_ROOT(head)	(head)->rbh_root
713 
714 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
715 
716 /*
717  * This is a fairly minimal implementation of the Linux "idr" API.  It
718  * probably isn't very efficient, and definitely isn't RCU safe.  The
719  * pre-load buffer is global instead of per-cpu; we rely on the kernel
720  * lock to make this work.  Randomized IDs (to make them harder to
721  * guess) are planned but currently disabled; see "notyet" in idr_alloc().
722  */
723 
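/*
 * Sketch of the usual caller pattern; GFP_KERNEL and idr_preload_end()
 * are assumed to be provided by the compat headers:
 *
 *	idr_init(&priv->object_idr);
 *
 *	idr_preload(GFP_KERNEL);
 *	handle = idr_alloc(&priv->object_idr, obj, 1, 0, GFP_NOWAIT);
 *	idr_preload_end();
 *	if (handle < 0)
 *		return handle;
 *	...
 *	obj = idr_find(&priv->object_idr, handle);
 *	idr_remove(&priv->object_idr, handle);
 *	idr_destroy(&priv->object_idr);
 *
 * An end of 0 (or anything non-positive) means "up to INT_MAX" here.
 */
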
724 int idr_cmp(struct idr_entry *, struct idr_entry *);
725 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
726 
727 struct pool idr_pool;
728 struct idr_entry *idr_entry_cache;
729 
730 void
731 idr_init(struct idr *idr)
732 {
733 	SPLAY_INIT(&idr->tree);
734 }
735 
736 void
737 idr_destroy(struct idr *idr)
738 {
739 	struct idr_entry *id;
740 
741 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
742 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
743 		pool_put(&idr_pool, id);
744 	}
745 }
746 
747 void
748 idr_preload(unsigned int gfp_mask)
749 {
750 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
751 
752 	KERNEL_ASSERT_LOCKED();
753 
754 	if (idr_entry_cache == NULL)
755 		idr_entry_cache = pool_get(&idr_pool, flags);
756 }
757 
758 int
759 idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
760 {
761 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
762 	struct idr_entry *id;
763 	int begin;
764 
765 	KERNEL_ASSERT_LOCKED();
766 
767 	if (idr_entry_cache) {
768 		id = idr_entry_cache;
769 		idr_entry_cache = NULL;
770 	} else {
771 		id = pool_get(&idr_pool, flags);
772 		if (id == NULL)
773 			return -ENOMEM;
774 	}
775 
776 	if (end <= 0)
777 		end = INT_MAX;
778 
779 #ifdef notyet
780 	id->id = begin = start + arc4random_uniform(end - start);
781 #else
782 	id->id = begin = start;
783 #endif
784 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
785 		if (id->id == end)
786 			id->id = start;
787 		else
788 			id->id++;
789 		if (id->id == begin) {
790 			pool_put(&idr_pool, id);
791 			return -ENOSPC;
792 		}
793 	}
794 	id->ptr = ptr;
795 	return id->id;
796 }
797 
798 void *
799 idr_replace(struct idr *idr, void *ptr, unsigned long id)
800 {
801 	struct idr_entry find, *res;
802 	void *old;
803 
804 	find.id = id;
805 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
806 	if (res == NULL)
807 		return ERR_PTR(-ENOENT);
808 	old = res->ptr;
809 	res->ptr = ptr;
810 	return old;
811 }
812 
813 void *
814 idr_remove(struct idr *idr, unsigned long id)
815 {
816 	struct idr_entry find, *res;
817 	void *ptr = NULL;
818 
819 	find.id = id;
820 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
821 	if (res) {
822 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
823 		ptr = res->ptr;
824 		pool_put(&idr_pool, res);
825 	}
826 	return ptr;
827 }
828 
829 void *
830 idr_find(struct idr *idr, unsigned long id)
831 {
832 	struct idr_entry find, *res;
833 
834 	find.id = id;
835 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
836 	if (res == NULL)
837 		return NULL;
838 	return res->ptr;
839 }
840 
841 void *
842 idr_get_next(struct idr *idr, int *id)
843 {
844 	struct idr_entry *res;
845 
846 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
847 		if (res->id >= *id) {
848 			*id = res->id;
849 			return res->ptr;
850 		}
851 	}
852 
853 	return NULL;
854 }
855 
856 int
857 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
858 {
859 	struct idr_entry *id;
860 	int ret;
861 
862 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
863 		ret = func(id->id, id->ptr, data);
864 		if (ret)
865 			return ret;
866 	}
867 
868 	return 0;
869 }
870 
871 int
872 idr_cmp(struct idr_entry *a, struct idr_entry *b)
873 {
874 	return (a->id < b->id ? -1 : a->id > b->id);
875 }
876 
877 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
878 
879 void
880 ida_init(struct ida *ida)
881 {
882 	idr_init(&ida->idr);
883 }
884 
885 void
886 ida_destroy(struct ida *ida)
887 {
888 	idr_destroy(&ida->idr);
889 }
890 
891 int
892 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
893     gfp_t gfp_mask)
894 {
895 	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
896 }
897 
898 void
899 ida_simple_remove(struct ida *ida, unsigned int id)
900 {
901 	idr_remove(&ida->idr, id);
902 }
903 
904 int
905 ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
906 {
907 	return idr_alloc(&ida->idr, NULL, min, INT_MAX, gfp);
908 }
909 
910 int
911 ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
912 {
913 	return idr_alloc(&ida->idr, NULL, 0, max + 1, gfp);	/* max is inclusive */
914 }
915 
916 void
917 ida_free(struct ida *ida, unsigned int id)
918 {
919 	idr_remove(&ida->idr, id);
920 }
921 
922 int
923 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
924 {
925 	return (a->id < b->id ? -1 : a->id > b->id);
926 }
927 
928 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
929 struct pool xa_pool;
930 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
931 
932 void
933 xa_init_flags(struct xarray *xa, gfp_t flags)
934 {
935 	static int initialized;
936 
937 	if (!initialized) {
938 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_NONE, 0,
939 		    "xapl", NULL);
940 		initialized = 1;
941 	}
942 	SPLAY_INIT(&xa->xa_tree);
943 	if (flags & XA_FLAGS_LOCK_IRQ)
944 		mtx_init(&xa->xa_lock, IPL_TTY);
945 	else
946 		mtx_init(&xa->xa_lock, IPL_NONE);
	xa->xa_flags = flags;	/* __xa_alloc() checks XA_FLAGS_ALLOC1 */
947 }
948 
949 void
950 xa_destroy(struct xarray *xa)
951 {
952 	struct xarray_entry *id;
953 
954 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
955 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
956 		pool_put(&xa_pool, id);
957 	}
958 }
959 
960 /* Don't allocate ids cyclically; always search from the start of the range. */
961 int
962 __xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
963 {
964 	struct xarray_entry *xid;
965 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
966 	int begin;
967 
968 	if (gfp & GFP_NOWAIT) {
969 		xid = pool_get(&xa_pool, PR_NOWAIT);
970 	} else {
971 		mtx_leave(&xa->xa_lock);
972 		xid = pool_get(&xa_pool, PR_WAITOK);
973 		mtx_enter(&xa->xa_lock);
974 	}
975 
976 	if (xid == NULL)
977 		return -ENOMEM;
978 
979 	if (limit <= 0)
980 		limit = INT_MAX;
981 
982 	xid->id = begin = start;
983 
984 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
985 		if (xid->id == limit)
986 			xid->id = start;
987 		else
988 			xid->id++;
989 		if (xid->id == begin) {
990 			pool_put(&xa_pool, xid);
991 			return -EBUSY;
992 		}
993 	}
994 	xid->ptr = entry;
995 	*id = xid->id;
996 	return 0;
997 }
998 
999 /*
1000  * Wrap ids and store the next id.
1001  * We walk the entire tree, so there is no need to special-case wrapping.
1002  * The only caller of this (i915_drm_client.c) doesn't use the next id.
1003  */
1004 int
1005 __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, int limit, u32 *next,
1006     gfp_t gfp)
1007 {
1008 	int r = __xa_alloc(xa, id, entry, limit, gfp);
1009 	*next = *id + 1;
1010 	return r;
1011 }
1012 
1013 void *
1014 __xa_erase(struct xarray *xa, unsigned long index)
1015 {
1016 	struct xarray_entry find, *res;
1017 	void *ptr = NULL;
1018 
1019 	find.id = index;
1020 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1021 	if (res) {
1022 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
1023 		ptr = res->ptr;
1024 		pool_put(&xa_pool, res);
1025 	}
1026 	return ptr;
1027 }
1028 
1029 void *
1030 __xa_load(struct xarray *xa, unsigned long index)
1031 {
1032 	struct xarray_entry find, *res;
1033 
1034 	find.id = index;
1035 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1036 	if (res == NULL)
1037 		return NULL;
1038 	return res->ptr;
1039 }
1040 
1041 void *
1042 __xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1043 {
1044 	struct xarray_entry find, *res;
1045 	void *prev;
1046 
1047 	if (entry == NULL)
1048 		return __xa_erase(xa, index);
1049 
1050 	find.id = index;
1051 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1052 	if (res != NULL) {
1053 		/* index exists */
1054 		/* XXX Multislot entries updates not implemented yet */
1055 		prev = res->ptr;
1056 		res->ptr = entry;
1057 		return prev;
1058 	}
1059 
1060 	/* index not found, add new */
1061 	if (gfp & GFP_NOWAIT) {
1062 		res = pool_get(&xa_pool, PR_NOWAIT);
1063 	} else {
1064 		mtx_leave(&xa->xa_lock);
1065 		res = pool_get(&xa_pool, PR_WAITOK);
1066 		mtx_enter(&xa->xa_lock);
1067 	}
1068 	if (res == NULL)
1069 		return XA_ERROR(-ENOMEM);
1070 	res->id = index;
1071 	res->ptr = entry;
1072 	if (SPLAY_INSERT(xarray_tree, &xa->xa_tree, res) != NULL)
1073 		return XA_ERROR(-EINVAL);
1074 	return NULL; /* no prev entry at index */
1075 }
1076 
1077 void *
1078 xa_get_next(struct xarray *xa, unsigned long *index)
1079 {
1080 	struct xarray_entry *res;
1081 
1082 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
1083 		if (res->id >= *index) {
1084 			*index = res->id;
1085 			return res->ptr;
1086 		}
1087 	}
1088 
1089 	return NULL;
1090 }
1091 
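/*
 * Sketch of the expected call pattern: callers hold xa_lock around the
 * __xa_* variants (XA_FLAGS_ALLOC and the locking wrappers are assumed
 * to come from the compat headers).  A non-positive limit means "up to
 * INT_MAX".
 *
 *	xa_init_flags(&xa, XA_FLAGS_ALLOC);
 *
 *	mtx_enter(&xa.xa_lock);
 *	error = __xa_alloc(&xa, &id, ptr, 0, GFP_KERNEL);
 *	mtx_leave(&xa.xa_lock);
 *	if (error == 0)
 *		p = __xa_load(&xa, id);
 */
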
1092 int
1093 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
1094 {
1095 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
1096 	    M_DRM, gfp_mask | M_ZERO);
1097 	if (table->sgl == NULL)
1098 		return -ENOMEM;
1099 	table->nents = table->orig_nents = nents;
1100 	sg_mark_end(&table->sgl[nents - 1]);
1101 	return 0;
1102 }
1103 
1104 void
1105 sg_free_table(struct sg_table *table)
1106 {
1107 	free(table->sgl, M_DRM,
1108 	    table->orig_nents * sizeof(struct scatterlist));
1109 	table->orig_nents = 0;
1110 	table->sgl = NULL;
1111 }
1112 
1113 size_t
1114 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
1115     const void *buf, size_t buflen)
1116 {
1117 	panic("%s", __func__);
1118 }
1119 
1120 int
1121 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1122 {
1123 	void *cmd = NULL;
1124 	int cmdlen = 0;
1125 	int err, ret = 0;
1126 	int op;
1127 
1128 	iic_acquire_bus(&adap->ic, 0);
1129 
1130 	while (num > 2) {
1131 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
1132 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
1133 		    msgs->buf, msgs->len, 0);
1134 		if (err) {
1135 			ret = -err;
1136 			goto fail;
1137 		}
1138 		msgs++;
1139 		num--;
1140 		ret++;
1141 	}
1142 
1143 	if (num > 1) {
1144 		cmd = msgs->buf;
1145 		cmdlen = msgs->len;
1146 		msgs++;
1147 		num--;
1148 		ret++;
1149 	}
1150 
1151 	op = (msgs->flags & I2C_M_RD) ?
1152 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
1153 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
1154 	    msgs->buf, msgs->len, 0);
1155 	if (err) {
1156 		ret = -err;
1157 		goto fail;
1158 	}
1159 	msgs++;
1160 	ret++;
1161 
1162 fail:
1163 	iic_release_bus(&adap->ic, 0);
1164 
1165 	return ret;
1166 }
1167 
1168 int
1169 __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1170 {
1171 	int ret, retries;
1172 
1173 	retries = adap->retries;
1174 retry:
1175 	if (adap->algo)
1176 		ret = adap->algo->master_xfer(adap, msgs, num);
1177 	else
1178 		ret = i2c_master_xfer(adap, msgs, num);
1179 	if (ret == -EAGAIN && retries > 0) {
1180 		retries--;
1181 		goto retry;
1182 	}
1183 
1184 	return ret;
1185 }
1186 
1187 int
1188 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1189 {
1190 	int ret;
1191 
1192 	if (adap->lock_ops)
1193 		adap->lock_ops->lock_bus(adap, 0);
1194 
1195 	ret = __i2c_transfer(adap, msgs, num);
1196 
1197 	if (adap->lock_ops)
1198 		adap->lock_ops->unlock_bus(adap, 0);
1199 
1200 	return ret;
1201 }
1202 
1203 int
1204 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1205 {
1206 	struct i2c_algo_bit_data *algo = adap->algo_data;
1207 	struct i2c_adapter bb;
1208 
1209 	memset(&bb, 0, sizeof(bb));
1210 	bb.ic = algo->ic;
1211 	bb.retries = adap->retries;
1212 	return i2c_master_xfer(&bb, msgs, num);
1213 }
1214 
1215 uint32_t
1216 i2c_bb_functionality(struct i2c_adapter *adap)
1217 {
1218 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1219 }
1220 
1221 struct i2c_algorithm i2c_bit_algo = {
1222 	.master_xfer = i2c_bb_master_xfer,
1223 	.functionality = i2c_bb_functionality
1224 };
1225 
1226 int
1227 i2c_bit_add_bus(struct i2c_adapter *adap)
1228 {
1229 	adap->algo = &i2c_bit_algo;
1230 	adap->retries = 3;
1231 
1232 	return 0;
1233 }
1234 
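/*
 * Sketch of a typical transfer through the wrappers above: a one-byte
 * register/offset write followed by a read, as DDC/EDID code does
 * (addresses and lengths are illustrative):
 *
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = data },
 *	};
 *
 *	ret = i2c_transfer(adap, msgs, 2);
 *	if (ret != 2)
 *		...
 *
 * i2c_transfer() serialises through adap->lock_ops when present, and
 * __i2c_transfer() retries up to adap->retries times on -EAGAIN.
 */
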
1235 #if defined(__amd64__) || defined(__i386__)
1236 
1237 /*
1238  * This is a minimal implementation of the Linux vga_get/vga_put
1239  * interface.  In all likelihood, it will only work for inteldrm(4) as
1240  * it assumes that if there is another active VGA device in the
1241  * system, it is sitting behind a PCI bridge.
1242  */
1243 
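/*
 * Assumed usage, mirroring inteldrm(4): bracket the register accesses
 * that must not race with another VGA decoder.  The rsrc argument is
 * ignored by this implementation (VGA_RSRC_LEGACY_IO is assumed to come
 * from the compat headers).
 *
 *	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 *	...
 *	vga_put(pdev, VGA_RSRC_LEGACY_IO);
 *
 * vga_get_uninterruptible() scans domain 0 and clears PPB_BC_VGA_ENABLE
 * on the bridge that still forwards VGA cycles; vga_put() re-enables it.
 */
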
1244 extern int pci_enumerate_bus(struct pci_softc *,
1245     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1246 
1247 pcitag_t vga_bridge_tag;
1248 int vga_bridge_disabled;
1249 
1250 int
1251 vga_disable_bridge(struct pci_attach_args *pa)
1252 {
1253 	pcireg_t bhlc, bc;
1254 
1255 	if (pa->pa_domain != 0)
1256 		return 0;
1257 
1258 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1259 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1260 		return 0;
1261 
1262 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1263 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1264 		return 0;
1265 	bc &= ~PPB_BC_VGA_ENABLE;
1266 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1267 
1268 	vga_bridge_tag = pa->pa_tag;
1269 	vga_bridge_disabled = 1;
1270 
1271 	return 1;
1272 }
1273 
1274 void
1275 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1276 {
1277 	KASSERT(pdev->pci->sc_bridgetag == NULL);
1278 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1279 }
1280 
1281 void
1282 vga_put(struct pci_dev *pdev, int rsrc)
1283 {
1284 	pcireg_t bc;
1285 
1286 	if (!vga_bridge_disabled)
1287 		return;
1288 
1289 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1290 	bc |= PPB_BC_VGA_ENABLE;
1291 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1292 
1293 	vga_bridge_disabled = 0;
1294 }
1295 
1296 #endif
1297 
1298 /*
1299  * ACPI types and interfaces.
1300  */
1301 
1302 #ifdef __HAVE_ACPI
1303 #include "acpi.h"
1304 #endif
1305 
1306 #if NACPI > 0
1307 
1308 #include <dev/acpi/acpireg.h>
1309 #include <dev/acpi/acpivar.h>
1310 #include <dev/acpi/amltypes.h>
1311 #include <dev/acpi/dsdt.h>
1312 
1313 acpi_status
1314 acpi_get_table(const char *sig, int instance,
1315     struct acpi_table_header **hdr)
1316 {
1317 	struct acpi_softc *sc = acpi_softc;
1318 	struct acpi_q *entry;
1319 
1320 	KASSERT(instance == 1);
1321 
1322 	if (sc == NULL)
1323 		return AE_NOT_FOUND;
1324 
1325 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1326 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1327 			*hdr = entry->q_table;
1328 			return 0;
1329 		}
1330 	}
1331 
1332 	return AE_NOT_FOUND;
1333 }
1334 
1335 void
1336 acpi_put_table(struct acpi_table_header *hdr)
1337 {
1338 }
1339 
1340 acpi_status
1341 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1342 {
1343 	node = aml_searchname(node, name);
1344 	if (node == NULL)
1345 		return AE_NOT_FOUND;
1346 
1347 	*rnode = node;
1348 	return 0;
1349 }
1350 
1351 acpi_status
1352 acpi_get_name(acpi_handle node, int type,  struct acpi_buffer *buffer)
1353 {
1354 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1355 	KASSERT(type == ACPI_FULL_PATHNAME);
1356 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1357 	return 0;
1358 }
1359 
1360 acpi_status
1361 acpi_evaluate_object(acpi_handle node, const char *name,
1362     struct acpi_object_list *params, struct acpi_buffer *result)
1363 {
1364 	struct aml_value args[4], res;
1365 	union acpi_object *obj;
1366 	uint8_t *data;
1367 	int i;
1368 
1369 	KASSERT(params->count <= nitems(args));
1370 
1371 	for (i = 0; i < params->count; i++) {
1372 		args[i].type = params->pointer[i].type;
1373 		switch (args[i].type) {
1374 		case AML_OBJTYPE_INTEGER:
1375 			args[i].v_integer = params->pointer[i].integer.value;
1376 			break;
1377 		case AML_OBJTYPE_BUFFER:
1378 			args[i].length = params->pointer[i].buffer.length;
1379 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1380 			break;
1381 		default:
1382 			printf("%s: arg type 0x%02x\n", __func__, args[i].type);
1383 			return AE_BAD_PARAMETER;
1384 		}
1385 	}
1386 
1387 	if (name) {
1388 		node = aml_searchname(node, name);
1389 		if (node == NULL)
1390 			return AE_NOT_FOUND;
1391 	}
1392 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1393 		aml_freevalue(&res);
1394 		return AE_ERROR;
1395 	}
1396 
1397 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1398 
1399 	result->length = sizeof(union acpi_object);
1400 	switch (res.type) {
1401 	case AML_OBJTYPE_BUFFER:
1402 		result->length += res.length;
1403 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1404 		obj = (union acpi_object *)result->pointer;
1405 		data = (uint8_t *)(obj + 1);
1406 		obj->type = res.type;
1407 		obj->buffer.length = res.length;
1408 		obj->buffer.pointer = data;
1409 		memcpy(data, res.v_buffer, res.length);
1410 		break;
1411 	default:
1412 		printf("%s: return type 0x%02x\n", __func__, res.type);
1413 		aml_freevalue(&res);
1414 		return AE_ERROR;
1415 	}
1416 
1417 	aml_freevalue(&res);
1418 	return 0;
1419 }
1420 
1421 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1422 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1423 
1424 int
1425 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1426 {
1427 	struct acpi_bus_event event;
1428 	struct notifier_block *nb;
1429 
1430 	event.device_class = ACPI_VIDEO_CLASS;
1431 	event.type = notify;
1432 
1433 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1434 		nb->notifier_call(nb, 0, &event);
1435 	return 0;
1436 }
1437 
1438 int
1439 register_acpi_notifier(struct notifier_block *nb)
1440 {
1441 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1442 	return 0;
1443 }
1444 
1445 int
1446 unregister_acpi_notifier(struct notifier_block *nb)
1447 {
1448 	struct notifier_block *tmp;
1449 
1450 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1451 		if (tmp == nb) {
1452 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1453 			    notifier_block, link);
1454 			return 0;
1455 		}
1456 	}
1457 
1458 	return -ENOENT;
1459 }
1460 
1461 const char *
1462 acpi_format_exception(acpi_status status)
1463 {
1464 	switch (status) {
1465 	case AE_NOT_FOUND:
1466 		return "not found";
1467 	case AE_BAD_PARAMETER:
1468 		return "bad parameter";
1469 	default:
1470 		return "unknown";
1471 	}
1472 }
1473 
1474 #endif
1475 
1476 void
1477 backlight_do_update_status(void *arg)
1478 {
1479 	backlight_update_status(arg);
1480 }
1481 
1482 struct backlight_device *
1483 backlight_device_register(const char *name, void *kdev, void *data,
1484     const struct backlight_ops *ops, struct backlight_properties *props)
1485 {
1486 	struct backlight_device *bd;
1487 
1488 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1489 	bd->ops = ops;
1490 	bd->props = *props;
1491 	bd->data = data;
1492 
1493 	task_set(&bd->task, backlight_do_update_status, bd);
1494 
1495 	return bd;
1496 }
1497 
1498 void
1499 backlight_device_unregister(struct backlight_device *bd)
1500 {
1501 	free(bd, M_DRM, sizeof(*bd));
1502 }
1503 
1504 struct backlight_device *
1505 devm_backlight_device_register(void *dev, const char *name, void *parent,
1506     void *data, const struct backlight_ops *bo,
1507     const struct backlight_properties *bp)
1508 {
1509 	STUB();
1510 	return NULL;
1511 }
1512 
1513 void
1514 backlight_schedule_update_status(struct backlight_device *bd)
1515 {
1516 	task_add(systq, &bd->task);
1517 }
1518 
1519 inline int
1520 backlight_enable(struct backlight_device *bd)
1521 {
1522 	if (bd == NULL)
1523 		return 0;
1524 
1525 	bd->props.power = FB_BLANK_UNBLANK;
1526 
1527 	return bd->ops->update_status(bd);
1528 }
1529 
1530 inline int
1531 backlight_disable(struct backlight_device *bd)
1532 {
1533 	if (bd == NULL)
1534 		return 0;
1535 
1536 	bd->props.power = FB_BLANK_POWERDOWN;
1537 
1538 	return bd->ops->update_status(bd);
1539 }
1540 
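/*
 * Sketch of the expected registration (illustrative names; the
 * property fields are assumed to match the compat struct):
 *
 *	static const struct backlight_ops my_bl_ops = {
 *		.update_status = my_bl_update_status,
 *	};
 *
 *	props.type = BACKLIGHT_RAW;
 *	props.max_brightness = 255;
 *	bd = backlight_device_register("my_bl", NULL, sc, &my_bl_ops, &props);
 *	...
 *	backlight_enable(bd);
 *	backlight_schedule_update_status(bd);
 *
 * backlight_enable()/backlight_disable() call update_status directly;
 * backlight_schedule_update_status() defers it to systq.
 */
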
1541 void
1542 drm_sysfs_hotplug_event(struct drm_device *dev)
1543 {
1544 	KNOTE(&dev->note, NOTE_CHANGE);
1545 }
1546 
1547 void
1548 drm_sysfs_connector_hotplug_event(struct drm_connector *connector)
1549 {
1550 	KNOTE(&connector->dev->note, NOTE_CHANGE);
1551 }
1552 
1553 void
1554 drm_sysfs_connector_status_event(struct drm_connector *connector,
1555     struct drm_property *property)
1556 {
1557 	STUB();
1558 }
1559 
1560 struct dma_fence *
1561 dma_fence_get(struct dma_fence *fence)
1562 {
1563 	if (fence)
1564 		kref_get(&fence->refcount);
1565 	return fence;
1566 }
1567 
1568 struct dma_fence *
1569 dma_fence_get_rcu(struct dma_fence *fence)
1570 {
1571 	if (fence)
1572 		kref_get(&fence->refcount);
1573 	return fence;
1574 }
1575 
1576 struct dma_fence *
1577 dma_fence_get_rcu_safe(struct dma_fence **dfp)
1578 {
1579 	struct dma_fence *fence;
1580 	if (dfp == NULL)
1581 		return NULL;
1582 	fence = *dfp;
1583 	if (fence)
1584 		kref_get(&fence->refcount);
1585 	return fence;
1586 }
1587 
1588 void
1589 dma_fence_release(struct kref *ref)
1590 {
1591 	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
1592 	if (fence->ops && fence->ops->release)
1593 		fence->ops->release(fence);
1594 	else
1595 		free(fence, M_DRM, 0);
1596 }
1597 
1598 void
1599 dma_fence_put(struct dma_fence *fence)
1600 {
1601 	if (fence)
1602 		kref_put(&fence->refcount, dma_fence_release);
1603 }
1604 
1605 int
1606 dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp)
1607 {
1608 	struct dma_fence_cb *cur, *tmp;
1609 	struct list_head cb_list;
1610 
1611 	if (fence == NULL)
1612 		return -EINVAL;
1613 
1614 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1615 		return -EINVAL;
1616 
1617 	list_replace(&fence->cb_list, &cb_list);
1618 
1619 	fence->timestamp = timestamp;
1620 	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
1621 
1622 	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
1623 		INIT_LIST_HEAD(&cur->node);
1624 		cur->func(fence, cur);
1625 	}
1626 
1627 	return 0;
1628 }
1629 
1630 int
1631 dma_fence_signal(struct dma_fence *fence)
1632 {
1633 	int r;
1634 
1635 	if (fence == NULL)
1636 		return -EINVAL;
1637 
1638 	mtx_enter(fence->lock);
1639 	r = dma_fence_signal_timestamp_locked(fence, ktime_get());
1640 	mtx_leave(fence->lock);
1641 
1642 	return r;
1643 }
1644 
1645 int
1646 dma_fence_signal_locked(struct dma_fence *fence)
1647 {
1648 	if (fence == NULL)
1649 		return -EINVAL;
1650 
1651 	return dma_fence_signal_timestamp_locked(fence, ktime_get());
1652 }
1653 
1654 int
1655 dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
1656 {
1657 	int r;
1658 
1659 	if (fence == NULL)
1660 		return -EINVAL;
1661 
1662 	mtx_enter(fence->lock);
1663 	r = dma_fence_signal_timestamp_locked(fence, timestamp);
1664 	mtx_leave(fence->lock);
1665 
1666 	return r;
1667 }
1668 
1669 bool
1670 dma_fence_is_signaled(struct dma_fence *fence)
1671 {
1672 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1673 		return true;
1674 
1675 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1676 		dma_fence_signal(fence);
1677 		return true;
1678 	}
1679 
1680 	return false;
1681 }
1682 
1683 bool
1684 dma_fence_is_signaled_locked(struct dma_fence *fence)
1685 {
1686 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1687 		return true;
1688 
1689 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1690 		dma_fence_signal_locked(fence);
1691 		return true;
1692 	}
1693 
1694 	return false;
1695 }
1696 
1697 long
1698 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
1699 {
1700 	if (timeout < 0)
1701 		return -EINVAL;
1702 
1703 	if (fence->ops->wait)
1704 		return fence->ops->wait(fence, intr, timeout);
1705 	else
1706 		return dma_fence_default_wait(fence, intr, timeout);
1707 }
1708 
1709 long
1710 dma_fence_wait(struct dma_fence *fence, bool intr)
1711 {
1712 	long ret;
1713 
1714 	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
1715 	if (ret < 0)
1716 		return ret;
1717 
1718 	return 0;
1719 }
1720 
1721 void
1722 dma_fence_enable_sw_signaling(struct dma_fence *fence)
1723 {
1724 	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
1725 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
1726 	    fence->ops->enable_signaling) {
1727 		mtx_enter(fence->lock);
1728 		if (!fence->ops->enable_signaling(fence))
1729 			dma_fence_signal_locked(fence);
1730 		mtx_leave(fence->lock);
1731 	}
1732 }
1733 
1734 void
1735 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
1736     struct mutex *lock, uint64_t context, uint64_t seqno)
1737 {
1738 	fence->ops = ops;
1739 	fence->lock = lock;
1740 	fence->context = context;
1741 	fence->seqno = seqno;
1742 	fence->flags = 0;
1743 	fence->error = 0;
1744 	kref_init(&fence->refcount);
1745 	INIT_LIST_HEAD(&fence->cb_list);
1746 }
1747 
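/*
 * Minimal provider sketch built only on the helpers above (ops and
 * field names are illustrative).  The default release path frees the
 * fence with free(9), so allocate it from M_DRM:
 *
 *	static const struct dma_fence_ops my_fence_ops = {
 *		.get_driver_name = my_get_driver_name,
 *		.get_timeline_name = my_get_timeline_name,
 *	};
 *
 *	fence = malloc(sizeof(*fence), M_DRM, M_WAITOK | M_ZERO);
 *	dma_fence_init(fence, &my_fence_ops, &sc->sc_fence_mtx,
 *	    sc->sc_fence_context, ++sc->sc_fence_seqno);
 *	...
 *	dma_fence_signal(fence);
 *	dma_fence_put(fence);
 *
 * sc_fence_context would come from dma_fence_context_alloc(1) at
 * attach time; dma_fence_signal() runs any callbacks added with
 * dma_fence_add_callback() and wakes dma_fence_default_wait().
 */
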
1748 int
1749 dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
1750     dma_fence_func_t func)
1751 {
1752 	int ret = 0;
1753 	bool was_set;
1754 
1755 	if (WARN_ON(!fence || !func))
1756 		return -EINVAL;
1757 
1758 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1759 		INIT_LIST_HEAD(&cb->node);
1760 		return -ENOENT;
1761 	}
1762 
1763 	mtx_enter(fence->lock);
1764 
1765 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
1766 
1767 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1768 		ret = -ENOENT;
1769 	else if (!was_set && fence->ops->enable_signaling) {
1770 		if (!fence->ops->enable_signaling(fence)) {
1771 			dma_fence_signal_locked(fence);
1772 			ret = -ENOENT;
1773 		}
1774 	}
1775 
1776 	if (!ret) {
1777 		cb->func = func;
1778 		list_add_tail(&cb->node, &fence->cb_list);
1779 	} else
1780 		INIT_LIST_HEAD(&cb->node);
1781 	mtx_leave(fence->lock);
1782 
1783 	return ret;
1784 }
1785 
1786 bool
1787 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
1788 {
1789 	bool ret;
1790 
1791 	mtx_enter(fence->lock);
1792 
1793 	ret = !list_empty(&cb->node);
1794 	if (ret)
1795 		list_del_init(&cb->node);
1796 
1797 	mtx_leave(fence->lock);
1798 
1799 	return ret;
1800 }
1801 
1802 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1803 
1804 uint64_t
1805 dma_fence_context_alloc(unsigned int num)
1806 {
1807 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1808 }
1809 
1810 struct default_wait_cb {
1811 	struct dma_fence_cb base;
1812 	struct proc *proc;
1813 };
1814 
1815 static void
1816 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1817 {
1818 	struct default_wait_cb *wait =
1819 	    container_of(cb, struct default_wait_cb, base);
1820 	wake_up_process(wait->proc);
1821 }
1822 
1823 long
1824 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1825 {
1826 	long ret = timeout ? timeout : 1;
1827 	unsigned long end;
1828 	int err;
1829 	struct default_wait_cb cb;
1830 	bool was_set;
1831 
1832 	KASSERT(timeout <= INT_MAX);
1833 
1834 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1835 		return ret;
1836 
1837 	mtx_enter(fence->lock);
1838 
1839 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1840 	    &fence->flags);
1841 
1842 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1843 		goto out;
1844 
1845 	if (!was_set && fence->ops->enable_signaling) {
1846 		if (!fence->ops->enable_signaling(fence)) {
1847 			dma_fence_signal_locked(fence);
1848 			goto out;
1849 		}
1850 	}
1851 
1852 	if (timeout == 0) {
1853 		ret = 0;
1854 		goto out;
1855 	}
1856 
1857 	cb.base.func = dma_fence_default_wait_cb;
1858 	cb.proc = curproc;
1859 	list_add(&cb.base.node, &fence->cb_list);
1860 
1861 	end = jiffies + timeout;
1862 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1863 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1864 			break;
1865 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
1866 		    "dmafence", ret);
1867 		if (err == EINTR || err == ERESTART) {
1868 			ret = -ERESTARTSYS;
1869 			break;
1870 		}
1871 	}
1872 
1873 	if (!list_empty(&cb.base.node))
1874 		list_del(&cb.base.node);
1875 out:
1876 	mtx_leave(fence->lock);
1877 
1878 	return ret;
1879 }
1880 
1881 static bool
1882 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1883     uint32_t *idx)
1884 {
1885 	int i;
1886 
1887 	for (i = 0; i < count; ++i) {
1888 		struct dma_fence *fence = fences[i];
1889 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1890 			if (idx)
1891 				*idx = i;
1892 			return true;
1893 		}
1894 	}
1895 	return false;
1896 }
1897 
1898 long
1899 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
1900     bool intr, long timeout, uint32_t *idx)
1901 {
1902 	struct default_wait_cb *cb;
1903 	long ret = timeout;
1904 	unsigned long end;
1905 	int i, err;
1906 
1907 	KASSERT(timeout <= INT_MAX);
1908 
1909 	if (timeout == 0) {
1910 		for (i = 0; i < count; i++) {
1911 			if (dma_fence_is_signaled(fences[i])) {
1912 				if (idx)
1913 					*idx = i;
1914 				return 1;
1915 			}
1916 		}
1917 		return 0;
1918 	}
1919 
1920 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1921 	if (cb == NULL)
1922 		return -ENOMEM;
1923 
1924 	for (i = 0; i < count; i++) {
1925 		struct dma_fence *fence = fences[i];
1926 		cb[i].proc = curproc;
1927 		if (dma_fence_add_callback(fence, &cb[i].base,
1928 		    dma_fence_default_wait_cb)) {
1929 			if (idx)
1930 				*idx = i;
1931 			goto cb_cleanup;
1932 		}
1933 	}
1934 
1935 	end = jiffies + timeout;
1936 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1937 		if (dma_fence_test_signaled_any(fences, count, idx))
1938 			break;
1939 		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
1940 		if (err == EINTR || err == ERESTART) {
1941 			ret = -ERESTARTSYS;
1942 			break;
1943 		}
1944 	}
1945 
1946 cb_cleanup:
1947 	while (i-- > 0)
1948 		dma_fence_remove_callback(fences[i], &cb[i].base);
1949 	free(cb, M_DRM, count * sizeof(*cb));
1950 	return ret;
1951 }
1952 
1953 static struct dma_fence dma_fence_stub;
1954 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
1955 
1956 static const char *
1957 dma_fence_stub_get_name(struct dma_fence *fence)
1958 {
1959 	return "stub";
1960 }
1961 
1962 static const struct dma_fence_ops dma_fence_stub_ops = {
1963 	.get_driver_name = dma_fence_stub_get_name,
1964 	.get_timeline_name = dma_fence_stub_get_name,
1965 };
1966 
1967 struct dma_fence *
1968 dma_fence_get_stub(void)
1969 {
1970 	mtx_enter(&dma_fence_stub_mtx);
1971 	if (dma_fence_stub.ops == NULL) {
1972 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
1973 		    &dma_fence_stub_mtx, 0, 0);
1974 		dma_fence_signal_locked(&dma_fence_stub);
1975 	}
1976 	mtx_leave(&dma_fence_stub_mtx);
1977 
1978 	return dma_fence_get(&dma_fence_stub);
1979 }
1980 
1981 struct dma_fence *
1982 dma_fence_allocate_private_stub(void)
1983 {
1984 	struct dma_fence *f = malloc(sizeof(*f), M_DRM,
1985 	    M_ZERO | M_WAITOK | M_CANFAIL);
1986 	if (f == NULL)
1987 		return ERR_PTR(-ENOMEM);
1988 	dma_fence_init(f, &dma_fence_stub_ops, &dma_fence_stub_mtx, 0, 0);
1989 	dma_fence_signal(f);
1990 	return f;
1991 }
1992 
1993 static const char *
1994 dma_fence_array_get_driver_name(struct dma_fence *fence)
1995 {
1996 	return "dma_fence_array";
1997 }
1998 
1999 static const char *
2000 dma_fence_array_get_timeline_name(struct dma_fence *fence)
2001 {
2002 	return "unbound";
2003 }
2004 
2005 static void
2006 irq_dma_fence_array_work(void *arg)
2007 {
2008 	struct dma_fence_array *dfa = (struct dma_fence_array *)arg;
2009 	dma_fence_signal(&dfa->base);
2010 	dma_fence_put(&dfa->base);
2011 }
2012 
2013 static void
2014 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
2015 {
2016 	struct dma_fence_array_cb *array_cb =
2017 	    container_of(cb, struct dma_fence_array_cb, cb);
2018 	struct dma_fence_array *dfa = array_cb->array;
2019 
2020 	if (atomic_dec_and_test(&dfa->num_pending))
2021 		timeout_add(&dfa->to, 1);
2022 	else
2023 		dma_fence_put(&dfa->base);
2024 }
2025 
2026 static bool
2027 dma_fence_array_enable_signaling(struct dma_fence *fence)
2028 {
2029 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2030 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
2031 	int i;
2032 
2033 	for (i = 0; i < dfa->num_fences; ++i) {
2034 		cb[i].array = dfa;
2035 		dma_fence_get(&dfa->base);
2036 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
2037 		    dma_fence_array_cb_func)) {
2038 			dma_fence_put(&dfa->base);
2039 			if (atomic_dec_and_test(&dfa->num_pending))
2040 				return false;
2041 		}
2042 	}
2043 
2044 	return true;
2045 }
2046 
2047 static bool
2048 dma_fence_array_signaled(struct dma_fence *fence)
2049 {
2050 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2051 
2052 	return atomic_read(&dfa->num_pending) <= 0;
2053 }
2054 
2055 static void
2056 dma_fence_array_release(struct dma_fence *fence)
2057 {
2058 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2059 	int i;
2060 
2061 	for (i = 0; i < dfa->num_fences; ++i)
2062 		dma_fence_put(dfa->fences[i]);
2063 
2064 	free(dfa->fences, M_DRM, 0);
2065 	dma_fence_free(fence);
2066 }
2067 
2068 struct dma_fence_array *
2069 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
2070     unsigned seqno, bool signal_on_any)
2071 {
2072 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
2073 	    (num_fences * sizeof(struct dma_fence_array_cb)),
2074 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
2075 	if (dfa == NULL)
2076 		return NULL;
2077 
2078 	mtx_init(&dfa->lock, IPL_TTY);
2079 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
2080 	    context, seqno);
2081 	timeout_set(&dfa->to, irq_dma_fence_array_work, dfa);
2082 
2083 	dfa->num_fences = num_fences;
2084 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
2085 	dfa->fences = fences;
2086 
2087 	return dfa;
2088 }
2089 
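/*
 * Assumed usage: the fences array is allocated by the caller and
 * ownership passes to the array object, since dma_fence_array_release()
 * puts every fence and frees the array.  On failure the caller still
 * owns the array and its references.
 *
 *	fences = mallocarray(n, sizeof(*fences), M_DRM, M_WAITOK);
 *	for (i = 0; i < n; i++)
 *		fences[i] = dma_fence_get(src[i]);
 *	dfa = dma_fence_array_create(n, fences, dma_fence_context_alloc(1),
 *	    1, false);
 *	if (dfa == NULL)
 *		goto fail;
 *	dma_fence_wait(&dfa->base, true);
 *	dma_fence_put(&dfa->base);
 */
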
2090 struct dma_fence *
2091 dma_fence_array_first(struct dma_fence *f)
2092 {
2093 	struct dma_fence_array *dfa;
2094 
2095 	if (f == NULL)
2096 		return NULL;
2097 
2098 	if ((dfa = to_dma_fence_array(f)) == NULL)
2099 		return f;
2100 
2101 	if (dfa->num_fences > 0)
2102 		return dfa->fences[0];
2103 
2104 	return NULL;
2105 }
2106 
2107 struct dma_fence *
2108 dma_fence_array_next(struct dma_fence *f, unsigned int i)
2109 {
2110 	struct dma_fence_array *dfa;
2111 
2112 	if (f == NULL)
2113 		return NULL;
2114 
2115 	if ((dfa = to_dma_fence_array(f)) == NULL)
2116 		return NULL;
2117 
2118 	if (i < dfa->num_fences)
2119 		return dfa->fences[i];
2120 
2121 	return NULL;
2122 }
2123 
2124 const struct dma_fence_ops dma_fence_array_ops = {
2125 	.get_driver_name = dma_fence_array_get_driver_name,
2126 	.get_timeline_name = dma_fence_array_get_timeline_name,
2127 	.enable_signaling = dma_fence_array_enable_signaling,
2128 	.signaled = dma_fence_array_signaled,
2129 	.release = dma_fence_array_release,
2130 };
2131 
2132 int
2133 dma_fence_chain_find_seqno(struct dma_fence **df, uint64_t seqno)
2134 {
2135 	struct dma_fence_chain *chain;
2136 	struct dma_fence *fence;
2137 
2138 	if (seqno == 0)
2139 		return 0;
2140 
2141 	if ((chain = to_dma_fence_chain(*df)) == NULL)
2142 		return -EINVAL;
2143 
2144 	fence = &chain->base;
2145 	if (fence->seqno < seqno)
2146 		return -EINVAL;
2147 
2148 	dma_fence_chain_for_each(*df, fence) {
2149 		if ((*df)->context != fence->context)
2150 			break;
2151 
2152 		chain = to_dma_fence_chain(*df);
2153 		if (chain->prev_seqno < seqno)
2154 			break;
2155 	}
2156 	dma_fence_put(fence);
2157 
2158 	return 0;
2159 }
2160 
2161 void
2162 dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev,
2163     struct dma_fence *fence, uint64_t seqno)
2164 {
2165 	uint64_t context;
2166 
2167 	chain->fence = fence;
2168 	chain->prev = prev;
2169 	mtx_init(&chain->lock, IPL_TTY);
2170 
2171 	/* if prev is a chain */
2172 	if (to_dma_fence_chain(prev) != NULL) {
2173 		if (__dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
2174 			chain->prev_seqno = prev->seqno;
2175 			context = prev->context;
2176 		} else {
2177 			chain->prev_seqno = 0;
2178 			context = dma_fence_context_alloc(1);
2179 			seqno = prev->seqno;
2180 		}
2181 	} else {
2182 		chain->prev_seqno = 0;
2183 		context = dma_fence_context_alloc(1);
2184 	}
2185 
2186 	dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock,
2187 	    context, seqno);
2188 }
2189 
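/*
 * Sketch of how a timeline grows one link at a time, matching the
 * ownership rules implemented above: each node consumes one reference
 * to its fence and one to the previous node.
 *
 *	chain = malloc(sizeof(*chain), M_DRM, M_WAITOK | M_ZERO);
 *	dma_fence_chain_init(chain, prev, dma_fence_get(fence), seqno);
 *	prev = &chain->base;
 *
 * Readers walk the timeline with dma_fence_chain_for_each() or
 * dma_fence_chain_walk(), which garbage-collects already signalled
 * links as it goes.
 */
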
2190 static const char *
2191 dma_fence_chain_get_driver_name(struct dma_fence *fence)
2192 {
2193 	return "dma_fence_chain";
2194 }
2195 
2196 static const char *
2197 dma_fence_chain_get_timeline_name(struct dma_fence *fence)
2198 {
2199 	return "unbound";
2200 }
2201 
2202 static bool dma_fence_chain_enable_signaling(struct dma_fence *);
2203 
2204 static void
2205 dma_fence_chain_timo(void *arg)
2206 {
2207 	struct dma_fence_chain *chain = (struct dma_fence_chain *)arg;
2208 
2209 	if (dma_fence_chain_enable_signaling(&chain->base) == false)
2210 		dma_fence_signal(&chain->base);
2211 	dma_fence_put(&chain->base);
2212 }
2213 
2214 static void
2215 dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
2216 {
2217 	struct dma_fence_chain *chain =
2218 	    container_of(cb, struct dma_fence_chain, cb);
2219 	timeout_set(&chain->to, dma_fence_chain_timo, chain);
2220 	timeout_add(&chain->to, 1);
2221 	dma_fence_put(f);
2222 }
2223 
2224 static bool
2225 dma_fence_chain_enable_signaling(struct dma_fence *fence)
2226 {
2227 	struct dma_fence_chain *chain, *h;
2228 	struct dma_fence *f;
2229 
2230 	h = to_dma_fence_chain(fence);
2231 	dma_fence_get(&h->base);
2232 	dma_fence_chain_for_each(fence, &h->base) {
2233 		chain = to_dma_fence_chain(fence);
2234 		if (chain == NULL)
2235 			f = fence;
2236 		else
2237 			f = chain->fence;
2238 
2239 		dma_fence_get(f);
2240 		if (!dma_fence_add_callback(f, &h->cb, dma_fence_chain_cb)) {
2241 			dma_fence_put(fence);
2242 			return true;
2243 		}
2244 		dma_fence_put(f);
2245 	}
2246 	dma_fence_put(&h->base);
2247 	return false;
2248 }
2249 
2250 static bool
2251 dma_fence_chain_signaled(struct dma_fence *fence)
2252 {
2253 	struct dma_fence_chain *chain;
2254 	struct dma_fence *f;
2255 
2256 	dma_fence_chain_for_each(fence, fence) {
2257 		chain = to_dma_fence_chain(fence);
2258 		if (chain == NULL)
2259 			f = fence;
2260 		else
2261 			f = chain->fence;
2262 
2263 		if (dma_fence_is_signaled(f) == false) {
2264 			dma_fence_put(fence);
2265 			return false;
2266 		}
2267 	}
2268 	return true;
2269 }
2270 
2271 static void
2272 dma_fence_chain_release(struct dma_fence *fence)
2273 {
2274 	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
2275 	struct dma_fence_chain *prev_chain;
2276 	struct dma_fence *prev;
2277 
2278 	for (prev = chain->prev; prev != NULL; prev = chain->prev) {
2279 		if (kref_read(&prev->refcount) > 1)
2280 			break;
2281 		if ((prev_chain = to_dma_fence_chain(prev)) == NULL)
2282 			break;
2283 		chain->prev = prev_chain->prev;
2284 		prev_chain->prev = NULL;
2285 		dma_fence_put(prev);
2286 	}
2287 	dma_fence_put(prev);
2288 	dma_fence_put(chain->fence);
2289 	dma_fence_free(fence);
2290 }
2291 
2292 struct dma_fence *
2293 dma_fence_chain_walk(struct dma_fence *fence)
2294 {
2295 	struct dma_fence_chain *chain = to_dma_fence_chain(fence), *prev_chain;
2296 	struct dma_fence *prev, *new_prev, *tmp;
2297 
2298 	if (chain == NULL) {
2299 		dma_fence_put(fence);
2300 		return NULL;
2301 	}
2302 
2303 	while ((prev = dma_fence_get(chain->prev)) != NULL) {
2304 		prev_chain = to_dma_fence_chain(prev);
2305 		if (prev_chain != NULL) {
2306 			if (!dma_fence_is_signaled(prev_chain->fence))
2307 				break;
2308 			new_prev = dma_fence_get(prev_chain->prev);
2309 		} else {
2310 			if (!dma_fence_is_signaled(prev))
2311 				break;
2312 			new_prev = NULL;
2313 		}
2314 		tmp = atomic_cas_ptr(&chain->prev, prev, new_prev);
2315 		dma_fence_put(tmp == prev ? prev : new_prev);
2316 		dma_fence_put(prev);
2317 	}
2318 
2319 	dma_fence_put(fence);
2320 	return prev;
2321 }
2322 
2323 const struct dma_fence_ops dma_fence_chain_ops = {
2324 	.get_driver_name = dma_fence_chain_get_driver_name,
2325 	.get_timeline_name = dma_fence_chain_get_timeline_name,
2326 	.enable_signaling = dma_fence_chain_enable_signaling,
2327 	.signaled = dma_fence_chain_signaled,
2328 	.release = dma_fence_chain_release,
2329 	.use_64bit_seqno = true,
2330 };
2331 
2332 bool
2333 dma_fence_is_container(struct dma_fence *fence)
2334 {
2335 	return (fence->ops == &dma_fence_chain_ops) ||
2336 	    (fence->ops == &dma_fence_array_ops);
2337 }
2338 
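/*
 * dma-buf file operations: a dma_buf is wrapped in a DTYPE_DMABUF struct
 * file.  read/write/ioctl/kqfilter are not supported; stat reports the
 * buffer size, close calls the exporter's release hook, and seek only
 * accepts an offset of 0 with SEEK_SET or SEEK_END, apparently mirroring
 * Linux, where lseek(fd, 0, SEEK_END) is how userland queries the size.
 */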
2339 int
2340 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
2341 {
2342 	return (ENXIO);
2343 }
2344 
2345 int
2346 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
2347 {
2348 	return (ENXIO);
2349 }
2350 
2351 int
2352 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2353 {
2354 	return (ENOTTY);
2355 }
2356 
2357 int
2358 dmabuf_kqfilter(struct file *fp, struct knote *kn)
2359 {
2360 	return (EINVAL);
2361 }
2362 
2363 int
2364 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
2365 {
2366 	struct dma_buf *dmabuf = fp->f_data;
2367 
2368 	memset(st, 0, sizeof(*st));
2369 	st->st_size = dmabuf->size;
2370 	st->st_mode = S_IFIFO;	/* XXX */
2371 	return (0);
2372 }
2373 
2374 int
2375 dmabuf_close(struct file *fp, struct proc *p)
2376 {
2377 	struct dma_buf *dmabuf = fp->f_data;
2378 
2379 	fp->f_data = NULL;
2380 	KERNEL_LOCK();
2381 	dmabuf->ops->release(dmabuf);
2382 	KERNEL_UNLOCK();
2383 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
2384 	return (0);
2385 }
2386 
2387 int
2388 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2389 {
2390 	struct dma_buf *dmabuf = fp->f_data;
2391 	off_t newoff;
2392 
2393 	if (*offset != 0)
2394 		return (EINVAL);
2395 
2396 	switch (whence) {
2397 	case SEEK_SET:
2398 		newoff = 0;
2399 		break;
2400 	case SEEK_END:
2401 		newoff = dmabuf->size;
2402 		break;
2403 	default:
2404 		return (EINVAL);
2405 	}
2406 	mtx_enter(&fp->f_mtx);
2407 	fp->f_offset = newoff;
2408 	mtx_leave(&fp->f_mtx);
2409 	*offset = newoff;
2410 	return (0);
2411 }
2412 
2413 const struct fileops dmabufops = {
2414 	.fo_read	= dmabuf_read,
2415 	.fo_write	= dmabuf_write,
2416 	.fo_ioctl	= dmabuf_ioctl,
2417 	.fo_kqfilter	= dmabuf_kqfilter,
2418 	.fo_stat	= dmabuf_stat,
2419 	.fo_close	= dmabuf_close,
2420 	.fo_seek	= dmabuf_seek,
2421 };
2422 
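/*
 * Rough usage sketch (the names obj and myexp_dmabuf_ops are made up,
 * error handling omitted): an exporter fills in a dma_buf_export_info
 * and turns the result into a file descriptor for userland.  Only
 * .priv, .ops and .size are consumed by dma_buf_export() below.
 *
 *	struct dma_buf_export_info info = {
 *		.priv = obj,			// exporter's object
 *		.ops = &myexp_dmabuf_ops,	// hypothetical exporter ops
 *		.size = obj->size,
 *	};
 *	struct dma_buf *dmabuf = dma_buf_export(&info);
 *	int fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 */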
2423 struct dma_buf *
2424 dma_buf_export(const struct dma_buf_export_info *info)
2425 {
2426 	struct proc *p = curproc;
2427 	struct dma_buf *dmabuf;
2428 	struct file *fp;
2429 
2430 	fp = fnew(p);
2431 	if (fp == NULL)
2432 		return ERR_PTR(-ENFILE);
2433 	fp->f_type = DTYPE_DMABUF;
2434 	fp->f_ops = &dmabufops;
2435 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
2436 	dmabuf->priv = info->priv;
2437 	dmabuf->ops = info->ops;
2438 	dmabuf->size = info->size;
2439 	dmabuf->file = fp;
2440 	fp->f_data = dmabuf;
2441 	INIT_LIST_HEAD(&dmabuf->attachments);
2442 	return dmabuf;
2443 }
2444 
2445 struct dma_buf *
2446 dma_buf_get(int fd)
2447 {
2448 	struct proc *p = curproc;
2449 	struct filedesc *fdp = p->p_fd;
2450 	struct file *fp;
2451 
2452 	if ((fp = fd_getfile(fdp, fd)) == NULL)
2453 		return ERR_PTR(-EBADF);
2454 
2455 	if (fp->f_type != DTYPE_DMABUF) {
2456 		FRELE(fp, p);
2457 		return ERR_PTR(-EINVAL);
2458 	}
2459 
2460 	return fp->f_data;
2461 }
2462 
2463 void
2464 dma_buf_put(struct dma_buf *dmabuf)
2465 {
2466 	KASSERT(dmabuf);
2467 	KASSERT(dmabuf->file);
2468 
2469 	FRELE(dmabuf->file, curproc);
2470 }
2471 
2472 int
2473 dma_buf_fd(struct dma_buf *dmabuf, int flags)
2474 {
2475 	struct proc *p = curproc;
2476 	struct filedesc *fdp = p->p_fd;
2477 	struct file *fp = dmabuf->file;
2478 	int fd, cloexec, error;
2479 
2480 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
2481 
2482 	fdplock(fdp);
2483 restart:
2484 	if ((error = fdalloc(p, 0, &fd)) != 0) {
2485 		if (error == ENOSPC) {
2486 			fdexpand(p);
2487 			goto restart;
2488 		}
2489 		fdpunlock(fdp);
2490 		return -error;
2491 	}
2492 
2493 	fdinsert(fdp, fd, cloexec, fp);
2494 	fdpunlock(fdp);
2495 
2496 	return fd;
2497 }
2498 
2499 void
2500 get_dma_buf(struct dma_buf *dmabuf)
2501 {
2502 	FREF(dmabuf->file);
2503 }
2504 
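/*
 * Derive the maximum supported link speed from the PCIe Link
 * Capabilities register, or Link Capabilities 2 when the capability
 * structure is version 2 or later, and map it to the Linux
 * pci_bus_speed enum.
 */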
2505 enum pci_bus_speed
2506 pcie_get_speed_cap(struct pci_dev *pdev)
2507 {
2508 	pci_chipset_tag_t	pc;
2509 	pcitag_t		tag;
2510 	int			pos;
2511 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
2512 	pcireg_t		id;
2513 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
2514 	int			bus, device, function;
2515 
2516 	if (pdev == NULL)
2517 		return PCI_SPEED_UNKNOWN;
2518 
2519 	pc = pdev->pc;
2520 	tag = pdev->tag;
2521 
2522 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2523 	    &pos, NULL))
2524 		return PCI_SPEED_UNKNOWN;
2525 
2526 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2527 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2528 
2529 	/* we've been informed VIA and ServerWorks (RCC) don't make the cut */
2530 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
2531 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
2532 		return PCI_SPEED_UNKNOWN;
2533 
2534 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2535 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
2536 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
2537 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
2538 
2539 	lnkcap &= 0x0f;
2540 	lnkcap2 &= 0xfe;
2541 
2542 	if (lnkcap2) { /* Link Capabilities 2: PCIe 3.0 and newer */
2543 		if (lnkcap2 & 0x02)
2544 			cap = PCIE_SPEED_2_5GT;
2545 		if (lnkcap2 & 0x04)
2546 			cap = PCIE_SPEED_5_0GT;
2547 		if (lnkcap2 & 0x08)
2548 			cap = PCIE_SPEED_8_0GT;
2549 		if (lnkcap2 & 0x10)
2550 			cap = PCIE_SPEED_16_0GT;
2551 		if (lnkcap2 & 0x20)
2552 			cap = PCIE_SPEED_32_0GT;
2553 		if (lnkcap2 & 0x40)
2554 			cap = PCIE_SPEED_64_0GT;
2555 	} else {
2556 		if (lnkcap & 0x01)
2557 			cap = PCIE_SPEED_2_5GT;
2558 		if (lnkcap & 0x02)
2559 			cap = PCIE_SPEED_5_0GT;
2560 	}
2561 
2562 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
2563 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
2564 	    lnkcap2);
2565 	return cap;
2566 }
2567 
2568 enum pcie_link_width
2569 pcie_get_width_cap(struct pci_dev *pdev)
2570 {
2571 	pci_chipset_tag_t	pc = pdev->pc;
2572 	pcitag_t		tag = pdev->tag;
2573 	int			pos;
2574 	pcireg_t		lnkcap = 0;
2575 	pcireg_t		id;
2576 	int			bus, device, function;
2577 
2578 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2579 	    &pos, NULL))
2580 		return PCIE_LNK_WIDTH_UNKNOWN;
2581 
2582 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2583 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2584 
2585 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2586 
2587 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
2588 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
2589 
2590 	if (lnkcap)
2591 		return (lnkcap & 0x3f0) >> 4;
2592 	return PCIE_LNK_WIDTH_UNKNOWN;
2593 }
2594 
2595 bool
2596 pcie_aspm_enabled(struct pci_dev *pdev)
2597 {
2598 	pci_chipset_tag_t	pc = pdev->pc;
2599 	pcitag_t		tag = pdev->tag;
2600 	int			pos;
2601 	pcireg_t		lcsr;
2602 
2603 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2604 	    &pos, NULL))
2605 		return false;
2606 
2607 	lcsr = pci_conf_read(pc, tag, pos + PCI_PCIE_LCSR);
2608 	if ((lcsr & (PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1)) != 0)
2609 		return true;
2610 
2611 	return false;
2612 }
2613 
2614 int
2615 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
2616     int sync, void *key)
2617 {
2618 	wakeup(wqe);
2619 	if (wqe->private)
2620 		wake_up_process(wqe->private);
2621 	list_del_init(&wqe->entry);
2622 	return 0;
2623 }
2624 
2625 static wait_queue_head_t bit_waitq;
2626 wait_queue_head_t var_waitq;
2627 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
2628 
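/*
 * Minimal wait_on_bit()/wake_up_bit() emulation: all waiters share one
 * mutex and sleep on the address of the word itself, so wake_up_bit()
 * simply wakes every waiter sleeping on that word.  wait_on_bit_timeout()
 * takes its timeout in ticks.
 */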
2629 int
2630 wait_on_bit(unsigned long *word, int bit, unsigned mode)
2631 {
2632 	int err;
2633 
2634 	if (!test_bit(bit, word))
2635 		return 0;
2636 
2637 	mtx_enter(&wait_bit_mtx);
2638 	while (test_bit(bit, word)) {
2639 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
2640 		    INFSLP);
2641 		if (err) {
2642 			mtx_leave(&wait_bit_mtx);
2643 			return 1;
2644 		}
2645 	}
2646 	mtx_leave(&wait_bit_mtx);
2647 	return 0;
2648 }
2649 
2650 int
2651 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
2652 {
2653 	int err;
2654 
2655 	if (!test_bit(bit, word))
2656 		return 0;
2657 
2658 	mtx_enter(&wait_bit_mtx);
2659 	while (test_bit(bit, word)) {
2660 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
2661 		if (err) {
2662 			mtx_leave(&wait_bit_mtx);
2663 			return 1;
2664 		}
2665 	}
2666 	mtx_leave(&wait_bit_mtx);
2667 	return 0;
2668 }
2669 
2670 void
2671 wake_up_bit(void *word, int bit)
2672 {
2673 	mtx_enter(&wait_bit_mtx);
2674 	wakeup(word);
2675 	mtx_leave(&wait_bit_mtx);
2676 }
2677 
2678 void
2679 clear_and_wake_up_bit(int bit, void *word)
2680 {
2681 	clear_bit(bit, word);
2682 	wake_up_bit(word, bit);
2683 }
2684 
2685 wait_queue_head_t *
2686 bit_waitqueue(void *word, int bit)
2687 {
2688 	/* XXX hash table of wait queues? */
2689 	return &bit_waitq;
2690 }
2691 
2692 wait_queue_head_t *
2693 __var_waitqueue(void *p)
2694 {
2695 	/* XXX hash table of wait queues? */
2696 	return &bit_waitq;
2697 }
2698 
2699 struct workqueue_struct *system_wq;
2700 struct workqueue_struct *system_highpri_wq;
2701 struct workqueue_struct *system_unbound_wq;
2702 struct workqueue_struct *system_long_wq;
2703 struct taskq *taskletq;
2704 
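/*
 * Set up the Linux-style "system" workqueues as 4-thread IPL_HIGH
 * taskq(9)s, a single-threaded taskq for tasklets, the shared bit/var
 * wait queues, the idr_entry pool, and a page of KVA reserved for
 * kmap_atomic().
 */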
2705 void
2706 drm_linux_init(void)
2707 {
2708 	system_wq = (struct workqueue_struct *)
2709 	    taskq_create("drmwq", 4, IPL_HIGH, 0);
2710 	system_highpri_wq = (struct workqueue_struct *)
2711 	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
2712 	system_unbound_wq = (struct workqueue_struct *)
2713 	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
2714 	system_long_wq = (struct workqueue_struct *)
2715 	    taskq_create("drmlwq", 4, IPL_HIGH, 0);
2716 
2717 	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
2718 
2719 	init_waitqueue_head(&bit_waitq);
2720 	init_waitqueue_head(&var_waitq);
2721 
2722 	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
2723 	    "idrpl", NULL);
2724 
2725 	kmap_atomic_va =
2726 	    (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
2727 }
2728 
2729 void
2730 drm_linux_exit(void)
2731 {
2732 	pool_destroy(&idr_pool);
2733 
2734 	taskq_destroy(taskletq);
2735 
2736 	taskq_destroy((struct taskq *)system_long_wq);
2737 	taskq_destroy((struct taskq *)system_unbound_wq);
2738 	taskq_destroy((struct taskq *)system_highpri_wq);
2739 	taskq_destroy((struct taskq *)system_wq);
2740 }
2741 
2742 #define PCIE_ECAP_RESIZE_BAR	0x15
2743 #define RBCAP0			0x04
2744 #define RBCTRL0			0x08
2745 #define RBCTRL_BARINDEX_MASK	0x07
2746 #define RBCTRL_BARSIZE_MASK	0x1f00
2747 #define RBCTRL_BARSIZE_SHIFT	8
2748 
2749 /* size in MB is 1 << nsize */
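/*
 * e.g. nsize == 8 requests a 256 MB BAR; support for that size is
 * advertised by bit (nsize + 4) in RBCAP0, which is what is checked below.
 */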
2750 int
2751 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
2752 {
2753 	pcireg_t	reg;
2754 	uint32_t	offset, capid;
2755 
2756 	KASSERT(bar == 0);
2757 
2758 	offset = PCI_PCIE_ECAP;
2759 
2760 	/* search PCI Express Extended Capabilities */
2761 	do {
2762 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
2763 		capid = PCI_PCIE_ECAP_ID(reg);
2764 		if (capid == PCIE_ECAP_RESIZE_BAR)
2765 			break;
2766 		offset = PCI_PCIE_ECAP_NEXT(reg);
2767 	} while (capid != 0);
2768 
2769 	if (capid == 0) {
2770 		printf("%s: could not find resize bar cap!\n", __func__);
2771 		return -ENOTSUP;
2772 	}
2773 
2774 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
2775 
2776 	if ((reg & (1 << (nsize + 4))) == 0) {
2777 		printf("%s: size not supported\n", __func__);
2778 		return -ENOTSUP;
2779 	}
2780 
2781 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2782 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2783 		printf("%s: BAR index not 0\n", __func__);
2784 		return -EINVAL;
2785 	}
2786 
2787 	reg &= ~RBCTRL_BARSIZE_MASK;
2788 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2789 
2790 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2791 
2792 	return 0;
2793 }
2794 
2795 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2796 
2797 int
2798 register_shrinker(struct shrinker *shrinker, const char *format, ...)
2799 {
2800 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2801 	return 0;
2802 }
2803 
2804 void
2805 unregister_shrinker(struct shrinker *shrinker)
2806 {
2807 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2808 }
2809 
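/*
 * Memory pressure hook: ask each registered shrinker in turn to scan and
 * free up to npages objects until the request is met or the list is
 * exhausted.
 */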
2810 void
2811 drmbackoff(long npages)
2812 {
2813 	struct shrink_control sc;
2814 	struct shrinker *shrinker;
2815 	u_long ret;
2816 
2817 	shrinker = TAILQ_FIRST(&shrinkers);
2818 	while (shrinker && npages > 0) {
2819 		sc.nr_to_scan = npages;
2820 		ret = shrinker->scan_objects(shrinker, &sc);
2821 		npages -= ret;
2822 		shrinker = TAILQ_NEXT(shrinker, next);
2823 	}
2824 }
2825 
2826 void *
2827 bitmap_zalloc(u_int n, gfp_t flags)
2828 {
2829 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2830 }
2831 
2832 void
2833 bitmap_free(void *p)
2834 {
2835 	kfree(p);
2836 }
2837 
2838 int
2839 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2840 {
2841 	if (atomic_add_unless(v, -1, 1))
2842 		return 0;
2843 
2844 	rw_enter_write(lock);
2845 	if (atomic_dec_return(v) == 0)
2846 		return 1;
2847 	rw_exit_write(lock);
2848 	return 0;
2849 }
2850 
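/*
 * printk() with Linux level prefixes: a leading SOH ('\001') byte is
 * followed by a single digit giving the level.  Unless DRMDEBUG is
 * defined, levels KERN_INFO ('6') through '9' are dropped; otherwise the
 * two-byte prefix is stripped and the rest is handed to vprintf().
 */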
2851 int
2852 printk(const char *fmt, ...)
2853 {
2854 	int ret, level;
2855 	va_list ap;
2856 
2857 	if (fmt != NULL && *fmt == '\001') {
2858 		level = fmt[1];
2859 #ifndef DRMDEBUG
2860 		if (level >= KERN_INFO[1] && level <= '9')
2861 			return 0;
2862 #endif
2863 		fmt += 2;
2864 	}
2865 
2866 	va_start(ap, fmt);
2867 	ret = vprintf(fmt, ap);
2868 	va_end(ap);
2869 
2870 	return ret;
2871 }
2872 
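/*
 * Simplified interval tree: nodes live in a plain red-black tree ordered
 * by start address, and interval_tree_iter_first() does a linear scan for
 * the first overlap, so lookups are O(n) rather than the O(log n) of
 * Linux's augmented rbtree.  That appears to be sufficient for the DRM
 * users of this interface.
 */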
2873 #define START(node) ((node)->start)
2874 #define LAST(node) ((node)->last)
2875 
2876 struct interval_tree_node *
2877 interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
2878     unsigned long last)
2879 {
2880 	struct interval_tree_node *node;
2881 	struct rb_node *rb;
2882 
2883 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
2884 		node = rb_entry(rb, typeof(*node), rb);
2885 		if (LAST(node) >= start && START(node) <= last)
2886 			return node;
2887 	}
2888 	return NULL;
2889 }
2890 
2891 void
2892 interval_tree_remove(struct interval_tree_node *node,
2893     struct rb_root_cached *root)
2894 {
2895 	rb_erase_cached(&node->rb, root);
2896 }
2897 
2898 void
2899 interval_tree_insert(struct interval_tree_node *node,
2900     struct rb_root_cached *root)
2901 {
2902 	struct rb_node **iter = &root->rb_root.rb_node;
2903 	struct rb_node *parent = NULL;
2904 	struct interval_tree_node *iter_node;
2905 
2906 	while (*iter) {
2907 		parent = *iter;
2908 		iter_node = rb_entry(*iter, struct interval_tree_node, rb);
2909 
2910 		if (node->start < iter_node->start)
2911 			iter = &(*iter)->rb_left;
2912 		else
2913 			iter = &(*iter)->rb_right;
2914 	}
2915 
2916 	rb_link_node(&node->rb, parent, iter);
2917 	rb_insert_color_cached(&node->rb, root, false);
2918 }
2919 
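/*
 * sync_file file operations: a sync_file is a DTYPE_SYNC struct file
 * holding a reference to a single dma_fence.  As with dma-bufs, only
 * stat, close and a trivial seek do anything useful.
 */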
2920 int
2921 syncfile_read(struct file *fp, struct uio *uio, int fflags)
2922 {
2923 	return ENXIO;
2924 }
2925 
2926 int
2927 syncfile_write(struct file *fp, struct uio *uio, int fflags)
2928 {
2929 	return ENXIO;
2930 }
2931 
2932 int
2933 syncfile_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2934 {
2935 	return ENOTTY;
2936 }
2937 
2938 int
2939 syncfile_kqfilter(struct file *fp, struct knote *kn)
2940 {
2941 	return EINVAL;
2942 }
2943 
2944 int
2945 syncfile_stat(struct file *fp, struct stat *st, struct proc *p)
2946 {
2947 	memset(st, 0, sizeof(*st));
2948 	st->st_mode = S_IFIFO;	/* XXX */
2949 	return 0;
2950 }
2951 
2952 int
2953 syncfile_close(struct file *fp, struct proc *p)
2954 {
2955 	struct sync_file *sf = fp->f_data;
2956 
2957 	dma_fence_put(sf->fence);
2958 	fp->f_data = NULL;
2959 	free(sf, M_DRM, sizeof(struct sync_file));
2960 	return 0;
2961 }
2962 
2963 int
2964 syncfile_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2965 {
2966 	off_t newoff;
2967 
2968 	if (*offset != 0)
2969 		return EINVAL;
2970 
2971 	switch (whence) {
2972 	case SEEK_SET:
2973 		newoff = 0;
2974 		break;
2975 	case SEEK_END:
2976 		newoff = 0;
2977 		break;
2978 	default:
2979 		return EINVAL;
2980 	}
2981 	mtx_enter(&fp->f_mtx);
2982 	fp->f_offset = newoff;
2983 	mtx_leave(&fp->f_mtx);
2984 	*offset = newoff;
2985 	return 0;
2986 }
2987 
2988 const struct fileops syncfileops = {
2989 	.fo_read	= syncfile_read,
2990 	.fo_write	= syncfile_write,
2991 	.fo_ioctl	= syncfile_ioctl,
2992 	.fo_kqfilter	= syncfile_kqfilter,
2993 	.fo_stat	= syncfile_stat,
2994 	.fo_close	= syncfile_close,
2995 	.fo_seek	= syncfile_seek,
2996 };
2997 
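/*
 * Rough sketch of how DRM code exports a fence to userland as a sync
 * file (error paths with put_unused_fd()/fput() omitted):
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	sf = sync_file_create(fence);
 *	fd_install(fd, sf->file);
 *
 * fd_install() and fput() below only act on DTYPE_SYNC files, which
 * appear to be their only callers in this tree.
 */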
2998 void
2999 fd_install(int fd, struct file *fp)
3000 {
3001 	struct proc *p = curproc;
3002 	struct filedesc *fdp = p->p_fd;
3003 
3004 	if (fp->f_type != DTYPE_SYNC)
3005 		return;
3006 
3007 	fdplock(fdp);
3008 	/* all callers use get_unused_fd_flags(O_CLOEXEC) */
3009 	fdinsert(fdp, fd, UF_EXCLOSE, fp);
3010 	fdpunlock(fdp);
3011 }
3012 
3013 void
3014 fput(struct file *fp)
3015 {
3016 	if (fp->f_type != DTYPE_SYNC)
3017 		return;
3018 
3019 	FRELE(fp, curproc);
3020 }
3021 
3022 int
3023 get_unused_fd_flags(unsigned int flags)
3024 {
3025 	struct proc *p = curproc;
3026 	struct filedesc *fdp = p->p_fd;
3027 	int error, fd;
3028 
3029 	KASSERT((flags & O_CLOEXEC) != 0);
3030 
3031 	fdplock(fdp);
3032 retryalloc:
3033 	if ((error = fdalloc(p, 0, &fd)) != 0) {
3034 		if (error == ENOSPC) {
3035 			fdexpand(p);
3036 			goto retryalloc;
3037 		}
3038 		fdpunlock(fdp);
3039 		return -1;
3040 	}
3041 	fdpunlock(fdp);
3042 
3043 	return fd;
3044 }
3045 
3046 void
3047 put_unused_fd(int fd)
3048 {
3049 	struct filedesc *fdp = curproc->p_fd;
3050 
3051 	fdplock(fdp);
3052 	fdremove(fdp, fd);
3053 	fdpunlock(fdp);
3054 }
3055 
3056 struct dma_fence *
3057 sync_file_get_fence(int fd)
3058 {
3059 	struct proc *p = curproc;
3060 	struct filedesc *fdp = p->p_fd;
3061 	struct file *fp;
3062 	struct sync_file *sf;
3063 	struct dma_fence *f;
3064 
3065 	if ((fp = fd_getfile(fdp, fd)) == NULL)
3066 		return NULL;
3067 
3068 	if (fp->f_type != DTYPE_SYNC) {
3069 		FRELE(fp, p);
3070 		return NULL;
3071 	}
3072 	sf = fp->f_data;
3073 	f = dma_fence_get(sf->fence);
3074 	FRELE(sf->file, p);
3075 	return f;
3076 }
3077 
3078 struct sync_file *
3079 sync_file_create(struct dma_fence *fence)
3080 {
3081 	struct proc *p = curproc;
3082 	struct sync_file *sf;
3083 	struct file *fp;
3084 
3085 	fp = fnew(p);
3086 	if (fp == NULL)
3087 		return NULL;
3088 	fp->f_type = DTYPE_SYNC;
3089 	fp->f_ops = &syncfileops;
3090 	sf = malloc(sizeof(struct sync_file), M_DRM, M_WAITOK | M_ZERO);
3091 	sf->file = fp;
3092 	sf->fence = dma_fence_get(fence);
3093 	fp->f_data = sf;
3094 	return sf;
3095 }
3096 
3097 bool
3098 drm_firmware_drivers_only(void)
3099 {
3100 	return false;
3101 }
3102