1 /*	$OpenBSD: drm_linux.c,v 1.98 2023/06/01 10:21:26 claudio Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30 
31 #include <dev/pci/ppbreg.h>
32 
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/dma-fence-chain.h>
39 #include <linux/interrupt.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/scatterlist.h>
43 #include <linux/i2c.h>
44 #include <linux/pci.h>
45 #include <linux/notifier.h>
46 #include <linux/backlight.h>
47 #include <linux/shrinker.h>
48 #include <linux/fb.h>
49 #include <linux/xarray.h>
50 #include <linux/interval_tree.h>
51 #include <linux/kthread.h>
52 #include <linux/processor.h>
53 #include <linux/sync_file.h>
54 
55 #include <drm/drm_device.h>
56 #include <drm/drm_connector.h>
57 #include <drm/drm_print.h>
58 
59 #if defined(__amd64__) || defined(__i386__)
60 #include "bios.h"
61 #endif
62 
63 /* allowed to sleep */
64 void
65 tasklet_unlock_wait(struct tasklet_struct *ts)
66 {
67 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
68 		cpu_relax();
69 }
70 
71 /* must not sleep */
72 void
73 tasklet_unlock_spin_wait(struct tasklet_struct *ts)
74 {
75 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
76 		cpu_relax();
77 }
78 
79 void
80 tasklet_run(void *arg)
81 {
82 	struct tasklet_struct *ts = arg;
83 
84 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
85 	if (tasklet_trylock(ts)) {
86 		if (!atomic_read(&ts->count)) {
87 			if (ts->use_callback)
88 				ts->callback(ts);
89 			else
90 				ts->func(ts->data);
91 		}
92 		tasklet_unlock(ts);
93 	}
94 }
95 
96 /* 32 bit powerpc lacks 64 bit atomics */
97 #if defined(__powerpc__) && !defined(__powerpc64__)
98 struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
99 #endif
100 
101 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
102 volatile struct proc *sch_proc;
103 volatile void *sch_ident;
104 int sch_priority;
105 
106 void
107 set_current_state(int state)
108 {
109 	if (sch_ident != curproc)
110 		mtx_enter(&sch_mtx);
111 	MUTEX_ASSERT_LOCKED(&sch_mtx);
112 	sch_ident = sch_proc = curproc;
113 	sch_priority = state;
114 }
115 
116 void
117 __set_current_state(int state)
118 {
119 	KASSERT(state == TASK_RUNNING);
120 	if (sch_ident == curproc) {
121 		MUTEX_ASSERT_LOCKED(&sch_mtx);
122 		sch_ident = NULL;
123 		mtx_leave(&sch_mtx);
124 	}
125 }
126 
127 void
128 schedule(void)
129 {
130 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
131 }
132 
133 long
134 schedule_timeout(long timeout)
135 {
136 	struct sleep_state sls;
137 	unsigned long deadline;
138 	int wait, spl, timo = 0;
139 
140 	MUTEX_ASSERT_LOCKED(&sch_mtx);
141 	KASSERT(!cold);
142 
143 	if (timeout != MAX_SCHEDULE_TIMEOUT)
144 		timo = timeout;
145 	sleep_setup(&sls, sch_ident, sch_priority, "schto", timo);
146 
147 	wait = (sch_proc == curproc && timeout > 0);
148 
149 	spl = MUTEX_OLDIPL(&sch_mtx);
150 	MUTEX_OLDIPL(&sch_mtx) = splsched();
151 	mtx_leave(&sch_mtx);
152 
153 	if (timeout != MAX_SCHEDULE_TIMEOUT)
154 		deadline = jiffies + timeout;
155 	sleep_finish(&sls, wait);
156 	if (timeout != MAX_SCHEDULE_TIMEOUT)
157 		timeout = deadline - jiffies;
158 
159 	mtx_enter(&sch_mtx);
160 	MUTEX_OLDIPL(&sch_mtx) = spl;
161 	sch_ident = curproc;
162 
163 	return timeout > 0 ? timeout : 0;
164 }
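
/*
 * Illustrative sketch (not part of the original file) of the Linux-style
 * wait loop the shims above are meant to support.  A waiter publishes its
 * state with set_current_state(), re-checks its condition and then calls
 * schedule_timeout(); some other context makes the condition true and
 * calls wake_up_process() on the sleeping proc.  "cond" and "waiter" are
 * placeholders, not real symbols.
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	while (!cond) {
 *		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 *	cond = 1;
 *	wake_up_process(waiter);
 */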
165 
166 long
167 schedule_timeout_uninterruptible(long timeout)
168 {
169 	tsleep(curproc, PWAIT, "schtou", timeout);
170 	return 0;
171 }
172 
173 int
174 wake_up_process(struct proc *p)
175 {
176 	int s, rv;
177 
178 	SCHED_LOCK(s);
179 	atomic_cas_ptr(&sch_proc, p, NULL);
180 	rv = wakeup_proc(p, NULL, 0);
181 	SCHED_UNLOCK(s);
182 	return rv;
183 }
184 
185 void
186 flush_workqueue(struct workqueue_struct *wq)
187 {
188 	if (cold)
189 		return;
190 
191 	if (wq)
192 		taskq_barrier((struct taskq *)wq);
193 }
194 
195 bool
196 flush_work(struct work_struct *work)
197 {
198 	if (cold)
199 		return false;
200 
201 	if (work->tq)
202 		taskq_barrier(work->tq);
203 	return false;
204 }
205 
206 bool
207 flush_delayed_work(struct delayed_work *dwork)
208 {
209 	bool ret = false;
210 
211 	if (cold)
212 		return false;
213 
214 	while (timeout_pending(&dwork->to)) {
215 		tsleep(dwork, PWAIT, "fldwto", 1);
216 		ret = true;
217 	}
218 
219 	if (dwork->tq)
220 		taskq_barrier(dwork->tq);
221 	return ret;
222 }
223 
224 struct kthread {
225 	int (*func)(void *);
226 	void *data;
227 	struct proc *proc;
228 	volatile u_int flags;
229 #define KTHREAD_SHOULDSTOP	0x0000001
230 #define KTHREAD_STOPPED		0x0000002
231 #define KTHREAD_SHOULDPARK	0x0000004
232 #define KTHREAD_PARKED		0x0000008
233 	LIST_ENTRY(kthread) next;
234 };
235 
236 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
237 
238 void
239 kthread_func(void *arg)
240 {
241 	struct kthread *thread = arg;
242 	int ret;
243 
244 	ret = thread->func(thread->data);
245 	thread->flags |= KTHREAD_STOPPED;
246 	wakeup(thread);
247 	kthread_exit(ret);
248 }
249 
250 struct proc *
251 kthread_run(int (*func)(void *), void *data, const char *name)
252 {
253 	struct kthread *thread;
254 
255 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
256 	thread->func = func;
257 	thread->data = data;
258 	thread->flags = 0;
259 
260 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
261 		free(thread, M_DRM, sizeof(*thread));
262 		return ERR_PTR(-ENOMEM);
263 	}
264 
265 	LIST_INSERT_HEAD(&kthread_list, thread, next);
266 	return thread->proc;
267 }
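
/*
 * Illustrative example (not part of the original file): a thread body that
 * cooperates with the stop/park protocol implemented below.  The names
 * mydrv_thread and sc are hypothetical.
 *
 *	int
 *	mydrv_thread(void *arg)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			(do one unit of work, or sleep until woken)
 *		}
 *		return 0;
 *	}
 *
 *	struct proc *p = kthread_run(mydrv_thread, sc, "mydrvkt");
 *	...
 *	kthread_stop(p);	(wakes the thread and waits for it to exit)
 */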
268 
269 struct kthread_worker *
270 kthread_create_worker(unsigned int flags, const char *fmt, ...)
271 {
272 	char name[MAXCOMLEN+1];
273 	va_list ap;
274 
275 	struct kthread_worker *w = malloc(sizeof(*w), M_DRM, M_WAITOK);
276 	va_start(ap, fmt);
277 	vsnprintf(name, sizeof(name), fmt, ap);
278 	va_end(ap);
279 	w->tq = taskq_create(name, 1, IPL_HIGH, 0);
280 
281 	return w;
282 }
283 
284 void
285 kthread_destroy_worker(struct kthread_worker *worker)
286 {
287 	taskq_destroy(worker->tq);
288 	free(worker, M_DRM, sizeof(*worker));
289 
290 }
291 
292 void
293 kthread_init_work(struct kthread_work *work, void (*func)(struct kthread_work *))
294 {
295 	work->tq = NULL;
296 	task_set(&work->task, (void (*)(void *))func, work);
297 }
298 
299 bool
300 kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
301 {
302 	work->tq = worker->tq;
303 	return task_add(work->tq, &work->task);
304 }
305 
306 bool
307 kthread_cancel_work_sync(struct kthread_work *work)
308 {
309 	return task_del(work->tq, &work->task);
310 }
311 
312 void
313 kthread_flush_work(struct kthread_work *work)
314 {
315 	if (cold)
316 		return;
317 
318 	if (work->tq)
319 		taskq_barrier(work->tq);
320 }
321 
322 void
323 kthread_flush_worker(struct kthread_worker *worker)
324 {
325 	if (cold)
326 		return;
327 
328 	if (worker->tq)
329 		taskq_barrier(worker->tq);
330 }
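
/*
 * Illustrative example (not part of the original file) of the
 * kthread_worker shims above, which map a worker onto a single-threaded
 * taskq.  mydrv_work_fn is a hypothetical
 * void (*)(struct kthread_work *) handler.
 *
 *	struct kthread_worker *w;
 *	struct kthread_work work;
 *
 *	w = kthread_create_worker(0, "mydrvwq");
 *	kthread_init_work(&work, mydrv_work_fn);
 *	kthread_queue_work(w, &work);
 *	...
 *	kthread_flush_work(&work);	(wait for the work to finish)
 *	kthread_destroy_worker(w);
 */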
331 
332 struct kthread *
333 kthread_lookup(struct proc *p)
334 {
335 	struct kthread *thread;
336 
337 	LIST_FOREACH(thread, &kthread_list, next) {
338 		if (thread->proc == p)
339 			break;
340 	}
341 	KASSERT(thread);
342 
343 	return thread;
344 }
345 
346 int
347 kthread_should_park(void)
348 {
349 	struct kthread *thread = kthread_lookup(curproc);
350 	return (thread->flags & KTHREAD_SHOULDPARK);
351 }
352 
353 void
354 kthread_parkme(void)
355 {
356 	struct kthread *thread = kthread_lookup(curproc);
357 
358 	while (thread->flags & KTHREAD_SHOULDPARK) {
359 		thread->flags |= KTHREAD_PARKED;
360 		wakeup(thread);
361 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
362 		thread->flags &= ~KTHREAD_PARKED;
363 	}
364 }
365 
366 void
367 kthread_park(struct proc *p)
368 {
369 	struct kthread *thread = kthread_lookup(p);
370 
371 	while ((thread->flags & KTHREAD_PARKED) == 0) {
372 		thread->flags |= KTHREAD_SHOULDPARK;
373 		wake_up_process(thread->proc);
374 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
375 	}
376 }
377 
378 void
379 kthread_unpark(struct proc *p)
380 {
381 	struct kthread *thread = kthread_lookup(p);
382 
383 	thread->flags &= ~KTHREAD_SHOULDPARK;
384 	wakeup(thread);
385 }
386 
387 int
388 kthread_should_stop(void)
389 {
390 	struct kthread *thread = kthread_lookup(curproc);
391 	return (thread->flags & KTHREAD_SHOULDSTOP);
392 }
393 
394 void
395 kthread_stop(struct proc *p)
396 {
397 	struct kthread *thread = kthread_lookup(p);
398 
399 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
400 		thread->flags |= KTHREAD_SHOULDSTOP;
401 		kthread_unpark(p);
402 		wake_up_process(thread->proc);
403 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
404 	}
405 	LIST_REMOVE(thread, next);
406 	free(thread, M_DRM, sizeof(*thread));
407 }
408 
409 #if NBIOS > 0
410 extern char smbios_board_vendor[];
411 extern char smbios_board_prod[];
412 extern char smbios_board_serial[];
413 #endif
414 
415 bool
416 dmi_match(int slot, const char *str)
417 {
418 	switch (slot) {
419 	case DMI_SYS_VENDOR:
420 		if (hw_vendor != NULL &&
421 		    !strcmp(hw_vendor, str))
422 			return true;
423 		break;
424 	case DMI_PRODUCT_NAME:
425 		if (hw_prod != NULL &&
426 		    !strcmp(hw_prod, str))
427 			return true;
428 		break;
429 	case DMI_PRODUCT_VERSION:
430 		if (hw_ver != NULL &&
431 		    !strcmp(hw_ver, str))
432 			return true;
433 		break;
434 #if NBIOS > 0
435 	case DMI_BOARD_VENDOR:
436 		if (strcmp(smbios_board_vendor, str) == 0)
437 			return true;
438 		break;
439 	case DMI_BOARD_NAME:
440 		if (strcmp(smbios_board_prod, str) == 0)
441 			return true;
442 		break;
443 	case DMI_BOARD_SERIAL:
444 		if (strcmp(smbios_board_serial, str) == 0)
445 			return true;
446 		break;
447 #else
448 	case DMI_BOARD_VENDOR:
449 		if (hw_vendor != NULL &&
450 		    !strcmp(hw_vendor, str))
451 			return true;
452 		break;
453 	case DMI_BOARD_NAME:
454 		if (hw_prod != NULL &&
455 		    !strcmp(hw_prod, str))
456 			return true;
457 		break;
458 #endif
459 	case DMI_NONE:
460 	default:
461 		return false;
462 	}
463 
464 	return false;
465 }
466 
467 static bool
468 dmi_found(const struct dmi_system_id *dsi)
469 {
470 	int i, slot;
471 
472 	for (i = 0; i < nitems(dsi->matches); i++) {
473 		slot = dsi->matches[i].slot;
474 		if (slot == DMI_NONE)
475 			break;
476 		if (!dmi_match(slot, dsi->matches[i].substr))
477 			return false;
478 	}
479 
480 	return true;
481 }
482 
483 const struct dmi_system_id *
484 dmi_first_match(const struct dmi_system_id *sysid)
485 {
486 	const struct dmi_system_id *dsi;
487 
488 	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
489 		if (dmi_found(dsi))
490 			return dsi;
491 	}
492 
493 	return NULL;
494 }
495 
496 #if NBIOS > 0
497 extern char smbios_bios_date[];
498 extern char smbios_bios_version[];
499 #endif
500 
501 const char *
502 dmi_get_system_info(int slot)
503 {
504 #if NBIOS > 0
505 	switch (slot) {
506 	case DMI_BIOS_DATE:
507 		return smbios_bios_date;
508 	case DMI_BIOS_VERSION:
509 		return smbios_bios_version;
510 	default:
511 		printf("%s slot %d not handled\n", __func__, slot);
512 	}
513 #endif
514 	return NULL;
515 }
516 
517 int
518 dmi_check_system(const struct dmi_system_id *sysid)
519 {
520 	const struct dmi_system_id *dsi;
521 	int num = 0;
522 
523 	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
524 		if (dmi_found(dsi)) {
525 			num++;
526 			if (dsi->callback && dsi->callback(dsi))
527 				break;
528 		}
529 	}
530 	return (num);
531 }
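
/*
 * Illustrative example (not part of the original file): a quirk table as
 * drm drivers pass it to dmi_check_system() or dmi_first_match().  This
 * assumes the usual Linux DMI_MATCH() initializer from the compat headers;
 * the callback, vendor and product strings are made up.
 *
 *	static const struct dmi_system_id mydrv_quirks[] = {
 *		{
 *			.callback = mydrv_quirk_cb,
 *			.ident = "Example Laptop",
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
 *				DMI_MATCH(DMI_PRODUCT_NAME, "Model X"),
 *			},
 *		},
 *		{ }	(the table ends with an all-zero entry)
 *	};
 *
 *	dmi_check_system(mydrv_quirks);
 */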
532 
533 struct vm_page *
534 alloc_pages(unsigned int gfp_mask, unsigned int order)
535 {
536 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
537 	struct uvm_constraint_range *constraint = &no_constraint;
538 	struct pglist mlist;
539 
540 	if (gfp_mask & M_CANFAIL)
541 		flags |= UVM_PLA_FAILOK;
542 	if (gfp_mask & M_ZERO)
543 		flags |= UVM_PLA_ZERO;
544 	if (gfp_mask & __GFP_DMA32)
545 		constraint = &dma_constraint;
546 
547 	TAILQ_INIT(&mlist);
548 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
549 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
550 		return NULL;
551 	return TAILQ_FIRST(&mlist);
552 }
553 
554 void
555 __free_pages(struct vm_page *page, unsigned int order)
556 {
557 	struct pglist mlist;
558 	int i;
559 
560 	TAILQ_INIT(&mlist);
561 	for (i = 0; i < (1 << order); i++)
562 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
563 	uvm_pglistfree(&mlist);
564 }
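
/*
 * Illustrative example (not part of the original file): allocating and
 * releasing an order-2 block (four physically contiguous pages) with the
 * helpers above.  GFP_KERNEL and __GFP_ZERO are assumed to come from the
 * compat gfp.h.
 *
 *	struct vm_page *pg;
 *
 *	pg = alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *	if (pg == NULL)
 *		(allocation failed)
 *	...
 *	__free_pages(pg, 2);
 */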
565 
566 void
567 __pagevec_release(struct pagevec *pvec)
568 {
569 	struct pglist mlist;
570 	int i;
571 
572 	TAILQ_INIT(&mlist);
573 	for (i = 0; i < pvec->nr; i++)
574 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
575 	uvm_pglistfree(&mlist);
576 	pagevec_reinit(pvec);
577 }
578 
579 static struct kmem_va_mode kv_physwait = {
580 	.kv_map = &phys_map,
581 	.kv_wait = 1,
582 };
583 
584 void *
585 kmap(struct vm_page *pg)
586 {
587 	vaddr_t va;
588 
589 #if defined (__HAVE_PMAP_DIRECT)
590 	va = pmap_map_direct(pg);
591 #else
592 	va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
593 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
594 	pmap_update(pmap_kernel());
595 #endif
596 	return (void *)va;
597 }
598 
599 void
600 kunmap_va(void *addr)
601 {
602 	vaddr_t va = (vaddr_t)addr;
603 
604 #if defined (__HAVE_PMAP_DIRECT)
605 	pmap_unmap_direct(va);
606 #else
607 	pmap_kremove(va, PAGE_SIZE);
608 	pmap_update(pmap_kernel());
609 	km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
610 #endif
611 }
612 
613 vaddr_t kmap_atomic_va;
614 int kmap_atomic_inuse;
615 
616 void *
617 kmap_atomic_prot(struct vm_page *pg, pgprot_t prot)
618 {
619 	KASSERT(!kmap_atomic_inuse);
620 
621 	kmap_atomic_inuse = 1;
622 	pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot,
623 	    PROT_READ | PROT_WRITE);
624 	return (void *)kmap_atomic_va;
625 }
626 
627 void
628 kunmap_atomic(void *addr)
629 {
630 	KASSERT(kmap_atomic_inuse);
631 
632 	pmap_kremove(kmap_atomic_va, PAGE_SIZE);
633 	kmap_atomic_inuse = 0;
634 }
635 
636 void *
637 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
638      pgprot_t prot)
639 {
640 	vaddr_t va;
641 	paddr_t pa;
642 	int i;
643 
644 	va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
645 	    &kd_nowait);
646 	if (va == 0)
647 		return NULL;
648 	for (i = 0; i < npages; i++) {
649 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
650 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
651 		    PROT_READ | PROT_WRITE,
652 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
653 		pmap_update(pmap_kernel());
654 	}
655 
656 	return (void *)va;
657 }
658 
659 void
660 vunmap(void *addr, size_t size)
661 {
662 	vaddr_t va = (vaddr_t)addr;
663 
664 	pmap_remove(pmap_kernel(), va, va + size);
665 	pmap_update(pmap_kernel());
666 	km_free((void *)va, size, &kv_any, &kp_none);
667 }
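
/*
 * Illustrative example (not part of the original file): mapping a set of
 * scattered pages into one contiguous kernel VA range and tearing the
 * mapping down again.  Unlike Linux, vunmap() here takes the mapping size.
 * PAGE_KERNEL is assumed to come from the compat headers.
 *
 *	void *va;
 *
 *	va = vmap(pages, npages, 0, PAGE_KERNEL);
 *	if (va == NULL)
 *		(vmap uses kd_nowait and can fail)
 *	...
 *	vunmap(va, npages * PAGE_SIZE);
 */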
668 
669 bool
670 is_vmalloc_addr(const void *p)
671 {
672 	vaddr_t min, max, addr;
673 
674 	min = vm_map_min(kernel_map);
675 	max = vm_map_max(kernel_map);
676 	addr = (vaddr_t)p;
677 
678 	if (addr >= min && addr <= max)
679 		return true;
680 	else
681 		return false;
682 }
683 
684 void
685 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
686     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
687 {
688 	const uint8_t *cbuf = buf;
689 	int i;
690 
691 	for (i = 0; i < len; i++) {
692 		if ((i % rowsize) == 0)
693 			printf("%s", prefix_str);
694 		printf("%02x", cbuf[i]);
695 		if ((i % rowsize) == (rowsize - 1))
696 			printf("\n");
697 		else
698 			printf(" ");
699 	}
700 }
701 
702 void *
703 memchr_inv(const void *s, int c, size_t n)
704 {
705 	if (n != 0) {
706 		const unsigned char *p = s;
707 
708 		do {
709 			if (*p++ != (unsigned char)c)
710 				return ((void *)(p - 1));
711 		} while (--n != 0);
712 	}
713 	return (NULL);
714 }
715 
716 int
717 panic_cmp(struct rb_node *a, struct rb_node *b)
718 {
719 	panic(__func__);
720 }
721 
722 #undef RB_ROOT
723 #define RB_ROOT(head)	(head)->rbh_root
724 
725 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
726 
727 /*
728  * This is a fairly minimal implementation of the Linux "idr" API.  It
729  * probably isn't very efficient, and definitely isn't RCU safe.  The
730  * pre-load buffer is global instead of per-cpu; we rely on the kernel
731  * lock to make this work.  ID randomization (to make the IDs harder to
732  * guess) is currently disabled; see the "notyet" block in idr_alloc().
733  */
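
/*
 * Illustrative example (not part of the original file) of how drm code
 * uses this idr implementation to hand out integer handles for objects.
 * "handles" and "obj" are placeholders; idr_preload_end() is assumed to
 * come from the compat headers.
 *
 *	struct idr handles;
 *	int id;
 *
 *	idr_init(&handles);
 *
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(&handles, obj, 1, 0, GFP_KERNEL);
 *	idr_preload_end();
 *	if (id < 0)
 *		(id is -ENOMEM or -ENOSPC)
 *
 *	obj = idr_find(&handles, id);
 *	idr_remove(&handles, id);
 *	idr_destroy(&handles);
 */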
734 
735 int idr_cmp(struct idr_entry *, struct idr_entry *);
736 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
737 
738 struct pool idr_pool;
739 struct idr_entry *idr_entry_cache;
740 
741 void
742 idr_init(struct idr *idr)
743 {
744 	SPLAY_INIT(&idr->tree);
745 }
746 
747 void
748 idr_destroy(struct idr *idr)
749 {
750 	struct idr_entry *id;
751 
752 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
753 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
754 		pool_put(&idr_pool, id);
755 	}
756 }
757 
758 void
759 idr_preload(unsigned int gfp_mask)
760 {
761 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
762 
763 	KERNEL_ASSERT_LOCKED();
764 
765 	if (idr_entry_cache == NULL)
766 		idr_entry_cache = pool_get(&idr_pool, flags);
767 }
768 
769 int
770 idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
771 {
772 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
773 	struct idr_entry *id;
774 	int begin;
775 
776 	KERNEL_ASSERT_LOCKED();
777 
778 	if (idr_entry_cache) {
779 		id = idr_entry_cache;
780 		idr_entry_cache = NULL;
781 	} else {
782 		id = pool_get(&idr_pool, flags);
783 		if (id == NULL)
784 			return -ENOMEM;
785 	}
786 
787 	if (end <= 0)
788 		end = INT_MAX;
789 
790 #ifdef notyet
791 	id->id = begin = start + arc4random_uniform(end - start);
792 #else
793 	id->id = begin = start;
794 #endif
795 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
796 		if (id->id == end)
797 			id->id = start;
798 		else
799 			id->id++;
800 		if (id->id == begin) {
801 			pool_put(&idr_pool, id);
802 			return -ENOSPC;
803 		}
804 	}
805 	id->ptr = ptr;
806 	return id->id;
807 }
808 
809 void *
810 idr_replace(struct idr *idr, void *ptr, unsigned long id)
811 {
812 	struct idr_entry find, *res;
813 	void *old;
814 
815 	find.id = id;
816 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
817 	if (res == NULL)
818 		return ERR_PTR(-ENOENT);
819 	old = res->ptr;
820 	res->ptr = ptr;
821 	return old;
822 }
823 
824 void *
825 idr_remove(struct idr *idr, unsigned long id)
826 {
827 	struct idr_entry find, *res;
828 	void *ptr = NULL;
829 
830 	find.id = id;
831 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
832 	if (res) {
833 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
834 		ptr = res->ptr;
835 		pool_put(&idr_pool, res);
836 	}
837 	return ptr;
838 }
839 
840 void *
841 idr_find(struct idr *idr, unsigned long id)
842 {
843 	struct idr_entry find, *res;
844 
845 	find.id = id;
846 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
847 	if (res == NULL)
848 		return NULL;
849 	return res->ptr;
850 }
851 
852 void *
853 idr_get_next(struct idr *idr, int *id)
854 {
855 	struct idr_entry *res;
856 
857 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
858 		if (res->id >= *id) {
859 			*id = res->id;
860 			return res->ptr;
861 		}
862 	}
863 
864 	return NULL;
865 }
866 
867 int
868 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
869 {
870 	struct idr_entry *id;
871 	int ret;
872 
873 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
874 		ret = func(id->id, id->ptr, data);
875 		if (ret)
876 			return ret;
877 	}
878 
879 	return 0;
880 }
881 
882 int
883 idr_cmp(struct idr_entry *a, struct idr_entry *b)
884 {
885 	return (a->id < b->id ? -1 : a->id > b->id);
886 }
887 
888 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
889 
890 void
891 ida_init(struct ida *ida)
892 {
893 	idr_init(&ida->idr);
894 }
895 
896 void
897 ida_destroy(struct ida *ida)
898 {
899 	idr_destroy(&ida->idr);
900 }
901 
902 int
903 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
904     gfp_t gfp_mask)
905 {
906 	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
907 }
908 
909 void
910 ida_simple_remove(struct ida *ida, unsigned int id)
911 {
912 	idr_remove(&ida->idr, id);
913 }
914 
915 int
916 ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
917 {
918 	return idr_alloc(&ida->idr, NULL, min, INT_MAX, gfp);
919 }
920 
921 int
922 ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
923 {
924 	return idr_alloc(&ida->idr, NULL, 0, max - 1, gfp);
925 }
926 
927 void
928 ida_free(struct ida *ida, unsigned int id)
929 {
930 	idr_remove(&ida->idr, id);
931 }
932 
933 int
934 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
935 {
936 	return (a->id < b->id ? -1 : a->id > b->id);
937 }
938 
939 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
940 struct pool xa_pool;
941 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
942 
943 void
944 xa_init_flags(struct xarray *xa, gfp_t flags)
945 {
946 	static int initialized;
947 
948 	if (!initialized) {
949 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_NONE, 0,
950 		    "xapl", NULL);
951 		initialized = 1;
952 	}
953 	SPLAY_INIT(&xa->xa_tree);
954 	if (flags & XA_FLAGS_LOCK_IRQ)
955 		mtx_init(&xa->xa_lock, IPL_TTY);
956 	else
957 		mtx_init(&xa->xa_lock, IPL_NONE);
958 }
959 
960 void
961 xa_destroy(struct xarray *xa)
962 {
963 	struct xarray_entry *id;
964 
965 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
966 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
967 		pool_put(&xa_pool, id);
968 	}
969 }
970 
971 /* Don't wrap ids. */
972 int
973 __xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
974 {
975 	struct xarray_entry *xid;
976 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
977 	int begin;
978 
979 	if (gfp & GFP_NOWAIT) {
980 		xid = pool_get(&xa_pool, PR_NOWAIT);
981 	} else {
982 		mtx_leave(&xa->xa_lock);
983 		xid = pool_get(&xa_pool, PR_WAITOK);
984 		mtx_enter(&xa->xa_lock);
985 	}
986 
987 	if (xid == NULL)
988 		return -ENOMEM;
989 
990 	if (limit <= 0)
991 		limit = INT_MAX;
992 
993 	xid->id = begin = start;
994 
995 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
996 		if (xid->id == limit)
997 			xid->id = start;
998 		else
999 			xid->id++;
1000 		if (xid->id == begin) {
1001 			pool_put(&xa_pool, xid);
1002 			return -EBUSY;
1003 		}
1004 	}
1005 	xid->ptr = entry;
1006 	*id = xid->id;
1007 	return 0;
1008 }
1009 
1010 /*
1011  * Wrap ids and store the next id.
1012  * We walk the entire tree, so there is no need to special-case wrapping.
1013  * The only in-tree caller (i915_drm_client.c) does not use the next id.
1014  */
1015 int
1016 __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, int limit, u32 *next,
1017     gfp_t gfp)
1018 {
1019 	int r = __xa_alloc(xa, id, entry, limit, gfp);
1020 	*next = *id + 1;
1021 	return r;
1022 }
1023 
1024 void *
1025 __xa_erase(struct xarray *xa, unsigned long index)
1026 {
1027 	struct xarray_entry find, *res;
1028 	void *ptr = NULL;
1029 
1030 	find.id = index;
1031 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1032 	if (res) {
1033 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
1034 		ptr = res->ptr;
1035 		pool_put(&xa_pool, res);
1036 	}
1037 	return ptr;
1038 }
1039 
1040 void *
1041 __xa_load(struct xarray *xa, unsigned long index)
1042 {
1043 	struct xarray_entry find, *res;
1044 
1045 	find.id = index;
1046 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1047 	if (res == NULL)
1048 		return NULL;
1049 	return res->ptr;
1050 }
1051 
1052 void *
1053 __xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1054 {
1055 	struct xarray_entry find, *res;
1056 	void *prev;
1057 
1058 	if (entry == NULL)
1059 		return __xa_erase(xa, index);
1060 
1061 	find.id = index;
1062 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1063 	if (res != NULL) {
1064 		/* index exists */
1065 		/* XXX multi-slot entry updates are not implemented yet */
1066 		prev = res->ptr;
1067 		res->ptr = entry;
1068 		return prev;
1069 	}
1070 
1071 	/* index not found, add new */
1072 	if (gfp & GFP_NOWAIT) {
1073 		res = pool_get(&xa_pool, PR_NOWAIT);
1074 	} else {
1075 		mtx_leave(&xa->xa_lock);
1076 		res = pool_get(&xa_pool, PR_WAITOK);
1077 		mtx_enter(&xa->xa_lock);
1078 	}
1079 	if (res == NULL)
1080 		return XA_ERROR(-ENOMEM);
1081 	res->id = index;
1082 	res->ptr = entry;
1083 	if (SPLAY_INSERT(xarray_tree, &xa->xa_tree, res) != NULL)
1084 		return XA_ERROR(-EINVAL);
1085 	return NULL; /* no prev entry at index */
1086 }
1087 
1088 void *
1089 xa_get_next(struct xarray *xa, unsigned long *index)
1090 {
1091 	struct xarray_entry *res;
1092 
1093 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
1094 		if (res->id >= *index) {
1095 			*index = res->id;
1096 			return res->ptr;
1097 		}
1098 	}
1099 
1100 	return NULL;
1101 }
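
/*
 * Illustrative example (not part of the original file): allocating an id
 * with the xarray shims above.  The __xa_* functions expect xa_lock to be
 * held, which is why __xa_alloc() and __xa_store() drop it around a
 * sleeping pool_get(); here the mutex is taken directly.  "xa" and "obj"
 * are placeholders and XA_FLAGS_ALLOC is assumed to come from the compat
 * headers.
 *
 *	struct xarray xa;
 *	u32 id;
 *	int error;
 *
 *	xa_init_flags(&xa, XA_FLAGS_ALLOC);
 *
 *	mtx_enter(&xa.xa_lock);
 *	error = __xa_alloc(&xa, &id, obj, INT_MAX, GFP_KERNEL);
 *	obj = __xa_load(&xa, id);
 *	__xa_erase(&xa, id);
 *	mtx_leave(&xa.xa_lock);
 *
 *	xa_destroy(&xa);
 */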
1102 
1103 int
1104 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
1105 {
1106 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
1107 	    M_DRM, gfp_mask | M_ZERO);
1108 	if (table->sgl == NULL)
1109 		return -ENOMEM;
1110 	table->nents = table->orig_nents = nents;
1111 	sg_mark_end(&table->sgl[nents - 1]);
1112 	return 0;
1113 }
1114 
1115 void
1116 sg_free_table(struct sg_table *table)
1117 {
1118 	free(table->sgl, M_DRM,
1119 	    table->orig_nents * sizeof(struct scatterlist));
1120 	table->orig_nents = 0;
1121 	table->sgl = NULL;
1122 }
1123 
1124 size_t
1125 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
1126     const void *buf, size_t buflen)
1127 {
1128 	panic("%s", __func__);
1129 }
1130 
1131 int
1132 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1133 {
1134 	void *cmd = NULL;
1135 	int cmdlen = 0;
1136 	int err, ret = 0;
1137 	int op;
1138 
1139 	iic_acquire_bus(&adap->ic, 0);
1140 
1141 	while (num > 2) {
1142 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
1143 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
1144 		    msgs->buf, msgs->len, 0);
1145 		if (err) {
1146 			ret = -err;
1147 			goto fail;
1148 		}
1149 		msgs++;
1150 		num--;
1151 		ret++;
1152 	}
1153 
1154 	if (num > 1) {
1155 		cmd = msgs->buf;
1156 		cmdlen = msgs->len;
1157 		msgs++;
1158 		num--;
1159 		ret++;
1160 	}
1161 
1162 	op = (msgs->flags & I2C_M_RD) ?
1163 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
1164 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
1165 	    msgs->buf, msgs->len, 0);
1166 	if (err) {
1167 		ret = -err;
1168 		goto fail;
1169 	}
1170 	msgs++;
1171 	ret++;
1172 
1173 fail:
1174 	iic_release_bus(&adap->ic, 0);
1175 
1176 	return ret;
1177 }
1178 
1179 int
1180 __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1181 {
1182 	int ret, retries;
1183 
1184 	retries = adap->retries;
1185 retry:
1186 	if (adap->algo)
1187 		ret = adap->algo->master_xfer(adap, msgs, num);
1188 	else
1189 		ret = i2c_master_xfer(adap, msgs, num);
1190 	if (ret == -EAGAIN && retries > 0) {
1191 		retries--;
1192 		goto retry;
1193 	}
1194 
1195 	return ret;
1196 }
1197 
1198 int
1199 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1200 {
1201 	int ret;
1202 
1203 	if (adap->lock_ops)
1204 		adap->lock_ops->lock_bus(adap, 0);
1205 
1206 	ret = __i2c_transfer(adap, msgs, num);
1207 
1208 	if (adap->lock_ops)
1209 		adap->lock_ops->unlock_bus(adap, 0);
1210 
1211 	return ret;
1212 }
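
/*
 * Illustrative example (not part of the original file): the classic
 * write-register-then-read sequence expressed as two i2c_msg entries, as
 * the drm DDC/EDID and aux helpers do.  The slave address and register
 * are made up.
 *
 *	uint8_t reg = 0x00, buf[2];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(buf),
 *		    .buf = buf },
 *	};
 *
 *	if (i2c_transfer(adap, msgs, 2) != 2)
 *		(transfer failed)
 *
 * With exactly two messages, i2c_master_xfer() above turns the register
 * write into the command phase of a single iic_exec() call that ends with
 * I2C_OP_READ_WITH_STOP.
 */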
1213 
1214 int
1215 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1216 {
1217 	struct i2c_algo_bit_data *algo = adap->algo_data;
1218 	struct i2c_adapter bb;
1219 
1220 	memset(&bb, 0, sizeof(bb));
1221 	bb.ic = algo->ic;
1222 	bb.retries = adap->retries;
1223 	return i2c_master_xfer(&bb, msgs, num);
1224 }
1225 
1226 uint32_t
1227 i2c_bb_functionality(struct i2c_adapter *adap)
1228 {
1229 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1230 }
1231 
1232 struct i2c_algorithm i2c_bit_algo = {
1233 	.master_xfer = i2c_bb_master_xfer,
1234 	.functionality = i2c_bb_functionality
1235 };
1236 
1237 int
1238 i2c_bit_add_bus(struct i2c_adapter *adap)
1239 {
1240 	adap->algo = &i2c_bit_algo;
1241 	adap->retries = 3;
1242 
1243 	return 0;
1244 }
1245 
1246 #if defined(__amd64__) || defined(__i386__)
1247 
1248 /*
1249  * This is a minimal implementation of the Linux vga_get/vga_put
1250  * interface.  In all likelihood, it will only work for inteldrm(4) as
1251  * it assumes that if there is another active VGA device in the
1252  * system, it is sitting behind a PCI bridge.
1253  */
1254 
1255 extern int pci_enumerate_bus(struct pci_softc *,
1256     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1257 
1258 pcitag_t vga_bridge_tag;
1259 int vga_bridge_disabled;
1260 
1261 int
1262 vga_disable_bridge(struct pci_attach_args *pa)
1263 {
1264 	pcireg_t bhlc, bc;
1265 
1266 	if (pa->pa_domain != 0)
1267 		return 0;
1268 
1269 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1270 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1271 		return 0;
1272 
1273 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1274 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1275 		return 0;
1276 	bc &= ~PPB_BC_VGA_ENABLE;
1277 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1278 
1279 	vga_bridge_tag = pa->pa_tag;
1280 	vga_bridge_disabled = 1;
1281 
1282 	return 1;
1283 }
1284 
1285 void
1286 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1287 {
1288 	KASSERT(pdev->pci->sc_bridgetag == NULL);
1289 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1290 }
1291 
1292 void
1293 vga_put(struct pci_dev *pdev, int rsrc)
1294 {
1295 	pcireg_t bc;
1296 
1297 	if (!vga_bridge_disabled)
1298 		return;
1299 
1300 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1301 	bc |= PPB_BC_VGA_ENABLE;
1302 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1303 
1304 	vga_bridge_disabled = 0;
1305 }
1306 
1307 #endif
1308 
1309 /*
1310  * ACPI types and interfaces.
1311  */
1312 
1313 #ifdef __HAVE_ACPI
1314 #include "acpi.h"
1315 #endif
1316 
1317 #if NACPI > 0
1318 
1319 #include <dev/acpi/acpireg.h>
1320 #include <dev/acpi/acpivar.h>
1321 #include <dev/acpi/amltypes.h>
1322 #include <dev/acpi/dsdt.h>
1323 
1324 acpi_status
1325 acpi_get_table(const char *sig, int instance,
1326     struct acpi_table_header **hdr)
1327 {
1328 	struct acpi_softc *sc = acpi_softc;
1329 	struct acpi_q *entry;
1330 
1331 	KASSERT(instance == 1);
1332 
1333 	if (sc == NULL)
1334 		return AE_NOT_FOUND;
1335 
1336 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1337 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1338 			*hdr = entry->q_table;
1339 			return 0;
1340 		}
1341 	}
1342 
1343 	return AE_NOT_FOUND;
1344 }
1345 
1346 void
1347 acpi_put_table(struct acpi_table_header *hdr)
1348 {
1349 }
1350 
1351 acpi_status
1352 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1353 {
1354 	node = aml_searchname(node, name);
1355 	if (node == NULL)
1356 		return AE_NOT_FOUND;
1357 
1358 	*rnode = node;
1359 	return 0;
1360 }
1361 
1362 acpi_status
1363 acpi_get_name(acpi_handle node, int type, struct acpi_buffer *buffer)
1364 {
1365 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1366 	KASSERT(type == ACPI_FULL_PATHNAME);
1367 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1368 	return 0;
1369 }
1370 
1371 acpi_status
1372 acpi_evaluate_object(acpi_handle node, const char *name,
1373     struct acpi_object_list *params, struct acpi_buffer *result)
1374 {
1375 	struct aml_value args[4], res;
1376 	union acpi_object *obj;
1377 	uint8_t *data;
1378 	int i;
1379 
1380 	KASSERT(params->count <= nitems(args));
1381 
1382 	for (i = 0; i < params->count; i++) {
1383 		args[i].type = params->pointer[i].type;
1384 		switch (args[i].type) {
1385 		case AML_OBJTYPE_INTEGER:
1386 			args[i].v_integer = params->pointer[i].integer.value;
1387 			break;
1388 		case AML_OBJTYPE_BUFFER:
1389 			args[i].length = params->pointer[i].buffer.length;
1390 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1391 			break;
1392 		default:
1393 			printf("%s: arg type 0x%02x\n", __func__, args[i].type);
1394 			return AE_BAD_PARAMETER;
1395 		}
1396 	}
1397 
1398 	if (name) {
1399 		node = aml_searchname(node, name);
1400 		if (node == NULL)
1401 			return AE_NOT_FOUND;
1402 	}
1403 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1404 		aml_freevalue(&res);
1405 		return AE_ERROR;
1406 	}
1407 
1408 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1409 
1410 	result->length = sizeof(union acpi_object);
1411 	switch (res.type) {
1412 	case AML_OBJTYPE_BUFFER:
1413 		result->length += res.length;
1414 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1415 		obj = (union acpi_object *)result->pointer;
1416 		data = (uint8_t *)(obj + 1);
1417 		obj->type = res.type;
1418 		obj->buffer.length = res.length;
1419 		obj->buffer.pointer = data;
1420 		memcpy(data, res.v_buffer, res.length);
1421 		break;
1422 	default:
1423 		printf("%s: return type 0x%02x\n", __func__, res.type);
1424 		aml_freevalue(&res);
1425 		return AE_ERROR;
1426 	}
1427 
1428 	aml_freevalue(&res);
1429 	return 0;
1430 }
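
/*
 * Illustrative example (not part of the original file): evaluating a
 * method that takes one integer argument and returns a buffer, roughly as
 * the drm ACPI code does.  The method name is made up, error handling is
 * elided, and ACPI_TYPE_INTEGER is assumed to line up with the
 * AML_OBJTYPE_INTEGER value checked above.
 *
 *	union acpi_object arg;
 *	struct acpi_object_list params;
 *	struct acpi_buffer result;
 *
 *	arg.type = ACPI_TYPE_INTEGER;
 *	arg.integer.value = 1;
 *	params.count = 1;
 *	params.pointer = &arg;
 *	result.length = ACPI_ALLOCATE_BUFFER;
 *	result.pointer = NULL;
 *
 *	if (acpi_evaluate_object(handle, "MTHD", &params, &result) == 0) {
 *		union acpi_object *obj = result.pointer;
 *		(use obj->buffer.pointer and obj->buffer.length)
 *		free(result.pointer, M_DRM, result.length);
 *	}
 */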
1431 
1432 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1433 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1434 
1435 int
1436 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1437 {
1438 	struct acpi_bus_event event;
1439 	struct notifier_block *nb;
1440 
1441 	event.device_class = ACPI_VIDEO_CLASS;
1442 	event.type = notify;
1443 
1444 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1445 		nb->notifier_call(nb, 0, &event);
1446 	return 0;
1447 }
1448 
1449 int
1450 register_acpi_notifier(struct notifier_block *nb)
1451 {
1452 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1453 	return 0;
1454 }
1455 
1456 int
1457 unregister_acpi_notifier(struct notifier_block *nb)
1458 {
1459 	struct notifier_block *tmp;
1460 
1461 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1462 		if (tmp == nb) {
1463 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1464 			    notifier_block, link);
1465 			return 0;
1466 		}
1467 	}
1468 
1469 	return -ENOENT;
1470 }
1471 
1472 const char *
1473 acpi_format_exception(acpi_status status)
1474 {
1475 	switch (status) {
1476 	case AE_NOT_FOUND:
1477 		return "not found";
1478 	case AE_BAD_PARAMETER:
1479 		return "bad parameter";
1480 	default:
1481 		return "unknown";
1482 	}
1483 }
1484 
1485 #endif
1486 
1487 void
1488 backlight_do_update_status(void *arg)
1489 {
1490 	backlight_update_status(arg);
1491 }
1492 
1493 struct backlight_device *
1494 backlight_device_register(const char *name, void *kdev, void *data,
1495     const struct backlight_ops *ops, struct backlight_properties *props)
1496 {
1497 	struct backlight_device *bd;
1498 
1499 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1500 	bd->ops = ops;
1501 	bd->props = *props;
1502 	bd->data = data;
1503 
1504 	task_set(&bd->task, backlight_do_update_status, bd);
1505 
1506 	return bd;
1507 }
1508 
1509 void
1510 backlight_device_unregister(struct backlight_device *bd)
1511 {
1512 	free(bd, M_DRM, sizeof(*bd));
1513 }
1514 
1515 struct backlight_device *
1516 devm_backlight_device_register(void *dev, const char *name, void *parent,
1517     void *data, const struct backlight_ops *bo,
1518     const struct backlight_properties *bp)
1519 {
1520 	STUB();
1521 	return NULL;
1522 }
1523 
1524 void
1525 backlight_schedule_update_status(struct backlight_device *bd)
1526 {
1527 	task_add(systq, &bd->task);
1528 }
1529 
1530 inline int
1531 backlight_enable(struct backlight_device *bd)
1532 {
1533 	if (bd == NULL)
1534 		return 0;
1535 
1536 	bd->props.power = FB_BLANK_UNBLANK;
1537 
1538 	return bd->ops->update_status(bd);
1539 }
1540 
1541 inline int
1542 backlight_disable(struct backlight_device *bd)
1543 {
1544 	if (bd == NULL)
1545 		return 0;
1546 
1547 	bd->props.power = FB_BLANK_POWERDOWN;
1548 
1549 	return bd->ops->update_status(bd);
1550 }
1551 
1552 void
1553 drm_sysfs_hotplug_event(struct drm_device *dev)
1554 {
1555 	knote_locked(&dev->note, NOTE_CHANGE);
1556 }
1557 
1558 void
1559 drm_sysfs_connector_hotplug_event(struct drm_connector *connector)
1560 {
1561 	knote_locked(&connector->dev->note, NOTE_CHANGE);
1562 }
1563 
1564 void
1565 drm_sysfs_connector_status_event(struct drm_connector *connector,
1566     struct drm_property *property)
1567 {
1568 	STUB();
1569 }
1570 
1571 struct dma_fence *
1572 dma_fence_get(struct dma_fence *fence)
1573 {
1574 	if (fence)
1575 		kref_get(&fence->refcount);
1576 	return fence;
1577 }
1578 
1579 struct dma_fence *
1580 dma_fence_get_rcu(struct dma_fence *fence)
1581 {
1582 	if (fence)
1583 		kref_get(&fence->refcount);
1584 	return fence;
1585 }
1586 
1587 struct dma_fence *
1588 dma_fence_get_rcu_safe(struct dma_fence **dfp)
1589 {
1590 	struct dma_fence *fence;
1591 	if (dfp == NULL)
1592 		return NULL;
1593 	fence = *dfp;
1594 	if (fence)
1595 		kref_get(&fence->refcount);
1596 	return fence;
1597 }
1598 
1599 void
1600 dma_fence_release(struct kref *ref)
1601 {
1602 	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
1603 	if (fence->ops && fence->ops->release)
1604 		fence->ops->release(fence);
1605 	else
1606 		free(fence, M_DRM, 0);
1607 }
1608 
1609 void
1610 dma_fence_put(struct dma_fence *fence)
1611 {
1612 	if (fence)
1613 		kref_put(&fence->refcount, dma_fence_release);
1614 }
1615 
1616 int
1617 dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp)
1618 {
1619 	struct dma_fence_cb *cur, *tmp;
1620 	struct list_head cb_list;
1621 
1622 	if (fence == NULL)
1623 		return -EINVAL;
1624 
1625 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1626 		return -EINVAL;
1627 
1628 	list_replace(&fence->cb_list, &cb_list);
1629 
1630 	fence->timestamp = timestamp;
1631 	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
1632 
1633 	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
1634 		INIT_LIST_HEAD(&cur->node);
1635 		cur->func(fence, cur);
1636 	}
1637 
1638 	return 0;
1639 }
1640 
1641 int
1642 dma_fence_signal(struct dma_fence *fence)
1643 {
1644 	int r;
1645 
1646 	if (fence == NULL)
1647 		return -EINVAL;
1648 
1649 	mtx_enter(fence->lock);
1650 	r = dma_fence_signal_timestamp_locked(fence, ktime_get());
1651 	mtx_leave(fence->lock);
1652 
1653 	return r;
1654 }
1655 
1656 int
1657 dma_fence_signal_locked(struct dma_fence *fence)
1658 {
1659 	if (fence == NULL)
1660 		return -EINVAL;
1661 
1662 	return dma_fence_signal_timestamp_locked(fence, ktime_get());
1663 }
1664 
1665 int
1666 dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
1667 {
1668 	int r;
1669 
1670 	if (fence == NULL)
1671 		return -EINVAL;
1672 
1673 	mtx_enter(fence->lock);
1674 	r = dma_fence_signal_timestamp_locked(fence, timestamp);
1675 	mtx_leave(fence->lock);
1676 
1677 	return r;
1678 }
1679 
1680 bool
1681 dma_fence_is_signaled(struct dma_fence *fence)
1682 {
1683 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1684 		return true;
1685 
1686 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1687 		dma_fence_signal(fence);
1688 		return true;
1689 	}
1690 
1691 	return false;
1692 }
1693 
1694 bool
1695 dma_fence_is_signaled_locked(struct dma_fence *fence)
1696 {
1697 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1698 		return true;
1699 
1700 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1701 		dma_fence_signal_locked(fence);
1702 		return true;
1703 	}
1704 
1705 	return false;
1706 }
1707 
1708 long
1709 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
1710 {
1711 	if (timeout < 0)
1712 		return -EINVAL;
1713 
1714 	if (fence->ops->wait)
1715 		return fence->ops->wait(fence, intr, timeout);
1716 	else
1717 		return dma_fence_default_wait(fence, intr, timeout);
1718 }
1719 
1720 long
1721 dma_fence_wait(struct dma_fence *fence, bool intr)
1722 {
1723 	long ret;
1724 
1725 	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
1726 	if (ret < 0)
1727 		return ret;
1728 
1729 	return 0;
1730 }
1731 
1732 void
1733 dma_fence_enable_sw_signaling(struct dma_fence *fence)
1734 {
1735 	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
1736 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
1737 	    fence->ops->enable_signaling) {
1738 		mtx_enter(fence->lock);
1739 		if (!fence->ops->enable_signaling(fence))
1740 			dma_fence_signal_locked(fence);
1741 		mtx_leave(fence->lock);
1742 	}
1743 }
1744 
1745 void
1746 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
1747     struct mutex *lock, uint64_t context, uint64_t seqno)
1748 {
1749 	fence->ops = ops;
1750 	fence->lock = lock;
1751 	fence->context = context;
1752 	fence->seqno = seqno;
1753 	fence->flags = 0;
1754 	fence->error = 0;
1755 	kref_init(&fence->refcount);
1756 	INIT_LIST_HEAD(&fence->cb_list);
1757 }
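
/*
 * Illustrative example (not part of the original file): the minimal
 * driver-side fence setup this function supports, mirroring the stub
 * fence further down.  mydrv_fence_ops and mydrv_fence_mtx (an
 * initialized struct mutex, e.g. at IPL_TTY like the stub's) are
 * placeholders.
 *
 *	struct dma_fence *f;
 *
 *	f = malloc(sizeof(*f), M_DRM, M_WAITOK | M_ZERO);
 *	dma_fence_init(f, &mydrv_fence_ops, &mydrv_fence_mtx,
 *	    dma_fence_context_alloc(1), 1);
 *	...
 *	dma_fence_signal(f);	(runs callbacks added with
 *				 dma_fence_add_callback())
 *	dma_fence_put(f);	(frees the fence once the last
 *				 reference is dropped)
 */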
1758 
1759 int
1760 dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
1761     dma_fence_func_t func)
1762 {
1763 	int ret = 0;
1764 	bool was_set;
1765 
1766 	if (WARN_ON(!fence || !func))
1767 		return -EINVAL;
1768 
1769 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1770 		INIT_LIST_HEAD(&cb->node);
1771 		return -ENOENT;
1772 	}
1773 
1774 	mtx_enter(fence->lock);
1775 
1776 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
1777 
1778 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1779 		ret = -ENOENT;
1780 	else if (!was_set && fence->ops->enable_signaling) {
1781 		if (!fence->ops->enable_signaling(fence)) {
1782 			dma_fence_signal_locked(fence);
1783 			ret = -ENOENT;
1784 		}
1785 	}
1786 
1787 	if (!ret) {
1788 		cb->func = func;
1789 		list_add_tail(&cb->node, &fence->cb_list);
1790 	} else
1791 		INIT_LIST_HEAD(&cb->node);
1792 	mtx_leave(fence->lock);
1793 
1794 	return ret;
1795 }
1796 
1797 bool
1798 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
1799 {
1800 	bool ret;
1801 
1802 	mtx_enter(fence->lock);
1803 
1804 	ret = !list_empty(&cb->node);
1805 	if (ret)
1806 		list_del_init(&cb->node);
1807 
1808 	mtx_leave(fence->lock);
1809 
1810 	return ret;
1811 }
1812 
1813 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1814 
1815 uint64_t
1816 dma_fence_context_alloc(unsigned int num)
1817 {
1818 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1819 }
1820 
1821 struct default_wait_cb {
1822 	struct dma_fence_cb base;
1823 	struct proc *proc;
1824 };
1825 
1826 static void
1827 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1828 {
1829 	struct default_wait_cb *wait =
1830 	    container_of(cb, struct default_wait_cb, base);
1831 	wake_up_process(wait->proc);
1832 }
1833 
1834 long
1835 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1836 {
1837 	long ret = timeout ? timeout : 1;
1838 	unsigned long end;
1839 	int err;
1840 	struct default_wait_cb cb;
1841 	bool was_set;
1842 
1843 	KASSERT(timeout <= INT_MAX);
1844 
1845 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1846 		return ret;
1847 
1848 	mtx_enter(fence->lock);
1849 
1850 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1851 	    &fence->flags);
1852 
1853 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1854 		goto out;
1855 
1856 	if (!was_set && fence->ops->enable_signaling) {
1857 		if (!fence->ops->enable_signaling(fence)) {
1858 			dma_fence_signal_locked(fence);
1859 			goto out;
1860 		}
1861 	}
1862 
1863 	if (timeout == 0) {
1864 		ret = 0;
1865 		goto out;
1866 	}
1867 
1868 	cb.base.func = dma_fence_default_wait_cb;
1869 	cb.proc = curproc;
1870 	list_add(&cb.base.node, &fence->cb_list);
1871 
1872 	end = jiffies + timeout;
1873 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1874 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1875 			break;
1876 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
1877 		    "dmafence", ret);
1878 		if (err == EINTR || err == ERESTART) {
1879 			ret = -ERESTARTSYS;
1880 			break;
1881 		}
1882 	}
1883 
1884 	if (!list_empty(&cb.base.node))
1885 		list_del(&cb.base.node);
1886 out:
1887 	mtx_leave(fence->lock);
1888 
1889 	return ret;
1890 }
1891 
1892 static bool
1893 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1894     uint32_t *idx)
1895 {
1896 	int i;
1897 
1898 	for (i = 0; i < count; ++i) {
1899 		struct dma_fence *fence = fences[i];
1900 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1901 			if (idx)
1902 				*idx = i;
1903 			return true;
1904 		}
1905 	}
1906 	return false;
1907 }
1908 
1909 long
1910 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
1911     bool intr, long timeout, uint32_t *idx)
1912 {
1913 	struct default_wait_cb *cb;
1914 	long ret = timeout;
1915 	unsigned long end;
1916 	int i, err;
1917 
1918 	KASSERT(timeout <= INT_MAX);
1919 
1920 	if (timeout == 0) {
1921 		for (i = 0; i < count; i++) {
1922 			if (dma_fence_is_signaled(fences[i])) {
1923 				if (idx)
1924 					*idx = i;
1925 				return 1;
1926 			}
1927 		}
1928 		return 0;
1929 	}
1930 
1931 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1932 	if (cb == NULL)
1933 		return -ENOMEM;
1934 
1935 	for (i = 0; i < count; i++) {
1936 		struct dma_fence *fence = fences[i];
1937 		cb[i].proc = curproc;
1938 		if (dma_fence_add_callback(fence, &cb[i].base,
1939 		    dma_fence_default_wait_cb)) {
1940 			if (idx)
1941 				*idx = i;
1942 			goto cb_cleanup;
1943 		}
1944 	}
1945 
1946 	end = jiffies + timeout;
1947 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1948 		if (dma_fence_test_signaled_any(fences, count, idx))
1949 			break;
1950 		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
1951 		if (err == EINTR || err == ERESTART) {
1952 			ret = -ERESTARTSYS;
1953 			break;
1954 		}
1955 	}
1956 
1957 cb_cleanup:
1958 	while (i-- > 0)
1959 		dma_fence_remove_callback(fences[i], &cb[i].base);
1960 	free(cb, M_DRM, count * sizeof(*cb));
1961 	return ret;
1962 }
1963 
1964 static struct dma_fence dma_fence_stub;
1965 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
1966 
1967 static const char *
1968 dma_fence_stub_get_name(struct dma_fence *fence)
1969 {
1970 	return "stub";
1971 }
1972 
1973 static const struct dma_fence_ops dma_fence_stub_ops = {
1974 	.get_driver_name = dma_fence_stub_get_name,
1975 	.get_timeline_name = dma_fence_stub_get_name,
1976 };
1977 
1978 struct dma_fence *
1979 dma_fence_get_stub(void)
1980 {
1981 	mtx_enter(&dma_fence_stub_mtx);
1982 	if (dma_fence_stub.ops == NULL) {
1983 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
1984 		    &dma_fence_stub_mtx, 0, 0);
1985 		dma_fence_signal_locked(&dma_fence_stub);
1986 	}
1987 	mtx_leave(&dma_fence_stub_mtx);
1988 
1989 	return dma_fence_get(&dma_fence_stub);
1990 }
1991 
1992 struct dma_fence *
1993 dma_fence_allocate_private_stub(void)
1994 {
1995 	struct dma_fence *f = malloc(sizeof(*f), M_DRM,
1996 	    M_ZERO | M_WAITOK | M_CANFAIL);
1997 	if (f == NULL)
1998 		return ERR_PTR(-ENOMEM);
1999 	dma_fence_init(f, &dma_fence_stub_ops, &dma_fence_stub_mtx, 0, 0);
2000 	dma_fence_signal(f);
2001 	return f;
2002 }
2003 
2004 static const char *
2005 dma_fence_array_get_driver_name(struct dma_fence *fence)
2006 {
2007 	return "dma_fence_array";
2008 }
2009 
2010 static const char *
2011 dma_fence_array_get_timeline_name(struct dma_fence *fence)
2012 {
2013 	return "unbound";
2014 }
2015 
2016 static void
2017 irq_dma_fence_array_work(void *arg)
2018 {
2019 	struct dma_fence_array *dfa = (struct dma_fence_array *)arg;
2020 	dma_fence_signal(&dfa->base);
2021 	dma_fence_put(&dfa->base);
2022 }
2023 
2024 static void
2025 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
2026 {
2027 	struct dma_fence_array_cb *array_cb =
2028 	    container_of(cb, struct dma_fence_array_cb, cb);
2029 	struct dma_fence_array *dfa = array_cb->array;
2030 
2031 	if (atomic_dec_and_test(&dfa->num_pending))
2032 		timeout_add(&dfa->to, 1);
2033 	else
2034 		dma_fence_put(&dfa->base);
2035 }
2036 
2037 static bool
2038 dma_fence_array_enable_signaling(struct dma_fence *fence)
2039 {
2040 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2041 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
2042 	int i;
2043 
2044 	for (i = 0; i < dfa->num_fences; ++i) {
2045 		cb[i].array = dfa;
2046 		dma_fence_get(&dfa->base);
2047 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
2048 		    dma_fence_array_cb_func)) {
2049 			dma_fence_put(&dfa->base);
2050 			if (atomic_dec_and_test(&dfa->num_pending))
2051 				return false;
2052 		}
2053 	}
2054 
2055 	return true;
2056 }
2057 
2058 static bool
2059 dma_fence_array_signaled(struct dma_fence *fence)
2060 {
2061 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2062 
2063 	return atomic_read(&dfa->num_pending) <= 0;
2064 }
2065 
2066 static void
2067 dma_fence_array_release(struct dma_fence *fence)
2068 {
2069 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2070 	int i;
2071 
2072 	for (i = 0; i < dfa->num_fences; ++i)
2073 		dma_fence_put(dfa->fences[i]);
2074 
2075 	free(dfa->fences, M_DRM, 0);
2076 	dma_fence_free(fence);
2077 }
2078 
2079 struct dma_fence_array *
2080 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
2081     unsigned seqno, bool signal_on_any)
2082 {
2083 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
2084 	    (num_fences * sizeof(struct dma_fence_array_cb)),
2085 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
2086 	if (dfa == NULL)
2087 		return NULL;
2088 
2089 	mtx_init(&dfa->lock, IPL_TTY);
2090 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
2091 	    context, seqno);
2092 	timeout_set(&dfa->to, irq_dma_fence_array_work, dfa);
2093 
2094 	dfa->num_fences = num_fences;
2095 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
2096 	dfa->fences = fences;
2097 
2098 	return dfa;
2099 }
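
/*
 * Illustrative example (not part of the original file): combining two
 * fences into an array that signals once both have signalled.  The fences
 * array must be allocated with M_DRM and hold references, because
 * dma_fence_array_release() puts each fence and frees the array.
 *
 *	struct dma_fence **fences;
 *	struct dma_fence_array *dfa;
 *
 *	fences = mallocarray(2, sizeof(*fences), M_DRM, M_WAITOK);
 *	fences[0] = dma_fence_get(a);
 *	fences[1] = dma_fence_get(b);
 *	dfa = dma_fence_array_create(2, fences, dma_fence_context_alloc(1),
 *	    1, false);
 *	if (dfa == NULL)
 *		(drop the references and free the array yourself)
 *
 *	dma_fence_wait(&dfa->base, false);
 *	dma_fence_put(&dfa->base);
 */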
2100 
2101 struct dma_fence *
2102 dma_fence_array_first(struct dma_fence *f)
2103 {
2104 	struct dma_fence_array *dfa;
2105 
2106 	if (f == NULL)
2107 		return NULL;
2108 
2109 	if ((dfa = to_dma_fence_array(f)) == NULL)
2110 		return f;
2111 
2112 	if (dfa->num_fences > 0)
2113 		return dfa->fences[0];
2114 
2115 	return NULL;
2116 }
2117 
2118 struct dma_fence *
2119 dma_fence_array_next(struct dma_fence *f, unsigned int i)
2120 {
2121 	struct dma_fence_array *dfa;
2122 
2123 	if (f == NULL)
2124 		return NULL;
2125 
2126 	if ((dfa = to_dma_fence_array(f)) == NULL)
2127 		return NULL;
2128 
2129 	if (i < dfa->num_fences)
2130 		return dfa->fences[i];
2131 
2132 	return NULL;
2133 }
2134 
2135 const struct dma_fence_ops dma_fence_array_ops = {
2136 	.get_driver_name = dma_fence_array_get_driver_name,
2137 	.get_timeline_name = dma_fence_array_get_timeline_name,
2138 	.enable_signaling = dma_fence_array_enable_signaling,
2139 	.signaled = dma_fence_array_signaled,
2140 	.release = dma_fence_array_release,
2141 };
2142 
2143 int
2144 dma_fence_chain_find_seqno(struct dma_fence **df, uint64_t seqno)
2145 {
2146 	struct dma_fence_chain *chain;
2147 	struct dma_fence *fence;
2148 
2149 	if (seqno == 0)
2150 		return 0;
2151 
2152 	if ((chain = to_dma_fence_chain(*df)) == NULL)
2153 		return -EINVAL;
2154 
2155 	fence = &chain->base;
2156 	if (fence->seqno < seqno)
2157 		return -EINVAL;
2158 
2159 	dma_fence_chain_for_each(*df, fence) {
2160 		if ((*df)->context != fence->context)
2161 			break;
2162 
2163 		chain = to_dma_fence_chain(*df);
2164 		if (chain->prev_seqno < seqno)
2165 			break;
2166 	}
2167 	dma_fence_put(fence);
2168 
2169 	return 0;
2170 }
2171 
2172 void
2173 dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev,
2174     struct dma_fence *fence, uint64_t seqno)
2175 {
2176 	uint64_t context;
2177 
2178 	chain->fence = fence;
2179 	chain->prev = prev;
2180 	mtx_init(&chain->lock, IPL_TTY);
2181 
2182 	/* if prev is a chain */
2183 	if (to_dma_fence_chain(prev) != NULL) {
2184 		if (__dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
2185 			chain->prev_seqno = prev->seqno;
2186 			context = prev->context;
2187 		} else {
2188 			chain->prev_seqno = 0;
2189 			context = dma_fence_context_alloc(1);
2190 			seqno = prev->seqno;
2191 		}
2192 	} else {
2193 		chain->prev_seqno = 0;
2194 		context = dma_fence_context_alloc(1);
2195 	}
2196 
2197 	dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock,
2198 	    context, seqno);
2199 }
2200 
2201 static const char *
2202 dma_fence_chain_get_driver_name(struct dma_fence *fence)
2203 {
2204 	return "dma_fence_chain";
2205 }
2206 
2207 static const char *
2208 dma_fence_chain_get_timeline_name(struct dma_fence *fence)
2209 {
2210 	return "unbound";
2211 }
2212 
2213 static bool dma_fence_chain_enable_signaling(struct dma_fence *);
2214 
2215 static void
2216 dma_fence_chain_timo(void *arg)
2217 {
2218 	struct dma_fence_chain *chain = (struct dma_fence_chain *)arg;
2219 
2220 	if (dma_fence_chain_enable_signaling(&chain->base) == false)
2221 		dma_fence_signal(&chain->base);
2222 	dma_fence_put(&chain->base);
2223 }
2224 
2225 static void
2226 dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
2227 {
2228 	struct dma_fence_chain *chain =
2229 	    container_of(cb, struct dma_fence_chain, cb);
2230 	timeout_set(&chain->to, dma_fence_chain_timo, chain);
2231 	timeout_add(&chain->to, 1);
2232 	dma_fence_put(f);
2233 }
2234 
2235 static bool
2236 dma_fence_chain_enable_signaling(struct dma_fence *fence)
2237 {
2238 	struct dma_fence_chain *chain, *h;
2239 	struct dma_fence *f;
2240 
2241 	h = to_dma_fence_chain(fence);
2242 	dma_fence_get(&h->base);
2243 	dma_fence_chain_for_each(fence, &h->base) {
2244 		chain = to_dma_fence_chain(fence);
2245 		if (chain == NULL)
2246 			f = fence;
2247 		else
2248 			f = chain->fence;
2249 
2250 		dma_fence_get(f);
2251 		if (!dma_fence_add_callback(f, &h->cb, dma_fence_chain_cb)) {
2252 			dma_fence_put(fence);
2253 			return true;
2254 		}
2255 		dma_fence_put(f);
2256 	}
2257 	dma_fence_put(&h->base);
2258 	return false;
2259 }
2260 
2261 static bool
2262 dma_fence_chain_signaled(struct dma_fence *fence)
2263 {
2264 	struct dma_fence_chain *chain;
2265 	struct dma_fence *f;
2266 
2267 	dma_fence_chain_for_each(fence, fence) {
2268 		chain = to_dma_fence_chain(fence);
2269 		if (chain == NULL)
2270 			f = fence;
2271 		else
2272 			f = chain->fence;
2273 
2274 		if (dma_fence_is_signaled(f) == false) {
2275 			dma_fence_put(fence);
2276 			return false;
2277 		}
2278 	}
2279 	return true;
2280 }
2281 
2282 static void
2283 dma_fence_chain_release(struct dma_fence *fence)
2284 {
2285 	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
2286 	struct dma_fence_chain *prev_chain;
2287 	struct dma_fence *prev;
2288 
2289 	for (prev = chain->prev; prev != NULL; prev = chain->prev) {
2290 		if (kref_read(&prev->refcount) > 1)
2291 			break;
2292 		if ((prev_chain = to_dma_fence_chain(prev)) == NULL)
2293 			break;
2294 		chain->prev = prev_chain->prev;
2295 		prev_chain->prev = NULL;
2296 		dma_fence_put(prev);
2297 	}
2298 	dma_fence_put(prev);
2299 	dma_fence_put(chain->fence);
2300 	dma_fence_free(fence);
2301 }
2302 
2303 struct dma_fence *
2304 dma_fence_chain_walk(struct dma_fence *fence)
2305 {
2306 	struct dma_fence_chain *chain = to_dma_fence_chain(fence), *prev_chain;
2307 	struct dma_fence *prev, *new_prev, *tmp;
2308 
2309 	if (chain == NULL) {
2310 		dma_fence_put(fence);
2311 		return NULL;
2312 	}
2313 
2314 	while ((prev = dma_fence_get(chain->prev)) != NULL) {
2315 		prev_chain = to_dma_fence_chain(prev);
2316 		if (prev_chain != NULL) {
2317 			if (!dma_fence_is_signaled(prev_chain->fence))
2318 				break;
2319 			new_prev = dma_fence_get(prev_chain->prev);
2320 		} else {
2321 			if (!dma_fence_is_signaled(prev))
2322 				break;
2323 			new_prev = NULL;
2324 		}
2325 		tmp = atomic_cas_ptr(&chain->prev, prev, new_prev);
2326 		dma_fence_put(tmp == prev ? prev : new_prev);
2327 		dma_fence_put(prev);
2328 	}
2329 
2330 	dma_fence_put(fence);
2331 	return prev;
2332 }
2333 
2334 const struct dma_fence_ops dma_fence_chain_ops = {
2335 	.get_driver_name = dma_fence_chain_get_driver_name,
2336 	.get_timeline_name = dma_fence_chain_get_timeline_name,
2337 	.enable_signaling = dma_fence_chain_enable_signaling,
2338 	.signaled = dma_fence_chain_signaled,
2339 	.release = dma_fence_chain_release,
2340 	.use_64bit_seqno = true,
2341 };
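
/*
 * Illustrative sketch only, not part of this file: a hypothetical
 * timeline user links a new point onto the chain with
 * dma_fence_chain_init() and later iterates the chain, letting
 * dma_fence_chain_walk() above prune links whose fences have signaled.
 * "prev", "fence" and "seqno" are assumed caller-side values, and
 * dma_fence_chain_alloc() is assumed to come from the shared
 * dma-fence-chain header.
 *
 *	struct dma_fence_chain *chain = dma_fence_chain_alloc();
 *	if (chain != NULL) {
 *		dma_fence_chain_init(chain, prev, fence, seqno);
 *		prev = &chain->base;
 *	}
 *
 *	struct dma_fence *iter;
 *	dma_fence_chain_for_each(iter, prev)
 *		;	(references are taken and dropped by the walk)
 */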
2342 
2343 bool
2344 dma_fence_is_container(struct dma_fence *fence)
2345 {
2346 	return (fence->ops == &dma_fence_chain_ops) ||
2347 	    (fence->ops == &dma_fence_array_ops);
2348 }
2349 
2350 int
2351 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
2352 {
2353 	return (ENXIO);
2354 }
2355 
2356 int
2357 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
2358 {
2359 	return (ENXIO);
2360 }
2361 
2362 int
2363 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2364 {
2365 	return (ENOTTY);
2366 }
2367 
2368 int
2369 dmabuf_kqfilter(struct file *fp, struct knote *kn)
2370 {
2371 	return (EINVAL);
2372 }
2373 
2374 int
2375 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
2376 {
2377 	struct dma_buf *dmabuf = fp->f_data;
2378 
2379 	memset(st, 0, sizeof(*st));
2380 	st->st_size = dmabuf->size;
2381 	st->st_mode = S_IFIFO;	/* XXX */
2382 	return (0);
2383 }
2384 
2385 int
2386 dmabuf_close(struct file *fp, struct proc *p)
2387 {
2388 	struct dma_buf *dmabuf = fp->f_data;
2389 
2390 	fp->f_data = NULL;
2391 	KERNEL_LOCK();
2392 	dmabuf->ops->release(dmabuf);
2393 	KERNEL_UNLOCK();
2394 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
2395 	return (0);
2396 }
2397 
2398 int
2399 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2400 {
2401 	struct dma_buf *dmabuf = fp->f_data;
2402 	off_t newoff;
2403 
2404 	if (*offset != 0)
2405 		return (EINVAL);
2406 
2407 	switch (whence) {
2408 	case SEEK_SET:
2409 		newoff = 0;
2410 		break;
2411 	case SEEK_END:
2412 		newoff = dmabuf->size;
2413 		break;
2414 	default:
2415 		return (EINVAL);
2416 	}
2417 	mtx_enter(&fp->f_mtx);
2418 	fp->f_offset = newoff;
2419 	mtx_leave(&fp->f_mtx);
2420 	*offset = newoff;
2421 	return (0);
2422 }
2423 
2424 const struct fileops dmabufops = {
2425 	.fo_read	= dmabuf_read,
2426 	.fo_write	= dmabuf_write,
2427 	.fo_ioctl	= dmabuf_ioctl,
2428 	.fo_kqfilter	= dmabuf_kqfilter,
2429 	.fo_stat	= dmabuf_stat,
2430 	.fo_close	= dmabuf_close,
2431 	.fo_seek	= dmabuf_seek,
2432 };
2433 
2434 struct dma_buf *
2435 dma_buf_export(const struct dma_buf_export_info *info)
2436 {
2437 	struct proc *p = curproc;
2438 	struct dma_buf *dmabuf;
2439 	struct file *fp;
2440 
2441 	fp = fnew(p);
2442 	if (fp == NULL)
2443 		return ERR_PTR(-ENFILE);
2444 	fp->f_type = DTYPE_DMABUF;
2445 	fp->f_ops = &dmabufops;
2446 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
2447 	dmabuf->priv = info->priv;
2448 	dmabuf->ops = info->ops;
2449 	dmabuf->size = info->size;
2450 	dmabuf->file = fp;
2451 	fp->f_data = dmabuf;
2452 	INIT_LIST_HEAD(&dmabuf->attachments);
2453 	return dmabuf;
2454 }
2455 
2456 struct dma_buf *
2457 dma_buf_get(int fd)
2458 {
2459 	struct proc *p = curproc;
2460 	struct filedesc *fdp = p->p_fd;
2461 	struct file *fp;
2462 
2463 	if ((fp = fd_getfile(fdp, fd)) == NULL)
2464 		return ERR_PTR(-EBADF);
2465 
2466 	if (fp->f_type != DTYPE_DMABUF) {
2467 		FRELE(fp, p);
2468 		return ERR_PTR(-EINVAL);
2469 	}
2470 
2471 	return fp->f_data;
2472 }
2473 
2474 void
2475 dma_buf_put(struct dma_buf *dmabuf)
2476 {
2477 	KASSERT(dmabuf);
2478 	KASSERT(dmabuf->file);
2479 
2480 	FRELE(dmabuf->file, curproc);
2481 }
2482 
2483 int
2484 dma_buf_fd(struct dma_buf *dmabuf, int flags)
2485 {
2486 	struct proc *p = curproc;
2487 	struct filedesc *fdp = p->p_fd;
2488 	struct file *fp = dmabuf->file;
2489 	int fd, cloexec, error;
2490 
2491 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
2492 
2493 	fdplock(fdp);
2494 restart:
2495 	if ((error = fdalloc(p, 0, &fd)) != 0) {
2496 		if (error == ENOSPC) {
2497 			fdexpand(p);
2498 			goto restart;
2499 		}
2500 		fdpunlock(fdp);
2501 		return -error;
2502 	}
2503 
2504 	fdinsert(fdp, fd, cloexec, fp);
2505 	fdpunlock(fdp);
2506 
2507 	return fd;
2508 }
2509 
2510 void
2511 get_dma_buf(struct dma_buf *dmabuf)
2512 {
2513 	FREF(dmabuf->file);
2514 }
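
/*
 * Illustrative sketch only: a hypothetical exporter/importer pair built
 * on the helpers above.  "my_dmabuf_ops", "obj" and "size" are assumed
 * caller-side names; error handling is mostly omitted.
 *
 * Exporter:
 *	struct dma_buf_export_info info = {
 *		.ops = &my_dmabuf_ops,
 *		.size = size,
 *		.priv = obj,
 *	};
 *	struct dma_buf *dmabuf = dma_buf_export(&info);
 *	int fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *
 * Importer:
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	if (!IS_ERR(dmabuf)) {
 *		...
 *		dma_buf_put(dmabuf);
 *	}
 */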
2515 
2516 enum pci_bus_speed
2517 pcie_get_speed_cap(struct pci_dev *pdev)
2518 {
2519 	pci_chipset_tag_t	pc;
2520 	pcitag_t		tag;
2521 	int			pos;
2522 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
2523 	pcireg_t		id;
2524 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
2525 	int			bus, device, function;
2526 
2527 	if (pdev == NULL)
2528 		return PCI_SPEED_UNKNOWN;
2529 
2530 	pc = pdev->pc;
2531 	tag = pdev->tag;
2532 
2533 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2534 	    &pos, NULL))
2535 		return PCI_SPEED_UNKNOWN;
2536 
2537 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2538 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2539 
2540 	/* we've been informed VIA and ServerWorks don't make the cut */
2541 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
2542 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
2543 		return PCI_SPEED_UNKNOWN;
2544 
2545 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2546 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
2547 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
2548 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
2549 
2550 	lnkcap &= 0x0f;
2551 	lnkcap2 &= 0xfe;
2552 
2553 	if (lnkcap2) { /* Link Capabilities 2, PCIe 3.0 and newer */
2554 		if (lnkcap2 & 0x02)
2555 			cap = PCIE_SPEED_2_5GT;
2556 		if (lnkcap2 & 0x04)
2557 			cap = PCIE_SPEED_5_0GT;
2558 		if (lnkcap2 & 0x08)
2559 			cap = PCIE_SPEED_8_0GT;
2560 		if (lnkcap2 & 0x10)
2561 			cap = PCIE_SPEED_16_0GT;
2562 		if (lnkcap2 & 0x20)
2563 			cap = PCIE_SPEED_32_0GT;
2564 		if (lnkcap2 & 0x40)
2565 			cap = PCIE_SPEED_64_0GT;
2566 	} else {
2567 		if (lnkcap & 0x01)
2568 			cap = PCIE_SPEED_2_5GT;
2569 		if (lnkcap & 0x02)
2570 			cap = PCIE_SPEED_5_0GT;
2571 	}
2572 
2573 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
2574 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
2575 	    lnkcap2);
2576 	return cap;
2577 }
2578 
2579 enum pcie_link_width
2580 pcie_get_width_cap(struct pci_dev *pdev)
2581 {
2582 	pci_chipset_tag_t	pc = pdev->pc;
2583 	pcitag_t		tag = pdev->tag;
2584 	int			pos;
2585 	pcireg_t		lnkcap = 0;
2586 	pcireg_t		id;
2587 	int			bus, device, function;
2588 
2589 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2590 	    &pos, NULL))
2591 		return PCIE_LNK_WIDTH_UNKNOWN;
2592 
2593 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2594 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2595 
2596 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2597 
2598 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
2599 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
2600 
2601 	if (lnkcap)
2602 		return (lnkcap & 0x3f0) >> 4;
2603 	return PCIE_LNK_WIDTH_UNKNOWN;
2604 }
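
/*
 * Illustrative sketch only: a hypothetical driver sizing its link would
 * combine the two probes above.  PCIE_LNK_X16 is assumed to be
 * available from the shared linux/pci.h enum pcie_link_width.
 *
 *	if (pcie_get_speed_cap(pdev) == PCIE_SPEED_8_0GT &&
 *	    pcie_get_width_cap(pdev) == PCIE_LNK_X16)
 *		(the slot can run the device at gen3 x16)
 */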
2605 
2606 bool
2607 pcie_aspm_enabled(struct pci_dev *pdev)
2608 {
2609 	pci_chipset_tag_t	pc = pdev->pc;
2610 	pcitag_t		tag = pdev->tag;
2611 	int			pos;
2612 	pcireg_t		lcsr;
2613 
2614 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2615 	    &pos, NULL))
2616 		return false;
2617 
2618 	lcsr = pci_conf_read(pc, tag, pos + PCI_PCIE_LCSR);
2619 	if ((lcsr & (PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1)) != 0)
2620 		return true;
2621 
2622 	return false;
2623 }
2624 
2625 int
2626 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
2627     int sync, void *key)
2628 {
2629 	wakeup(wqe);
2630 	if (wqe->private)
2631 		wake_up_process(wqe->private);
2632 	list_del_init(&wqe->entry);
2633 	return 0;
2634 }
2635 
2636 static wait_queue_head_t bit_waitq;
2637 wait_queue_head_t var_waitq;
2638 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
2639 
2640 int
2641 wait_on_bit(unsigned long *word, int bit, unsigned mode)
2642 {
2643 	int err;
2644 
2645 	if (!test_bit(bit, word))
2646 		return 0;
2647 
2648 	mtx_enter(&wait_bit_mtx);
2649 	while (test_bit(bit, word)) {
2650 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
2651 		    INFSLP);
2652 		if (err) {
2653 			mtx_leave(&wait_bit_mtx);
2654 			return 1;
2655 		}
2656 	}
2657 	mtx_leave(&wait_bit_mtx);
2658 	return 0;
2659 }
2660 
2661 int
2662 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
2663 {
2664 	int err;
2665 
2666 	if (!test_bit(bit, word))
2667 		return 0;
2668 
2669 	mtx_enter(&wait_bit_mtx);
2670 	while (test_bit(bit, word)) {
2671 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
2672 		if (err) {
2673 			mtx_leave(&wait_bit_mtx);
2674 			return 1;
2675 		}
2676 	}
2677 	mtx_leave(&wait_bit_mtx);
2678 	return 0;
2679 }
2680 
2681 void
2682 wake_up_bit(void *word, int bit)
2683 {
2684 	mtx_enter(&wait_bit_mtx);
2685 	wakeup(word);
2686 	mtx_leave(&wait_bit_mtx);
2687 }
2688 
2689 void
2690 clear_and_wake_up_bit(int bit, void *word)
2691 {
2692 	clear_bit(bit, word);
2693 	wake_up_bit(word, bit);
2694 }
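
/*
 * Illustrative sketch only: the usual pairing of the bit-wait helpers
 * above.  The flag word "flags" (an unsigned long) and bit number
 * "MY_BIT_BUSY" are hypothetical caller-side state.
 *
 * Waiter:
 *	if (wait_on_bit(&flags, MY_BIT_BUSY, TASK_UNINTERRUPTIBLE))
 *		(interrupted; bail out)
 *
 * Owner, once the work is finished:
 *	clear_and_wake_up_bit(MY_BIT_BUSY, &flags);
 */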
2695 
2696 wait_queue_head_t *
2697 bit_waitqueue(void *word, int bit)
2698 {
2699 	/* XXX hash table of wait queues? */
2700 	return &bit_waitq;
2701 }
2702 
2703 wait_queue_head_t *
2704 __var_waitqueue(void *p)
2705 {
2706 	/* XXX hash table of wait queues? */
2707 	return &bit_waitq;
2708 }
2709 
2710 struct workqueue_struct *system_wq;
2711 struct workqueue_struct *system_highpri_wq;
2712 struct workqueue_struct *system_unbound_wq;
2713 struct workqueue_struct *system_long_wq;
2714 struct taskq *taskletq;
2715 
2716 void
2717 drm_linux_init(void)
2718 {
2719 	system_wq = (struct workqueue_struct *)
2720 	    taskq_create("drmwq", 4, IPL_HIGH, 0);
2721 	system_highpri_wq = (struct workqueue_struct *)
2722 	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
2723 	system_unbound_wq = (struct workqueue_struct *)
2724 	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
2725 	system_long_wq = (struct workqueue_struct *)
2726 	    taskq_create("drmlwq", 4, IPL_HIGH, 0);
2727 
2728 	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
2729 
2730 	init_waitqueue_head(&bit_waitq);
2731 	init_waitqueue_head(&var_waitq);
2732 
2733 	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
2734 	    "idrpl", NULL);
2735 
2736 	kmap_atomic_va =
2737 	    (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
2738 }
2739 
2740 void
2741 drm_linux_exit(void)
2742 {
2743 	pool_destroy(&idr_pool);
2744 
2745 	taskq_destroy(taskletq);
2746 
2747 	taskq_destroy((struct taskq *)system_long_wq);
2748 	taskq_destroy((struct taskq *)system_unbound_wq);
2749 	taskq_destroy((struct taskq *)system_highpri_wq);
2750 	taskq_destroy((struct taskq *)system_wq);
2751 }
2752 
2753 #define PCIE_ECAP_RESIZE_BAR	0x15
2754 #define RBCAP0			0x04
2755 #define RBCTRL0			0x08
2756 #define RBCTRL_BARINDEX_MASK	0x07
2757 #define RBCTRL_BARSIZE_MASK	0x1f00
2758 #define RBCTRL_BARSIZE_SHIFT	8
2759 
2760 /* size in MB is 1 << nsize */
2761 int
2762 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
2763 {
2764 	pcireg_t	reg;
2765 	uint32_t	offset, capid;
2766 
2767 	KASSERT(bar == 0);
2768 
2769 	offset = PCI_PCIE_ECAP;
2770 
2771 	/* search PCI Express Extended Capabilities */
2772 	do {
2773 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
2774 		capid = PCI_PCIE_ECAP_ID(reg);
2775 		if (capid == PCIE_ECAP_RESIZE_BAR)
2776 			break;
2777 		offset = PCI_PCIE_ECAP_NEXT(reg);
2778 	} while (capid != 0);
2779 
2780 	if (capid == 0) {
2781 		printf("%s: could not find resize bar cap!\n", __func__);
2782 		return -ENOTSUP;
2783 	}
2784 
2785 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
2786 
2787 	if ((reg & (1 << (nsize + 4))) == 0) {
2788 		printf("%s size not supported\n", __func__);
2789 		return -ENOTSUP;
2790 	}
2791 
2792 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2793 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2794 		printf("%s BAR index not 0\n", __func__);
2795 		return -EINVAL;
2796 	}
2797 
2798 	reg &= ~RBCTRL_BARSIZE_MASK;
2799 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2800 
2801 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2802 
2803 	return 0;
2804 }
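
/*
 * Worked example: per the comment above, nsize encodes the BAR size as
 * 1 << nsize megabytes, so a hypothetical caller asking for a 256MB
 * BAR0 would use
 *
 *	error = pci_resize_resource(pdev, 0, 8);	(1 << 8 == 256MB)
 *
 * and nsize 12 would request a 4GB BAR0.  Only BAR 0 is handled here,
 * as the KASSERT enforces, and the size must be present in the RBCAP0
 * mask read above.
 */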
2805 
2806 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2807 
2808 int
2809 register_shrinker(struct shrinker *shrinker, const char *format, ...)
2810 {
2811 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2812 	return 0;
2813 }
2814 
2815 void
2816 unregister_shrinker(struct shrinker *shrinker)
2817 {
2818 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2819 }
2820 
2821 void
2822 drmbackoff(long npages)
2823 {
2824 	struct shrink_control sc;
2825 	struct shrinker *shrinker;
2826 	u_long ret;
2827 
2828 	shrinker = TAILQ_FIRST(&shrinkers);
2829 	while (shrinker && npages > 0) {
2830 		sc.nr_to_scan = npages;
2831 		ret = shrinker->scan_objects(shrinker, &sc);
2832 		npages -= ret;
2833 		shrinker = TAILQ_NEXT(shrinker, next);
2834 	}
2835 }
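
/*
 * Illustrative sketch only: a hypothetical driver shrinker as consumed
 * by drmbackoff() above.  "my_shrink_count" and "my_shrink_scan" are
 * assumed caller-side callbacks; drmbackoff() only ever invokes
 * scan_objects.
 *
 *	static struct shrinker my_shrinker = {
 *		.count_objects = my_shrink_count,
 *		.scan_objects = my_shrink_scan,
 *	};
 *
 *	register_shrinker(&my_shrinker, "drm-myshrinker");
 *	...
 *	unregister_shrinker(&my_shrinker);
 *
 * drmbackoff(npages) then walks the registered shrinkers in order,
 * asking each scan_objects callback for up to sc.nr_to_scan pages until
 * the request is satisfied or the list is exhausted.
 */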
2836 
2837 void *
2838 bitmap_zalloc(u_int n, gfp_t flags)
2839 {
2840 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2841 }
2842 
2843 void
2844 bitmap_free(void *p)
2845 {
2846 	kfree(p);
2847 }
2848 
2849 int
2850 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2851 {
2852 	if (atomic_add_unless(v, -1, 1))
2853 		return 0;
2854 
2855 	rw_enter_write(lock);
2856 	if (atomic_dec_return(v) == 0)
2857 		return 1;
2858 	rw_exit_write(lock);
2859 	return 0;
2860 }
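
/*
 * Usage note: atomic_dec_and_mutex_lock() returns 1 with the lock still
 * held only when the counter drops to zero, so the caller is expected
 * to release it after tearing the object down.  "refcnt" and "lock" are
 * hypothetical caller-side names; despite the Linux name, the lock is
 * an OpenBSD rwlock in this shim.
 *
 *	if (atomic_dec_and_mutex_lock(&refcnt, &lock)) {
 *		(last reference: tear down the object)
 *		rw_exit_write(&lock);
 *	}
 */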
2861 
2862 int
2863 printk(const char *fmt, ...)
2864 {
2865 	int ret, level;
2866 	va_list ap;
2867 
2868 	if (fmt != NULL && *fmt == '\001') {
2869 		level = fmt[1];
2870 #ifndef DRMDEBUG
2871 		if (level >= KERN_INFO[1] && level <= '9')
2872 			return 0;
2873 #endif
2874 		fmt += 2;
2875 	}
2876 
2877 	va_start(ap, fmt);
2878 	ret = vprintf(fmt, ap);
2879 	va_end(ap);
2880 
2881 	return ret;
2882 }
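
/*
 * Worked example: the Linux KERN_* prefixes expand to a SOH byte
 * ('\001') followed by a level digit, which the filter above inspects.
 * On a non-DRMDEBUG kernel
 *
 *	printk(KERN_INFO "link trained\n");	(level '6', dropped)
 *	printk(KERN_ERR "ring hung\n");		(level '3', printed)
 *
 * and a message with no level prefix is always printed verbatim.
 */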
2883 
2884 #define START(node) ((node)->start)
2885 #define LAST(node) ((node)->last)
2886 
2887 struct interval_tree_node *
2888 interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
2889     unsigned long last)
2890 {
2891 	struct interval_tree_node *node;
2892 	struct rb_node *rb;
2893 
2894 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
2895 		node = rb_entry(rb, typeof(*node), rb);
2896 		if (LAST(node) >= start && START(node) <= last)
2897 			return node;
2898 	}
2899 	return NULL;
2900 }
2901 
2902 void
2903 interval_tree_remove(struct interval_tree_node *node,
2904     struct rb_root_cached *root)
2905 {
2906 	rb_erase_cached(&node->rb, root);
2907 }
2908 
2909 void
2910 interval_tree_insert(struct interval_tree_node *node,
2911     struct rb_root_cached *root)
2912 {
2913 	struct rb_node **iter = &root->rb_root.rb_node;
2914 	struct rb_node *parent = NULL;
2915 	struct interval_tree_node *iter_node;
2916 
2917 	while (*iter) {
2918 		parent = *iter;
2919 		iter_node = rb_entry(*iter, struct interval_tree_node, rb);
2920 
2921 		if (node->start < iter_node->start)
2922 			iter = &(*iter)->rb_left;
2923 		else
2924 			iter = &(*iter)->rb_right;
2925 	}
2926 
2927 	rb_link_node(&node->rb, parent, iter);
2928 	rb_insert_color_cached(&node->rb, root, false);
2929 }
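
/*
 * Illustrative sketch only: a minimal insert/query/remove cycle for the
 * interval tree helpers above, with a hypothetical node covering
 * [0x1000, 0x1fff].  RB_ROOT_CACHED is assumed to come from the shared
 * rbtree header.
 *
 *	struct rb_root_cached root = RB_ROOT_CACHED;
 *	struct interval_tree_node node = { .start = 0x1000, .last = 0x1fff };
 *
 *	interval_tree_insert(&node, &root);
 *	if (interval_tree_iter_first(&root, 0x1800, 0x2800) == &node)
 *		(overlap found)
 *	interval_tree_remove(&node, &root);
 *
 * Note that interval_tree_iter_first() here is a linear scan rather
 * than the augmented O(log n) lookup the Linux version performs.
 */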
2930 
2931 int
2932 syncfile_read(struct file *fp, struct uio *uio, int fflags)
2933 {
2934 	return ENXIO;
2935 }
2936 
2937 int
2938 syncfile_write(struct file *fp, struct uio *uio, int fflags)
2939 {
2940 	return ENXIO;
2941 }
2942 
2943 int
2944 syncfile_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2945 {
2946 	return ENOTTY;
2947 }
2948 
2949 int
2950 syncfile_kqfilter(struct file *fp, struct knote *kn)
2951 {
2952 	return EINVAL;
2953 }
2954 
2955 int
2956 syncfile_stat(struct file *fp, struct stat *st, struct proc *p)
2957 {
2958 	memset(st, 0, sizeof(*st));
2959 	st->st_mode = S_IFIFO;	/* XXX */
2960 	return 0;
2961 }
2962 
2963 int
2964 syncfile_close(struct file *fp, struct proc *p)
2965 {
2966 	struct sync_file *sf = fp->f_data;
2967 
2968 	dma_fence_put(sf->fence);
2969 	fp->f_data = NULL;
2970 	free(sf, M_DRM, sizeof(struct sync_file));
2971 	return 0;
2972 }
2973 
2974 int
2975 syncfile_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2976 {
2977 	off_t newoff;
2978 
2979 	if (*offset != 0)
2980 		return EINVAL;
2981 
2982 	switch (whence) {
2983 	case SEEK_SET:
2984 		newoff = 0;
2985 		break;
2986 	case SEEK_END:
2987 		newoff = 0;
2988 		break;
2989 	default:
2990 		return EINVAL;
2991 	}
2992 	mtx_enter(&fp->f_mtx);
2993 	fp->f_offset = newoff;
2994 	mtx_leave(&fp->f_mtx);
2995 	*offset = newoff;
2996 	return 0;
2997 }
2998 
2999 const struct fileops syncfileops = {
3000 	.fo_read	= syncfile_read,
3001 	.fo_write	= syncfile_write,
3002 	.fo_ioctl	= syncfile_ioctl,
3003 	.fo_kqfilter	= syncfile_kqfilter,
3004 	.fo_stat	= syncfile_stat,
3005 	.fo_close	= syncfile_close,
3006 	.fo_seek	= syncfile_seek,
3007 };
3008 
3009 void
3010 fd_install(int fd, struct file *fp)
3011 {
3012 	struct proc *p = curproc;
3013 	struct filedesc *fdp = p->p_fd;
3014 
3015 	if (fp->f_type != DTYPE_SYNC)
3016 		return;
3017 
3018 	fdplock(fdp);
3019 	/* all callers use get_unused_fd_flags(O_CLOEXEC) */
3020 	fdinsert(fdp, fd, UF_EXCLOSE, fp);
3021 	fdpunlock(fdp);
3022 }
3023 
3024 void
3025 fput(struct file *fp)
3026 {
3027 	if (fp->f_type != DTYPE_SYNC)
3028 		return;
3029 
3030 	FRELE(fp, curproc);
3031 }
3032 
3033 int
3034 get_unused_fd_flags(unsigned int flags)
3035 {
3036 	struct proc *p = curproc;
3037 	struct filedesc *fdp = p->p_fd;
3038 	int error, fd;
3039 
3040 	KASSERT((flags & O_CLOEXEC) != 0);
3041 
3042 	fdplock(fdp);
3043 retryalloc:
3044 	if ((error = fdalloc(p, 0, &fd)) != 0) {
3045 		if (error == ENOSPC) {
3046 			fdexpand(p);
3047 			goto retryalloc;
3048 		}
3049 		fdpunlock(fdp);
3050 		return -1;
3051 	}
3052 	fdpunlock(fdp);
3053 
3054 	return fd;
3055 }
3056 
3057 void
3058 put_unused_fd(int fd)
3059 {
3060 	struct filedesc *fdp = curproc->p_fd;
3061 
3062 	fdplock(fdp);
3063 	fdremove(fdp, fd);
3064 	fdpunlock(fdp);
3065 }
3066 
3067 struct dma_fence *
3068 sync_file_get_fence(int fd)
3069 {
3070 	struct proc *p = curproc;
3071 	struct filedesc *fdp = p->p_fd;
3072 	struct file *fp;
3073 	struct sync_file *sf;
3074 	struct dma_fence *f;
3075 
3076 	if ((fp = fd_getfile(fdp, fd)) == NULL)
3077 		return NULL;
3078 
3079 	if (fp->f_type != DTYPE_SYNC) {
3080 		FRELE(fp, p);
3081 		return NULL;
3082 	}
3083 	sf = fp->f_data;
3084 	f = dma_fence_get(sf->fence);
3085 	FRELE(sf->file, p);
3086 	return f;
3087 }
3088 
3089 struct sync_file *
3090 sync_file_create(struct dma_fence *fence)
3091 {
3092 	struct proc *p = curproc;
3093 	struct sync_file *sf;
3094 	struct file *fp;
3095 
3096 	fp = fnew(p);
3097 	if (fp == NULL)
3098 		return NULL;
3099 	fp->f_type = DTYPE_SYNC;
3100 	fp->f_ops = &syncfileops;
3101 	sf = malloc(sizeof(struct sync_file), M_DRM, M_WAITOK | M_ZERO);
3102 	sf->file = fp;
3103 	sf->fence = dma_fence_get(fence);
3104 	fp->f_data = sf;
3105 	return sf;
3106 }
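
/*
 * Illustrative sketch only: exporting a fence to userland as a sync
 * file, roughly the pattern drm's sync-file export paths follow with
 * these helpers.  "fence" is an assumed caller-held dma_fence.
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd >= 0) {
 *		struct sync_file *sf = sync_file_create(fence);
 *		if (sf != NULL)
 *			fd_install(fd, sf->file);
 *		else
 *			put_unused_fd(fd);
 *	}
 *
 * The importer side turns the fd back into a fence reference with
 * sync_file_get_fence(fd).
 */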
3107 
3108 bool
3109 drm_firmware_drivers_only(void)
3110 {
3111 	return false;
3112 }
3113