1 /*	$OpenBSD: drm_linux.c,v 1.93 2022/06/20 01:39:44 visa Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30 
31 #include <dev/pci/ppbreg.h>
32 
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/dma-fence-chain.h>
39 #include <linux/interrupt.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/scatterlist.h>
43 #include <linux/i2c.h>
44 #include <linux/pci.h>
45 #include <linux/notifier.h>
46 #include <linux/backlight.h>
47 #include <linux/shrinker.h>
48 #include <linux/fb.h>
49 #include <linux/xarray.h>
50 #include <linux/interval_tree.h>
51 #include <linux/kthread.h>
52 #include <linux/processor.h>
53 #include <linux/sync_file.h>
54 
55 #include <drm/drm_device.h>
56 #include <drm/drm_print.h>
57 
58 #if defined(__amd64__) || defined(__i386__)
59 #include "bios.h"
60 #endif
61 
62 /* allowed to sleep */
63 void
64 tasklet_unlock_wait(struct tasklet_struct *ts)
65 {
66 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
67 		cpu_relax();
68 }
69 
70 /* must not sleep */
71 void
72 tasklet_unlock_spin_wait(struct tasklet_struct *ts)
73 {
74 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
75 		cpu_relax();
76 }
77 
78 void
79 tasklet_run(void *arg)
80 {
81 	struct tasklet_struct *ts = arg;
82 
83 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
84 	if (tasklet_trylock(ts)) {
85 		if (!atomic_read(&ts->count)) {
86 			if (ts->use_callback)
87 				ts->callback(ts);
88 			else
89 				ts->func(ts->data);
90 		}
91 		tasklet_unlock(ts);
92 	}
93 }
94 
95 /* 32 bit powerpc lacks 64 bit atomics */
96 #if defined(__powerpc__) && !defined(__powerpc64__)
97 struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
98 #endif
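
#if defined(__powerpc__) && !defined(__powerpc64__)
/*
 * Illustrative sketch only, not used by the driver code: with no native
 * 64-bit atomics, a 64-bit read-modify-write can be serialized with
 * atomic64_mtx above.  The real emulation lives in the Linux
 * compatibility headers; this merely shows the pattern the mutex enables.
 */
static inline int64_t
drm_linux_example_add_return64(volatile int64_t *p, int64_t v)
{
	int64_t r;

	mtx_enter(&atomic64_mtx);
	*p += v;
	r = *p;
	mtx_leave(&atomic64_mtx);
	return r;
}
#endif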
99 
100 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
101 volatile struct proc *sch_proc;
102 volatile void *sch_ident;
103 int sch_priority;
104 
105 void
106 set_current_state(int state)
107 {
108 	if (sch_ident != curproc)
109 		mtx_enter(&sch_mtx);
110 	MUTEX_ASSERT_LOCKED(&sch_mtx);
111 	sch_ident = sch_proc = curproc;
112 	sch_priority = state;
113 }
114 
115 void
116 __set_current_state(int state)
117 {
118 	KASSERT(state == TASK_RUNNING);
119 	if (sch_ident == curproc) {
120 		MUTEX_ASSERT_LOCKED(&sch_mtx);
121 		sch_ident = NULL;
122 		mtx_leave(&sch_mtx);
123 	}
124 }
125 
126 void
127 schedule(void)
128 {
129 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
130 }
131 
132 long
133 schedule_timeout(long timeout)
134 {
135 	struct sleep_state sls;
136 	unsigned long deadline;
137 	int wait, spl, timo = 0;
138 
139 	MUTEX_ASSERT_LOCKED(&sch_mtx);
140 	KASSERT(!cold);
141 
142 	if (timeout != MAX_SCHEDULE_TIMEOUT)
143 		timo = timeout;
144 	sleep_setup(&sls, sch_ident, sch_priority, "schto", timo);
145 
146 	wait = (sch_proc == curproc && timeout > 0);
147 
148 	spl = MUTEX_OLDIPL(&sch_mtx);
149 	MUTEX_OLDIPL(&sch_mtx) = splsched();
150 	mtx_leave(&sch_mtx);
151 
152 	if (timeout != MAX_SCHEDULE_TIMEOUT)
153 		deadline = jiffies + timeout;
154 	sleep_finish(&sls, wait);
155 	if (timeout != MAX_SCHEDULE_TIMEOUT)
156 		timeout = deadline - jiffies;
157 
158 	mtx_enter(&sch_mtx);
159 	MUTEX_OLDIPL(&sch_mtx) = spl;
160 	sch_ident = curproc;
161 
162 	return timeout > 0 ? timeout : 0;
163 }
164 
165 long
166 schedule_timeout_uninterruptible(long timeout)
167 {
168 	tsleep(curproc, PWAIT, "schtou", timeout);
169 	return 0;
170 }
171 
172 int
173 wake_up_process(struct proc *p)
174 {
175 	atomic_cas_ptr(&sch_proc, p, NULL);
176 	return wakeup_proc(p, NULL);
177 }
178 
179 void
180 flush_workqueue(struct workqueue_struct *wq)
181 {
182 	if (cold)
183 		return;
184 
185 	if (wq)
186 		taskq_barrier((struct taskq *)wq);
187 }
188 
189 bool
190 flush_work(struct work_struct *work)
191 {
192 	if (cold)
193 		return false;
194 
195 	if (work->tq)
196 		taskq_barrier(work->tq);
197 	return false;
198 }
199 
200 bool
201 flush_delayed_work(struct delayed_work *dwork)
202 {
203 	bool ret = false;
204 
205 	if (cold)
206 		return false;
207 
208 	while (timeout_pending(&dwork->to)) {
209 		tsleep(dwork, PWAIT, "fldwto", 1);
210 		ret = true;
211 	}
212 
213 	if (dwork->tq)
214 		taskq_barrier(dwork->tq);
215 	return ret;
216 }
217 
218 struct kthread {
219 	int (*func)(void *);
220 	void *data;
221 	struct proc *proc;
222 	volatile u_int flags;
223 #define KTHREAD_SHOULDSTOP	0x0000001
224 #define KTHREAD_STOPPED		0x0000002
225 #define KTHREAD_SHOULDPARK	0x0000004
226 #define KTHREAD_PARKED		0x0000008
227 	LIST_ENTRY(kthread) next;
228 };
229 
230 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
231 
232 void
233 kthread_func(void *arg)
234 {
235 	struct kthread *thread = arg;
236 	int ret;
237 
238 	ret = thread->func(thread->data);
239 	thread->flags |= KTHREAD_STOPPED;
240 	wakeup(thread);
241 	kthread_exit(ret);
242 }
243 
244 struct proc *
245 kthread_run(int (*func)(void *), void *data, const char *name)
246 {
247 	struct kthread *thread;
248 
249 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
250 	thread->func = func;
251 	thread->data = data;
252 	thread->flags = 0;
253 
254 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
255 		free(thread, M_DRM, sizeof(*thread));
256 		return ERR_PTR(-ENOMEM);
257 	}
258 
259 	LIST_INSERT_HEAD(&kthread_list, thread, next);
260 	return thread->proc;
261 }
262 
263 struct kthread_worker *
264 kthread_create_worker(unsigned int flags, const char *fmt, ...)
265 {
266 	char name[MAXCOMLEN+1];
267 	va_list ap;
268 
269 	struct kthread_worker *w = malloc(sizeof(*w), M_DRM, M_WAITOK);
270 	va_start(ap, fmt);
271 	vsnprintf(name, sizeof(name), fmt, ap);
272 	va_end(ap);
273 	w->tq = taskq_create(name, 1, IPL_HIGH, 0);
274 
275 	return w;
276 }
277 
278 void
279 kthread_destroy_worker(struct kthread_worker *worker)
280 {
281 	taskq_destroy(worker->tq);
282 	free(worker, M_DRM, sizeof(*worker));
283 
284 }
285 
286 void
287 kthread_init_work(struct kthread_work *work, void (*func)(struct kthread_work *))
288 {
289 	work->tq = NULL;
290 	task_set(&work->task, (void (*)(void *))func, work);
291 }
292 
293 bool
294 kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
295 {
296 	work->tq = worker->tq;
297 	return task_add(work->tq, &work->task);
298 }
299 
300 bool
301 kthread_cancel_work_sync(struct kthread_work *work)
302 {
303 	return task_del(work->tq, &work->task);
304 }
305 
306 void
307 kthread_flush_work(struct kthread_work *work)
308 {
309 	if (cold)
310 		return;
311 
312 	if (work->tq)
313 		taskq_barrier(work->tq);
314 }
315 
316 void
317 kthread_flush_worker(struct kthread_worker *worker)
318 {
319 	if (cold)
320 		return;
321 
322 	if (worker->tq)
323 		taskq_barrier(worker->tq);
324 }
325 
326 struct kthread *
327 kthread_lookup(struct proc *p)
328 {
329 	struct kthread *thread;
330 
331 	LIST_FOREACH(thread, &kthread_list, next) {
332 		if (thread->proc == p)
333 			break;
334 	}
335 	KASSERT(thread);
336 
337 	return thread;
338 }
339 
340 int
341 kthread_should_park(void)
342 {
343 	struct kthread *thread = kthread_lookup(curproc);
344 	return (thread->flags & KTHREAD_SHOULDPARK);
345 }
346 
347 void
348 kthread_parkme(void)
349 {
350 	struct kthread *thread = kthread_lookup(curproc);
351 
352 	while (thread->flags & KTHREAD_SHOULDPARK) {
353 		thread->flags |= KTHREAD_PARKED;
354 		wakeup(thread);
355 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
356 		thread->flags &= ~KTHREAD_PARKED;
357 	}
358 }
359 
360 void
361 kthread_park(struct proc *p)
362 {
363 	struct kthread *thread = kthread_lookup(p);
364 
365 	while ((thread->flags & KTHREAD_PARKED) == 0) {
366 		thread->flags |= KTHREAD_SHOULDPARK;
367 		wake_up_process(thread->proc);
368 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
369 	}
370 }
371 
372 void
373 kthread_unpark(struct proc *p)
374 {
375 	struct kthread *thread = kthread_lookup(p);
376 
377 	thread->flags &= ~KTHREAD_SHOULDPARK;
378 	wakeup(thread);
379 }
380 
381 int
382 kthread_should_stop(void)
383 {
384 	struct kthread *thread = kthread_lookup(curproc);
385 	return (thread->flags & KTHREAD_SHOULDSTOP);
386 }
387 
388 void
389 kthread_stop(struct proc *p)
390 {
391 	struct kthread *thread = kthread_lookup(p);
392 
393 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
394 		thread->flags |= KTHREAD_SHOULDSTOP;
395 		kthread_unpark(p);
396 		wake_up_process(thread->proc);
397 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
398 	}
399 	LIST_REMOVE(thread, next);
400 	free(thread, M_DRM, sizeof(*thread));
401 }
402 
403 #if NBIOS > 0
404 extern char smbios_board_vendor[];
405 extern char smbios_board_prod[];
406 extern char smbios_board_serial[];
407 #endif
408 
409 bool
410 dmi_match(int slot, const char *str)
411 {
412 	switch (slot) {
413 	case DMI_SYS_VENDOR:
414 		if (hw_vendor != NULL &&
415 		    !strcmp(hw_vendor, str))
416 			return true;
417 		break;
418 	case DMI_PRODUCT_NAME:
419 		if (hw_prod != NULL &&
420 		    !strcmp(hw_prod, str))
421 			return true;
422 		break;
423 	case DMI_PRODUCT_VERSION:
424 		if (hw_ver != NULL &&
425 		    !strcmp(hw_ver, str))
426 			return true;
427 		break;
428 #if NBIOS > 0
429 	case DMI_BOARD_VENDOR:
430 		if (strcmp(smbios_board_vendor, str) == 0)
431 			return true;
432 		break;
433 	case DMI_BOARD_NAME:
434 		if (strcmp(smbios_board_prod, str) == 0)
435 			return true;
436 		break;
437 	case DMI_BOARD_SERIAL:
438 		if (strcmp(smbios_board_serial, str) == 0)
439 			return true;
440 		break;
441 #else
442 	case DMI_BOARD_VENDOR:
443 		if (hw_vendor != NULL &&
444 		    !strcmp(hw_vendor, str))
445 			return true;
446 		break;
447 	case DMI_BOARD_NAME:
448 		if (hw_prod != NULL &&
449 		    !strcmp(hw_prod, str))
450 			return true;
451 		break;
452 #endif
453 	case DMI_NONE:
454 	default:
455 		return false;
456 	}
457 
458 	return false;
459 }
460 
461 static bool
462 dmi_found(const struct dmi_system_id *dsi)
463 {
464 	int i, slot;
465 
466 	for (i = 0; i < nitems(dsi->matches); i++) {
467 		slot = dsi->matches[i].slot;
468 		if (slot == DMI_NONE)
469 			break;
470 		if (!dmi_match(slot, dsi->matches[i].substr))
471 			return false;
472 	}
473 
474 	return true;
475 }
476 
477 const struct dmi_system_id *
478 dmi_first_match(const struct dmi_system_id *sysid)
479 {
480 	const struct dmi_system_id *dsi;
481 
482 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
483 		if (dmi_found(dsi))
484 			return dsi;
485 	}
486 
487 	return NULL;
488 }
489 
490 #if NBIOS > 0
491 extern char smbios_bios_date[];
492 #endif
493 
494 const char *
495 dmi_get_system_info(int slot)
496 {
497 	WARN_ON(slot != DMI_BIOS_DATE);
498 #if NBIOS > 0
499 	if (slot == DMI_BIOS_DATE)
500 		return smbios_bios_date;
501 #endif
502 	return NULL;
503 }
504 
505 int
506 dmi_check_system(const struct dmi_system_id *sysid)
507 {
508 	const struct dmi_system_id *dsi;
509 	int num = 0;
510 
511 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
512 		if (dmi_found(dsi)) {
513 			num++;
514 			if (dsi->callback && dsi->callback(dsi))
515 				break;
516 		}
517 	}
518 	return (num);
519 }
520 
521 struct vm_page *
522 alloc_pages(unsigned int gfp_mask, unsigned int order)
523 {
524 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
525 	struct uvm_constraint_range *constraint = &no_constraint;
526 	struct pglist mlist;
527 
528 	if (gfp_mask & M_CANFAIL)
529 		flags |= UVM_PLA_FAILOK;
530 	if (gfp_mask & M_ZERO)
531 		flags |= UVM_PLA_ZERO;
532 	if (gfp_mask & __GFP_DMA32)
533 		constraint = &dma_constraint;
534 
535 	TAILQ_INIT(&mlist);
536 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
537 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
538 		return NULL;
539 	return TAILQ_FIRST(&mlist);
540 }
541 
542 void
543 __free_pages(struct vm_page *page, unsigned int order)
544 {
545 	struct pglist mlist;
546 	int i;
547 
548 	TAILQ_INIT(&mlist);
549 	for (i = 0; i < (1 << order); i++)
550 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
551 	uvm_pglistfree(&mlist);
552 }
553 
554 void
555 __pagevec_release(struct pagevec *pvec)
556 {
557 	struct pglist mlist;
558 	int i;
559 
560 	TAILQ_INIT(&mlist);
561 	for (i = 0; i < pvec->nr; i++)
562 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
563 	uvm_pglistfree(&mlist);
564 	pagevec_reinit(pvec);
565 }
566 
567 static struct kmem_va_mode kv_physwait = {
568 	.kv_map = &phys_map,
569 	.kv_wait = 1,
570 };
571 
572 void *
573 kmap(struct vm_page *pg)
574 {
575 	vaddr_t va;
576 
577 #if defined (__HAVE_PMAP_DIRECT)
578 	va = pmap_map_direct(pg);
579 #else
580 	va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
581 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
582 	pmap_update(pmap_kernel());
583 #endif
584 	return (void *)va;
585 }
586 
587 void
588 kunmap_va(void *addr)
589 {
590 	vaddr_t va = (vaddr_t)addr;
591 
592 #if defined (__HAVE_PMAP_DIRECT)
593 	pmap_unmap_direct(va);
594 #else
595 	pmap_kremove(va, PAGE_SIZE);
596 	pmap_update(pmap_kernel());
597 	km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
598 #endif
599 }
600 
601 vaddr_t kmap_atomic_va;
602 int kmap_atomic_inuse;
603 
604 void *
605 kmap_atomic_prot(struct vm_page *pg, pgprot_t prot)
606 {
607 	KASSERT(!kmap_atomic_inuse);
608 
609 	kmap_atomic_inuse = 1;
610 	pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot,
611 	    PROT_READ | PROT_WRITE);
612 	return (void *)kmap_atomic_va;
613 }
614 
615 void
616 kunmap_atomic(void *addr)
617 {
618 	KASSERT(kmap_atomic_inuse);
619 
620 	pmap_kremove(kmap_atomic_va, PAGE_SIZE);
621 	kmap_atomic_inuse = 0;
622 }
623 
624 void *
625 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
626      pgprot_t prot)
627 {
628 	vaddr_t va;
629 	paddr_t pa;
630 	int i;
631 
632 	va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
633 	    &kd_nowait);
634 	if (va == 0)
635 		return NULL;
636 	for (i = 0; i < npages; i++) {
637 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
638 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
639 		    PROT_READ | PROT_WRITE,
640 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
641 		pmap_update(pmap_kernel());
642 	}
643 
644 	return (void *)va;
645 }
646 
647 void
648 vunmap(void *addr, size_t size)
649 {
650 	vaddr_t va = (vaddr_t)addr;
651 
652 	pmap_remove(pmap_kernel(), va, va + size);
653 	pmap_update(pmap_kernel());
654 	km_free((void *)va, size, &kv_any, &kp_none);
655 }
656 
657 bool
658 is_vmalloc_addr(const void *p)
659 {
660 	vaddr_t min, max, addr;
661 
662 	min = vm_map_min(kernel_map);
663 	max = vm_map_max(kernel_map);
664 	addr = (vaddr_t)p;
665 
666 	if (addr >= min && addr <= max)
667 		return true;
668 	else
669 		return false;
670 }
671 
672 void
673 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
674     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
675 {
676 	const uint8_t *cbuf = buf;
677 	int i;
678 
679 	for (i = 0; i < len; i++) {
680 		if ((i % rowsize) == 0)
681 			printf("%s", prefix_str);
682 		printf("%02x", cbuf[i]);
683 		if ((i % rowsize) == (rowsize - 1))
684 			printf("\n");
685 		else
686 			printf(" ");
687 	}
688 }
689 
690 void *
691 memchr_inv(const void *s, int c, size_t n)
692 {
693 	if (n != 0) {
694 		const unsigned char *p = s;
695 
696 		do {
697 			if (*p++ != (unsigned char)c)
698 				return ((void *)(p - 1));
699 		} while (--n != 0);
700 	}
701 	return (NULL);
702 }
703 
704 int
705 panic_cmp(struct rb_node *a, struct rb_node *b)
706 {
707 	panic(__func__);
708 }
709 
710 #undef RB_ROOT
711 #define RB_ROOT(head)	(head)->rbh_root
712 
713 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
714 
715 /*
716  * This is a fairly minimal implementation of the Linux "idr" API.  It
717  * probably isn't very efficient, and definitely isn't RCU safe.  The
718  * pre-load buffer is global instead of per-cpu; we rely on the kernel
719  * lock to make this work.  Randomizing IDs to make them harder to
720  * guess is not enabled yet; see the "notyet" block in idr_alloc().
721  */
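
/*
 * Illustrative sketch only, not used by the driver code: the expected
 * call pattern for this idr emulation, assuming the caller holds the
 * kernel lock as noted above and that GFP_KERNEL from the compat
 * headers is an acceptable allocation flag.
 */
static inline void
drm_linux_idr_example(void)
{
	struct idr idr;
	int payload, id;

	idr_init(&idr);
	/* end <= 0 means "no upper bound" in idr_alloc(). */
	id = idr_alloc(&idr, &payload, 1, 0, GFP_KERNEL);
	if (id >= 0) {
		KASSERT(idr_find(&idr, id) == &payload);
		idr_remove(&idr, id);
	}
	idr_destroy(&idr);
}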
722 
723 int idr_cmp(struct idr_entry *, struct idr_entry *);
724 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
725 
726 struct pool idr_pool;
727 struct idr_entry *idr_entry_cache;
728 
729 void
730 idr_init(struct idr *idr)
731 {
732 	SPLAY_INIT(&idr->tree);
733 }
734 
735 void
736 idr_destroy(struct idr *idr)
737 {
738 	struct idr_entry *id;
739 
740 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
741 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
742 		pool_put(&idr_pool, id);
743 	}
744 }
745 
746 void
747 idr_preload(unsigned int gfp_mask)
748 {
749 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
750 
751 	KERNEL_ASSERT_LOCKED();
752 
753 	if (idr_entry_cache == NULL)
754 		idr_entry_cache = pool_get(&idr_pool, flags);
755 }
756 
757 int
758 idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
759 {
760 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
761 	struct idr_entry *id;
762 	int begin;
763 
764 	KERNEL_ASSERT_LOCKED();
765 
766 	if (idr_entry_cache) {
767 		id = idr_entry_cache;
768 		idr_entry_cache = NULL;
769 	} else {
770 		id = pool_get(&idr_pool, flags);
771 		if (id == NULL)
772 			return -ENOMEM;
773 	}
774 
775 	if (end <= 0)
776 		end = INT_MAX;
777 
778 #ifdef notyet
779 	id->id = begin = start + arc4random_uniform(end - start);
780 #else
781 	id->id = begin = start;
782 #endif
783 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
784 		if (id->id == end)
785 			id->id = start;
786 		else
787 			id->id++;
788 		if (id->id == begin) {
789 			pool_put(&idr_pool, id);
790 			return -ENOSPC;
791 		}
792 	}
793 	id->ptr = ptr;
794 	return id->id;
795 }
796 
797 void *
798 idr_replace(struct idr *idr, void *ptr, unsigned long id)
799 {
800 	struct idr_entry find, *res;
801 	void *old;
802 
803 	find.id = id;
804 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
805 	if (res == NULL)
806 		return ERR_PTR(-ENOENT);
807 	old = res->ptr;
808 	res->ptr = ptr;
809 	return old;
810 }
811 
812 void *
813 idr_remove(struct idr *idr, unsigned long id)
814 {
815 	struct idr_entry find, *res;
816 	void *ptr = NULL;
817 
818 	find.id = id;
819 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
820 	if (res) {
821 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
822 		ptr = res->ptr;
823 		pool_put(&idr_pool, res);
824 	}
825 	return ptr;
826 }
827 
828 void *
829 idr_find(struct idr *idr, unsigned long id)
830 {
831 	struct idr_entry find, *res;
832 
833 	find.id = id;
834 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
835 	if (res == NULL)
836 		return NULL;
837 	return res->ptr;
838 }
839 
840 void *
841 idr_get_next(struct idr *idr, int *id)
842 {
843 	struct idr_entry *res;
844 
845 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
846 		if (res->id >= *id) {
847 			*id = res->id;
848 			return res->ptr;
849 		}
850 	}
851 
852 	return NULL;
853 }
854 
855 int
856 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
857 {
858 	struct idr_entry *id;
859 	int ret;
860 
861 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
862 		ret = func(id->id, id->ptr, data);
863 		if (ret)
864 			return ret;
865 	}
866 
867 	return 0;
868 }
869 
870 int
871 idr_cmp(struct idr_entry *a, struct idr_entry *b)
872 {
873 	return (a->id < b->id ? -1 : a->id > b->id);
874 }
875 
876 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
877 
878 void
879 ida_init(struct ida *ida)
880 {
881 	idr_init(&ida->idr);
882 }
883 
884 void
885 ida_destroy(struct ida *ida)
886 {
887 	idr_destroy(&ida->idr);
888 }
889 
890 int
891 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
892     gfp_t gfp_mask)
893 {
894 	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
895 }
896 
897 void
898 ida_simple_remove(struct ida *ida, unsigned int id)
899 {
900 	idr_remove(&ida->idr, id);
901 }
902 
903 int
904 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
905 {
906 	return (a->id < b->id ? -1 : a->id > b->id);
907 }
908 
909 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
910 struct pool xa_pool;
911 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
912 
913 void
914 xa_init_flags(struct xarray *xa, gfp_t flags)
915 {
916 	static int initialized;
917 
918 	if (!initialized) {
919 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_NONE, 0,
920 		    "xapl", NULL);
921 		initialized = 1;
922 	}
923 	SPLAY_INIT(&xa->xa_tree);
924 	if (flags & XA_FLAGS_LOCK_IRQ)
925 		mtx_init(&xa->xa_lock, IPL_TTY);
926 	else
927 		mtx_init(&xa->xa_lock, IPL_NONE);
928 }
929 
930 void
931 xa_destroy(struct xarray *xa)
932 {
933 	struct xarray_entry *id;
934 
935 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
936 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
937 		pool_put(&xa_pool, id);
938 	}
939 }
940 
941 int
942 __xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
943 {
944 	struct xarray_entry *xid;
945 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
946 	int begin;
947 
948 	if (gfp & GFP_NOWAIT) {
949 		xid = pool_get(&xa_pool, PR_NOWAIT);
950 	} else {
951 		mtx_leave(&xa->xa_lock);
952 		xid = pool_get(&xa_pool, PR_WAITOK);
953 		mtx_enter(&xa->xa_lock);
954 	}
955 
956 	if (xid == NULL)
957 		return -ENOMEM;
958 
959 	if (limit <= 0)
960 		limit = INT_MAX;
961 
962 	xid->id = begin = start;
963 
964 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
965 		if (xid->id == limit)
966 			xid->id = start;
967 		else
968 			xid->id++;
969 		if (xid->id == begin) {
970 			pool_put(&xa_pool, xid);
971 			return -EBUSY;
972 		}
973 	}
974 	xid->ptr = entry;
975 	*id = xid->id;
976 	return 0;
977 }
978 
979 void *
980 __xa_erase(struct xarray *xa, unsigned long index)
981 {
982 	struct xarray_entry find, *res;
983 	void *ptr = NULL;
984 
985 	find.id = index;
986 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
987 	if (res) {
988 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
989 		ptr = res->ptr;
990 		pool_put(&xa_pool, res);
991 	}
992 	return ptr;
993 }
994 
995 void *
996 __xa_load(struct xarray *xa, unsigned long index)
997 {
998 	struct xarray_entry find, *res;
999 
1000 	find.id = index;
1001 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1002 	if (res == NULL)
1003 		return NULL;
1004 	return res->ptr;
1005 }
1006 
1007 void *
1008 __xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1009 {
1010 	struct xarray_entry find, *res;
1011 	void *prev;
1012 
1013 	if (entry == NULL)
1014 		return __xa_erase(xa, index);
1015 
1016 	find.id = index;
1017 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1018 	if (res != NULL) {
1019 		/* index exists */
1020 		/* XXX Multislot entries updates not implemented yet */
1021 		prev = res->ptr;
1022 		res->ptr = entry;
1023 		return prev;
1024 	}
1025 
1026 	/* index not found, add new */
1027 	if (gfp & GFP_NOWAIT) {
1028 		res = pool_get(&xa_pool, PR_NOWAIT);
1029 	} else {
1030 		mtx_leave(&xa->xa_lock);
1031 		res = pool_get(&xa_pool, PR_WAITOK);
1032 		mtx_enter(&xa->xa_lock);
1033 	}
1034 	if (res == NULL)
1035 		return XA_ERROR(-ENOMEM);
1036 	res->id = index;
1037 	res->ptr = entry;
1038 	if (SPLAY_INSERT(xarray_tree, &xa->xa_tree, res) != NULL)
1039 		return XA_ERROR(-EINVAL);
1040 	return NULL; /* no prev entry at index */
1041 }
1042 
1043 void *
1044 xa_get_next(struct xarray *xa, unsigned long *index)
1045 {
1046 	struct xarray_entry *res;
1047 
1048 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
1049 		if (res->id >= *index) {
1050 			*index = res->id;
1051 			return res->ptr;
1052 		}
1053 	}
1054 
1055 	return NULL;
1056 }
1057 
1058 int
1059 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
1060 {
1061 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
1062 	    M_DRM, gfp_mask | M_ZERO);
1063 	if (table->sgl == NULL)
1064 		return -ENOMEM;
1065 	table->nents = table->orig_nents = nents;
1066 	sg_mark_end(&table->sgl[nents - 1]);
1067 	return 0;
1068 }
1069 
1070 void
1071 sg_free_table(struct sg_table *table)
1072 {
1073 	free(table->sgl, M_DRM,
1074 	    table->orig_nents * sizeof(struct scatterlist));
1075 	table->orig_nents = 0;
1076 	table->sgl = NULL;
1077 }
1078 
1079 size_t
1080 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
1081     const void *buf, size_t buflen)
1082 {
1083 	panic("%s", __func__);
1084 }
1085 
1086 int
1087 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1088 {
1089 	void *cmd = NULL;
1090 	int cmdlen = 0;
1091 	int err, ret = 0;
1092 	int op;
1093 
1094 	iic_acquire_bus(&adap->ic, 0);
1095 
1096 	while (num > 2) {
1097 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
1098 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
1099 		    msgs->buf, msgs->len, 0);
1100 		if (err) {
1101 			ret = -err;
1102 			goto fail;
1103 		}
1104 		msgs++;
1105 		num--;
1106 		ret++;
1107 	}
1108 
1109 	if (num > 1) {
1110 		cmd = msgs->buf;
1111 		cmdlen = msgs->len;
1112 		msgs++;
1113 		num--;
1114 		ret++;
1115 	}
1116 
1117 	op = (msgs->flags & I2C_M_RD) ?
1118 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
1119 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
1120 	    msgs->buf, msgs->len, 0);
1121 	if (err) {
1122 		ret = -err;
1123 		goto fail;
1124 	}
1125 	msgs++;
1126 	ret++;
1127 
1128 fail:
1129 	iic_release_bus(&adap->ic, 0);
1130 
1131 	return ret;
1132 }
1133 
1134 int
1135 __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1136 {
1137 	int ret, retries;
1138 
1139 	retries = adap->retries;
1140 retry:
1141 	if (adap->algo)
1142 		ret = adap->algo->master_xfer(adap, msgs, num);
1143 	else
1144 		ret = i2c_master_xfer(adap, msgs, num);
1145 	if (ret == -EAGAIN && retries > 0) {
1146 		retries--;
1147 		goto retry;
1148 	}
1149 
1150 	return ret;
1151 }
1152 
1153 int
1154 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1155 {
1156 	int ret;
1157 
1158 	if (adap->lock_ops)
1159 		adap->lock_ops->lock_bus(adap, 0);
1160 
1161 	ret = __i2c_transfer(adap, msgs, num);
1162 
1163 	if (adap->lock_ops)
1164 		adap->lock_ops->unlock_bus(adap, 0);
1165 
1166 	return ret;
1167 }
1168 
1169 int
1170 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1171 {
1172 	struct i2c_algo_bit_data *algo = adap->algo_data;
1173 	struct i2c_adapter bb;
1174 
1175 	memset(&bb, 0, sizeof(bb));
1176 	bb.ic = algo->ic;
1177 	bb.retries = adap->retries;
1178 	return i2c_master_xfer(&bb, msgs, num);
1179 }
1180 
1181 uint32_t
1182 i2c_bb_functionality(struct i2c_adapter *adap)
1183 {
1184 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1185 }
1186 
1187 struct i2c_algorithm i2c_bit_algo = {
1188 	.master_xfer = i2c_bb_master_xfer,
1189 	.functionality = i2c_bb_functionality
1190 };
1191 
1192 int
1193 i2c_bit_add_bus(struct i2c_adapter *adap)
1194 {
1195 	adap->algo = &i2c_bit_algo;
1196 	adap->retries = 3;
1197 
1198 	return 0;
1199 }
1200 
1201 #if defined(__amd64__) || defined(__i386__)
1202 
1203 /*
1204  * This is a minimal implementation of the Linux vga_get/vga_put
1205  * interface.  In all likelihood, it will only work for inteldrm(4) as
1206  * it assumes that if there is another active VGA device in the
1207  * system, it is sitting behind a PCI bridge.
1208  */
1209 
1210 extern int pci_enumerate_bus(struct pci_softc *,
1211     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1212 
1213 pcitag_t vga_bridge_tag;
1214 int vga_bridge_disabled;
1215 
1216 int
1217 vga_disable_bridge(struct pci_attach_args *pa)
1218 {
1219 	pcireg_t bhlc, bc;
1220 
1221 	if (pa->pa_domain != 0)
1222 		return 0;
1223 
1224 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1225 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1226 		return 0;
1227 
1228 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1229 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1230 		return 0;
1231 	bc &= ~PPB_BC_VGA_ENABLE;
1232 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1233 
1234 	vga_bridge_tag = pa->pa_tag;
1235 	vga_bridge_disabled = 1;
1236 
1237 	return 1;
1238 }
1239 
1240 void
1241 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1242 {
1243 	KASSERT(pdev->pci->sc_bridgetag == NULL);
1244 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1245 }
1246 
1247 void
1248 vga_put(struct pci_dev *pdev, int rsrc)
1249 {
1250 	pcireg_t bc;
1251 
1252 	if (!vga_bridge_disabled)
1253 		return;
1254 
1255 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1256 	bc |= PPB_BC_VGA_ENABLE;
1257 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1258 
1259 	vga_bridge_disabled = 0;
1260 }
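
/*
 * Illustrative sketch only, not used by the driver code: a caller is
 * expected to bracket accesses to legacy VGA resources with
 * vga_get_uninterruptible()/vga_put(), which here temporarily disable
 * VGA forwarding on the bridge in front of any other VGA device.  The
 * rsrc argument is ignored by this implementation, so 0 is passed.
 */
static inline void
drm_linux_vga_arbitration_example(struct pci_dev *pdev)
{
	vga_get_uninterruptible(pdev, 0);
	/* ... touch legacy VGA registers here ... */
	vga_put(pdev, 0);
}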
1261 
1262 #endif
1263 
1264 /*
1265  * ACPI types and interfaces.
1266  */
1267 
1268 #ifdef __HAVE_ACPI
1269 #include "acpi.h"
1270 #endif
1271 
1272 #if NACPI > 0
1273 
1274 #include <dev/acpi/acpireg.h>
1275 #include <dev/acpi/acpivar.h>
1276 #include <dev/acpi/amltypes.h>
1277 #include <dev/acpi/dsdt.h>
1278 
1279 acpi_status
1280 acpi_get_table(const char *sig, int instance,
1281     struct acpi_table_header **hdr)
1282 {
1283 	struct acpi_softc *sc = acpi_softc;
1284 	struct acpi_q *entry;
1285 
1286 	KASSERT(instance == 1);
1287 
1288 	if (sc == NULL)
1289 		return AE_NOT_FOUND;
1290 
1291 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1292 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1293 			*hdr = entry->q_table;
1294 			return 0;
1295 		}
1296 	}
1297 
1298 	return AE_NOT_FOUND;
1299 }
1300 
1301 acpi_status
1302 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1303 {
1304 	node = aml_searchname(node, name);
1305 	if (node == NULL)
1306 		return AE_NOT_FOUND;
1307 
1308 	*rnode = node;
1309 	return 0;
1310 }
1311 
1312 acpi_status
1313 acpi_get_name(acpi_handle node, int type,  struct acpi_buffer *buffer)
1314 {
1315 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1316 	KASSERT(type == ACPI_FULL_PATHNAME);
1317 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1318 	return 0;
1319 }
1320 
1321 acpi_status
1322 acpi_evaluate_object(acpi_handle node, const char *name,
1323     struct acpi_object_list *params, struct acpi_buffer *result)
1324 {
1325 	struct aml_value args[4], res;
1326 	union acpi_object *obj;
1327 	uint8_t *data;
1328 	int i;
1329 
1330 	KASSERT(params->count <= nitems(args));
1331 
1332 	for (i = 0; i < params->count; i++) {
1333 		args[i].type = params->pointer[i].type;
1334 		switch (args[i].type) {
1335 		case AML_OBJTYPE_INTEGER:
1336 			args[i].v_integer = params->pointer[i].integer.value;
1337 			break;
1338 		case AML_OBJTYPE_BUFFER:
1339 			args[i].length = params->pointer[i].buffer.length;
1340 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1341 			break;
1342 		default:
1343 			printf("%s: arg type 0x%02x\n", __func__, args[i].type);
1344 			return AE_BAD_PARAMETER;
1345 		}
1346 	}
1347 
1348 	if (name) {
1349 		node = aml_searchname(node, name);
1350 		if (node == NULL)
1351 			return AE_NOT_FOUND;
1352 	}
1353 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1354 		aml_freevalue(&res);
1355 		return AE_ERROR;
1356 	}
1357 
1358 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1359 
1360 	result->length = sizeof(union acpi_object);
1361 	switch (res.type) {
1362 	case AML_OBJTYPE_BUFFER:
1363 		result->length += res.length;
1364 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1365 		obj = (union acpi_object *)result->pointer;
1366 		data = (uint8_t *)(obj + 1);
1367 		obj->type = res.type;
1368 		obj->buffer.length = res.length;
1369 		obj->buffer.pointer = data;
1370 		memcpy(data, res.v_buffer, res.length);
1371 		break;
1372 	default:
1373 		printf("%s: return type 0x%02x\n", __func__, res.type);
1374 		aml_freevalue(&res);
1375 		return AE_ERROR;
1376 	}
1377 
1378 	aml_freevalue(&res);
1379 	return 0;
1380 }
1381 
1382 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1383 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1384 
1385 int
1386 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1387 {
1388 	struct acpi_bus_event event;
1389 	struct notifier_block *nb;
1390 
1391 	event.device_class = ACPI_VIDEO_CLASS;
1392 	event.type = notify;
1393 
1394 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1395 		nb->notifier_call(nb, 0, &event);
1396 	return 0;
1397 }
1398 
1399 int
1400 register_acpi_notifier(struct notifier_block *nb)
1401 {
1402 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1403 	return 0;
1404 }
1405 
1406 int
1407 unregister_acpi_notifier(struct notifier_block *nb)
1408 {
1409 	struct notifier_block *tmp;
1410 
1411 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1412 		if (tmp == nb) {
1413 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1414 			    notifier_block, link);
1415 			return 0;
1416 		}
1417 	}
1418 
1419 	return -ENOENT;
1420 }
1421 
1422 const char *
1423 acpi_format_exception(acpi_status status)
1424 {
1425 	switch (status) {
1426 	case AE_NOT_FOUND:
1427 		return "not found";
1428 	case AE_BAD_PARAMETER:
1429 		return "bad parameter";
1430 	default:
1431 		return "unknown";
1432 	}
1433 }
1434 
1435 #endif
1436 
1437 void
1438 backlight_do_update_status(void *arg)
1439 {
1440 	backlight_update_status(arg);
1441 }
1442 
1443 struct backlight_device *
1444 backlight_device_register(const char *name, void *kdev, void *data,
1445     const struct backlight_ops *ops, struct backlight_properties *props)
1446 {
1447 	struct backlight_device *bd;
1448 
1449 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1450 	bd->ops = ops;
1451 	bd->props = *props;
1452 	bd->data = data;
1453 
1454 	task_set(&bd->task, backlight_do_update_status, bd);
1455 
1456 	return bd;
1457 }
1458 
1459 void
1460 backlight_device_unregister(struct backlight_device *bd)
1461 {
1462 	free(bd, M_DRM, sizeof(*bd));
1463 }
1464 
1465 struct backlight_device *
1466 devm_backlight_device_register(void *dev, const char *name, void *parent,
1467     void *data, const struct backlight_ops *bo,
1468     const struct backlight_properties *bp)
1469 {
1470 	STUB();
1471 	return NULL;
1472 }
1473 
1474 void
1475 backlight_schedule_update_status(struct backlight_device *bd)
1476 {
1477 	task_add(systq, &bd->task);
1478 }
1479 
1480 inline int
1481 backlight_enable(struct backlight_device *bd)
1482 {
1483 	if (bd == NULL)
1484 		return 0;
1485 
1486 	bd->props.power = FB_BLANK_UNBLANK;
1487 
1488 	return bd->ops->update_status(bd);
1489 }
1490 
1491 inline int
1492 backlight_disable(struct backlight_device *bd)
1493 {
1494 	if (bd == NULL)
1495 		return 0;
1496 
1497 	bd->props.power = FB_BLANK_POWERDOWN;
1498 
1499 	return bd->ops->update_status(bd);
1500 }
1501 
1502 void
1503 drm_sysfs_hotplug_event(struct drm_device *dev)
1504 {
1505 	KNOTE(&dev->note, NOTE_CHANGE);
1506 }
1507 
1508 struct dma_fence *
1509 dma_fence_get(struct dma_fence *fence)
1510 {
1511 	if (fence)
1512 		kref_get(&fence->refcount);
1513 	return fence;
1514 }
1515 
1516 struct dma_fence *
1517 dma_fence_get_rcu(struct dma_fence *fence)
1518 {
1519 	if (fence)
1520 		kref_get(&fence->refcount);
1521 	return fence;
1522 }
1523 
1524 struct dma_fence *
1525 dma_fence_get_rcu_safe(struct dma_fence **dfp)
1526 {
1527 	struct dma_fence *fence;
1528 	if (dfp == NULL)
1529 		return NULL;
1530 	fence = *dfp;
1531 	if (fence)
1532 		kref_get(&fence->refcount);
1533 	return fence;
1534 }
1535 
1536 void
1537 dma_fence_release(struct kref *ref)
1538 {
1539 	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
1540 	if (fence->ops && fence->ops->release)
1541 		fence->ops->release(fence);
1542 	else
1543 		free(fence, M_DRM, 0);
1544 }
1545 
1546 void
1547 dma_fence_put(struct dma_fence *fence)
1548 {
1549 	if (fence)
1550 		kref_put(&fence->refcount, dma_fence_release);
1551 }
1552 
1553 int
1554 dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp)
1555 {
1556 	struct dma_fence_cb *cur, *tmp;
1557 	struct list_head cb_list;
1558 
1559 	if (fence == NULL)
1560 		return -EINVAL;
1561 
1562 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1563 		return -EINVAL;
1564 
1565 	list_replace(&fence->cb_list, &cb_list);
1566 
1567 	fence->timestamp = timestamp;
1568 	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
1569 
1570 	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
1571 		INIT_LIST_HEAD(&cur->node);
1572 		cur->func(fence, cur);
1573 	}
1574 
1575 	return 0;
1576 }
1577 
1578 int
1579 dma_fence_signal(struct dma_fence *fence)
1580 {
1581 	int r;
1582 
1583 	if (fence == NULL)
1584 		return -EINVAL;
1585 
1586 	mtx_enter(fence->lock);
1587 	r = dma_fence_signal_timestamp_locked(fence, ktime_get());
1588 	mtx_leave(fence->lock);
1589 
1590 	return r;
1591 }
1592 
1593 int
1594 dma_fence_signal_locked(struct dma_fence *fence)
1595 {
1596 	if (fence == NULL)
1597 		return -EINVAL;
1598 
1599 	return dma_fence_signal_timestamp_locked(fence, ktime_get());
1600 }
1601 
1602 int
1603 dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
1604 {
1605 	int r;
1606 
1607 	if (fence == NULL)
1608 		return -EINVAL;
1609 
1610 	mtx_enter(fence->lock);
1611 	r = dma_fence_signal_timestamp_locked(fence, timestamp);
1612 	mtx_leave(fence->lock);
1613 
1614 	return r;
1615 }
1616 
1617 bool
1618 dma_fence_is_signaled(struct dma_fence *fence)
1619 {
1620 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1621 		return true;
1622 
1623 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1624 		dma_fence_signal(fence);
1625 		return true;
1626 	}
1627 
1628 	return false;
1629 }
1630 
1631 bool
1632 dma_fence_is_signaled_locked(struct dma_fence *fence)
1633 {
1634 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1635 		return true;
1636 
1637 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1638 		dma_fence_signal_locked(fence);
1639 		return true;
1640 	}
1641 
1642 	return false;
1643 }
1644 
1645 long
1646 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
1647 {
1648 	if (timeout < 0)
1649 		return -EINVAL;
1650 
1651 	if (fence->ops->wait)
1652 		return fence->ops->wait(fence, intr, timeout);
1653 	else
1654 		return dma_fence_default_wait(fence, intr, timeout);
1655 }
1656 
1657 long
1658 dma_fence_wait(struct dma_fence *fence, bool intr)
1659 {
1660 	long ret;
1661 
1662 	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
1663 	if (ret < 0)
1664 		return ret;
1665 
1666 	return 0;
1667 }
1668 
1669 void
1670 dma_fence_enable_sw_signaling(struct dma_fence *fence)
1671 {
1672 	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
1673 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
1674 	    fence->ops->enable_signaling) {
1675 		mtx_enter(fence->lock);
1676 		if (!fence->ops->enable_signaling(fence))
1677 			dma_fence_signal_locked(fence);
1678 		mtx_leave(fence->lock);
1679 	}
1680 }
1681 
1682 void
1683 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
1684     struct mutex *lock, uint64_t context, uint64_t seqno)
1685 {
1686 	fence->ops = ops;
1687 	fence->lock = lock;
1688 	fence->context = context;
1689 	fence->seqno = seqno;
1690 	fence->flags = 0;
1691 	fence->error = 0;
1692 	kref_init(&fence->refcount);
1693 	INIT_LIST_HEAD(&fence->cb_list);
1694 }
1695 
1696 int
1697 dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
1698     dma_fence_func_t func)
1699 {
1700 	int ret = 0;
1701 	bool was_set;
1702 
1703 	if (WARN_ON(!fence || !func))
1704 		return -EINVAL;
1705 
1706 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1707 		INIT_LIST_HEAD(&cb->node);
1708 		return -ENOENT;
1709 	}
1710 
1711 	mtx_enter(fence->lock);
1712 
1713 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
1714 
1715 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1716 		ret = -ENOENT;
1717 	else if (!was_set && fence->ops->enable_signaling) {
1718 		if (!fence->ops->enable_signaling(fence)) {
1719 			dma_fence_signal_locked(fence);
1720 			ret = -ENOENT;
1721 		}
1722 	}
1723 
1724 	if (!ret) {
1725 		cb->func = func;
1726 		list_add_tail(&cb->node, &fence->cb_list);
1727 	} else
1728 		INIT_LIST_HEAD(&cb->node);
1729 	mtx_leave(fence->lock);
1730 
1731 	return ret;
1732 }
1733 
1734 bool
1735 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
1736 {
1737 	bool ret;
1738 
1739 	mtx_enter(fence->lock);
1740 
1741 	ret = !list_empty(&cb->node);
1742 	if (ret)
1743 		list_del_init(&cb->node);
1744 
1745 	mtx_leave(fence->lock);
1746 
1747 	return ret;
1748 }
1749 
1750 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1751 
1752 uint64_t
1753 dma_fence_context_alloc(unsigned int num)
1754 {
1755 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1756 }
1757 
1758 struct default_wait_cb {
1759 	struct dma_fence_cb base;
1760 	struct proc *proc;
1761 };
1762 
1763 static void
1764 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1765 {
1766 	struct default_wait_cb *wait =
1767 	    container_of(cb, struct default_wait_cb, base);
1768 	wake_up_process(wait->proc);
1769 }
1770 
1771 long
1772 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1773 {
1774 	long ret = timeout ? timeout : 1;
1775 	unsigned long end;
1776 	int err;
1777 	struct default_wait_cb cb;
1778 	bool was_set;
1779 
1780 	KASSERT(timeout <= INT_MAX);
1781 
1782 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1783 		return ret;
1784 
1785 	mtx_enter(fence->lock);
1786 
1787 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1788 	    &fence->flags);
1789 
1790 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1791 		goto out;
1792 
1793 	if (!was_set && fence->ops->enable_signaling) {
1794 		if (!fence->ops->enable_signaling(fence)) {
1795 			dma_fence_signal_locked(fence);
1796 			goto out;
1797 		}
1798 	}
1799 
1800 	if (timeout == 0) {
1801 		ret = 0;
1802 		goto out;
1803 	}
1804 
1805 	cb.base.func = dma_fence_default_wait_cb;
1806 	cb.proc = curproc;
1807 	list_add(&cb.base.node, &fence->cb_list);
1808 
1809 	end = jiffies + timeout;
1810 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1811 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1812 			break;
1813 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
1814 		    "dmafence", ret);
1815 		if (err == EINTR || err == ERESTART) {
1816 			ret = -ERESTARTSYS;
1817 			break;
1818 		}
1819 	}
1820 
1821 	if (!list_empty(&cb.base.node))
1822 		list_del(&cb.base.node);
1823 out:
1824 	mtx_leave(fence->lock);
1825 
1826 	return ret;
1827 }
1828 
1829 static bool
1830 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1831     uint32_t *idx)
1832 {
1833 	int i;
1834 
1835 	for (i = 0; i < count; ++i) {
1836 		struct dma_fence *fence = fences[i];
1837 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1838 			if (idx)
1839 				*idx = i;
1840 			return true;
1841 		}
1842 	}
1843 	return false;
1844 }
1845 
1846 long
1847 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
1848     bool intr, long timeout, uint32_t *idx)
1849 {
1850 	struct default_wait_cb *cb;
1851 	long ret = timeout;
1852 	unsigned long end;
1853 	int i, err;
1854 
1855 	KASSERT(timeout <= INT_MAX);
1856 
1857 	if (timeout == 0) {
1858 		for (i = 0; i < count; i++) {
1859 			if (dma_fence_is_signaled(fences[i])) {
1860 				if (idx)
1861 					*idx = i;
1862 				return 1;
1863 			}
1864 		}
1865 		return 0;
1866 	}
1867 
1868 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1869 	if (cb == NULL)
1870 		return -ENOMEM;
1871 
1872 	for (i = 0; i < count; i++) {
1873 		struct dma_fence *fence = fences[i];
1874 		cb[i].proc = curproc;
1875 		if (dma_fence_add_callback(fence, &cb[i].base,
1876 		    dma_fence_default_wait_cb)) {
1877 			if (idx)
1878 				*idx = i;
1879 			goto cb_cleanup;
1880 		}
1881 	}
1882 
1883 	end = jiffies + timeout;
1884 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1885 		if (dma_fence_test_signaled_any(fences, count, idx))
1886 			break;
1887 		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
1888 		if (err == EINTR || err == ERESTART) {
1889 			ret = -ERESTARTSYS;
1890 			break;
1891 		}
1892 	}
1893 
1894 cb_cleanup:
1895 	while (i-- > 0)
1896 		dma_fence_remove_callback(fences[i], &cb[i].base);
1897 	free(cb, M_DRM, count * sizeof(*cb));
1898 	return ret;
1899 }
1900 
1901 static struct dma_fence dma_fence_stub;
1902 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
1903 
1904 static const char *
1905 dma_fence_stub_get_name(struct dma_fence *fence)
1906 {
1907 	return "stub";
1908 }
1909 
1910 static const struct dma_fence_ops dma_fence_stub_ops = {
1911 	.get_driver_name = dma_fence_stub_get_name,
1912 	.get_timeline_name = dma_fence_stub_get_name,
1913 };
1914 
1915 struct dma_fence *
1916 dma_fence_get_stub(void)
1917 {
1918 	mtx_enter(&dma_fence_stub_mtx);
1919 	if (dma_fence_stub.ops == NULL) {
1920 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
1921 		    &dma_fence_stub_mtx, 0, 0);
1922 		dma_fence_signal_locked(&dma_fence_stub);
1923 	}
1924 	mtx_leave(&dma_fence_stub_mtx);
1925 
1926 	return dma_fence_get(&dma_fence_stub);
1927 }
1928 
1929 struct dma_fence *
1930 dma_fence_allocate_private_stub(void)
1931 {
1932 	struct dma_fence *f = malloc(sizeof(*f), M_DRM,
1933 	    M_ZERO | M_WAITOK | M_CANFAIL);
1934 	if (f == NULL)
1935 		return ERR_PTR(-ENOMEM);
1936 	dma_fence_init(f, &dma_fence_stub_ops, &dma_fence_stub_mtx, 0, 0);
1937 	dma_fence_signal(f);
1938 	return f;
1939 }
1940 
1941 static const char *
1942 dma_fence_array_get_driver_name(struct dma_fence *fence)
1943 {
1944 	return "dma_fence_array";
1945 }
1946 
1947 static const char *
1948 dma_fence_array_get_timeline_name(struct dma_fence *fence)
1949 {
1950 	return "unbound";
1951 }
1952 
1953 static void
1954 irq_dma_fence_array_work(void *arg)
1955 {
1956 	struct dma_fence_array *dfa = (struct dma_fence_array *)arg;
1957 	dma_fence_signal(&dfa->base);
1958 	dma_fence_put(&dfa->base);
1959 }
1960 
1961 static void
1962 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
1963 {
1964 	struct dma_fence_array_cb *array_cb =
1965 	    container_of(cb, struct dma_fence_array_cb, cb);
1966 	struct dma_fence_array *dfa = array_cb->array;
1967 
1968 	if (atomic_dec_and_test(&dfa->num_pending))
1969 		timeout_add(&dfa->to, 1);
1970 	else
1971 		dma_fence_put(&dfa->base);
1972 }
1973 
1974 static bool
1975 dma_fence_array_enable_signaling(struct dma_fence *fence)
1976 {
1977 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1978 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
1979 	int i;
1980 
1981 	for (i = 0; i < dfa->num_fences; ++i) {
1982 		cb[i].array = dfa;
1983 		dma_fence_get(&dfa->base);
1984 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
1985 		    dma_fence_array_cb_func)) {
1986 			dma_fence_put(&dfa->base);
1987 			if (atomic_dec_and_test(&dfa->num_pending))
1988 				return false;
1989 		}
1990 	}
1991 
1992 	return true;
1993 }
1994 
1995 static bool
1996 dma_fence_array_signaled(struct dma_fence *fence)
1997 {
1998 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1999 
2000 	return atomic_read(&dfa->num_pending) <= 0;
2001 }
2002 
2003 static void
2004 dma_fence_array_release(struct dma_fence *fence)
2005 {
2006 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2007 	int i;
2008 
2009 	for (i = 0; i < dfa->num_fences; ++i)
2010 		dma_fence_put(dfa->fences[i]);
2011 
2012 	free(dfa->fences, M_DRM, 0);
2013 	dma_fence_free(fence);
2014 }
2015 
2016 struct dma_fence_array *
2017 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
2018     unsigned seqno, bool signal_on_any)
2019 {
2020 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
2021 	    (num_fences * sizeof(struct dma_fence_array_cb)),
2022 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
2023 	if (dfa == NULL)
2024 		return NULL;
2025 
2026 	mtx_init(&dfa->lock, IPL_TTY);
2027 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
2028 	    context, seqno);
2029 	timeout_set(&dfa->to, irq_dma_fence_array_work, dfa);
2030 
2031 	dfa->num_fences = num_fences;
2032 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
2033 	dfa->fences = fences;
2034 
2035 	return dfa;
2036 }
2037 
2038 const struct dma_fence_ops dma_fence_array_ops = {
2039 	.get_driver_name = dma_fence_array_get_driver_name,
2040 	.get_timeline_name = dma_fence_array_get_timeline_name,
2041 	.enable_signaling = dma_fence_array_enable_signaling,
2042 	.signaled = dma_fence_array_signaled,
2043 	.release = dma_fence_array_release,
2044 };
2045 
2046 int
2047 dma_fence_chain_find_seqno(struct dma_fence **df, uint64_t seqno)
2048 {
2049 	struct dma_fence_chain *chain;
2050 	struct dma_fence *fence;
2051 
2052 	if (seqno == 0)
2053 		return 0;
2054 
2055 	if ((chain = to_dma_fence_chain(*df)) == NULL)
2056 		return -EINVAL;
2057 
2058 	fence = &chain->base;
2059 	if (fence->seqno < seqno)
2060 		return -EINVAL;
2061 
2062 	dma_fence_chain_for_each(*df, fence) {
2063 		if ((*df)->context != fence->context)
2064 			break;
2065 
2066 		chain = to_dma_fence_chain(*df);
2067 		if (chain->prev_seqno < seqno)
2068 			break;
2069 	}
2070 	dma_fence_put(fence);
2071 
2072 	return 0;
2073 }
2074 
2075 void
2076 dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev,
2077     struct dma_fence *fence, uint64_t seqno)
2078 {
2079 	uint64_t context;
2080 
2081 	chain->fence = fence;
2082 	chain->prev = prev;
2083 	mtx_init(&chain->lock, IPL_TTY);
2084 
2085 	/* if prev is a chain */
2086 	if (to_dma_fence_chain(prev) != NULL) {
2087 		if (seqno > prev->seqno) {
2088 			chain->prev_seqno = prev->seqno;
2089 			context = prev->context;
2090 		} else {
2091 			chain->prev_seqno = 0;
2092 			context = dma_fence_context_alloc(1);
2093 			seqno = prev->seqno;
2094 		}
2095 	} else {
2096 		chain->prev_seqno = 0;
2097 		context = dma_fence_context_alloc(1);
2098 	}
2099 
2100 	dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock,
2101 	    context, seqno);
2102 }
2103 
2104 static const char *
2105 dma_fence_chain_get_driver_name(struct dma_fence *fence)
2106 {
2107 	return "dma_fence_chain";
2108 }
2109 
2110 static const char *
2111 dma_fence_chain_get_timeline_name(struct dma_fence *fence)
2112 {
2113 	return "unbound";
2114 }
2115 
2116 static bool dma_fence_chain_enable_signaling(struct dma_fence *);
2117 
2118 static void
2119 dma_fence_chain_timo(void *arg)
2120 {
2121 	struct dma_fence_chain *chain = (struct dma_fence_chain *)arg;
2122 
2123 	if (dma_fence_chain_enable_signaling(&chain->base) == false)
2124 		dma_fence_signal(&chain->base);
2125 	dma_fence_put(&chain->base);
2126 }
2127 
2128 static void
2129 dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
2130 {
2131 	struct dma_fence_chain *chain =
2132 	    container_of(cb, struct dma_fence_chain, cb);
2133 	timeout_set(&chain->to, dma_fence_chain_timo, chain);
2134 	timeout_add(&chain->to, 1);
2135 	dma_fence_put(f);
2136 }
2137 
2138 static bool
2139 dma_fence_chain_enable_signaling(struct dma_fence *fence)
2140 {
2141 	struct dma_fence_chain *chain, *h;
2142 	struct dma_fence *f;
2143 
2144 	h = to_dma_fence_chain(fence);
2145 	dma_fence_get(&h->base);
2146 	dma_fence_chain_for_each(fence, &h->base) {
2147 		chain = to_dma_fence_chain(fence);
2148 		if (chain == NULL)
2149 			f = fence;
2150 		else
2151 			f = chain->fence;
2152 
2153 		dma_fence_get(f);
2154 		if (!dma_fence_add_callback(f, &h->cb, dma_fence_chain_cb)) {
2155 			dma_fence_put(fence);
2156 			return true;
2157 		}
2158 		dma_fence_put(f);
2159 	}
2160 	dma_fence_put(&h->base);
2161 	return false;
2162 }
2163 
2164 static bool
2165 dma_fence_chain_signaled(struct dma_fence *fence)
2166 {
2167 	struct dma_fence_chain *chain;
2168 	struct dma_fence *f;
2169 
2170 	dma_fence_chain_for_each(fence, fence) {
2171 		chain = to_dma_fence_chain(fence);
2172 		if (chain == NULL)
2173 			f = fence;
2174 		else
2175 			f = chain->fence;
2176 
2177 		if (dma_fence_is_signaled(f) == false) {
2178 			dma_fence_put(fence);
2179 			return false;
2180 		}
2181 	}
2182 	return true;
2183 }
2184 
2185 static void
2186 dma_fence_chain_release(struct dma_fence *fence)
2187 {
2188 	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
2189 	struct dma_fence_chain *prev_chain;
2190 	struct dma_fence *prev;
2191 
2192 	for (prev = chain->prev; prev != NULL; prev = chain->prev) {
2193 		if (kref_read(&prev->refcount) > 1)
2194 			break;
2195 		if ((prev_chain = to_dma_fence_chain(prev)) == NULL)
2196 			break;
2197 		chain->prev = prev_chain->prev;
2198 		prev_chain->prev = NULL;
2199 		dma_fence_put(prev);
2200 	}
2201 	dma_fence_put(prev);
2202 	dma_fence_put(chain->fence);
2203 	dma_fence_free(fence);
2204 }
2205 
2206 struct dma_fence *
2207 dma_fence_chain_walk(struct dma_fence *fence)
2208 {
2209 	struct dma_fence_chain *chain = to_dma_fence_chain(fence), *prev_chain;
2210 	struct dma_fence *prev, *new_prev, *tmp;
2211 
2212 	if (chain == NULL) {
2213 		dma_fence_put(fence);
2214 		return NULL;
2215 	}
2216 
2217 	while ((prev = dma_fence_get(chain->prev)) != NULL) {
2218 		prev_chain = to_dma_fence_chain(prev);
2219 		if (prev_chain != NULL) {
2220 			if (!dma_fence_is_signaled(prev_chain->fence))
2221 				break;
2222 			new_prev = dma_fence_get(prev_chain->prev);
2223 		} else {
2224 			if (!dma_fence_is_signaled(prev))
2225 				break;
2226 			new_prev = NULL;
2227 		}
2228 		tmp = atomic_cas_ptr(&chain->prev, prev, new_prev);
2229 		dma_fence_put(tmp == prev ? prev : new_prev);
2230 		dma_fence_put(prev);
2231 	}
2232 
2233 	dma_fence_put(fence);
2234 	return prev;
2235 }
2236 
2237 const struct dma_fence_ops dma_fence_chain_ops = {
2238 	.get_driver_name = dma_fence_chain_get_driver_name,
2239 	.get_timeline_name = dma_fence_chain_get_timeline_name,
2240 	.enable_signaling = dma_fence_chain_enable_signaling,
2241 	.signaled = dma_fence_chain_signaled,
2242 	.release = dma_fence_chain_release,
2243 };
2244 
2245 int
2246 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
2247 {
2248 	return (ENXIO);
2249 }
2250 
2251 int
2252 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
2253 {
2254 	return (ENXIO);
2255 }
2256 
2257 int
2258 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2259 {
2260 	return (ENOTTY);
2261 }
2262 
2263 int
2264 dmabuf_kqfilter(struct file *fp, struct knote *kn)
2265 {
2266 	return (EINVAL);
2267 }
2268 
2269 int
2270 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
2271 {
2272 	struct dma_buf *dmabuf = fp->f_data;
2273 
2274 	memset(st, 0, sizeof(*st));
2275 	st->st_size = dmabuf->size;
2276 	st->st_mode = S_IFIFO;	/* XXX */
2277 	return (0);
2278 }
2279 
2280 int
2281 dmabuf_close(struct file *fp, struct proc *p)
2282 {
2283 	struct dma_buf *dmabuf = fp->f_data;
2284 
2285 	fp->f_data = NULL;
2286 	KERNEL_LOCK();
2287 	dmabuf->ops->release(dmabuf);
2288 	KERNEL_UNLOCK();
2289 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
2290 	return (0);
2291 }
2292 
2293 int
2294 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2295 {
2296 	struct dma_buf *dmabuf = fp->f_data;
2297 	off_t newoff;
2298 
2299 	if (*offset != 0)
2300 		return (EINVAL);
2301 
2302 	switch (whence) {
2303 	case SEEK_SET:
2304 		newoff = 0;
2305 		break;
2306 	case SEEK_END:
2307 		newoff = dmabuf->size;
2308 		break;
2309 	default:
2310 		return (EINVAL);
2311 	}
2312 	mtx_enter(&fp->f_mtx);
2313 	fp->f_offset = newoff;
2314 	mtx_leave(&fp->f_mtx);
2315 	*offset = newoff;
2316 	return (0);
2317 }
2318 
2319 const struct fileops dmabufops = {
2320 	.fo_read	= dmabuf_read,
2321 	.fo_write	= dmabuf_write,
2322 	.fo_ioctl	= dmabuf_ioctl,
2323 	.fo_kqfilter	= dmabuf_kqfilter,
2324 	.fo_stat	= dmabuf_stat,
2325 	.fo_close	= dmabuf_close,
2326 	.fo_seek	= dmabuf_seek,
2327 };
2328 
2329 struct dma_buf *
2330 dma_buf_export(const struct dma_buf_export_info *info)
2331 {
2332 	struct proc *p = curproc;
2333 	struct dma_buf *dmabuf;
2334 	struct file *fp;
2335 
2336 	fp = fnew(p);
2337 	if (fp == NULL)
2338 		return ERR_PTR(-ENFILE);
2339 	fp->f_type = DTYPE_DMABUF;
2340 	fp->f_ops = &dmabufops;
2341 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
2342 	dmabuf->priv = info->priv;
2343 	dmabuf->ops = info->ops;
2344 	dmabuf->size = info->size;
2345 	dmabuf->file = fp;
2346 	fp->f_data = dmabuf;
2347 	INIT_LIST_HEAD(&dmabuf->attachments);
2348 	return dmabuf;
2349 }
2350 
2351 struct dma_buf *
2352 dma_buf_get(int fd)
2353 {
2354 	struct proc *p = curproc;
2355 	struct filedesc *fdp = p->p_fd;
2356 	struct file *fp;
2357 
2358 	if ((fp = fd_getfile(fdp, fd)) == NULL)
2359 		return ERR_PTR(-EBADF);
2360 
2361 	if (fp->f_type != DTYPE_DMABUF) {
2362 		FRELE(fp, p);
2363 		return ERR_PTR(-EINVAL);
2364 	}
2365 
2366 	return fp->f_data;
2367 }
2368 
2369 void
2370 dma_buf_put(struct dma_buf *dmabuf)
2371 {
2372 	KASSERT(dmabuf);
2373 	KASSERT(dmabuf->file);
2374 
2375 	FRELE(dmabuf->file, curproc);
2376 }
2377 
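/*
 * Editorial sketch (not part of the original source): the import side of a
 * PRIME-style transfer.  dma_buf_get() returns ERR_PTR() values on failure,
 * so the result must be tested with IS_ERR() rather than against NULL, and
 * the file reference it takes is dropped again with dma_buf_put().  "fd" is
 * a hypothetical descriptor received from userland.
 *
 *	struct dma_buf *buf;
 *
 *	buf = dma_buf_get(fd);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	... attach or map the buffer ...
 *	dma_buf_put(buf);
 */
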
2378 int
2379 dma_buf_fd(struct dma_buf *dmabuf, int flags)
2380 {
2381 	struct proc *p = curproc;
2382 	struct filedesc *fdp = p->p_fd;
2383 	struct file *fp = dmabuf->file;
2384 	int fd, cloexec, error;
2385 
2386 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
2387 
2388 	fdplock(fdp);
2389 restart:
2390 	if ((error = fdalloc(p, 0, &fd)) != 0) {
2391 		if (error == ENOSPC) {
2392 			fdexpand(p);
2393 			goto restart;
2394 		}
2395 		fdpunlock(fdp);
2396 		return -error;
2397 	}
2398 
2399 	fdinsert(fdp, fd, cloexec, fp);
2400 	fdpunlock(fdp);
2401 
2402 	return fd;
2403 }
2404 
2405 void
2406 get_dma_buf(struct dma_buf *dmabuf)
2407 {
2408 	FREF(dmabuf->file);
2409 }
2410 
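/*
 * Editorial sketch (not part of the original source): the export side fills
 * a dma_buf_export_info, wraps it in a dma_buf backed by a DTYPE_DMABUF
 * file with dma_buf_export(), and publishes a descriptor with dma_buf_fd().
 * my_dmabuf_ops, obj and obj_size are hypothetical, driver-supplied names;
 * dma_buf_fd() returns a negative errno on failure.
 *
 *	struct dma_buf_export_info info;
 *	struct dma_buf *buf;
 *	int fd;
 *
 *	info.ops = &my_dmabuf_ops;
 *	info.size = obj_size;
 *	info.priv = obj;
 *	buf = dma_buf_export(&info);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	fd = dma_buf_fd(buf, O_CLOEXEC);
 */
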
2411 enum pci_bus_speed
2412 pcie_get_speed_cap(struct pci_dev *pdev)
2413 {
2414 	pci_chipset_tag_t	pc;
2415 	pcitag_t		tag;
2416 	int			pos;
2417 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
2418 	pcireg_t		id;
2419 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
2420 	int			bus, device, function;
2421 
2422 	if (pdev == NULL)
2423 		return PCI_SPEED_UNKNOWN;
2424 
2425 	pc = pdev->pc;
2426 	tag = pdev->tag;
2427 
2428 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2429 	    &pos, NULL))
2430 		return PCI_SPEED_UNKNOWN;
2431 
2432 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2433 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2434 
2435 	/* we've been informed that VIA and ServerWorks don't make the cut */
2436 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
2437 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
2438 		return PCI_SPEED_UNKNOWN;
2439 
2440 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2441 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
2442 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
2443 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
2444 
2445 	lnkcap &= 0x0f;		/* LNKCAP[3:0]: max link speed */
2446 	lnkcap2 &= 0xfe;	/* LNKCAP2[7:1]: supported link speeds vector */
2447 
2448 	if (lnkcap2) { /* Link Capabilities 2 present (PCIe 3.0 and later) */
2449 		if (lnkcap2 & 0x02)
2450 			cap = PCIE_SPEED_2_5GT;
2451 		if (lnkcap2 & 0x04)
2452 			cap = PCIE_SPEED_5_0GT;
2453 		if (lnkcap2 & 0x08)
2454 			cap = PCIE_SPEED_8_0GT;
2455 		if (lnkcap2 & 0x10)
2456 			cap = PCIE_SPEED_16_0GT;
2457 		if (lnkcap2 & 0x20)
2458 			cap = PCIE_SPEED_32_0GT;
2459 		if (lnkcap2 & 0x40)
2460 			cap = PCIE_SPEED_64_0GT;
2461 	} else {
2462 		if (lnkcap & 0x01)
2463 			cap = PCIE_SPEED_2_5GT;
2464 		if (lnkcap & 0x02)
2465 			cap = PCIE_SPEED_5_0GT;
2466 	}
2467 
2468 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
2469 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
2470 	    lnkcap2);
2471 	return cap;
2472 }
2473 
2474 enum pcie_link_width
2475 pcie_get_width_cap(struct pci_dev *pdev)
2476 {
2477 	pci_chipset_tag_t	pc = pdev->pc;
2478 	pcitag_t		tag = pdev->tag;
2479 	int			pos;
2480 	pcireg_t		lnkcap = 0;
2481 	pcireg_t		id;
2482 	int			bus, device, function;
2483 
2484 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2485 	    &pos, NULL))
2486 		return PCIE_LNK_WIDTH_UNKNOWN;
2487 
2488 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2489 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2490 
2491 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2492 
2493 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
2494 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
2495 
2496 	if (lnkcap)
2497 		return (lnkcap & 0x3f0) >> 4;	/* LNKCAP[9:4]: max link width */
2498 	return PCIE_LNK_WIDTH_UNKNOWN;
2499 }
2500 
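/*
 * Editorial sketch (not part of the original source): drivers typically
 * combine the two probes above to report or validate the link they got,
 * e.g. to warn when a board trains below its rated speed.  "pdev" is the
 * device being probed; the enum values are the ones assigned by
 * pcie_get_speed_cap() above.
 *
 *	enum pci_bus_speed speed = pcie_get_speed_cap(pdev);
 *	enum pcie_link_width width = pcie_get_width_cap(pdev);
 *
 *	if (speed == PCIE_SPEED_2_5GT)
 *		printf("link trained at 2.5 GT/s, width x%d\n", width);
 */
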
2501 bool
2502 pcie_aspm_enabled(struct pci_dev *pdev)
2503 {
2504 	pci_chipset_tag_t	pc = pdev->pc;
2505 	pcitag_t		tag = pdev->tag;
2506 	int			pos;
2507 	pcireg_t		lcsr;
2508 
2509 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2510 	    &pos, NULL))
2511 		return false;
2512 
2513 	lcsr = pci_conf_read(pc, tag, pos + PCI_PCIE_LCSR);
2514 	if ((lcsr & (PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1)) != 0)
2515 		return true;
2516 
2517 	return false;
2518 }
2519 
2520 int
2521 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
2522     int sync, void *key)
2523 {
2524 	wakeup(wqe);
2525 	if (wqe->private)
2526 		wake_up_process(wqe->private);
2527 	list_del_init(&wqe->entry);
2528 	return 0;
2529 }
2530 
2531 static wait_queue_head_t bit_waitq;
2532 wait_queue_head_t var_waitq;
2533 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
2534 
2535 int
2536 wait_on_bit(unsigned long *word, int bit, unsigned mode)
2537 {
2538 	int err;
2539 
2540 	if (!test_bit(bit, word))
2541 		return 0;
2542 
2543 	mtx_enter(&wait_bit_mtx);
2544 	while (test_bit(bit, word)) {
2545 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
2546 		    INFSLP);
2547 		if (err) {
2548 			mtx_leave(&wait_bit_mtx);
2549 			return 1;
2550 		}
2551 	}
2552 	mtx_leave(&wait_bit_mtx);
2553 	return 0;
2554 }
2555 
2556 int
2557 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
2558 {
2559 	int err;
2560 
2561 	if (!test_bit(bit, word))
2562 		return 0;
2563 
2564 	mtx_enter(&wait_bit_mtx);
2565 	while (test_bit(bit, word)) {
2566 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
2567 		if (err) {
2568 			mtx_leave(&wait_bit_mtx);
2569 			return 1;
2570 		}
2571 	}
2572 	mtx_leave(&wait_bit_mtx);
2573 	return 0;
2574 }
2575 
2576 void
2577 wake_up_bit(void *word, int bit)
2578 {
2579 	mtx_enter(&wait_bit_mtx);
2580 	wakeup(word);
2581 	mtx_leave(&wait_bit_mtx);
2582 }
2583 
2584 void
2585 clear_and_wake_up_bit(int bit, void *word)
2586 {
2587 	clear_bit(bit, word);
2588 	wake_up_bit(word, bit);
2589 }
2590 
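/*
 * Editorial sketch (not part of the original source): the usual pairing of
 * the wait-bit helpers above.  One thread parks on a flag bit; the owner
 * clears the bit and wakes all sleepers.  "flags" and MY_FLAG_BUSY are
 * hypothetical, and the mode argument follows the Linux convention.
 *
 *	waiting side:
 *		if (wait_on_bit(&flags, MY_FLAG_BUSY, TASK_UNINTERRUPTIBLE))
 *			return -EINTR;
 *
 *	owning side, once finished:
 *		clear_and_wake_up_bit(MY_FLAG_BUSY, &flags);
 */
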
2591 wait_queue_head_t *
2592 bit_waitqueue(void *word, int bit)
2593 {
2594 	/* XXX hash table of wait queues? */
2595 	return &bit_waitq;
2596 }
2597 
2598 wait_queue_head_t *
2599 __var_waitqueue(void *p)
2600 {
2601 	/* XXX hash table of wait queues? */
2602 	return &bit_waitq;
2603 }
2604 
2605 struct workqueue_struct *system_wq;
2606 struct workqueue_struct *system_highpri_wq;
2607 struct workqueue_struct *system_unbound_wq;
2608 struct workqueue_struct *system_long_wq;
2609 struct taskq *taskletq;
2610 
2611 void
2612 drm_linux_init(void)
2613 {
2614 	system_wq = (struct workqueue_struct *)
2615 	    taskq_create("drmwq", 4, IPL_HIGH, 0);
2616 	system_highpri_wq = (struct workqueue_struct *)
2617 	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
2618 	system_unbound_wq = (struct workqueue_struct *)
2619 	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
2620 	system_long_wq = (struct workqueue_struct *)
2621 	    taskq_create("drmlwq", 4, IPL_HIGH, 0);
2622 
2623 	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
2624 
2625 	init_waitqueue_head(&bit_waitq);
2626 	init_waitqueue_head(&var_waitq);
2627 
2628 	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
2629 	    "idrpl", NULL);
2630 
2631 	kmap_atomic_va =
2632 	    (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
2633 }
2634 
2635 void
2636 drm_linux_exit(void)
2637 {
2638 	pool_destroy(&idr_pool);
2639 
2640 	taskq_destroy(taskletq);
2641 
2642 	taskq_destroy((struct taskq *)system_long_wq);
2643 	taskq_destroy((struct taskq *)system_unbound_wq);
2644 	taskq_destroy((struct taskq *)system_highpri_wq);
2645 	taskq_destroy((struct taskq *)system_wq);
2646 }
2647 
2648 #define PCIE_ECAP_RESIZE_BAR	0x15
2649 #define RBCAP0			0x04
2650 #define RBCTRL0			0x08
2651 #define RBCTRL_BARINDEX_MASK	0x07
2652 #define RBCTRL_BARSIZE_MASK	0x1f00
2653 #define RBCTRL_BARSIZE_SHIFT	8
2654 
2655 /* size in MB is 1 << nsize */
2656 int
2657 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
2658 {
2659 	pcireg_t	reg;
2660 	uint32_t	offset, capid;
2661 
2662 	KASSERT(bar == 0);
2663 
2664 	offset = PCI_PCIE_ECAP;
2665 
2666 	/* search PCI Express Extended Capabilities */
2667 	do {
2668 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
2669 		capid = PCI_PCIE_ECAP_ID(reg);
2670 		if (capid == PCIE_ECAP_RESIZE_BAR)
2671 			break;
2672 		offset = PCI_PCIE_ECAP_NEXT(reg);
2673 	} while (capid != 0);
2674 
2675 	if (capid == 0) {
2676 		printf("%s: could not find resize bar cap!\n", __func__);
2677 		return -ENOTSUP;
2678 	}
2679 
2680 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
2681 
2682 	if ((reg & (1 << (nsize + 4))) == 0) {
2683 		printf("%s: size not supported\n", __func__);
2684 		return -ENOTSUP;
2685 	}
2686 
2687 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2688 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2689 		printf("%s: BAR index not 0\n", __func__);
2690 		return -EINVAL;
2691 	}
2692 
2693 	reg &= ~RBCTRL_BARSIZE_MASK;
2694 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2695 
2696 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2697 
2698 	return 0;
2699 }
2700 
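/*
 * Editorial sketch (not part of the original source): per the comment above,
 * nsize is the log2 of the requested BAR size in MB, so resizing BAR 0 of a
 * hypothetical "pdev" to 8 GB (1 << 13 MB) would be:
 *
 *	int err;
 *
 *	err = pci_resize_resource(pdev, 0, 13);
 *	if (err)
 *		return err;
 */
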
2701 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2702 
2703 int
2704 register_shrinker(struct shrinker *shrinker)
2705 {
2706 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2707 	return 0;
2708 }
2709 
2710 void
2711 unregister_shrinker(struct shrinker *shrinker)
2712 {
2713 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2714 }
2715 
2716 void
2717 drmbackoff(long npages)
2718 {
2719 	struct shrink_control sc;
2720 	struct shrinker *shrinker;
2721 	u_long ret;
2722 
2723 	shrinker = TAILQ_FIRST(&shrinkers);
2724 	while (shrinker && npages > 0) {
2725 		sc.nr_to_scan = npages;
2726 		ret = shrinker->scan_objects(shrinker, &sc);
2727 		npages -= ret;
2728 		shrinker = TAILQ_NEXT(shrinker, next);
2729 	}
2730 }
2731 
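/*
 * Editorial sketch (not part of the original source): a driver registers a
 * shrinker so drmbackoff() can ask it to give pages back under memory
 * pressure.  my_scan is a hypothetical callback; drmbackoff() only invokes
 * scan_objects() and subtracts its return value (pages freed) from the
 * remaining target, so the callback should report what it actually freed.
 *
 *	static struct shrinker my_shrinker = {
 *		.scan_objects = my_scan,
 *	};
 *
 *	register_shrinker(&my_shrinker);
 *	...
 *	unregister_shrinker(&my_shrinker);
 */
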
2732 void *
2733 bitmap_zalloc(u_int n, gfp_t flags)
2734 {
2735 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2736 }
2737 
2738 void
2739 bitmap_free(void *p)
2740 {
2741 	kfree(p);
2742 }
2743 
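/*
 * Editorial sketch (not part of the original source): bitmap_zalloc() hands
 * back a zeroed array of longs large enough for n bits, so the ordinary bit
 * operations apply directly.  128 is an arbitrary size for illustration.
 *
 *	unsigned long *map = bitmap_zalloc(128, GFP_KERNEL);
 *
 *	if (map == NULL)
 *		return -ENOMEM;
 *	set_bit(5, map);
 *	if (test_bit(5, map))
 *		...
 *	bitmap_free(map);
 */
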
2744 int
2745 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2746 {
2747 	if (atomic_add_unless(v, -1, 1))
2748 		return 0;
2749 
2750 	rw_enter_write(lock);
2751 	if (atomic_dec_return(v) == 0)
2752 		return 1;
2753 	rw_exit_write(lock);
2754 	return 0;
2755 }
2756 
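/*
 * Editorial sketch (not part of the original source): the return value is 1
 * only when the reference count dropped to zero, in which case the caller
 * now holds the rwlock and is responsible for tearing the object down and
 * releasing the lock.  obj, its refcount/lock fields and destroy_object()
 * are hypothetical.
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcount, &obj->lock)) {
 *		destroy_object(obj);
 *		rw_exit_write(&obj->lock);
 *	}
 */
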
2757 int
2758 printk(const char *fmt, ...)
2759 {
2760 	int ret, level;
2761 	va_list ap;
2762 
2763 	if (fmt != NULL && *fmt == '\001') {
2764 		level = fmt[1];
2765 #ifndef DRMDEBUG
2766 		if (level >= KERN_INFO[1] && level <= '9')
2767 			return 0;
2768 #endif
2769 		fmt += 2;
2770 	}
2771 
2772 	va_start(ap, fmt);
2773 	ret = vprintf(fmt, ap);
2774 	va_end(ap);
2775 
2776 	return ret;
2777 }
2778 
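/*
 * Editorial sketch (not part of the original source): the '\001' byte and
 * level digit come from the Linux KERN_* string prefixes.  Without DRMDEBUG
 * the check above drops everything from KERN_INFO ('6') upward, i.e. the
 * less severe messages, so the first call below is suppressed while the
 * second still reaches the console.  "ver" is hypothetical.
 *
 *	printk(KERN_INFO "firmware version %d\n", ver);
 *	printk(KERN_ERR "firmware load failed\n");
 */
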
2779 #define START(node) ((node)->start)
2780 #define LAST(node) ((node)->last)
2781 
2782 struct interval_tree_node *
2783 interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
2784     unsigned long last)
2785 {
2786 	struct interval_tree_node *node;
2787 	struct rb_node *rb;
2788 
2789 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
2790 		node = rb_entry(rb, typeof(*node), rb);
2791 		if (LAST(node) >= start && START(node) <= last)
2792 			return node;
2793 	}
2794 	return NULL;
2795 }
2796 
2797 void
2798 interval_tree_remove(struct interval_tree_node *node,
2799     struct rb_root_cached *root)
2800 {
2801 	rb_erase_cached(&node->rb, root);
2802 }
2803 
2804 void
2805 interval_tree_insert(struct interval_tree_node *node,
2806     struct rb_root_cached *root)
2807 {
2808 	struct rb_node **iter = &root->rb_root.rb_node;
2809 	struct rb_node *parent = NULL;
2810 	struct interval_tree_node *iter_node;
2811 
2812 	while (*iter) {
2813 		parent = *iter;
2814 		iter_node = rb_entry(*iter, struct interval_tree_node, rb);
2815 
2816 		if (node->start < iter_node->start)
2817 			iter = &(*iter)->rb_left;
2818 		else
2819 			iter = &(*iter)->rb_right;
2820 	}
2821 
2822 	rb_link_node(&node->rb, parent, iter);
2823 	rb_insert_color_cached(&node->rb, root, false);
2824 }
2825 
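/*
 * Editorial sketch (not part of the original source): callers embed a
 * struct interval_tree_node, fill in the inclusive [start, last] range,
 * insert it and later query for overlaps.  Note that this
 * interval_tree_iter_first() is a linear scan over the whole tree rather
 * than an augmented-tree lookup.  RB_ROOT_CACHED is assumed to be the usual
 * empty-tree initializer from the rbtree compat header.
 *
 *	struct rb_root_cached root = RB_ROOT_CACHED;
 *	struct interval_tree_node node, *hit;
 *
 *	node.start = 0x1000;
 *	node.last = 0x1fff;
 *	interval_tree_insert(&node, &root);
 *	hit = interval_tree_iter_first(&root, 0x1800, 0x1800);
 *	if (hit != NULL)
 *		interval_tree_remove(hit, &root);
 */
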
2826 int
2827 syncfile_read(struct file *fp, struct uio *uio, int fflags)
2828 {
2829 	return ENXIO;
2830 }
2831 
2832 int
2833 syncfile_write(struct file *fp, struct uio *uio, int fflags)
2834 {
2835 	return ENXIO;
2836 }
2837 
2838 int
2839 syncfile_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2840 {
2841 	return ENOTTY;
2842 }
2843 
2844 int
2845 syncfile_kqfilter(struct file *fp, struct knote *kn)
2846 {
2847 	return EINVAL;
2848 }
2849 
2850 int
2851 syncfile_stat(struct file *fp, struct stat *st, struct proc *p)
2852 {
2853 	memset(st, 0, sizeof(*st));
2854 	st->st_mode = S_IFIFO;	/* XXX */
2855 	return 0;
2856 }
2857 
2858 int
2859 syncfile_close(struct file *fp, struct proc *p)
2860 {
2861 	struct sync_file *sf = fp->f_data;
2862 
2863 	dma_fence_put(sf->fence);
2864 	fp->f_data = NULL;
2865 	free(sf, M_DRM, sizeof(struct sync_file));
2866 	return 0;
2867 }
2868 
2869 int
2870 syncfile_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2871 {
2872 	off_t newoff;
2873 
2874 	if (*offset != 0)
2875 		return EINVAL;
2876 
2877 	switch (whence) {
2878 	case SEEK_SET:
2879 		newoff = 0;
2880 		break;
2881 	case SEEK_END:
2882 		newoff = 0;
2883 		break;
2884 	default:
2885 		return EINVAL;
2886 	}
2887 	mtx_enter(&fp->f_mtx);
2888 	fp->f_offset = newoff;
2889 	mtx_leave(&fp->f_mtx);
2890 	*offset = newoff;
2891 	return 0;
2892 }
2893 
2894 const struct fileops syncfileops = {
2895 	.fo_read	= syncfile_read,
2896 	.fo_write	= syncfile_write,
2897 	.fo_ioctl	= syncfile_ioctl,
2898 	.fo_kqfilter	= syncfile_kqfilter,
2899 	.fo_stat	= syncfile_stat,
2900 	.fo_close	= syncfile_close,
2901 	.fo_seek	= syncfile_seek,
2902 };
2903 
2904 void
2905 fd_install(int fd, struct file *fp)
2906 {
2907 	struct proc *p = curproc;
2908 	struct filedesc *fdp = p->p_fd;
2909 
2910 	if (fp->f_type != DTYPE_SYNC)
2911 		return;
2912 
2913 	fdplock(fdp);
2914 	/* all callers use get_unused_fd_flags(O_CLOEXEC) */
2915 	fdinsert(fdp, fd, UF_EXCLOSE, fp);
2916 	fdpunlock(fdp);
2917 }
2918 
2919 void
2920 fput(struct file *fp)
2921 {
2922 	if (fp->f_type != DTYPE_SYNC)
2923 		return;
2924 
2925 	FRELE(fp, curproc);
2926 }
2927 
2928 int
2929 get_unused_fd_flags(unsigned int flags)
2930 {
2931 	struct proc *p = curproc;
2932 	struct filedesc *fdp = p->p_fd;
2933 	int error, fd;
2934 
2935 	KASSERT((flags & O_CLOEXEC) != 0);
2936 
2937 	fdplock(fdp);
2938 retryalloc:
2939 	if ((error = fdalloc(p, 0, &fd)) != 0) {
2940 		if (error == ENOSPC) {
2941 			fdexpand(p);
2942 			goto retryalloc;
2943 		}
2944 		fdpunlock(fdp);
2945 		return -1;
2946 	}
2947 	fdpunlock(fdp);
2948 
2949 	return fd;
2950 }
2951 
2952 void
2953 put_unused_fd(int fd)
2954 {
2955 	struct filedesc *fdp = curproc->p_fd;
2956 
2957 	fdplock(fdp);
2958 	fdremove(fdp, fd);
2959 	fdpunlock(fdp);
2960 }
2961 
2962 struct dma_fence *
2963 sync_file_get_fence(int fd)
2964 {
2965 	struct proc *p = curproc;
2966 	struct filedesc *fdp = p->p_fd;
2967 	struct file *fp;
2968 	struct sync_file *sf;
2969 	struct dma_fence *f;
2970 
2971 	if ((fp = fd_getfile(fdp, fd)) == NULL)
2972 		return NULL;
2973 
2974 	if (fp->f_type != DTYPE_SYNC) {
2975 		FRELE(fp, p);
2976 		return NULL;
2977 	}
2978 	sf = fp->f_data;
2979 	f = dma_fence_get(sf->fence);
2980 	FRELE(sf->file, p);
2981 	return f;
2982 }
2983 
2984 struct sync_file *
2985 sync_file_create(struct dma_fence *fence)
2986 {
2987 	struct proc *p = curproc;
2988 	struct sync_file *sf;
2989 	struct file *fp;
2990 
2991 	fp = fnew(p);
2992 	if (fp == NULL)
2993 		return NULL;
2994 	fp->f_type = DTYPE_SYNC;
2995 	fp->f_ops = &syncfileops;
2996 	sf = malloc(sizeof(struct sync_file), M_DRM, M_WAITOK | M_ZERO);
2997 	sf->file = fp;
2998 	sf->fence = dma_fence_get(fence);
2999 	fp->f_data = sf;
3000 	return sf;
3001 }
3002
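/*
 * Editorial sketch (not part of the original source): exporting a fence to
 * userland ties the sync_file helpers together in the usual pattern of the
 * drm ioctl code: reserve a descriptor, wrap the fence, then publish it, or
 * release the descriptor again on failure.  "fence" is a hypothetical
 * dma_fence owned by the caller.
 *
 *	struct sync_file *sf;
 *	int fd;
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return -EMFILE;
 *	sf = sync_file_create(fence);
 *	if (sf == NULL) {
 *		put_unused_fd(fd);
 *		return -ENOMEM;
 *	}
 *	fd_install(fd, sf->file);
 *
 * The importing side recovers the fence with sync_file_get_fence(fd) and
 * drops it with dma_fence_put() when done.
 */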