xref: /openbsd-src/sys/dev/pci/drm/drm_linux.c (revision b99ef4df7fac99f3475b694d6cd4990521c99ae6)
1 /*	$OpenBSD: drm_linux.c,v 1.77 2021/02/08 08:18:45 mpi Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30 
31 #include <dev/pci/ppbreg.h>
32 
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/interrupt.h>
39 #include <linux/err.h>
40 #include <linux/idr.h>
41 #include <linux/scatterlist.h>
42 #include <linux/i2c.h>
43 #include <linux/pci.h>
44 #include <linux/notifier.h>
45 #include <linux/backlight.h>
46 #include <linux/shrinker.h>
47 #include <linux/fb.h>
48 #include <linux/xarray.h>
49 #include <linux/interval_tree.h>
50 
51 #include <drm/drm_device.h>
52 #include <drm/drm_print.h>
53 
54 #if defined(__amd64__) || defined(__i386__)
55 #include "bios.h"
56 #endif
57 
58 void
59 tasklet_run(void *arg)
60 {
61 	struct tasklet_struct *ts = arg;
62 
63 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
64 	if (tasklet_trylock(ts)) {
65 		if (!atomic_read(&ts->count))
66 			ts->func(ts->data);
67 		tasklet_unlock(ts);
68 	}
69 }
70 
71 /* 32 bit powerpc lacks 64 bit atomics */
72 #if defined(__powerpc__) && !defined(__powerpc64__)
73 struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
74 #endif
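
/*
 * Illustrative sketch only (not compiled): on such platforms the 64-bit
 * atomic helpers in the Linux compatibility headers are assumed to fall
 * back to a plain read-modify-write serialized by atomic64_mtx, roughly
 * as follows.  The member name "val" is hypothetical.
 */
#if 0
static inline int64_t
atomic64_add_return_sketch(int64_t n, atomic64_t *v)
{
	int64_t ret;

	mtx_enter(&atomic64_mtx);
	ret = (v->val += n);
	mtx_leave(&atomic64_mtx);
	return ret;
}
#endif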
75 
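/*
 * State for the Linux set_current_state()/schedule_timeout() emulation
 * below: set_current_state() takes sch_mtx and records the current proc
 * and sleep priority, and schedule_timeout() later performs the actual
 * sleep_setup()/sleep_finish().  A wake_up_process() that races in
 * between clears sch_proc, so schedule_timeout() skips the sleep and the
 * wakeup is not lost.
 */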
76 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
77 volatile struct proc *sch_proc;
78 volatile void *sch_ident;
79 int sch_priority;
80 
81 void
82 set_current_state(int state)
83 {
84 	if (sch_ident != curproc)
85 		mtx_enter(&sch_mtx);
86 	MUTEX_ASSERT_LOCKED(&sch_mtx);
87 	sch_ident = sch_proc = curproc;
88 	sch_priority = state;
89 }
90 
91 void
92 __set_current_state(int state)
93 {
94 	KASSERT(state == TASK_RUNNING);
95 	if (sch_ident == curproc) {
96 		MUTEX_ASSERT_LOCKED(&sch_mtx);
97 		sch_ident = NULL;
98 		mtx_leave(&sch_mtx);
99 	}
100 }
101 
102 void
103 schedule(void)
104 {
105 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
106 }
107 
108 long
109 schedule_timeout(long timeout)
110 {
111 	struct sleep_state sls;
112 	unsigned long deadline;
113 	int wait, spl, timo = 0;
114 
115 	MUTEX_ASSERT_LOCKED(&sch_mtx);
116 	KASSERT(!cold);
117 
118 	if (timeout != MAX_SCHEDULE_TIMEOUT)
119 		timo = timeout;
120 	sleep_setup(&sls, sch_ident, sch_priority, "schto", timo);
121 
122 	wait = (sch_proc == curproc && timeout > 0);
123 
124 	spl = MUTEX_OLDIPL(&sch_mtx);
125 	MUTEX_OLDIPL(&sch_mtx) = splsched();
126 	mtx_leave(&sch_mtx);
127 
128 	if (timeout != MAX_SCHEDULE_TIMEOUT)
129 		deadline = jiffies + timeout;
130 	sleep_finish(&sls, wait);
131 	if (timeout != MAX_SCHEDULE_TIMEOUT)
132 		timeout = deadline - jiffies;
133 
134 	mtx_enter(&sch_mtx);
135 	MUTEX_OLDIPL(&sch_mtx) = spl;
136 	sch_ident = curproc;
137 
138 	return timeout > 0 ? timeout : 0;
139 }
140 
141 long
142 schedule_timeout_uninterruptible(long timeout)
143 {
144 	tsleep(curproc, PWAIT, "schtou", timeout);
145 	return 0;
146 }
147 
148 int
149 wake_up_process(struct proc *p)
150 {
151 	atomic_cas_ptr(&sch_proc, p, NULL);
152 	return wakeup_proc(p, NULL);
153 }
154 
155 void
156 flush_workqueue(struct workqueue_struct *wq)
157 {
158 	if (cold)
159 		return;
160 
161 	taskq_barrier((struct taskq *)wq);
162 }
163 
164 bool
165 flush_work(struct work_struct *work)
166 {
167 	if (cold)
168 		return false;
169 
170 	taskq_barrier(work->tq);
171 	return false;
172 }
173 
174 bool
175 flush_delayed_work(struct delayed_work *dwork)
176 {
177 	bool ret = false;
178 
179 	if (cold)
180 		return false;
181 
182 	while (timeout_pending(&dwork->to)) {
183 		tsleep(dwork, PWAIT, "fldwto", 1);
184 		ret = true;
185 	}
186 
187 	taskq_barrier(dwork->tq ? dwork->tq : (struct taskq *)system_wq);
188 	return ret;
189 }
190 
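/*
 * Minimal kthread emulation.  kthread_run() wraps each thread in a
 * struct kthread recording the park/stop handshake: kthread_park()
 * sets KTHREAD_SHOULDPARK and waits until the thread acknowledges by
 * setting KTHREAD_PARKED in kthread_parkme(); kthread_stop() sets
 * KTHREAD_SHOULDSTOP, unparks the thread and waits for KTHREAD_STOPPED,
 * which kthread_func() sets once the thread function returns.
 */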
191 struct kthread {
192 	int (*func)(void *);
193 	void *data;
194 	struct proc *proc;
195 	volatile u_int flags;
196 #define KTHREAD_SHOULDSTOP	0x0000001
197 #define KTHREAD_STOPPED		0x0000002
198 #define KTHREAD_SHOULDPARK	0x0000004
199 #define KTHREAD_PARKED		0x0000008
200 	LIST_ENTRY(kthread) next;
201 };
202 
203 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
204 
205 void
206 kthread_func(void *arg)
207 {
208 	struct kthread *thread = arg;
209 	int ret;
210 
211 	ret = thread->func(thread->data);
212 	thread->flags |= KTHREAD_STOPPED;
213 	wakeup(thread);
214 	kthread_exit(ret);
215 }
216 
217 struct proc *
218 kthread_run(int (*func)(void *), void *data, const char *name)
219 {
220 	struct kthread *thread;
221 
222 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
223 	thread->func = func;
224 	thread->data = data;
225 	thread->flags = 0;
226 
227 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
228 		free(thread, M_DRM, sizeof(*thread));
229 		return ERR_PTR(-ENOMEM);
230 	}
231 
232 	LIST_INSERT_HEAD(&kthread_list, thread, next);
233 	return thread->proc;
234 }
235 
236 struct kthread *
237 kthread_lookup(struct proc *p)
238 {
239 	struct kthread *thread;
240 
241 	LIST_FOREACH(thread, &kthread_list, next) {
242 		if (thread->proc == p)
243 			break;
244 	}
245 	KASSERT(thread);
246 
247 	return thread;
248 }
249 
250 int
251 kthread_should_park(void)
252 {
253 	struct kthread *thread = kthread_lookup(curproc);
254 	return (thread->flags & KTHREAD_SHOULDPARK);
255 }
256 
257 void
258 kthread_parkme(void)
259 {
260 	struct kthread *thread = kthread_lookup(curproc);
261 
262 	while (thread->flags & KTHREAD_SHOULDPARK) {
263 		thread->flags |= KTHREAD_PARKED;
264 		wakeup(thread);
265 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
266 		thread->flags &= ~KTHREAD_PARKED;
267 	}
268 }
269 
270 void
271 kthread_park(struct proc *p)
272 {
273 	struct kthread *thread = kthread_lookup(p);
274 
275 	while ((thread->flags & KTHREAD_PARKED) == 0) {
276 		thread->flags |= KTHREAD_SHOULDPARK;
277 		wake_up_process(thread->proc);
278 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
279 	}
280 }
281 
282 void
283 kthread_unpark(struct proc *p)
284 {
285 	struct kthread *thread = kthread_lookup(p);
286 
287 	thread->flags &= ~KTHREAD_SHOULDPARK;
288 	wakeup(thread);
289 }
290 
291 int
292 kthread_should_stop(void)
293 {
294 	struct kthread *thread = kthread_lookup(curproc);
295 	return (thread->flags & KTHREAD_SHOULDSTOP);
296 }
297 
298 void
299 kthread_stop(struct proc *p)
300 {
301 	struct kthread *thread = kthread_lookup(p);
302 
303 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
304 		thread->flags |= KTHREAD_SHOULDSTOP;
305 		kthread_unpark(p);
306 		wake_up_process(thread->proc);
307 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
308 	}
309 	LIST_REMOVE(thread, next);
310 	free(thread, M_DRM, sizeof(*thread));
311 }
312 
313 #if NBIOS > 0
314 extern char smbios_board_vendor[];
315 extern char smbios_board_prod[];
316 extern char smbios_board_serial[];
317 #endif
318 
319 bool
320 dmi_match(int slot, const char *str)
321 {
322 	switch (slot) {
323 	case DMI_SYS_VENDOR:
324 		if (hw_vendor != NULL &&
325 		    !strcmp(hw_vendor, str))
326 			return true;
327 		break;
328 	case DMI_PRODUCT_NAME:
329 		if (hw_prod != NULL &&
330 		    !strcmp(hw_prod, str))
331 			return true;
332 		break;
333 	case DMI_PRODUCT_VERSION:
334 		if (hw_ver != NULL &&
335 		    !strcmp(hw_ver, str))
336 			return true;
337 		break;
338 #if NBIOS > 0
339 	case DMI_BOARD_VENDOR:
340 		if (strcmp(smbios_board_vendor, str) == 0)
341 			return true;
342 		break;
343 	case DMI_BOARD_NAME:
344 		if (strcmp(smbios_board_prod, str) == 0)
345 			return true;
346 		break;
347 	case DMI_BOARD_SERIAL:
348 		if (strcmp(smbios_board_serial, str) == 0)
349 			return true;
350 		break;
351 #else
352 	case DMI_BOARD_VENDOR:
353 		if (hw_vendor != NULL &&
354 		    !strcmp(hw_vendor, str))
355 			return true;
356 		break;
357 	case DMI_BOARD_NAME:
358 		if (hw_prod != NULL &&
359 		    !strcmp(hw_prod, str))
360 			return true;
361 		break;
362 #endif
363 	case DMI_NONE:
364 	default:
365 		return false;
366 	}
367 
368 	return false;
369 }
370 
371 static bool
372 dmi_found(const struct dmi_system_id *dsi)
373 {
374 	int i, slot;
375 
376 	for (i = 0; i < nitems(dsi->matches); i++) {
377 		slot = dsi->matches[i].slot;
378 		if (slot == DMI_NONE)
379 			break;
380 		if (!dmi_match(slot, dsi->matches[i].substr))
381 			return false;
382 	}
383 
384 	return true;
385 }
386 
387 const struct dmi_system_id *
388 dmi_first_match(const struct dmi_system_id *sysid)
389 {
390 	const struct dmi_system_id *dsi;
391 
392 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
393 		if (dmi_found(dsi))
394 			return dsi;
395 	}
396 
397 	return NULL;
398 }
399 
400 #if NBIOS > 0
401 extern char smbios_bios_date[];
402 #endif
403 
404 const char *
405 dmi_get_system_info(int slot)
406 {
407 	WARN_ON(slot != DMI_BIOS_DATE);
408 #if NBIOS > 0
409 	if (slot == DMI_BIOS_DATE)
410 		return smbios_bios_date;
411 #endif
412 	return NULL;
413 }
414 
415 int
416 dmi_check_system(const struct dmi_system_id *sysid)
417 {
418 	const struct dmi_system_id *dsi;
419 	int num = 0;
420 
421 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
422 		if (dmi_found(dsi)) {
423 			num++;
424 			if (dsi->callback && dsi->callback(dsi))
425 				break;
426 		}
427 	}
428 	return (num);
429 }
430 
431 struct vm_page *
432 alloc_pages(unsigned int gfp_mask, unsigned int order)
433 {
434 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
435 	struct uvm_constraint_range *constraint = &no_constraint;
436 	struct pglist mlist;
437 
438 	if (gfp_mask & M_CANFAIL)
439 		flags |= UVM_PLA_FAILOK;
440 	if (gfp_mask & M_ZERO)
441 		flags |= UVM_PLA_ZERO;
442 	if (gfp_mask & __GFP_DMA32)
443 		constraint = &dma_constraint;
444 
445 	TAILQ_INIT(&mlist);
446 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
447 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
448 		return NULL;
449 	return TAILQ_FIRST(&mlist);
450 }
451 
452 void
453 __free_pages(struct vm_page *page, unsigned int order)
454 {
455 	struct pglist mlist;
456 	int i;
457 
458 	TAILQ_INIT(&mlist);
459 	for (i = 0; i < (1 << order); i++)
460 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
461 	uvm_pglistfree(&mlist);
462 }
463 
464 void
465 __pagevec_release(struct pagevec *pvec)
466 {
467 	struct pglist mlist;
468 	int i;
469 
470 	TAILQ_INIT(&mlist);
471 	for (i = 0; i < pvec->nr; i++)
472 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
473 	uvm_pglistfree(&mlist);
474 	pagevec_reinit(pvec);
475 }
476 
477 void *
478 kmap(struct vm_page *pg)
479 {
480 	vaddr_t va;
481 
482 #if defined (__HAVE_PMAP_DIRECT)
483 	va = pmap_map_direct(pg);
484 #else
485 	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
486 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
487 	pmap_update(pmap_kernel());
488 #endif
489 	return (void *)va;
490 }
491 
492 void
493 kunmap_va(void *addr)
494 {
495 	vaddr_t va = (vaddr_t)addr;
496 
497 #if defined (__HAVE_PMAP_DIRECT)
498 	pmap_unmap_direct(va);
499 #else
500 	pmap_kremove(va, PAGE_SIZE);
501 	pmap_update(pmap_kernel());
502 	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
503 #endif
504 }
505 
506 void *
507 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
508      pgprot_t prot)
509 {
510 	vaddr_t va;
511 	paddr_t pa;
512 	int i;
513 
514 	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
515 	if (va == 0)
516 		return NULL;
517 	for (i = 0; i < npages; i++) {
518 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
519 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
520 		    PROT_READ | PROT_WRITE,
521 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
522 		pmap_update(pmap_kernel());
523 	}
524 
525 	return (void *)va;
526 }
527 
528 void
529 vunmap(void *addr, size_t size)
530 {
531 	vaddr_t va = (vaddr_t)addr;
532 
533 	pmap_remove(pmap_kernel(), va, va + size);
534 	pmap_update(pmap_kernel());
535 	uvm_km_free(kernel_map, va, size);
536 }
537 
538 void
539 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
540     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
541 {
542 	const uint8_t *cbuf = buf;
543 	int i;
544 
545 	for (i = 0; i < len; i++) {
546 		if ((i % rowsize) == 0)
547 			printf("%s", prefix_str);
548 		printf("%02x", cbuf[i]);
549 		if ((i % rowsize) == (rowsize - 1))
550 			printf("\n");
551 		else
552 			printf(" ");
553 	}
554 }
555 
556 void *
557 memchr_inv(const void *s, int c, size_t n)
558 {
559 	if (n != 0) {
560 		const unsigned char *p = s;
561 
562 		do {
563 			if (*p++ != (unsigned char)c)
564 				return ((void *)(p - 1));
565 		} while (--n != 0);
566 	}
567 	return (NULL);
568 }
569 
570 int
571 panic_cmp(struct rb_node *a, struct rb_node *b)
572 {
573 	panic(__func__);
574 }
575 
576 #undef RB_ROOT
577 #define RB_ROOT(head)	(head)->rbh_root
578 
579 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
580 
581 /*
582  * This is a fairly minimal implementation of the Linux "idr" API.  It
583  * probably isn't very efficient, and definitely isn't RCU safe.  The
584  * pre-load buffer is global instead of per-cpu; we rely on the kernel
585  * lock to make this work.  We do randomize our IDs in order to make
586  * them harder to guess.
587  */
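
/*
 * Illustrative usage sketch (not compiled): the function name and the
 * "obj" pointer are hypothetical, everything else is the idr API
 * implemented below.  Passing end <= 0 to idr_alloc() means "no upper
 * bound" (INT_MAX).
 */
#if 0
static int
idr_usage_sketch(struct idr *idr, void *obj)
{
	int id;

	idr_init(idr);
	id = idr_alloc(idr, obj, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;		/* -ENOMEM or -ENOSPC */
	KASSERT(idr_find(idr, id) == obj);
	idr_remove(idr, id);
	idr_destroy(idr);
	return 0;
}
#endif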
588 
589 int idr_cmp(struct idr_entry *, struct idr_entry *);
590 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
591 
592 struct pool idr_pool;
593 struct idr_entry *idr_entry_cache;
594 
595 void
596 idr_init(struct idr *idr)
597 {
598 	SPLAY_INIT(&idr->tree);
599 }
600 
601 void
602 idr_destroy(struct idr *idr)
603 {
604 	struct idr_entry *id;
605 
606 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
607 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
608 		pool_put(&idr_pool, id);
609 	}
610 }
611 
612 void
613 idr_preload(unsigned int gfp_mask)
614 {
615 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
616 
617 	KERNEL_ASSERT_LOCKED();
618 
619 	if (idr_entry_cache == NULL)
620 		idr_entry_cache = pool_get(&idr_pool, flags);
621 }
622 
623 int
624 idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
625 {
626 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
627 	struct idr_entry *id;
628 	int begin;
629 
630 	KERNEL_ASSERT_LOCKED();
631 
632 	if (idr_entry_cache) {
633 		id = idr_entry_cache;
634 		idr_entry_cache = NULL;
635 	} else {
636 		id = pool_get(&idr_pool, flags);
637 		if (id == NULL)
638 			return -ENOMEM;
639 	}
640 
641 	if (end <= 0)
642 		end = INT_MAX;
643 
644 #ifdef notyet
645 	id->id = begin = start + arc4random_uniform(end - start);
646 #else
647 	id->id = begin = start;
648 #endif
649 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
650 		if (id->id == end)
651 			id->id = start;
652 		else
653 			id->id++;
654 		if (id->id == begin) {
655 			pool_put(&idr_pool, id);
656 			return -ENOSPC;
657 		}
658 	}
659 	id->ptr = ptr;
660 	return id->id;
661 }
662 
663 void *
664 idr_replace(struct idr *idr, void *ptr, unsigned long id)
665 {
666 	struct idr_entry find, *res;
667 	void *old;
668 
669 	find.id = id;
670 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
671 	if (res == NULL)
672 		return ERR_PTR(-ENOENT);
673 	old = res->ptr;
674 	res->ptr = ptr;
675 	return old;
676 }
677 
678 void *
679 idr_remove(struct idr *idr, unsigned long id)
680 {
681 	struct idr_entry find, *res;
682 	void *ptr = NULL;
683 
684 	find.id = id;
685 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
686 	if (res) {
687 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
688 		ptr = res->ptr;
689 		pool_put(&idr_pool, res);
690 	}
691 	return ptr;
692 }
693 
694 void *
695 idr_find(struct idr *idr, unsigned long id)
696 {
697 	struct idr_entry find, *res;
698 
699 	find.id = id;
700 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
701 	if (res == NULL)
702 		return NULL;
703 	return res->ptr;
704 }
705 
706 void *
707 idr_get_next(struct idr *idr, int *id)
708 {
709 	struct idr_entry *res;
710 
711 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
712 		if (res->id >= *id) {
713 			*id = res->id;
714 			return res->ptr;
715 		}
716 	}
717 
718 	return NULL;
719 }
720 
721 int
722 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
723 {
724 	struct idr_entry *id;
725 	int ret;
726 
727 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
728 		ret = func(id->id, id->ptr, data);
729 		if (ret)
730 			return ret;
731 	}
732 
733 	return 0;
734 }
735 
736 int
737 idr_cmp(struct idr_entry *a, struct idr_entry *b)
738 {
739 	return (a->id < b->id ? -1 : a->id > b->id);
740 }
741 
742 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
743 
744 void
745 ida_init(struct ida *ida)
746 {
747 	idr_init(&ida->idr);
748 }
749 
750 void
751 ida_destroy(struct ida *ida)
752 {
753 	idr_destroy(&ida->idr);
754 }
755 
756 int
757 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
758     gfp_t gfp_mask)
759 {
760 	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
761 }
762 
763 void
764 ida_simple_remove(struct ida *ida, unsigned int id)
765 {
766 	idr_remove(&ida->idr, id);
767 }
768 
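/*
 * Minimal XArray emulation, mirroring the idr implementation above:
 * entries live in a splay tree keyed by index.
 */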
769 int
770 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
771 {
772 	return (a->id < b->id ? -1 : a->id > b->id);
773 }
774 
775 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
776 struct pool xa_pool;
777 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
778 
779 void
780 xa_init_flags(struct xarray *xa, gfp_t flags)
781 {
782 	static int initialized;
783 
784 	if (!initialized) {
785 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_TTY, 0,
786 		    "xapl", NULL);
787 		initialized = 1;
788 	}
789 	SPLAY_INIT(&xa->xa_tree);
790 }
791 
792 void
793 xa_destroy(struct xarray *xa)
794 {
795 	struct xarray_entry *id;
796 
797 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
798 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
799 		pool_put(&xa_pool, id);
800 	}
801 }
802 
803 int
804 xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
805 {
806 	struct xarray_entry *xid;
807 	int flags = (gfp & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
808 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
809 	int begin;
810 
811 	xid = pool_get(&xa_pool, flags);
812 	if (xid == NULL)
813 		return -ENOMEM;
814 
815 	if (limit <= 0)
816 		limit = INT_MAX;
817 
818 	xid->id = begin = start;
819 
820 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
821 		if (xid->id == limit)
822 			xid->id = start;
823 		else
824 			xid->id++;
825 		if (xid->id == begin) {
826 			pool_put(&xa_pool, xid);
827 			return -EBUSY;
828 		}
829 	}
830 	xid->ptr = entry;
831 	*id = xid->id;
832 	return 0;
833 }
834 
835 void *
836 xa_erase(struct xarray *xa, unsigned long index)
837 {
838 	struct xarray_entry find, *res;
839 	void *ptr = NULL;
840 
841 	find.id = index;
842 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
843 	if (res) {
844 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
845 		ptr = res->ptr;
846 		pool_put(&xa_pool, res);
847 	}
848 	return ptr;
849 }
850 
851 void *
852 xa_load(struct xarray *xa, unsigned long index)
853 {
854 	struct xarray_entry find, *res;
855 
856 	find.id = index;
857 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
858 	if (res == NULL)
859 		return NULL;
860 	return res->ptr;
861 }
862 
863 void *
864 xa_get_next(struct xarray *xa, unsigned long *index)
865 {
866 	struct xarray_entry *res;
867 
868 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
869 		if (res->id >= *index) {
870 			*index = res->id;
871 			return res->ptr;
872 		}
873 	}
874 
875 	return NULL;
876 }
877 
878 int
879 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
880 {
881 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
882 	    M_DRM, gfp_mask);
883 	if (table->sgl == NULL)
884 		return -ENOMEM;
885 	table->nents = table->orig_nents = nents;
886 	return 0;
887 }
888 
889 void
890 sg_free_table(struct sg_table *table)
891 {
892 	free(table->sgl, M_DRM,
893 	    table->orig_nents * sizeof(struct scatterlist));
894 	table->sgl = NULL;
895 }
896 
897 size_t
898 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
899     const void *buf, size_t buflen)
900 {
901 	panic("%s", __func__);
902 }
903 
904 int
905 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
906 {
907 	void *cmd = NULL;
908 	int cmdlen = 0;
909 	int err, ret = 0;
910 	int op;
911 
912 	iic_acquire_bus(&adap->ic, 0);
913 
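	/*
	 * All but the last two messages are issued as standalone
	 * transfers; the second-to-last message, if present, becomes the
	 * command/register phase of the final transfer, which is issued
	 * with a stop condition.
	 */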
914 	while (num > 2) {
915 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
916 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
917 		    msgs->buf, msgs->len, 0);
918 		if (err) {
919 			ret = -err;
920 			goto fail;
921 		}
922 		msgs++;
923 		num--;
924 		ret++;
925 	}
926 
927 	if (num > 1) {
928 		cmd = msgs->buf;
929 		cmdlen = msgs->len;
930 		msgs++;
931 		num--;
932 		ret++;
933 	}
934 
935 	op = (msgs->flags & I2C_M_RD) ?
936 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
937 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
938 	    msgs->buf, msgs->len, 0);
939 	if (err) {
940 		ret = -err;
941 		goto fail;
942 	}
943 	msgs++;
944 	ret++;
945 
946 fail:
947 	iic_release_bus(&adap->ic, 0);
948 
949 	return ret;
950 }
951 
952 int
953 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
954 {
955 	int ret;
956 
957 	if (adap->lock_ops)
958 		adap->lock_ops->lock_bus(adap, 0);
959 
960 	if (adap->algo)
961 		ret = adap->algo->master_xfer(adap, msgs, num);
962 	else
963 		ret = i2c_master_xfer(adap, msgs, num);
964 
965 	if (adap->lock_ops)
966 		adap->lock_ops->unlock_bus(adap, 0);
967 
968 	return ret;
969 }
970 
971 int
972 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
973 {
974 	struct i2c_algo_bit_data *algo = adap->algo_data;
975 	struct i2c_adapter bb;
976 
977 	memset(&bb, 0, sizeof(bb));
978 	bb.ic = algo->ic;
979 	bb.retries = adap->retries;
980 	return i2c_master_xfer(&bb, msgs, num);
981 }
982 
983 uint32_t
984 i2c_bb_functionality(struct i2c_adapter *adap)
985 {
986 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
987 }
988 
989 struct i2c_algorithm i2c_bit_algo = {
990 	.master_xfer = i2c_bb_master_xfer,
991 	.functionality = i2c_bb_functionality
992 };
993 
994 int
995 i2c_bit_add_bus(struct i2c_adapter *adap)
996 {
997 	adap->algo = &i2c_bit_algo;
998 	adap->retries = 3;
999 
1000 	return 0;
1001 }
1002 
1003 #if defined(__amd64__) || defined(__i386__)
1004 
1005 /*
1006  * This is a minimal implementation of the Linux vga_get/vga_put
1007  * interface.  In all likelihood, it will only work for inteldrm(4) as
1008  * it assumes that if there is another active VGA device in the
1009  * system, it is sitting behind a PCI bridge.
1010  */
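
/*
 * Illustrative usage sketch (not compiled): a driver brackets legacy
 * VGA accesses with vga_get_uninterruptible()/vga_put(); the rsrc
 * argument is ignored by this implementation.  "pdev" and the access
 * in the middle are hypothetical.
 */
#if 0
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	/* ... touch legacy VGA I/O or memory ranges ... */
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
#endif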
1011 
1012 extern int pci_enumerate_bus(struct pci_softc *,
1013     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1014 
1015 pcitag_t vga_bridge_tag;
1016 int vga_bridge_disabled;
1017 
1018 int
1019 vga_disable_bridge(struct pci_attach_args *pa)
1020 {
1021 	pcireg_t bhlc, bc;
1022 
1023 	if (pa->pa_domain != 0)
1024 		return 0;
1025 
1026 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1027 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1028 		return 0;
1029 
1030 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1031 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1032 		return 0;
1033 	bc &= ~PPB_BC_VGA_ENABLE;
1034 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1035 
1036 	vga_bridge_tag = pa->pa_tag;
1037 	vga_bridge_disabled = 1;
1038 
1039 	return 1;
1040 }
1041 
1042 void
1043 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1044 {
1045 	KASSERT(pdev->pci->sc_bridgetag == NULL);
1046 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1047 }
1048 
1049 void
1050 vga_put(struct pci_dev *pdev, int rsrc)
1051 {
1052 	pcireg_t bc;
1053 
1054 	if (!vga_bridge_disabled)
1055 		return;
1056 
1057 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1058 	bc |= PPB_BC_VGA_ENABLE;
1059 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1060 
1061 	vga_bridge_disabled = 0;
1062 }
1063 
1064 #endif
1065 
1066 /*
1067  * ACPI types and interfaces.
1068  */
1069 
1070 #ifdef __HAVE_ACPI
1071 #include "acpi.h"
1072 #endif
1073 
1074 #if NACPI > 0
1075 
1076 #include <dev/acpi/acpireg.h>
1077 #include <dev/acpi/acpivar.h>
1078 #include <dev/acpi/amltypes.h>
1079 #include <dev/acpi/dsdt.h>
1080 
1081 acpi_status
1082 acpi_get_table(const char *sig, int instance,
1083     struct acpi_table_header **hdr)
1084 {
1085 	struct acpi_softc *sc = acpi_softc;
1086 	struct acpi_q *entry;
1087 
1088 	KASSERT(instance == 1);
1089 
1090 	if (sc == NULL)
1091 		return AE_NOT_FOUND;
1092 
1093 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1094 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1095 			*hdr = entry->q_table;
1096 			return 0;
1097 		}
1098 	}
1099 
1100 	return AE_NOT_FOUND;
1101 }
1102 
1103 acpi_status
1104 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1105 {
1106 	node = aml_searchname(node, name);
1107 	if (node == NULL)
1108 		return AE_NOT_FOUND;
1109 
1110 	*rnode = node;
1111 	return 0;
1112 }
1113 
1114 acpi_status
1115 acpi_get_name(acpi_handle node, int type, struct acpi_buffer *buffer)
1116 {
1117 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1118 	KASSERT(type == ACPI_FULL_PATHNAME);
1119 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1120 	return 0;
1121 }
1122 
1123 acpi_status
1124 acpi_evaluate_object(acpi_handle node, const char *name,
1125     struct acpi_object_list *params, struct acpi_buffer *result)
1126 {
1127 	struct aml_value args[4], res;
1128 	union acpi_object *obj;
1129 	uint8_t *data;
1130 	int i;
1131 
1132 	KASSERT(params->count <= nitems(args));
1133 
1134 	for (i = 0; i < params->count; i++) {
1135 		args[i].type = params->pointer[i].type;
1136 		switch (args[i].type) {
1137 		case AML_OBJTYPE_INTEGER:
1138 			args[i].v_integer = params->pointer[i].integer.value;
1139 			break;
1140 		case AML_OBJTYPE_BUFFER:
1141 			args[i].length = params->pointer[i].buffer.length;
1142 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1143 			break;
1144 		default:
1145 			printf("%s: arg type 0x%02x\n", __func__, args[i].type);
1146 			return AE_BAD_PARAMETER;
1147 		}
1148 	}
1149 
1150 	if (name) {
1151 		node = aml_searchname(node, name);
1152 		if (node == NULL)
1153 			return AE_NOT_FOUND;
1154 	}
1155 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1156 		aml_freevalue(&res);
1157 		return AE_ERROR;
1158 	}
1159 
1160 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1161 
1162 	result->length = sizeof(union acpi_object);
1163 	switch (res.type) {
1164 	case AML_OBJTYPE_BUFFER:
1165 		result->length += res.length;
1166 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1167 		obj = (union acpi_object *)result->pointer;
1168 		data = (uint8_t *)(obj + 1);
1169 		obj->type = res.type;
1170 		obj->buffer.length = res.length;
1171 		obj->buffer.pointer = data;
1172 		memcpy(data, res.v_buffer, res.length);
1173 		break;
1174 	default:
1175 		printf("%s: return type 0x%02x\n", __func__, res.type);
1176 		aml_freevalue(&res);
1177 		return AE_ERROR;
1178 	}
1179 
1180 	aml_freevalue(&res);
1181 	return 0;
1182 }
1183 
1184 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1185 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1186 
1187 int
1188 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1189 {
1190 	struct acpi_bus_event event;
1191 	struct notifier_block *nb;
1192 
1193 	event.device_class = ACPI_VIDEO_CLASS;
1194 	event.type = notify;
1195 
1196 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1197 		nb->notifier_call(nb, 0, &event);
1198 	return 0;
1199 }
1200 
1201 int
1202 register_acpi_notifier(struct notifier_block *nb)
1203 {
1204 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1205 	return 0;
1206 }
1207 
1208 int
1209 unregister_acpi_notifier(struct notifier_block *nb)
1210 {
1211 	struct notifier_block *tmp;
1212 
1213 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1214 		if (tmp == nb) {
1215 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1216 			    notifier_block, link);
1217 			return 0;
1218 		}
1219 	}
1220 
1221 	return -ENOENT;
1222 }
1223 
1224 const char *
1225 acpi_format_exception(acpi_status status)
1226 {
1227 	switch (status) {
1228 	case AE_NOT_FOUND:
1229 		return "not found";
1230 	case AE_BAD_PARAMETER:
1231 		return "bad parameter";
1232 	default:
1233 		return "unknown";
1234 	}
1235 }
1236 
1237 #endif
1238 
1239 void
1240 backlight_do_update_status(void *arg)
1241 {
1242 	backlight_update_status(arg);
1243 }
1244 
1245 struct backlight_device *
1246 backlight_device_register(const char *name, void *kdev, void *data,
1247     const struct backlight_ops *ops, struct backlight_properties *props)
1248 {
1249 	struct backlight_device *bd;
1250 
1251 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1252 	bd->ops = ops;
1253 	bd->props = *props;
1254 	bd->data = data;
1255 
1256 	task_set(&bd->task, backlight_do_update_status, bd);
1257 
1258 	return bd;
1259 }
1260 
1261 void
1262 backlight_device_unregister(struct backlight_device *bd)
1263 {
1264 	free(bd, M_DRM, sizeof(*bd));
1265 }
1266 
1267 void
1268 backlight_schedule_update_status(struct backlight_device *bd)
1269 {
1270 	task_add(systq, &bd->task);
1271 }
1272 
1273 inline int
1274 backlight_enable(struct backlight_device *bd)
1275 {
1276 	if (bd == NULL)
1277 		return 0;
1278 
1279 	bd->props.power = FB_BLANK_UNBLANK;
1280 
1281 	return bd->ops->update_status(bd);
1282 }
1283 
1284 inline int
1285 backlight_disable(struct backlight_device *bd)
1286 {
1287 	if (bd == NULL)
1288 		return 0;
1289 
1290 	bd->props.power = FB_BLANK_POWERDOWN;
1291 
1292 	return bd->ops->update_status(bd);
1293 }
1294 
1295 void
1296 drm_sysfs_hotplug_event(struct drm_device *dev)
1297 {
1298 	KNOTE(&dev->note, NOTE_CHANGE);
1299 }
1300 
1301 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1302 
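/*
 * Hand out "num" consecutive fence context numbers; the return value is
 * the first context of the reserved range [ret, ret + num).
 */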
1303 uint64_t
1304 dma_fence_context_alloc(unsigned int num)
1305 {
1306 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1307 }
1308 
1309 struct default_wait_cb {
1310 	struct dma_fence_cb base;
1311 	struct proc *proc;
1312 };
1313 
1314 static void
1315 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1316 {
1317 	struct default_wait_cb *wait =
1318 	    container_of(cb, struct default_wait_cb, base);
1319 	wake_up_process(wait->proc);
1320 }
1321 
1322 long
1323 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1324 {
1325 	long ret = timeout ? timeout : 1;
1326 	unsigned long end;
1327 	int err;
1328 	struct default_wait_cb cb;
1329 	bool was_set;
1330 
1331 	KASSERT(timeout <= INT_MAX);
1332 
1333 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1334 		return ret;
1335 
1336 	mtx_enter(fence->lock);
1337 
1338 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1339 	    &fence->flags);
1340 
1341 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1342 		goto out;
1343 
1344 	if (!was_set && fence->ops->enable_signaling) {
1345 		if (!fence->ops->enable_signaling(fence)) {
1346 			dma_fence_signal_locked(fence);
1347 			goto out;
1348 		}
1349 	}
1350 
1351 	if (timeout == 0) {
1352 		ret = 0;
1353 		goto out;
1354 	}
1355 
1356 	cb.base.func = dma_fence_default_wait_cb;
1357 	cb.proc = curproc;
1358 	list_add(&cb.base.node, &fence->cb_list);
1359 
1360 	end = jiffies + timeout;
1361 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1362 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1363 			break;
1364 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
1365 		    "dmafence", ret);
1366 		if (err == EINTR || err == ERESTART) {
1367 			ret = -ERESTARTSYS;
1368 			break;
1369 		}
1370 	}
1371 
1372 	if (!list_empty(&cb.base.node))
1373 		list_del(&cb.base.node);
1374 out:
1375 	mtx_leave(fence->lock);
1376 
1377 	return ret;
1378 }
1379 
1380 static bool
1381 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1382     uint32_t *idx)
1383 {
1384 	int i;
1385 
1386 	for (i = 0; i < count; ++i) {
1387 		struct dma_fence *fence = fences[i];
1388 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1389 			if (idx)
1390 				*idx = i;
1391 			return true;
1392 		}
1393 	}
1394 	return false;
1395 }
1396 
1397 long
1398 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
1399     bool intr, long timeout, uint32_t *idx)
1400 {
1401 	struct default_wait_cb *cb;
1402 	long ret = timeout;
1403 	unsigned long end;
1404 	int i, err;
1405 
1406 	KASSERT(timeout <= INT_MAX);
1407 
1408 	if (timeout == 0) {
1409 		for (i = 0; i < count; i++) {
1410 			if (dma_fence_is_signaled(fences[i])) {
1411 				if (idx)
1412 					*idx = i;
1413 				return 1;
1414 			}
1415 		}
1416 		return 0;
1417 	}
1418 
1419 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1420 	if (cb == NULL)
1421 		return -ENOMEM;
1422 
1423 	for (i = 0; i < count; i++) {
1424 		struct dma_fence *fence = fences[i];
1425 		cb[i].proc = curproc;
1426 		if (dma_fence_add_callback(fence, &cb[i].base,
1427 		    dma_fence_default_wait_cb)) {
1428 			if (idx)
1429 				*idx = i;
1430 			goto cb_cleanup;
1431 		}
1432 	}
1433 
1434 	end = jiffies + timeout;
1435 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1436 		if (dma_fence_test_signaled_any(fences, count, idx))
1437 			break;
1438 		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
1439 		if (err == EINTR || err == ERESTART) {
1440 			ret = -ERESTARTSYS;
1441 			break;
1442 		}
1443 	}
1444 
1445 cb_cleanup:
1446 	while (i-- > 0)
1447 		dma_fence_remove_callback(fences[i], &cb[i].base);
1448 	free(cb, M_DRM, count * sizeof(*cb));
1449 	return ret;
1450 }
1451 
1452 static struct dma_fence dma_fence_stub;
1453 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
1454 
1455 static const char *
1456 dma_fence_stub_get_name(struct dma_fence *fence)
1457 {
1458 	return "stub";
1459 }
1460 
1461 static const struct dma_fence_ops dma_fence_stub_ops = {
1462 	.get_driver_name = dma_fence_stub_get_name,
1463 	.get_timeline_name = dma_fence_stub_get_name,
1464 };
1465 
1466 struct dma_fence *
1467 dma_fence_get_stub(void)
1468 {
1469 	mtx_enter(&dma_fence_stub_mtx);
1470 	if (dma_fence_stub.ops == NULL) {
1471 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
1472 		    &dma_fence_stub_mtx, 0, 0);
1473 		dma_fence_signal_locked(&dma_fence_stub);
1474 	}
1475 	mtx_leave(&dma_fence_stub_mtx);
1476 
1477 	return dma_fence_get(&dma_fence_stub);
1478 }
1479 
1480 static const char *
1481 dma_fence_array_get_driver_name(struct dma_fence *fence)
1482 {
1483 	return "dma_fence_array";
1484 }
1485 
1486 static const char *
1487 dma_fence_array_get_timeline_name(struct dma_fence *fence)
1488 {
1489 	return "unbound";
1490 }
1491 
1492 static void
1493 irq_dma_fence_array_work(struct irq_work *wrk)
1494 {
1495 	struct dma_fence_array *dfa = container_of(wrk, typeof(*dfa), work);
1496 
1497 	dma_fence_signal(&dfa->base);
1498 	dma_fence_put(&dfa->base);
1499 }
1500 
1501 static void
1502 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
1503 {
1504 	struct dma_fence_array_cb *array_cb =
1505 	    container_of(cb, struct dma_fence_array_cb, cb);
1506 	struct dma_fence_array *dfa = array_cb->array;
1507 
1508 	if (atomic_dec_and_test(&dfa->num_pending))
1509 		irq_work_queue(&dfa->work);
1510 	else
1511 		dma_fence_put(&dfa->base);
1512 }
1513 
1514 static bool
1515 dma_fence_array_enable_signaling(struct dma_fence *fence)
1516 {
1517 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1518 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
1519 	int i;
1520 
1521 	for (i = 0; i < dfa->num_fences; ++i) {
1522 		cb[i].array = dfa;
1523 		dma_fence_get(&dfa->base);
1524 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
1525 		    dma_fence_array_cb_func)) {
1526 			dma_fence_put(&dfa->base);
1527 			if (atomic_dec_and_test(&dfa->num_pending))
1528 				return false;
1529 		}
1530 	}
1531 
1532 	return true;
1533 }
1534 
1535 static bool dma_fence_array_signaled(struct dma_fence *fence)
1536 {
1537 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1538 
1539 	return atomic_read(&dfa->num_pending) <= 0;
1540 }
1541 
1542 static void dma_fence_array_release(struct dma_fence *fence)
1543 {
1544 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1545 	int i;
1546 
1547 	for (i = 0; i < dfa->num_fences; ++i)
1548 		dma_fence_put(dfa->fences[i]);
1549 
1550 	free(dfa->fences, M_DRM, 0);
1551 	dma_fence_free(fence);
1552 }
1553 
1554 struct dma_fence_array *
1555 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
1556     unsigned seqno, bool signal_on_any)
1557 {
1558 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
1559 	    (num_fences * sizeof(struct dma_fence_array_cb)),
1560 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1561 	if (dfa == NULL)
1562 		return NULL;
1563 
1564 	mtx_init(&dfa->lock, IPL_TTY);
1565 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
1566 	    context, seqno);
1567 	init_irq_work(&dfa->work, irq_dma_fence_array_work);
1568 
1569 	dfa->num_fences = num_fences;
1570 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
1571 	dfa->fences = fences;
1572 
1573 	return dfa;
1574 }
1575 
1576 const struct dma_fence_ops dma_fence_array_ops = {
1577 	.get_driver_name = dma_fence_array_get_driver_name,
1578 	.get_timeline_name = dma_fence_array_get_timeline_name,
1579 	.enable_signaling = dma_fence_array_enable_signaling,
1580 	.signaled = dma_fence_array_signaled,
1581 	.release = dma_fence_array_release,
1582 };
1583 
1584 int
1585 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
1586 {
1587 	return (ENXIO);
1588 }
1589 
1590 int
1591 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
1592 {
1593 	return (ENXIO);
1594 }
1595 
1596 int
1597 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
1598 {
1599 	return (ENOTTY);
1600 }
1601 
1602 int
1603 dmabuf_poll(struct file *fp, int events, struct proc *p)
1604 {
1605 	return (0);
1606 }
1607 
1608 int
1609 dmabuf_kqfilter(struct file *fp, struct knote *kn)
1610 {
1611 	return (EINVAL);
1612 }
1613 
1614 int
1615 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
1616 {
1617 	struct dma_buf *dmabuf = fp->f_data;
1618 
1619 	memset(st, 0, sizeof(*st));
1620 	st->st_size = dmabuf->size;
1621 	st->st_mode = S_IFIFO;	/* XXX */
1622 	return (0);
1623 }
1624 
1625 int
1626 dmabuf_close(struct file *fp, struct proc *p)
1627 {
1628 	struct dma_buf *dmabuf = fp->f_data;
1629 
1630 	fp->f_data = NULL;
1631 	KERNEL_LOCK();
1632 	dmabuf->ops->release(dmabuf);
1633 	KERNEL_UNLOCK();
1634 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
1635 	return (0);
1636 }
1637 
1638 int
1639 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
1640 {
1641 	struct dma_buf *dmabuf = fp->f_data;
1642 	off_t newoff;
1643 
1644 	if (*offset != 0)
1645 		return (EINVAL);
1646 
1647 	switch (whence) {
1648 	case SEEK_SET:
1649 		newoff = 0;
1650 		break;
1651 	case SEEK_END:
1652 		newoff = dmabuf->size;
1653 		break;
1654 	default:
1655 		return (EINVAL);
1656 	}
1657 	mtx_enter(&fp->f_mtx);
1658 	fp->f_offset = newoff;
1659 	mtx_leave(&fp->f_mtx);
1660 	*offset = newoff;
1661 	return (0);
1662 }
1663 
1664 const struct fileops dmabufops = {
1665 	.fo_read	= dmabuf_read,
1666 	.fo_write	= dmabuf_write,
1667 	.fo_ioctl	= dmabuf_ioctl,
1668 	.fo_poll	= dmabuf_poll,
1669 	.fo_kqfilter	= dmabuf_kqfilter,
1670 	.fo_stat	= dmabuf_stat,
1671 	.fo_close	= dmabuf_close,
1672 	.fo_seek	= dmabuf_seek,
1673 };
1674 
1675 struct dma_buf *
1676 dma_buf_export(const struct dma_buf_export_info *info)
1677 {
1678 	struct proc *p = curproc;
1679 	struct dma_buf *dmabuf;
1680 	struct file *fp;
1681 
1682 	fp = fnew(p);
1683 	if (fp == NULL)
1684 		return ERR_PTR(-ENFILE);
1685 	fp->f_type = DTYPE_DMABUF;
1686 	fp->f_ops = &dmabufops;
1687 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
1688 	dmabuf->priv = info->priv;
1689 	dmabuf->ops = info->ops;
1690 	dmabuf->size = info->size;
1691 	dmabuf->file = fp;
1692 	fp->f_data = dmabuf;
1693 	INIT_LIST_HEAD(&dmabuf->attachments);
1694 	return dmabuf;
1695 }
1696 
1697 struct dma_buf *
1698 dma_buf_get(int fd)
1699 {
1700 	struct proc *p = curproc;
1701 	struct filedesc *fdp = p->p_fd;
1702 	struct file *fp;
1703 
1704 	if ((fp = fd_getfile(fdp, fd)) == NULL)
1705 		return ERR_PTR(-EBADF);
1706 
1707 	if (fp->f_type != DTYPE_DMABUF) {
1708 		FRELE(fp, p);
1709 		return ERR_PTR(-EINVAL);
1710 	}
1711 
1712 	return fp->f_data;
1713 }
1714 
1715 void
1716 dma_buf_put(struct dma_buf *dmabuf)
1717 {
1718 	KASSERT(dmabuf);
1719 	KASSERT(dmabuf->file);
1720 
1721 	FRELE(dmabuf->file, curproc);
1722 }
1723 
1724 int
1725 dma_buf_fd(struct dma_buf *dmabuf, int flags)
1726 {
1727 	struct proc *p = curproc;
1728 	struct filedesc *fdp = p->p_fd;
1729 	struct file *fp = dmabuf->file;
1730 	int fd, cloexec, error;
1731 
1732 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
1733 
1734 	fdplock(fdp);
1735 restart:
1736 	if ((error = fdalloc(p, 0, &fd)) != 0) {
1737 		if (error == ENOSPC) {
1738 			fdexpand(p);
1739 			goto restart;
1740 		}
1741 		fdpunlock(fdp);
1742 		return -error;
1743 	}
1744 
1745 	fdinsert(fdp, fd, cloexec, fp);
1746 	fdpunlock(fdp);
1747 
1748 	return fd;
1749 }
1750 
1751 void
1752 get_dma_buf(struct dma_buf *dmabuf)
1753 {
1754 	FREF(dmabuf->file);
1755 }
1756 
1757 enum pci_bus_speed
1758 pcie_get_speed_cap(struct pci_dev *pdev)
1759 {
1760 	pci_chipset_tag_t	pc = pdev->pc;
1761 	pcitag_t		tag = pdev->tag;
1762 	int			pos;
1763 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
1764 	pcireg_t		id;
1765 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
1766 	int			bus, device, function;
1767 
1768 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1769 	    &pos, NULL))
1770 		return PCI_SPEED_UNKNOWN;
1771 
1772 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1773 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1774 
1775 	/* we've been informed VIA and ServerWorks don't make the cut */
1776 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
1777 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
1778 		return PCI_SPEED_UNKNOWN;
1779 
1780 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1781 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
1782 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
1783 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
1784 
1785 	lnkcap &= 0x0f;
1786 	lnkcap2 &= 0xfe;
1787 
1788 	if (lnkcap2) { /* PCIE GEN 3.0 */
1789 		if (lnkcap2 & 0x02)
1790 			cap = PCIE_SPEED_2_5GT;
1791 		if (lnkcap2 & 0x04)
1792 			cap = PCIE_SPEED_5_0GT;
1793 		if (lnkcap2 & 0x08)
1794 			cap = PCIE_SPEED_8_0GT;
1795 		if (lnkcap2 & 0x10)
1796 			cap = PCIE_SPEED_16_0GT;
1797 	} else {
1798 		if (lnkcap & 0x01)
1799 			cap = PCIE_SPEED_2_5GT;
1800 		if (lnkcap & 0x02)
1801 			cap = PCIE_SPEED_5_0GT;
1802 	}
1803 
1804 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
1805 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
1806 	    lnkcap2);
1807 	return cap;
1808 }
1809 
1810 enum pcie_link_width
1811 pcie_get_width_cap(struct pci_dev *pdev)
1812 {
1813 	pci_chipset_tag_t	pc = pdev->pc;
1814 	pcitag_t		tag = pdev->tag;
1815 	int			pos;
1816 	pcireg_t		lnkcap = 0;
1817 	pcireg_t		id;
1818 	int			bus, device, function;
1819 
1820 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1821 	    &pos, NULL))
1822 		return PCIE_LNK_WIDTH_UNKNOWN;
1823 
1824 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1825 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1826 
1827 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1828 
1829 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
1830 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
1831 
1832 	if (lnkcap)
1833 		return (lnkcap & 0x3f0) >> 4;
1834 	return PCIE_LNK_WIDTH_UNKNOWN;
1835 }
1836 
1837 int
1838 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1839     int sync, void *key)
1840 {
1841 	wakeup(wqe);
1842 	if (wqe->proc)
1843 		wake_up_process(wqe->proc);
1844 	list_del_init(&wqe->entry);
1845 	return 0;
1846 }
1847 
1848 static wait_queue_head_t bit_waitq;
1849 wait_queue_head_t var_waitq;
1850 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
1851 
1852 int
1853 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1854 {
1855 	int err;
1856 
1857 	if (!test_bit(bit, word))
1858 		return 0;
1859 
1860 	mtx_enter(&wait_bit_mtx);
1861 	while (test_bit(bit, word)) {
1862 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
1863 		    INFSLP);
1864 		if (err) {
1865 			mtx_leave(&wait_bit_mtx);
1866 			return 1;
1867 		}
1868 	}
1869 	mtx_leave(&wait_bit_mtx);
1870 	return 0;
1871 }
1872 
1873 int
1874 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
1875 {
1876 	int err;
1877 
1878 	if (!test_bit(bit, word))
1879 		return 0;
1880 
1881 	mtx_enter(&wait_bit_mtx);
1882 	while (test_bit(bit, word)) {
1883 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
1884 		if (err) {
1885 			mtx_leave(&wait_bit_mtx);
1886 			return 1;
1887 		}
1888 	}
1889 	mtx_leave(&wait_bit_mtx);
1890 	return 0;
1891 }
1892 
1893 void
1894 wake_up_bit(void *word, int bit)
1895 {
1896 	mtx_enter(&wait_bit_mtx);
1897 	wakeup(word);
1898 	mtx_leave(&wait_bit_mtx);
1899 }
1900 
1901 void
1902 clear_and_wake_up_bit(int bit, void *word)
1903 {
1904 	clear_bit(bit, word);
1905 	wake_up_bit(word, bit);
1906 }
1907 
1908 wait_queue_head_t *
1909 bit_waitqueue(void *word, int bit)
1910 {
1911 	/* XXX hash table of wait queues? */
1912 	return &bit_waitq;
1913 }
1914 
1915 struct workqueue_struct *system_wq;
1916 struct workqueue_struct *system_highpri_wq;
1917 struct workqueue_struct *system_unbound_wq;
1918 struct workqueue_struct *system_long_wq;
1919 struct taskq *taskletq;
1920 
1921 void
1922 drm_linux_init(void)
1923 {
1924 	system_wq = (struct workqueue_struct *)
1925 	    taskq_create("drmwq", 4, IPL_HIGH, 0);
1926 	system_highpri_wq = (struct workqueue_struct *)
1927 	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
1928 	system_unbound_wq = (struct workqueue_struct *)
1929 	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
1930 	system_long_wq = (struct workqueue_struct *)
1931 	    taskq_create("drmlwq", 4, IPL_HIGH, 0);
1932 
1933 	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
1934 
1935 	init_waitqueue_head(&bit_waitq);
1936 	init_waitqueue_head(&var_waitq);
1937 
1938 	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
1939 	    "idrpl", NULL);
1940 }
1941 
1942 void
1943 drm_linux_exit(void)
1944 {
1945 	pool_destroy(&idr_pool);
1946 
1947 	taskq_destroy(taskletq);
1948 
1949 	taskq_destroy((struct taskq *)system_long_wq);
1950 	taskq_destroy((struct taskq *)system_unbound_wq);
1951 	taskq_destroy((struct taskq *)system_highpri_wq);
1952 	taskq_destroy((struct taskq *)system_wq);
1953 }
1954 
1955 #define PCIE_ECAP_RESIZE_BAR	0x15
1956 #define RBCAP0			0x04
1957 #define RBCTRL0			0x08
1958 #define RBCTRL_BARINDEX_MASK	0x07
1959 #define RBCTRL_BARSIZE_MASK	0x1f00
1960 #define RBCTRL_BARSIZE_SHIFT	8
1961 
1962 /* size in MB is 1 << nsize */
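/* e.g. nsize == 8 requests a 1 << 8 == 256 MB BAR */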
1963 int
1964 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
1965 {
1966 	pcireg_t	reg;
1967 	uint32_t	offset, capid;
1968 
1969 	KASSERT(bar == 0);
1970 
1971 	offset = PCI_PCIE_ECAP;
1972 
1973 	/* search PCI Express Extended Capabilities */
1974 	do {
1975 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
1976 		capid = PCI_PCIE_ECAP_ID(reg);
1977 		if (capid == PCIE_ECAP_RESIZE_BAR)
1978 			break;
1979 		offset = PCI_PCIE_ECAP_NEXT(reg);
1980 	} while (capid != 0);
1981 
1982 	if (capid == 0) {
1983 		printf("%s: could not find resize bar cap!\n", __func__);
1984 		return -ENOTSUP;
1985 	}
1986 
1987 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
1988 
1989 	if ((reg & (1 << (nsize + 4))) == 0) {
1990 		printf("%s size not supported\n", __func__);
1991 		return -ENOTSUP;
1992 	}
1993 
1994 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
1995 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
1996 		printf("%s BAR index not 0\n", __func__);
1997 		return -EINVAL;
1998 	}
1999 
2000 	reg &= ~RBCTRL_BARSIZE_MASK;
2001 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2002 
2003 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2004 
2005 	return 0;
2006 }
2007 
2008 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2009 
2010 int
2011 register_shrinker(struct shrinker *shrinker)
2012 {
2013 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2014 	return 0;
2015 }
2016 
2017 void
2018 unregister_shrinker(struct shrinker *shrinker)
2019 {
2020 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2021 }
2022 
2023 void
2024 drmbackoff(long npages)
2025 {
2026 	struct shrink_control sc;
2027 	struct shrinker *shrinker;
2028 	u_long ret;
2029 
2030 	shrinker = TAILQ_FIRST(&shrinkers);
2031 	while (shrinker && npages > 0) {
2032 		sc.nr_to_scan = npages;
2033 		ret = shrinker->scan_objects(shrinker, &sc);
2034 		npages -= ret;
2035 		shrinker = TAILQ_NEXT(shrinker, next);
2036 	}
2037 }
2038 
2039 void *
2040 bitmap_zalloc(u_int n, gfp_t flags)
2041 {
2042 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2043 }
2044 
2045 void
2046 bitmap_free(void *p)
2047 {
2048 	kfree(p);
2049 }
2050 
2051 int
2052 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2053 {
2054 	if (atomic_add_unless(v, -1, 1))
2055 		return 0;
2056 
2057 	rw_enter_write(lock);
2058 	if (atomic_dec_return(v) == 0)
2059 		return 1;
2060 	rw_exit_write(lock);
2061 	return 0;
2062 }
2063 
2064 int
2065 printk(const char *fmt, ...)
2066 {
2067 	int ret, level;
2068 	va_list ap;
2069 
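	/* Linux KERN_* levels are encoded as a "\001<level>" prefix (KERN_SOH). */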
2070 	if (fmt != NULL && *fmt == '\001') {
2071 		level = fmt[1];
2072 #ifndef DRMDEBUG
2073 		if (level >= KERN_INFO[1] && level <= '9')
2074 			return 0;
2075 #endif
2076 		fmt += 2;
2077 	}
2078 
2079 	va_start(ap, fmt);
2080 	ret = vprintf(fmt, ap);
2081 	va_end(ap);
2082 
2083 	return ret;
2084 }
2085 
2086 #define START(node) ((node)->start)
2087 #define LAST(node) ((node)->last)
2088 
2089 struct interval_tree_node *
2090 interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
2091     unsigned long last)
2092 {
2093 	struct interval_tree_node *node;
2094 	struct rb_node *rb;
2095 
2096 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
2097 		node = rb_entry(rb, typeof(*node), rb);
2098 		if (LAST(node) >= start && START(node) <= last)
2099 			return node;
2100 	}
2101 	return NULL;
2102 }
2103 
2104 void
2105 interval_tree_remove(struct interval_tree_node *node,
2106     struct rb_root_cached *root)
2107 {
2108 	rb_erase_cached(&node->rb, root);
2109 }
2110 
2111 void
2112 interval_tree_insert(struct interval_tree_node *node,
2113     struct rb_root_cached *root)
2114 {
2115 	struct rb_node **iter = &root->rb_root.rb_node;
2116 	struct rb_node *parent = NULL;
2117 	struct interval_tree_node *iter_node;
2118 
2119 	while (*iter) {
2120 		parent = *iter;
2121 		iter_node = rb_entry(*iter, struct interval_tree_node, rb);
2122 
2123 		if (node->start < iter_node->start)
2124 			iter = &(*iter)->rb_left;
2125 		else
2126 			iter = &(*iter)->rb_right;
2127 	}
2128 
2129 	rb_link_node(&node->rb, parent, iter);
2130 	rb_insert_color_cached(&node->rb, root, false);
2131 }
2132