xref: /openbsd-src/sys/dev/pci/drm/drm_linux.c (revision 1a8dbaac879b9f3335ad7fb25429ce63ac1d6bac)
1 /*	$OpenBSD: drm_linux.c,v 1.64 2020/10/16 09:20:04 jsg Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30 
31 #include <dev/pci/ppbreg.h>
32 
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/interrupt.h>
39 #include <linux/err.h>
40 #include <linux/idr.h>
41 #include <linux/scatterlist.h>
42 #include <linux/i2c.h>
43 #include <linux/pci.h>
44 #include <linux/notifier.h>
45 #include <linux/backlight.h>
46 #include <linux/shrinker.h>
47 #include <linux/fb.h>
48 #include <linux/xarray.h>
49 #include <linux/interval_tree.h>
50 
51 #include <drm/drm_device.h>
52 #include <drm/drm_print.h>
53 
54 #if defined(__amd64__) || defined(__i386__)
55 #include "bios.h"
56 #endif
57 
58 void
59 tasklet_run(void *arg)
60 {
61 	struct tasklet_struct *ts = arg;
62 
63 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
64 	if (tasklet_trylock(ts)) {
65 		if (!atomic_read(&ts->count))
66 			ts->func(ts->data);
67 		tasklet_unlock(ts);
68 	}
69 }
70 
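/*
 * Emulation of the Linux sleep/wakeup protocol used by schedule() and
 * wake_up_process().  A typical Linux-style caller looks roughly like
 * this (a sketch, not code from this file):
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	while (!condition)
 *		schedule_timeout(timo);
 *	__set_current_state(TASK_RUNNING);
 *
 * set_current_state() records curproc under sch_mtx so that a later
 * wake_up_process() can clear sch_proc and keep schedule_timeout()
 * from going to sleep once the wakeup has already happened.
 */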
71 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
72 volatile struct proc *sch_proc;
73 volatile void *sch_ident;
74 int sch_priority;
75 
76 void
77 set_current_state(int state)
78 {
79 	if (sch_ident != curproc)
80 		mtx_enter(&sch_mtx);
81 	MUTEX_ASSERT_LOCKED(&sch_mtx);
82 	sch_ident = sch_proc = curproc;
83 	sch_priority = state;
84 }
85 
86 void
87 __set_current_state(int state)
88 {
89 	KASSERT(state == TASK_RUNNING);
90 	if (sch_ident == curproc) {
91 		MUTEX_ASSERT_LOCKED(&sch_mtx);
92 		sch_ident = NULL;
93 		mtx_leave(&sch_mtx);
94 	}
95 }
96 
97 void
98 schedule(void)
99 {
100 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
101 }
102 
103 long
104 schedule_timeout(long timeout)
105 {
106 	struct sleep_state sls;
107 	long deadline;
108 	int wait, spl;
109 
110 	MUTEX_ASSERT_LOCKED(&sch_mtx);
111 	KASSERT(!cold);
112 
113 	sleep_setup(&sls, sch_ident, sch_priority, "schto");
114 	if (timeout != MAX_SCHEDULE_TIMEOUT)
115 		sleep_setup_timeout(&sls, timeout);
116 
117 	wait = (sch_proc == curproc && timeout > 0);
118 
119 	spl = MUTEX_OLDIPL(&sch_mtx);
120 	MUTEX_OLDIPL(&sch_mtx) = splsched();
121 	mtx_leave(&sch_mtx);
122 
123 	sleep_setup_signal(&sls);
124 
125 	if (timeout != MAX_SCHEDULE_TIMEOUT)
126 		deadline = ticks + timeout;
127 	sleep_finish_all(&sls, wait);
128 	if (timeout != MAX_SCHEDULE_TIMEOUT)
129 		timeout = deadline - ticks;
130 
131 	mtx_enter(&sch_mtx);
132 	MUTEX_OLDIPL(&sch_mtx) = spl;
133 	sch_ident = curproc;
134 
135 	return timeout > 0 ? timeout : 0;
136 }
137 
138 long
139 schedule_timeout_uninterruptible(long timeout)
140 {
141 	tsleep(curproc, PWAIT, "schtou", timeout);
142 	return 0;
143 }
144 
145 int
146 wake_up_process(struct proc *p)
147 {
148 	atomic_cas_ptr(&sch_proc, p, NULL);
149 	return wakeup_proc(p, NULL);
150 }
151 
152 void
153 flush_workqueue(struct workqueue_struct *wq)
154 {
155 	if (cold)
156 		return;
157 
158 	taskq_barrier((struct taskq *)wq);
159 }
160 
161 bool
162 flush_work(struct work_struct *work)
163 {
164 	if (cold)
165 		return false;
166 
167 	taskq_barrier(work->tq);
168 	return false;
169 }
170 
171 bool
172 flush_delayed_work(struct delayed_work *dwork)
173 {
174 	bool ret = false;
175 
176 	if (cold)
177 		return false;
178 
179 	while (timeout_pending(&dwork->to)) {
180 		tsleep(dwork, PWAIT, "fldwto", 1);
181 		ret = true;
182 	}
183 
184 	taskq_barrier(dwork->tq ? dwork->tq : (struct taskq *)system_wq);
185 	return ret;
186 }
187 
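/*
 * Minimal kthread bookkeeping: every kthread_run() thread is kept on
 * kthread_list so kthread_park()/kthread_stop() can look it up by proc.
 * Parking is a handshake on the flag bits below: the parker sets
 * KTHREAD_SHOULDPARK and sleeps until the thread, in kthread_parkme(),
 * sets KTHREAD_PARKED, wakes the parker and sleeps itself;
 * kthread_unpark() clears the request and wakes the thread again.
 */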
188 struct kthread {
189 	int (*func)(void *);
190 	void *data;
191 	struct proc *proc;
192 	volatile u_int flags;
193 #define KTHREAD_SHOULDSTOP	0x0000001
194 #define KTHREAD_STOPPED		0x0000002
195 #define KTHREAD_SHOULDPARK	0x0000004
196 #define KTHREAD_PARKED		0x0000008
197 	LIST_ENTRY(kthread) next;
198 };
199 
200 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
201 
202 void
203 kthread_func(void *arg)
204 {
205 	struct kthread *thread = arg;
206 	int ret;
207 
208 	ret = thread->func(thread->data);
209 	thread->flags |= KTHREAD_STOPPED;
210 	kthread_exit(ret);
211 }
212 
213 struct proc *
214 kthread_run(int (*func)(void *), void *data, const char *name)
215 {
216 	struct kthread *thread;
217 
218 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
219 	thread->func = func;
220 	thread->data = data;
221 	thread->flags = 0;
222 
223 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
224 		free(thread, M_DRM, sizeof(*thread));
225 		return ERR_PTR(-ENOMEM);
226 	}
227 
228 	LIST_INSERT_HEAD(&kthread_list, thread, next);
229 	return thread->proc;
230 }
231 
232 struct kthread *
233 kthread_lookup(struct proc *p)
234 {
235 	struct kthread *thread;
236 
237 	LIST_FOREACH(thread, &kthread_list, next) {
238 		if (thread->proc == p)
239 			break;
240 	}
241 	KASSERT(thread);
242 
243 	return thread;
244 }
245 
246 int
247 kthread_should_park(void)
248 {
249 	struct kthread *thread = kthread_lookup(curproc);
250 	return (thread->flags & KTHREAD_SHOULDPARK);
251 }
252 
253 void
254 kthread_parkme(void)
255 {
256 	struct kthread *thread = kthread_lookup(curproc);
257 
258 	while (thread->flags & KTHREAD_SHOULDPARK) {
259 		thread->flags |= KTHREAD_PARKED;
260 		wakeup(thread);
261 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
262 		thread->flags &= ~KTHREAD_PARKED;
263 	}
264 }
265 
266 void
267 kthread_park(struct proc *p)
268 {
269 	struct kthread *thread = kthread_lookup(p);
270 
271 	while ((thread->flags & KTHREAD_PARKED) == 0) {
272 		thread->flags |= KTHREAD_SHOULDPARK;
273 		wake_up_process(thread->proc);
274 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
275 	}
276 }
277 
278 void
279 kthread_unpark(struct proc *p)
280 {
281 	struct kthread *thread = kthread_lookup(p);
282 
283 	thread->flags &= ~KTHREAD_SHOULDPARK;
284 	wakeup(thread);
285 }
286 
287 int
288 kthread_should_stop(void)
289 {
290 	struct kthread *thread = kthread_lookup(curproc);
291 	return (thread->flags & KTHREAD_SHOULDSTOP);
292 }
293 
294 void
295 kthread_stop(struct proc *p)
296 {
297 	struct kthread *thread = kthread_lookup(p);
298 
299 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
300 		thread->flags |= KTHREAD_SHOULDSTOP;
301 		wake_up_process(thread->proc);
302 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
303 	}
304 	LIST_REMOVE(thread, next);
305 	free(thread, M_DRM, sizeof(*thread));
306 }
307 
308 #if NBIOS > 0
309 extern char smbios_board_vendor[];
310 extern char smbios_board_prod[];
311 extern char smbios_board_serial[];
312 #endif
313 
314 bool
315 dmi_match(int slot, const char *str)
316 {
317 	switch (slot) {
318 	case DMI_SYS_VENDOR:
319 		if (hw_vendor != NULL &&
320 		    !strcmp(hw_vendor, str))
321 			return true;
322 		break;
323 	case DMI_PRODUCT_NAME:
324 		if (hw_prod != NULL &&
325 		    !strcmp(hw_prod, str))
326 			return true;
327 		break;
328 	case DMI_PRODUCT_VERSION:
329 		if (hw_ver != NULL &&
330 		    !strcmp(hw_ver, str))
331 			return true;
332 		break;
333 #if NBIOS > 0
334 	case DMI_BOARD_VENDOR:
335 		if (strcmp(smbios_board_vendor, str) == 0)
336 			return true;
337 		break;
338 	case DMI_BOARD_NAME:
339 		if (strcmp(smbios_board_prod, str) == 0)
340 			return true;
341 		break;
342 	case DMI_BOARD_SERIAL:
343 		if (strcmp(smbios_board_serial, str) == 0)
344 			return true;
345 		break;
346 #else
347 	case DMI_BOARD_VENDOR:
348 		if (hw_vendor != NULL &&
349 		    !strcmp(hw_vendor, str))
350 			return true;
351 		break;
352 	case DMI_BOARD_NAME:
353 		if (hw_prod != NULL &&
354 		    !strcmp(hw_prod, str))
355 			return true;
356 		break;
357 #endif
358 	case DMI_NONE:
359 	default:
360 		return false;
361 	}
362 
363 	return false;
364 }
365 
366 static bool
367 dmi_found(const struct dmi_system_id *dsi)
368 {
369 	int i, slot;
370 
371 	for (i = 0; i < nitems(dsi->matches); i++) {
372 		slot = dsi->matches[i].slot;
373 		if (slot == DMI_NONE)
374 			break;
375 		if (!dmi_match(slot, dsi->matches[i].substr))
376 			return false;
377 	}
378 
379 	return true;
380 }
381 
382 const struct dmi_system_id *
383 dmi_first_match(const struct dmi_system_id *sysid)
384 {
385 	const struct dmi_system_id *dsi;
386 
387 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
388 		if (dmi_found(dsi))
389 			return dsi;
390 	}
391 
392 	return NULL;
393 }
394 
395 #if NBIOS > 0
396 extern char smbios_bios_date[];
397 #endif
398 
399 const char *
400 dmi_get_system_info(int slot)
401 {
402 	WARN_ON(slot != DMI_BIOS_DATE);
403 #if NBIOS > 0
404 	if (slot == DMI_BIOS_DATE)
405 		return smbios_bios_date;
406 #endif
407 	return NULL;
408 }
409 
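/*
 * Quirk tables passed to dmi_check_system()/dmi_first_match() are
 * arrays of dmi_system_id terminated by an all-zero entry; an entry
 * matches when every populated slot/substr pair compares equal via
 * dmi_match() above.  A hypothetical table might look like:
 *
 *	static const struct dmi_system_id quirks[] = {
 *		{
 *			.callback = apply_quirk,	// hypothetical
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
 *				DMI_MATCH(DMI_PRODUCT_NAME, "Some Product"),
 *			},
 *		},
 *		{ }
 *	};
 */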
410 int
411 dmi_check_system(const struct dmi_system_id *sysid)
412 {
413 	const struct dmi_system_id *dsi;
414 	int num = 0;
415 
416 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
417 		if (dmi_found(dsi)) {
418 			num++;
419 			if (dsi->callback && dsi->callback(dsi))
420 				break;
421 		}
422 	}
423 	return (num);
424 }
425 
426 struct vm_page *
427 alloc_pages(unsigned int gfp_mask, unsigned int order)
428 {
429 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
430 	struct uvm_constraint_range *constraint = &no_constraint;
431 	struct pglist mlist;
432 
433 	if (gfp_mask & M_CANFAIL)
434 		flags |= UVM_PLA_FAILOK;
435 	if (gfp_mask & M_ZERO)
436 		flags |= UVM_PLA_ZERO;
437 	if (gfp_mask & __GFP_DMA32)
438 		constraint = &dma_constraint;
439 
440 	TAILQ_INIT(&mlist);
441 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
442 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
443 		return NULL;
444 	return TAILQ_FIRST(&mlist);
445 }
446 
447 void
448 __free_pages(struct vm_page *page, unsigned int order)
449 {
450 	struct pglist mlist;
451 	int i;
452 
453 	TAILQ_INIT(&mlist);
454 	for (i = 0; i < (1 << order); i++)
455 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
456 	uvm_pglistfree(&mlist);
457 }
458 
459 void
460 __pagevec_release(struct pagevec *pvec)
461 {
462 	struct pglist mlist;
463 	int i;
464 
465 	TAILQ_INIT(&mlist);
466 	for (i = 0; i < pvec->nr; i++)
467 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
468 	uvm_pglistfree(&mlist);
469 	pagevec_reinit(pvec);
470 }
471 
472 void *
473 kmap(struct vm_page *pg)
474 {
475 	vaddr_t va;
476 
477 #if defined (__HAVE_PMAP_DIRECT)
478 	va = pmap_map_direct(pg);
479 #else
480 	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
481 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
482 	pmap_update(pmap_kernel());
483 #endif
484 	return (void *)va;
485 }
486 
487 void
488 kunmap_va(void *addr)
489 {
490 	vaddr_t va = (vaddr_t)addr;
491 
492 #if defined (__HAVE_PMAP_DIRECT)
493 	pmap_unmap_direct(va);
494 #else
495 	pmap_kremove(va, PAGE_SIZE);
496 	pmap_update(pmap_kernel());
497 	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
498 #endif
499 }
500 
501 void *
502 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
503      pgprot_t prot)
504 {
505 	vaddr_t va;
506 	paddr_t pa;
507 	int i;
508 
509 	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
510 	if (va == 0)
511 		return NULL;
512 	for (i = 0; i < npages; i++) {
513 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
514 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
515 		    PROT_READ | PROT_WRITE,
516 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
517 		pmap_update(pmap_kernel());
518 	}
519 
520 	return (void *)va;
521 }
522 
523 void
524 vunmap(void *addr, size_t size)
525 {
526 	vaddr_t va = (vaddr_t)addr;
527 
528 	pmap_remove(pmap_kernel(), va, va + size);
529 	pmap_update(pmap_kernel());
530 	uvm_km_free(kernel_map, va, size);
531 }
532 
533 void
534 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
535     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
536 {
537 	const uint8_t *cbuf = buf;
538 	int i;
539 
540 	for (i = 0; i < len; i++) {
541 		if ((i % rowsize) == 0)
542 			printf("%s", prefix_str);
543 		printf("%02x", cbuf[i]);
544 		if ((i % rowsize) == (rowsize - 1))
545 			printf("\n");
546 		else
547 			printf(" ");
548 	}
549 }
550 
551 void *
552 memchr_inv(const void *s, int c, size_t n)
553 {
554 	if (n != 0) {
555 		const unsigned char *p = s;
556 
557 		do {
558 			if (*p++ != (unsigned char)c)
559 				return ((void *)(p - 1));
560 		} while (--n != 0);
561 	}
562 	return (NULL);
563 }
564 
565 int
566 panic_cmp(struct rb_node *a, struct rb_node *b)
567 {
568 	panic(__func__);
569 }
570 
571 #undef RB_ROOT
572 #define RB_ROOT(head)	(head)->rbh_root
573 
574 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
575 
576 /*
577  * This is a fairly minimal implementation of the Linux "idr" API.  It
578  * probably isn't very efficient, and definitely isn't RCU safe.  The
579  * pre-load buffer is global instead of per-cpu; we rely on the kernel
580  * lock to make this work.  We do randomize our IDs in order to make
581  * them harder to guess.
582  */
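/*
 * Typical use of the subset implemented here, as a sketch:
 *
 *	struct idr ids;
 *	int handle;
 *
 *	idr_init(&ids);
 *	handle = idr_alloc(&ids, obj, 1, 0, GFP_KERNEL);
 *	...
 *	obj = idr_find(&ids, handle);
 *	idr_remove(&ids, handle);
 *	idr_destroy(&ids);
 *
 * idr_preload() can be called beforehand to stash one spare entry in
 * the global cache so that a subsequent GFP_NOWAIT allocation is less
 * likely to fail.
 */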
583 
584 int idr_cmp(struct idr_entry *, struct idr_entry *);
585 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
586 
587 struct pool idr_pool;
588 struct idr_entry *idr_entry_cache;
589 
590 void
591 idr_init(struct idr *idr)
592 {
593 	static int initialized;
594 
595 	if (!initialized) {
596 		pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
597 		    "idrpl", NULL);
598 		initialized = 1;
599 	}
600 	SPLAY_INIT(&idr->tree);
601 }
602 
603 void
604 idr_destroy(struct idr *idr)
605 {
606 	struct idr_entry *id;
607 
608 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
609 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
610 		pool_put(&idr_pool, id);
611 	}
612 }
613 
614 void
615 idr_preload(unsigned int gfp_mask)
616 {
617 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
618 
619 	KERNEL_ASSERT_LOCKED();
620 
621 	if (idr_entry_cache == NULL)
622 		idr_entry_cache = pool_get(&idr_pool, flags);
623 }
624 
625 int
626 idr_alloc(struct idr *idr, void *ptr, int start, int end,
627     unsigned int gfp_mask)
628 {
629 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
630 	struct idr_entry *id;
631 	int begin;
632 
633 	KERNEL_ASSERT_LOCKED();
634 
635 	if (idr_entry_cache) {
636 		id = idr_entry_cache;
637 		idr_entry_cache = NULL;
638 	} else {
639 		id = pool_get(&idr_pool, flags);
640 		if (id == NULL)
641 			return -ENOMEM;
642 	}
643 
644 	if (end <= 0)
645 		end = INT_MAX;
646 
647 #ifdef notyet
648 	id->id = begin = start + arc4random_uniform(end - start);
649 #else
650 	id->id = begin = start;
651 #endif
652 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
653 		if (++id->id == end)
654 			id->id = start;
655 		if (id->id == begin) {
656 			pool_put(&idr_pool, id);
657 			return -ENOSPC;
658 		}
659 	}
660 	id->ptr = ptr;
661 	return id->id;
662 }
663 
664 void *
665 idr_replace(struct idr *idr, void *ptr, int id)
666 {
667 	struct idr_entry find, *res;
668 	void *old;
669 
670 	find.id = id;
671 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
672 	if (res == NULL)
673 		return ERR_PTR(-ENOENT);
674 	old = res->ptr;
675 	res->ptr = ptr;
676 	return old;
677 }
678 
679 void *
680 idr_remove(struct idr *idr, int id)
681 {
682 	struct idr_entry find, *res;
683 	void *ptr = NULL;
684 
685 	find.id = id;
686 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
687 	if (res) {
688 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
689 		ptr = res->ptr;
690 		pool_put(&idr_pool, res);
691 	}
692 	return ptr;
693 }
694 
695 void *
696 idr_find(struct idr *idr, int id)
697 {
698 	struct idr_entry find, *res;
699 
700 	find.id = id;
701 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
702 	if (res == NULL)
703 		return NULL;
704 	return res->ptr;
705 }
706 
707 void *
708 idr_get_next(struct idr *idr, int *id)
709 {
710 	struct idr_entry *res;
711 
712 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
713 		if (res->id >= *id) {
714 			*id = res->id;
715 			return res->ptr;
716 		}
717 	}
718 
719 	return NULL;
720 }
721 
722 int
723 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
724 {
725 	struct idr_entry *id;
726 	int ret;
727 
728 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
729 		ret = func(id->id, id->ptr, data);
730 		if (ret)
731 			return ret;
732 	}
733 
734 	return 0;
735 }
736 
737 int
738 idr_cmp(struct idr_entry *a, struct idr_entry *b)
739 {
740 	return (a->id < b->id ? -1 : a->id > b->id);
741 }
742 
743 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
744 
745 void
746 ida_init(struct ida *ida)
747 {
748 	ida->counter = 0;
749 }
750 
751 void
752 ida_destroy(struct ida *ida)
753 {
754 }
755 
756 void
757 ida_remove(struct ida *ida, int id)
758 {
759 }
760 
761 int
762 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
763     int flags)
764 {
765 	if (end <= 0)
766 		end = INT_MAX;
767 
768 	if (start > ida->counter)
769 		ida->counter = start;
770 
771 	if (ida->counter >= end)
772 		return -ENOSPC;
773 
774 	return ida->counter++;
775 }
776 
777 void
778 ida_simple_remove(struct ida *ida, int id)
779 {
780 }
781 
782 int
783 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
784 {
785 	return (a->id < b->id ? -1 : a->id > b->id);
786 }
787 
788 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
789 struct pool xa_pool;
790 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
791 
792 void
793 xa_init_flags(struct xarray *xa, gfp_t flags)
794 {
795 	static int initialized;
796 
797 	if (!initialized) {
798 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_TTY, 0,
799 		    "xapl", NULL);
800 		initialized = 1;
801 	}
802 	SPLAY_INIT(&xa->xa_tree);
803 }
804 
805 void
806 xa_destroy(struct xarray *xa)
807 {
808 	struct xarray_entry *id;
809 
810 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
811 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
812 		pool_put(&xa_pool, id);
813 	}
814 }
815 
816 int
817 xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
818 {
819 	struct xarray_entry *xid;
820 	int flags = (gfp & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
821 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
822 	int begin;
823 
824 	xid = pool_get(&xa_pool, flags);
825 	if (xid == NULL)
826 		return -ENOMEM;
827 
828 	if (limit <= 0)
829 		limit = INT_MAX;
830 
831 	xid->id = begin = start;
832 
833 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
834 		if (++xid->id == limit)
835 			xid->id = start;
836 		if (xid->id == begin) {
837 			pool_put(&xa_pool, xid);
838 			return -EBUSY;
839 		}
840 	}
841 	xid->ptr = entry;
842 	*id = xid->id;
843 	return 0;
844 }
845 
846 void *
847 xa_erase(struct xarray *xa, unsigned long index)
848 {
849 	struct xarray_entry find, *res;
850 	void *ptr = NULL;
851 
852 	find.id = index;
853 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
854 	if (res) {
855 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
856 		ptr = res->ptr;
857 		pool_put(&xa_pool, res);
858 	}
859 	return ptr;
860 }
861 
862 void *
863 xa_load(struct xarray *xa, unsigned long index)
864 {
865 	struct xarray_entry find, *res;
866 
867 	find.id = index;
868 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
869 	if (res == NULL)
870 		return NULL;
871 	return res->ptr;
872 }
873 
874 void *
875 xa_get_next(struct xarray *xa, unsigned long *index)
876 {
877 	struct xarray_entry *res;
878 
879 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
880 		if (res->id >= *index) {
881 			*index = res->id;
882 			return res->ptr;
883 		}
884 	}
885 
886 	return NULL;
887 }
888 
889 int
890 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
891 {
892 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
893 	    M_DRM, gfp_mask);
894 	if (table->sgl == NULL)
895 		return -ENOMEM;
896 	table->nents = table->orig_nents = nents;
897 	return 0;
898 }
899 
900 void
901 sg_free_table(struct sg_table *table)
902 {
903 	free(table->sgl, M_DRM,
904 	    table->orig_nents * sizeof(struct scatterlist));
905 }
906 
907 size_t
908 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
909     const void *buf, size_t buflen)
910 {
911 	panic("%s", __func__);
912 }
913 
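/*
 * Map a Linux i2c_msg array onto iic_exec(9).  Everything but the last
 * two messages is executed without a stop condition; if two messages
 * remain, the first supplies the command/register bytes for the final
 * I2C_OP_*_WITH_STOP transfer.  This covers the common "write register
 * address, then read data back" shape of DDC/EDID transfers.
 */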
914 int
915 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
916 {
917 	void *cmd = NULL;
918 	int cmdlen = 0;
919 	int err, ret = 0;
920 	int op;
921 
922 	iic_acquire_bus(&adap->ic, 0);
923 
924 	while (num > 2) {
925 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
926 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
927 		    msgs->buf, msgs->len, 0);
928 		if (err) {
929 			ret = -err;
930 			goto fail;
931 		}
932 		msgs++;
933 		num--;
934 		ret++;
935 	}
936 
937 	if (num > 1) {
938 		cmd = msgs->buf;
939 		cmdlen = msgs->len;
940 		msgs++;
941 		num--;
942 		ret++;
943 	}
944 
945 	op = (msgs->flags & I2C_M_RD) ?
946 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
947 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
948 	    msgs->buf, msgs->len, 0);
949 	if (err) {
950 		ret = -err;
951 		goto fail;
952 	}
953 	msgs++;
954 	ret++;
955 
956 fail:
957 	iic_release_bus(&adap->ic, 0);
958 
959 	return ret;
960 }
961 
962 int
963 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
964 {
965 	int ret;
966 
967 	if (adap->lock_ops)
968 		adap->lock_ops->lock_bus(adap, 0);
969 
970 	if (adap->algo)
971 		ret = adap->algo->master_xfer(adap, msgs, num);
972 	else
973 		ret = i2c_master_xfer(adap, msgs, num);
974 
975 	if (adap->lock_ops)
976 		adap->lock_ops->unlock_bus(adap, 0);
977 
978 	return ret;
979 }
980 
981 int
982 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
983 {
984 	struct i2c_algo_bit_data *algo = adap->algo_data;
985 	struct i2c_adapter bb;
986 
987 	memset(&bb, 0, sizeof(bb));
988 	bb.ic = algo->ic;
989 	bb.retries = adap->retries;
990 	return i2c_master_xfer(&bb, msgs, num);
991 }
992 
993 uint32_t
994 i2c_bb_functionality(struct i2c_adapter *adap)
995 {
996 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
997 }
998 
999 struct i2c_algorithm i2c_bit_algo = {
1000 	.master_xfer = i2c_bb_master_xfer,
1001 	.functionality = i2c_bb_functionality
1002 };
1003 
1004 int
1005 i2c_bit_add_bus(struct i2c_adapter *adap)
1006 {
1007 	adap->algo = &i2c_bit_algo;
1008 	adap->retries = 3;
1009 
1010 	return 0;
1011 }
1012 
1013 #if defined(__amd64__) || defined(__i386__)
1014 
1015 /*
1016  * This is a minimal implementation of the Linux vga_get/vga_put
1017  * interface.  In all likelihood, it will only work for inteldrm(4) as
1018  * it assumes that if there is another active VGA device in the
1019  * system, it is sitting behind a PCI bridge.
1020  */
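/*
 * vga_get_uninterruptible() scans PCI domain 0 and clears the VGA
 * enable bit in the bridge control register of a bridge that has it
 * set, remembering that bridge's tag; vga_put() simply sets the bit
 * again.  There is no reference counting and nothing ever waits.
 */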
1021 
1022 extern int pci_enumerate_bus(struct pci_softc *,
1023     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1024 
1025 pcitag_t vga_bridge_tag;
1026 int vga_bridge_disabled;
1027 
1028 int
1029 vga_disable_bridge(struct pci_attach_args *pa)
1030 {
1031 	pcireg_t bhlc, bc;
1032 
1033 	if (pa->pa_domain != 0)
1034 		return 0;
1035 
1036 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1037 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1038 		return 0;
1039 
1040 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1041 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1042 		return 0;
1043 	bc &= ~PPB_BC_VGA_ENABLE;
1044 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1045 
1046 	vga_bridge_tag = pa->pa_tag;
1047 	vga_bridge_disabled = 1;
1048 
1049 	return 1;
1050 }
1051 
1052 void
1053 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1054 {
1055 	KASSERT(pdev->pci->sc_bridgetag == NULL);
1056 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1057 }
1058 
1059 void
1060 vga_put(struct pci_dev *pdev, int rsrc)
1061 {
1062 	pcireg_t bc;
1063 
1064 	if (!vga_bridge_disabled)
1065 		return;
1066 
1067 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1068 	bc |= PPB_BC_VGA_ENABLE;
1069 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1070 
1071 	vga_bridge_disabled = 0;
1072 }
1073 
1074 #endif
1075 
1076 /*
1077  * ACPI types and interfaces.
1078  */
1079 
1080 #ifdef __HAVE_ACPI
1081 #include "acpi.h"
1082 #endif
1083 
1084 #if NACPI > 0
1085 
1086 #include <dev/acpi/acpireg.h>
1087 #include <dev/acpi/acpivar.h>
1088 #include <dev/acpi/amltypes.h>
1089 #include <dev/acpi/dsdt.h>
1090 
1091 acpi_status
1092 acpi_get_table(const char *sig, int instance,
1093     struct acpi_table_header **hdr)
1094 {
1095 	struct acpi_softc *sc = acpi_softc;
1096 	struct acpi_q *entry;
1097 
1098 	KASSERT(instance == 1);
1099 
1100 	if (sc == NULL)
1101 		return AE_NOT_FOUND;
1102 
1103 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1104 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1105 			*hdr = entry->q_table;
1106 			return 0;
1107 		}
1108 	}
1109 
1110 	return AE_NOT_FOUND;
1111 }
1112 
1113 acpi_status
1114 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1115 {
1116 	node = aml_searchname(node, name);
1117 	if (node == NULL)
1118 		return AE_NOT_FOUND;
1119 
1120 	*rnode = node;
1121 	return 0;
1122 }
1123 
1124 acpi_status
1125 acpi_get_name(acpi_handle node, int type, struct acpi_buffer *buffer)
1126 {
1127 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1128 	KASSERT(type == ACPI_FULL_PATHNAME);
1129 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1130 	return 0;
1131 }
1132 
1133 acpi_status
1134 acpi_evaluate_object(acpi_handle node, const char *name,
1135     struct acpi_object_list *params, struct acpi_buffer *result)
1136 {
1137 	struct aml_value args[4], res;
1138 	union acpi_object *obj;
1139 	uint8_t *data;
1140 	int i;
1141 
1142 	KASSERT(params->count <= nitems(args));
1143 
1144 	for (i = 0; i < params->count; i++) {
1145 		args[i].type = params->pointer[i].type;
1146 		switch (args[i].type) {
1147 		case AML_OBJTYPE_INTEGER:
1148 			args[i].v_integer = params->pointer[i].integer.value;
1149 			break;
1150 		case AML_OBJTYPE_BUFFER:
1151 			args[i].length = params->pointer[i].buffer.length;
1152 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1153 			break;
1154 		default:
1155 			printf("%s: arg type 0x%02x\n", __func__, args[i].type);
1156 			return AE_BAD_PARAMETER;
1157 		}
1158 	}
1159 
1160 	if (name) {
1161 		node = aml_searchname(node, name);
1162 		if (node == NULL)
1163 			return AE_NOT_FOUND;
1164 	}
1165 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1166 		aml_freevalue(&res);
1167 		return AE_ERROR;
1168 	}
1169 
1170 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1171 
1172 	result->length = sizeof(union acpi_object);
1173 	switch (res.type) {
1174 	case AML_OBJTYPE_BUFFER:
1175 		result->length += res.length;
1176 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1177 		obj = (union acpi_object *)result->pointer;
1178 		data = (uint8_t *)(obj + 1);
1179 		obj->type = res.type;
1180 		obj->buffer.length = res.length;
1181 		obj->buffer.pointer = data;
1182 		memcpy(data, res.v_buffer, res.length);
1183 		break;
1184 	default:
1185 		printf("%s: return type 0x%02x\n", __func__, res.type);
1186 		aml_freevalue(&res);
1187 		return AE_ERROR;
1188 	}
1189 
1190 	aml_freevalue(&res);
1191 	return 0;
1192 }
1193 
1194 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1195 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1196 
1197 int
1198 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1199 {
1200 	struct acpi_bus_event event;
1201 	struct notifier_block *nb;
1202 
1203 	event.device_class = ACPI_VIDEO_CLASS;
1204 	event.type = notify;
1205 
1206 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1207 		nb->notifier_call(nb, 0, &event);
1208 	return 0;
1209 }
1210 
1211 int
1212 register_acpi_notifier(struct notifier_block *nb)
1213 {
1214 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1215 	return 0;
1216 }
1217 
1218 int
1219 unregister_acpi_notifier(struct notifier_block *nb)
1220 {
1221 	struct notifier_block *tmp;
1222 
1223 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1224 		if (tmp == nb) {
1225 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1226 			    notifier_block, link);
1227 			return 0;
1228 		}
1229 	}
1230 
1231 	return -ENOENT;
1232 }
1233 
1234 const char *
1235 acpi_format_exception(acpi_status status)
1236 {
1237 	switch (status) {
1238 	case AE_NOT_FOUND:
1239 		return "not found";
1240 	case AE_BAD_PARAMETER:
1241 		return "bad parameter";
1242 	default:
1243 		return "unknown";
1244 	}
1245 }
1246 
1247 #endif
1248 
1249 void
1250 backlight_do_update_status(void *arg)
1251 {
1252 	backlight_update_status(arg);
1253 }
1254 
1255 struct backlight_device *
1256 backlight_device_register(const char *name, void *kdev, void *data,
1257     const struct backlight_ops *ops, struct backlight_properties *props)
1258 {
1259 	struct backlight_device *bd;
1260 
1261 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1262 	bd->ops = ops;
1263 	bd->props = *props;
1264 	bd->data = data;
1265 
1266 	task_set(&bd->task, backlight_do_update_status, bd);
1267 
1268 	return bd;
1269 }
1270 
1271 void
1272 backlight_device_unregister(struct backlight_device *bd)
1273 {
1274 	free(bd, M_DRM, sizeof(*bd));
1275 }
1276 
1277 void
1278 backlight_schedule_update_status(struct backlight_device *bd)
1279 {
1280 	task_add(systq, &bd->task);
1281 }
1282 
1283 inline int
1284 backlight_enable(struct backlight_device *bd)
1285 {
1286 	if (bd == NULL)
1287 		return 0;
1288 
1289 	bd->props.power = FB_BLANK_UNBLANK;
1290 
1291 	return bd->ops->update_status(bd);
1292 }
1293 
1294 inline int
1295 backlight_disable(struct backlight_device *bd)
1296 {
1297 	if (bd == NULL)
1298 		return 0;
1299 
1300 	bd->props.power = FB_BLANK_POWERDOWN;
1301 
1302 	return bd->ops->update_status(bd);
1303 }
1304 
1305 void
1306 drm_sysfs_hotplug_event(struct drm_device *dev)
1307 {
1308 	KNOTE(&dev->note, NOTE_CHANGE);
1309 }
1310 
1311 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1312 
1313 uint64_t
1314 dma_fence_context_alloc(unsigned int num)
1315 {
1316 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1317 }
1318 
1319 struct default_wait_cb {
1320 	struct dma_fence_cb base;
1321 	struct proc *proc;
1322 };
1323 
1324 static void
1325 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1326 {
1327 	struct default_wait_cb *wait =
1328 	    container_of(cb, struct default_wait_cb, base);
1329 	wake_up_process(wait->proc);
1330 }
1331 
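/*
 * Generic fence wait: enable signalling if the driver requires it,
 * hook a callback that calls wake_up_process() on the sleeping proc
 * onto the fence, then msleep() on the fence lock until the fence
 * signals, the timeout expires or (with intr) a signal is delivered.
 */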
1332 long
1333 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1334 {
1335 	long ret = timeout ? timeout : 1;
1336 	int err;
1337 	struct default_wait_cb cb;
1338 	bool was_set;
1339 
1340 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1341 		return ret;
1342 
1343 	mtx_enter(fence->lock);
1344 
1345 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1346 	    &fence->flags);
1347 
1348 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1349 		goto out;
1350 
1351 	if (!was_set && fence->ops->enable_signaling) {
1352 		if (!fence->ops->enable_signaling(fence)) {
1353 			dma_fence_signal_locked(fence);
1354 			goto out;
1355 		}
1356 	}
1357 
1358 	if (timeout == 0) {
1359 		ret = 0;
1360 		goto out;
1361 	}
1362 
1363 	cb.base.func = dma_fence_default_wait_cb;
1364 	cb.proc = curproc;
1365 	list_add(&cb.base.node, &fence->cb_list);
1366 
1367 	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1368 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0, "dmafence",
1369 		    timeout);
1370 		if (err == EINTR || err == ERESTART) {
1371 			ret = -ERESTARTSYS;
1372 			break;
1373 		} else if (err == EWOULDBLOCK) {
1374 			ret = 0;
1375 			break;
1376 		}
1377 	}
1378 
1379 	if (!list_empty(&cb.base.node))
1380 		list_del(&cb.base.node);
1381 out:
1382 	mtx_leave(fence->lock);
1383 
1384 	return ret;
1385 }
1386 
1387 static bool
1388 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1389     uint32_t *idx)
1390 {
1391 	int i;
1392 
1393 	for (i = 0; i < count; ++i) {
1394 		struct dma_fence *fence = fences[i];
1395 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1396 			if (idx)
1397 				*idx = i;
1398 			return true;
1399 		}
1400 	}
1401 	return false;
1402 }
1403 
1404 long
1405 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
1406     bool intr, long timeout, uint32_t *idx)
1407 {
1408 	struct default_wait_cb *cb;
1409 	int i, err;
1410 	int ret = timeout;
1411 
1412 	if (timeout == 0) {
1413 		for (i = 0; i < count; i++) {
1414 			if (dma_fence_is_signaled(fences[i])) {
1415 				if (idx)
1416 					*idx = i;
1417 				return 1;
1418 			}
1419 		}
1420 		return 0;
1421 	}
1422 
1423 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1424 	if (cb == NULL)
1425 		return -ENOMEM;
1426 
1427 	for (i = 0; i < count; i++) {
1428 		struct dma_fence *fence = fences[i];
1429 		cb[i].proc = curproc;
1430 		if (dma_fence_add_callback(fence, &cb[i].base,
1431 		    dma_fence_default_wait_cb)) {
1432 			if (idx)
1433 				*idx = i;
1434 			goto cb_cleanup;
1435 		}
1436 	}
1437 
1438 	while (ret > 0) {
1439 		if (dma_fence_test_signaled_any(fences, count, idx))
1440 			break;
1441 
1442 		err = tsleep(curproc, intr ? PCATCH : 0,
1443 		    "dfwat", timeout);
1444 		if (err == EINTR || err == ERESTART) {
1445 			ret = -ERESTARTSYS;
1446 			break;
1447 		} else if (err == EWOULDBLOCK) {
1448 			ret = 0;
1449 			break;
1450 		}
1451 	}
1452 
1453 cb_cleanup:
1454 	while (i-- > 0)
1455 		dma_fence_remove_callback(fences[i], &cb[i].base);
1456 	free(cb, M_DRM, count * sizeof(*cb));
1457 	return ret;
1458 }
1459 
1460 static struct dma_fence dma_fence_stub;
1461 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
1462 
1463 static const char *
1464 dma_fence_stub_get_name(struct dma_fence *fence)
1465 {
1466 	return "stub";
1467 }
1468 
1469 static const struct dma_fence_ops dma_fence_stub_ops = {
1470 	.get_driver_name = dma_fence_stub_get_name,
1471 	.get_timeline_name = dma_fence_stub_get_name,
1472 };
1473 
1474 struct dma_fence *
1475 dma_fence_get_stub(void)
1476 {
1477 	mtx_enter(&dma_fence_stub_mtx);
1478 	if (dma_fence_stub.ops == NULL) {
1479 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
1480 		    &dma_fence_stub_mtx, 0, 0);
1481 		dma_fence_signal_locked(&dma_fence_stub);
1482 	}
1483 	mtx_leave(&dma_fence_stub_mtx);
1484 
1485 	return dma_fence_get(&dma_fence_stub);
1486 }
1487 
1488 static const char *
1489 dma_fence_array_get_driver_name(struct dma_fence *fence)
1490 {
1491 	return "dma_fence_array";
1492 }
1493 
1494 static const char *
1495 dma_fence_array_get_timeline_name(struct dma_fence *fence)
1496 {
1497 	return "unbound";
1498 }
1499 
1500 static void
1501 irq_dma_fence_array_work(struct irq_work *wrk)
1502 {
1503 	struct dma_fence_array *dfa = container_of(wrk, typeof(*dfa), work);
1504 
1505 	dma_fence_signal(&dfa->base);
1506 	dma_fence_put(&dfa->base);
1507 }
1508 
1509 static void
1510 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
1511 {
1512 	struct dma_fence_array_cb *array_cb =
1513 	    container_of(cb, struct dma_fence_array_cb, cb);
1514 	struct dma_fence_array *dfa = array_cb->array;
1515 
1516 	if (atomic_dec_and_test(&dfa->num_pending))
1517 		irq_work_queue(&dfa->work);
1518 	else
1519 		dma_fence_put(&dfa->base);
1520 }
1521 
1522 static bool
1523 dma_fence_array_enable_signaling(struct dma_fence *fence)
1524 {
1525 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1526 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
1527 	int i;
1528 
1529 	for (i = 0; i < dfa->num_fences; ++i) {
1530 		cb[i].array = dfa;
1531 		dma_fence_get(&dfa->base);
1532 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
1533 		    dma_fence_array_cb_func)) {
1534 			dma_fence_put(&dfa->base);
1535 			if (atomic_dec_and_test(&dfa->num_pending))
1536 				return false;
1537 		}
1538 	}
1539 
1540 	return true;
1541 }
1542 
1543 static bool dma_fence_array_signaled(struct dma_fence *fence)
1544 {
1545 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1546 
1547 	return atomic_read(&dfa->num_pending) <= 0;
1548 }
1549 
1550 static void dma_fence_array_release(struct dma_fence *fence)
1551 {
1552 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1553 	int i;
1554 
1555 	for (i = 0; i < dfa->num_fences; ++i)
1556 		dma_fence_put(dfa->fences[i]);
1557 
1558 	free(dfa->fences, M_DRM, 0);
1559 	dma_fence_free(fence);
1560 }
1561 
1562 struct dma_fence_array *
1563 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
1564     unsigned seqno, bool signal_on_any)
1565 {
1566 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
1567 	    (num_fences * sizeof(struct dma_fence_array_cb)),
1568 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1569 	if (dfa == NULL)
1570 		return NULL;
1571 
1572 	mtx_init(&dfa->lock, IPL_TTY);
1573 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
1574 	    context, seqno);
1575 	init_irq_work(&dfa->work, irq_dma_fence_array_work);
1576 
1577 	dfa->num_fences = num_fences;
1578 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
1579 	dfa->fences = fences;
1580 
1581 	return dfa;
1582 }
1583 
1584 const struct dma_fence_ops dma_fence_array_ops = {
1585 	.get_driver_name = dma_fence_array_get_driver_name,
1586 	.get_timeline_name = dma_fence_array_get_timeline_name,
1587 	.enable_signaling = dma_fence_array_enable_signaling,
1588 	.signaled = dma_fence_array_signaled,
1589 	.release = dma_fence_array_release,
1590 };
1591 
1592 int
1593 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
1594 {
1595 	return (ENXIO);
1596 }
1597 
1598 int
1599 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
1600 {
1601 	return (ENXIO);
1602 }
1603 
1604 int
1605 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
1606 {
1607 	return (ENOTTY);
1608 }
1609 
1610 int
1611 dmabuf_poll(struct file *fp, int events, struct proc *p)
1612 {
1613 	return (0);
1614 }
1615 
1616 int
1617 dmabuf_kqfilter(struct file *fp, struct knote *kn)
1618 {
1619 	return (EINVAL);
1620 }
1621 
1622 int
1623 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
1624 {
1625 	struct dma_buf *dmabuf = fp->f_data;
1626 
1627 	memset(st, 0, sizeof(*st));
1628 	st->st_size = dmabuf->size;
1629 	st->st_mode = S_IFIFO;	/* XXX */
1630 	return (0);
1631 }
1632 
1633 int
1634 dmabuf_close(struct file *fp, struct proc *p)
1635 {
1636 	struct dma_buf *dmabuf = fp->f_data;
1637 
1638 	fp->f_data = NULL;
1639 	KERNEL_LOCK();
1640 	dmabuf->ops->release(dmabuf);
1641 	KERNEL_UNLOCK();
1642 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
1643 	return (0);
1644 }
1645 
1646 int
1647 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
1648 {
1649 	struct dma_buf *dmabuf = fp->f_data;
1650 	off_t newoff;
1651 
1652 	if (*offset != 0)
1653 		return (EINVAL);
1654 
1655 	switch (whence) {
1656 	case SEEK_SET:
1657 		newoff = 0;
1658 		break;
1659 	case SEEK_END:
1660 		newoff = dmabuf->size;
1661 		break;
1662 	default:
1663 		return (EINVAL);
1664 	}
1665 	mtx_enter(&fp->f_mtx);
1666 	fp->f_offset = newoff;
1667 	mtx_leave(&fp->f_mtx);
1668 	*offset = newoff;
1669 	return (0);
1670 }
1671 
1672 const struct fileops dmabufops = {
1673 	.fo_read	= dmabuf_read,
1674 	.fo_write	= dmabuf_write,
1675 	.fo_ioctl	= dmabuf_ioctl,
1676 	.fo_poll	= dmabuf_poll,
1677 	.fo_kqfilter	= dmabuf_kqfilter,
1678 	.fo_stat	= dmabuf_stat,
1679 	.fo_close	= dmabuf_close,
1680 	.fo_seek	= dmabuf_seek,
1681 };
1682 
1683 struct dma_buf *
1684 dma_buf_export(const struct dma_buf_export_info *info)
1685 {
1686 	struct proc *p = curproc;
1687 	struct dma_buf *dmabuf;
1688 	struct file *fp;
1689 
1690 	fp = fnew(p);
1691 	if (fp == NULL)
1692 		return ERR_PTR(-ENFILE);
1693 	fp->f_type = DTYPE_DMABUF;
1694 	fp->f_ops = &dmabufops;
1695 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
1696 	dmabuf->priv = info->priv;
1697 	dmabuf->ops = info->ops;
1698 	dmabuf->size = info->size;
1699 	dmabuf->file = fp;
1700 	fp->f_data = dmabuf;
1701 	INIT_LIST_HEAD(&dmabuf->attachments);
1702 	return dmabuf;
1703 }
1704 
1705 struct dma_buf *
1706 dma_buf_get(int fd)
1707 {
1708 	struct proc *p = curproc;
1709 	struct filedesc *fdp = p->p_fd;
1710 	struct file *fp;
1711 
1712 	if ((fp = fd_getfile(fdp, fd)) == NULL)
1713 		return ERR_PTR(-EBADF);
1714 
1715 	if (fp->f_type != DTYPE_DMABUF) {
1716 		FRELE(fp, p);
1717 		return ERR_PTR(-EINVAL);
1718 	}
1719 
1720 	return fp->f_data;
1721 }
1722 
1723 void
1724 dma_buf_put(struct dma_buf *dmabuf)
1725 {
1726 	KASSERT(dmabuf);
1727 	KASSERT(dmabuf->file);
1728 
1729 	FRELE(dmabuf->file, curproc);
1730 }
1731 
1732 int
1733 dma_buf_fd(struct dma_buf *dmabuf, int flags)
1734 {
1735 	struct proc *p = curproc;
1736 	struct filedesc *fdp = p->p_fd;
1737 	struct file *fp = dmabuf->file;
1738 	int fd, cloexec, error;
1739 
1740 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
1741 
1742 	fdplock(fdp);
1743 restart:
1744 	if ((error = fdalloc(p, 0, &fd)) != 0) {
1745 		if (error == ENOSPC) {
1746 			fdexpand(p);
1747 			goto restart;
1748 		}
1749 		fdpunlock(fdp);
1750 		return -error;
1751 	}
1752 
1753 	fdinsert(fdp, fd, cloexec, fp);
1754 	fdpunlock(fdp);
1755 
1756 	return fd;
1757 }
1758 
1759 void
1760 get_dma_buf(struct dma_buf *dmabuf)
1761 {
1762 	FREF(dmabuf->file);
1763 }
1764 
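/*
 * Derive the fastest supported link speed from the PCIe capability.
 * Devices with a version 2 capability advertise a supported link
 * speeds vector in the Link Capabilities 2 register; older devices
 * only encode the maximum speed in the low bits of Link Capabilities.
 */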
1765 enum pci_bus_speed
1766 pcie_get_speed_cap(struct pci_dev *pdev)
1767 {
1768 	pci_chipset_tag_t	pc = pdev->pc;
1769 	pcitag_t		tag = pdev->tag;
1770 	int			pos;
1771 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
1772 	pcireg_t		id;
1773 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
1774 	int			bus, device, function;
1775 
1776 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1777 	    &pos, NULL))
1778 		return PCI_SPEED_UNKNOWN;
1779 
1780 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1781 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1782 
1783 	/* we've been informed VIA and ServerWorks don't make the cut */
1784 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
1785 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
1786 		return PCI_SPEED_UNKNOWN;
1787 
1788 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1789 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
1790 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
1791 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
1792 
1793 	lnkcap &= 0x0f;
1794 	lnkcap2 &= 0xfe;
1795 
1796 	if (lnkcap2) { /* PCIE GEN 3.0 */
1797 		if (lnkcap2 & 0x02)
1798 			cap = PCIE_SPEED_2_5GT;
1799 		if (lnkcap2 & 0x04)
1800 			cap = PCIE_SPEED_5_0GT;
1801 		if (lnkcap2 & 0x08)
1802 			cap = PCIE_SPEED_8_0GT;
1803 		if (lnkcap2 & 0x10)
1804 			cap = PCIE_SPEED_16_0GT;
1805 	} else {
1806 		if (lnkcap & 0x01)
1807 			cap = PCIE_SPEED_2_5GT;
1808 		if (lnkcap & 0x02)
1809 			cap = PCIE_SPEED_5_0GT;
1810 	}
1811 
1812 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
1813 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
1814 	    lnkcap2);
1815 	return cap;
1816 }
1817 
1818 enum pcie_link_width
1819 pcie_get_width_cap(struct pci_dev *pdev)
1820 {
1821 	pci_chipset_tag_t	pc = pdev->pc;
1822 	pcitag_t		tag = pdev->tag;
1823 	int			pos;
1824 	pcireg_t		lnkcap = 0;
1825 	pcireg_t		id;
1826 	int			bus, device, function;
1827 
1828 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1829 	    &pos, NULL))
1830 		return PCIE_LNK_WIDTH_UNKNOWN;
1831 
1832 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1833 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1834 
1835 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1836 
1837 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
1838 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
1839 
1840 	if (lnkcap)
1841 		return (lnkcap & 0x3f0) >> 4;
1842 	return PCIE_LNK_WIDTH_UNKNOWN;
1843 }
1844 
1845 int
1846 default_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1847     int sync, void *key)
1848 {
1849 	wakeup(wqe);
1850 	if (wqe->proc)
1851 		wake_up_process(wqe->proc);
1852 	return 0;
1853 }
1854 
1855 int
1856 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1857     int sync, void *key)
1858 {
1859 	default_wake_function(wqe, mode, sync, key);
1860 	list_del_init(&wqe->entry);
1861 	return 0;
1862 }
1863 
1864 static wait_queue_head_t bit_waitq;
1865 wait_queue_head_t var_waitq;
1866 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
1867 
1868 int
1869 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1870 {
1871 	int err;
1872 
1873 	if (!test_bit(bit, word))
1874 		return 0;
1875 
1876 	mtx_enter(&wait_bit_mtx);
1877 	while (test_bit(bit, word)) {
1878 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
1879 		    INFSLP);
1880 		if (err) {
1881 			mtx_leave(&wait_bit_mtx);
1882 			return 1;
1883 		}
1884 	}
1885 	mtx_leave(&wait_bit_mtx);
1886 	return 0;
1887 }
1888 
1889 int
1890 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
1891 {
1892 	int err;
1893 
1894 	if (!test_bit(bit, word))
1895 		return 0;
1896 
1897 	mtx_enter(&wait_bit_mtx);
1898 	while (test_bit(bit, word)) {
1899 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
1900 		if (err) {
1901 			mtx_leave(&wait_bit_mtx);
1902 			return 1;
1903 		}
1904 	}
1905 	mtx_leave(&wait_bit_mtx);
1906 	return 0;
1907 }
1908 
1909 void
1910 wake_up_bit(void *word, int bit)
1911 {
1912 	mtx_enter(&wait_bit_mtx);
1913 	wakeup(word);
1914 	mtx_leave(&wait_bit_mtx);
1915 }
1916 
1917 void
1918 clear_and_wake_up_bit(int bit, void *word)
1919 {
1920 	clear_bit(bit, word);
1921 	wake_up_bit(word, bit);
1922 }
1923 
1924 wait_queue_head_t *
1925 bit_waitqueue(void *word, int bit)
1926 {
1927 	/* XXX hash table of wait queues? */
1928 	return &bit_waitq;
1929 }
1930 
1931 struct workqueue_struct *system_wq;
1932 struct workqueue_struct *system_highpri_wq;
1933 struct workqueue_struct *system_unbound_wq;
1934 struct workqueue_struct *system_long_wq;
1935 struct taskq *taskletq;
1936 
1937 void
1938 drm_linux_init(void)
1939 {
1940 	if (system_wq == NULL) {
1941 		system_wq = (struct workqueue_struct *)
1942 		    taskq_create("drmwq", 4, IPL_HIGH, 0);
1943 	}
1944 	if (system_highpri_wq == NULL) {
1945 		system_highpri_wq = (struct workqueue_struct *)
1946 		    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
1947 	}
1948 	if (system_unbound_wq == NULL) {
1949 		system_unbound_wq = (struct workqueue_struct *)
1950 		    taskq_create("drmubwq", 4, IPL_HIGH, 0);
1951 	}
1952 	if (system_long_wq == NULL) {
1953 		system_long_wq = (struct workqueue_struct *)
1954 		    taskq_create("drmlwq", 4, IPL_HIGH, 0);
1955 	}
1956 
1957 	if (taskletq == NULL)
1958 		taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
1959 
1960 	init_waitqueue_head(&bit_waitq);
1961 	init_waitqueue_head(&var_waitq);
1962 }
1963 
1964 #define PCIE_ECAP_RESIZE_BAR	0x15
1965 #define RBCAP0			0x04
1966 #define RBCTRL0			0x08
1967 #define RBCTRL_BARINDEX_MASK	0x07
1968 #define RBCTRL_BARSIZE_MASK	0x1f00
1969 #define RBCTRL_BARSIZE_SHIFT	8
1970 
1971 /* size in MB is 1 << nsize, e.g. nsize 8 requests a 256MB BAR */
1972 int
1973 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
1974 {
1975 	pcireg_t	reg;
1976 	uint32_t	offset, capid;
1977 
1978 	KASSERT(bar == 0);
1979 
1980 	offset = PCI_PCIE_ECAP;
1981 
1982 	/* search PCI Express Extended Capabilities */
1983 	do {
1984 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
1985 		capid = PCI_PCIE_ECAP_ID(reg);
1986 		if (capid == PCIE_ECAP_RESIZE_BAR)
1987 			break;
1988 		offset = PCI_PCIE_ECAP_NEXT(reg);
1989 	} while (capid != 0);
1990 
1991 	if (capid == 0) {
1992 		printf("%s: could not find resize bar cap!\n", __func__);
1993 		return -ENOTSUP;
1994 	}
1995 
1996 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
1997 
1998 	if ((reg & (1 << (nsize + 4))) == 0) {
1999 		printf("%s size not supported\n", __func__);
2000 		return -ENOTSUP;
2001 	}
2002 
2003 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2004 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2005 		printf("%s BAR index not 0\n", __func__);
2006 		return -EINVAL;
2007 	}
2008 
2009 	reg &= ~RBCTRL_BARSIZE_MASK;
2010 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2011 
2012 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2013 
2014 	return 0;
2015 }
2016 
2017 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2018 
2019 int
2020 register_shrinker(struct shrinker *shrinker)
2021 {
2022 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2023 	return 0;
2024 }
2025 
2026 void
2027 unregister_shrinker(struct shrinker *shrinker)
2028 {
2029 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2030 }
2031 
2032 void
2033 drmbackoff(long npages)
2034 {
2035 	struct shrink_control sc;
2036 	struct shrinker *shrinker;
2037 	u_long ret;
2038 
2039 	shrinker = TAILQ_FIRST(&shrinkers);
2040 	while (shrinker && npages > 0) {
2041 		sc.nr_to_scan = npages;
2042 		ret = shrinker->scan_objects(shrinker, &sc);
2043 		npages -= ret;
2044 		shrinker = TAILQ_NEXT(shrinker, next);
2045 	}
2046 }
2047 
2048 void *
2049 bitmap_zalloc(u_int n, gfp_t flags)
2050 {
2051 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2052 }
2053 
2054 void
2055 bitmap_free(void *p)
2056 {
2057 	kfree(p);
2058 }
2059 
2060 int
2061 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2062 {
2063 	if (atomic_add_unless(v, -1, 1))
2064 		return 0;
2065 
2066 	rw_enter_write(lock);
2067 	if (atomic_dec_return(v) == 0)
2068 		return 1;
2069 	rw_exit_write(lock);
2070 	return 0;
2071 }
2072 
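/*
 * Linux log levels arrive as a two-byte prefix: an SOH character
 * ('\001') followed by the level digit, e.g. KERN_INFO is "\0016".
 * Without DRMDEBUG, messages at KERN_INFO or a numerically higher
 * (less important) level are dropped; errors and warnings still print.
 */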
2073 int
2074 printk(const char *fmt, ...)
2075 {
2076 	int ret, level;
2077 	va_list ap;
2078 
2079 	if (fmt != NULL && *fmt == '\001') {
2080 		level = fmt[1];
2081 #ifndef DRMDEBUG
2082 		if (level >= KERN_INFO[1] && level <= '9')
2083 			return 0;
2084 #endif
2085 		fmt += 2;
2086 	}
2087 
2088 	va_start(ap, fmt);
2089 	ret = vprintf(fmt, ap);
2090 	va_end(ap);
2091 
2092 	return ret;
2093 }
2094 
2095 #define START(node) ((node)->start)
2096 #define LAST(node) ((node)->last)
2097 
2098 struct interval_tree_node *
2099 interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
2100     unsigned long last)
2101 {
2102 	struct interval_tree_node *node;
2103 	struct rb_node *rb;
2104 
2105 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
2106 		node = rb_entry(rb, typeof(*node), rb);
2107 		if (LAST(node) >= start && START(node) <= last)
2108 			return node;
2109 	}
2110 	return NULL;
2111 }
2112 
2113 void
2114 interval_tree_remove(struct interval_tree_node *node,
2115     struct rb_root_cached *root)
2116 {
2117 	rb_erase_cached(&node->rb, root);
2118 }
2119 
2120 void
2121 interval_tree_insert(struct interval_tree_node *node,
2122     struct rb_root_cached *root)
2123 {
2124 	struct rb_node **iter = &root->rb_root.rb_node;
2125 	struct rb_node *parent = NULL;
2126 	struct interval_tree_node *iter_node;
2127 
2128 	while (*iter) {
2129 		parent = *iter;
2130 		iter_node = rb_entry(*iter, struct interval_tree_node, rb);
2131 
2132 		if (node->start < iter_node->start)
2133 			iter = &(*iter)->rb_left;
2134 		else
2135 			iter = &(*iter)->rb_right;
2136 	}
2137 
2138 	rb_link_node(&node->rb, parent, iter);
2139 	rb_insert_color_cached(&node->rb, root, false);
2140 }
2141