xref: /openbsd-src/sys/dev/pci/drm/drm_linux.c (revision 15572fcf8c6bfa0588565cd23f393a5a1499ea57)
1 /*	$OpenBSD: drm_linux.c,v 1.70 2020/11/14 23:08:47 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30 
31 #include <dev/pci/ppbreg.h>
32 
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/interrupt.h>
39 #include <linux/err.h>
40 #include <linux/idr.h>
41 #include <linux/scatterlist.h>
42 #include <linux/i2c.h>
43 #include <linux/pci.h>
44 #include <linux/notifier.h>
45 #include <linux/backlight.h>
46 #include <linux/shrinker.h>
47 #include <linux/fb.h>
48 #include <linux/xarray.h>
49 #include <linux/interval_tree.h>
50 
51 #include <drm/drm_device.h>
52 #include <drm/drm_print.h>
53 
54 #if defined(__amd64__) || defined(__i386__)
55 #include "bios.h"
56 #endif
57 
58 void
59 tasklet_run(void *arg)
60 {
61 	struct tasklet_struct *ts = arg;
62 
63 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
64 	if (tasklet_trylock(ts)) {
65 		if (!atomic_read(&ts->count))
66 			ts->func(ts->data);
67 		tasklet_unlock(ts);
68 	}
69 }
70 
71 /* 32 bit powerpc lacks 64 bit atomics */
72 #if defined(__powerpc__) && !defined(__powerpc64__)
73 struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
74 #endif
75 
76 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
77 volatile struct proc *sch_proc;
78 volatile void *sch_ident;
79 int sch_priority;
80 
81 void
82 set_current_state(int state)
83 {
84 	if (sch_ident != curproc)
85 		mtx_enter(&sch_mtx);
86 	MUTEX_ASSERT_LOCKED(&sch_mtx);
87 	sch_ident = sch_proc = curproc;
88 	sch_priority = state;
89 }
90 
91 void
92 __set_current_state(int state)
93 {
94 	KASSERT(state == TASK_RUNNING);
95 	if (sch_ident == curproc) {
96 		MUTEX_ASSERT_LOCKED(&sch_mtx);
97 		sch_ident = NULL;
98 		mtx_leave(&sch_mtx);
99 	}
100 }
101 
102 void
103 schedule(void)
104 {
105 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
106 }
107 
108 long
109 schedule_timeout(long timeout)
110 {
111 	struct sleep_state sls;
112 	long deadline;
113 	int wait, spl;
114 
115 	MUTEX_ASSERT_LOCKED(&sch_mtx);
116 	KASSERT(!cold);
117 
118 	sleep_setup(&sls, sch_ident, sch_priority, "schto");
119 	if (timeout != MAX_SCHEDULE_TIMEOUT)
120 		sleep_setup_timeout(&sls, timeout);
121 
122 	wait = (sch_proc == curproc && timeout > 0);
123 
124 	spl = MUTEX_OLDIPL(&sch_mtx);
125 	MUTEX_OLDIPL(&sch_mtx) = splsched();
126 	mtx_leave(&sch_mtx);
127 
128 	sleep_setup_signal(&sls);
129 
130 	if (timeout != MAX_SCHEDULE_TIMEOUT)
131 		deadline = ticks + timeout;
132 	sleep_finish_all(&sls, wait);
133 	if (timeout != MAX_SCHEDULE_TIMEOUT)
134 		timeout = deadline - ticks;
135 
136 	mtx_enter(&sch_mtx);
137 	MUTEX_OLDIPL(&sch_mtx) = spl;
138 	sch_ident = curproc;
139 
140 	return timeout > 0 ? timeout : 0;
141 }
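
/*
 * Example (sketch only, not compiled): the Linux-style sleep pattern the
 * set_current_state()/schedule_timeout() shims above are meant to emulate.
 * example_cond() and sc are hypothetical; TASK_UNINTERRUPTIBLE is assumed
 * to come from the scheduler shim headers.
 *
 *	while (!example_cond(sc)) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (example_cond(sc))
 *			break;
 *		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * Note that set_current_state() takes sch_mtx here and
 * __set_current_state(TASK_RUNNING) releases it again.
 */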
142 
143 long
144 schedule_timeout_uninterruptible(long timeout)
145 {
146 	tsleep(curproc, PWAIT, "schtou", timeout);
147 	return 0;
148 }
149 
150 int
151 wake_up_process(struct proc *p)
152 {
153 	atomic_cas_ptr(&sch_proc, p, NULL);
154 	return wakeup_proc(p, NULL);
155 }
156 
157 void
158 flush_workqueue(struct workqueue_struct *wq)
159 {
160 	if (cold)
161 		return;
162 
163 	taskq_barrier((struct taskq *)wq);
164 }
165 
166 bool
167 flush_work(struct work_struct *work)
168 {
169 	if (cold)
170 		return false;
171 
172 	taskq_barrier(work->tq);
173 	return false;
174 }
175 
176 bool
177 flush_delayed_work(struct delayed_work *dwork)
178 {
179 	bool ret = false;
180 
181 	if (cold)
182 		return false;
183 
184 	while (timeout_pending(&dwork->to)) {
185 		tsleep(dwork, PWAIT, "fldwto", 1);
186 		ret = true;
187 	}
188 
189 	taskq_barrier(dwork->tq ? dwork->tq : (struct taskq *)system_wq);
190 	return ret;
191 }
192 
193 struct kthread {
194 	int (*func)(void *);
195 	void *data;
196 	struct proc *proc;
197 	volatile u_int flags;
198 #define KTHREAD_SHOULDSTOP	0x0000001
199 #define KTHREAD_STOPPED		0x0000002
200 #define KTHREAD_SHOULDPARK	0x0000004
201 #define KTHREAD_PARKED		0x0000008
202 	LIST_ENTRY(kthread) next;
203 };
204 
205 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
206 
207 void
208 kthread_func(void *arg)
209 {
210 	struct kthread *thread = arg;
211 	int ret;
212 
213 	ret = thread->func(thread->data);
214 	thread->flags |= KTHREAD_STOPPED;
215 	wakeup(thread);
216 	kthread_exit(ret);
217 }
218 
219 struct proc *
220 kthread_run(int (*func)(void *), void *data, const char *name)
221 {
222 	struct kthread *thread;
223 
224 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
225 	thread->func = func;
226 	thread->data = data;
227 	thread->flags = 0;
228 
229 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
230 		free(thread, M_DRM, sizeof(*thread));
231 		return ERR_PTR(-ENOMEM);
232 	}
233 
234 	LIST_INSERT_HEAD(&kthread_list, thread, next);
235 	return thread->proc;
236 }
237 
238 struct kthread *
239 kthread_lookup(struct proc *p)
240 {
241 	struct kthread *thread;
242 
243 	LIST_FOREACH(thread, &kthread_list, next) {
244 		if (thread->proc == p)
245 			break;
246 	}
247 	KASSERT(thread);
248 
249 	return thread;
250 }
251 
252 int
253 kthread_should_park(void)
254 {
255 	struct kthread *thread = kthread_lookup(curproc);
256 	return (thread->flags & KTHREAD_SHOULDPARK);
257 }
258 
259 void
260 kthread_parkme(void)
261 {
262 	struct kthread *thread = kthread_lookup(curproc);
263 
264 	while (thread->flags & KTHREAD_SHOULDPARK) {
265 		thread->flags |= KTHREAD_PARKED;
266 		wakeup(thread);
267 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
268 		thread->flags &= ~KTHREAD_PARKED;
269 	}
270 }
271 
272 void
273 kthread_park(struct proc *p)
274 {
275 	struct kthread *thread = kthread_lookup(p);
276 
277 	while ((thread->flags & KTHREAD_PARKED) == 0) {
278 		thread->flags |= KTHREAD_SHOULDPARK;
279 		wake_up_process(thread->proc);
280 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
281 	}
282 }
283 
284 void
285 kthread_unpark(struct proc *p)
286 {
287 	struct kthread *thread = kthread_lookup(p);
288 
289 	thread->flags &= ~KTHREAD_SHOULDPARK;
290 	wakeup(thread);
291 }
292 
293 int
294 kthread_should_stop(void)
295 {
296 	struct kthread *thread = kthread_lookup(curproc);
297 	return (thread->flags & KTHREAD_SHOULDSTOP);
298 }
299 
300 void
301 kthread_stop(struct proc *p)
302 {
303 	struct kthread *thread = kthread_lookup(p);
304 
305 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
306 		thread->flags |= KTHREAD_SHOULDSTOP;
307 		kthread_unpark(p);
308 		wake_up_process(thread->proc);
309 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
310 	}
311 	LIST_REMOVE(thread, next);
312 	free(thread, M_DRM, sizeof(*thread));
313 }
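
/*
 * Example (sketch only, not compiled): a thread body using the kthread
 * helpers above.  example_thread(), example_do_work() and sc are
 * hypothetical.
 *
 *	int
 *	example_thread(void *arg)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			example_do_work(sc);
 *		}
 *		return 0;
 *	}
 *
 *	p = kthread_run(example_thread, sc, "exthread");
 *	...
 *	kthread_park(p);	(pause the thread)
 *	kthread_unpark(p);	(resume it)
 *	kthread_stop(p);	(terminate it and free the bookkeeping)
 */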
314 
315 #if NBIOS > 0
316 extern char smbios_board_vendor[];
317 extern char smbios_board_prod[];
318 extern char smbios_board_serial[];
319 #endif
320 
321 bool
322 dmi_match(int slot, const char *str)
323 {
324 	switch (slot) {
325 	case DMI_SYS_VENDOR:
326 		if (hw_vendor != NULL &&
327 		    !strcmp(hw_vendor, str))
328 			return true;
329 		break;
330 	case DMI_PRODUCT_NAME:
331 		if (hw_prod != NULL &&
332 		    !strcmp(hw_prod, str))
333 			return true;
334 		break;
335 	case DMI_PRODUCT_VERSION:
336 		if (hw_ver != NULL &&
337 		    !strcmp(hw_ver, str))
338 			return true;
339 		break;
340 #if NBIOS > 0
341 	case DMI_BOARD_VENDOR:
342 		if (strcmp(smbios_board_vendor, str) == 0)
343 			return true;
344 		break;
345 	case DMI_BOARD_NAME:
346 		if (strcmp(smbios_board_prod, str) == 0)
347 			return true;
348 		break;
349 	case DMI_BOARD_SERIAL:
350 		if (strcmp(smbios_board_serial, str) == 0)
351 			return true;
352 		break;
353 #else
354 	case DMI_BOARD_VENDOR:
355 		if (hw_vendor != NULL &&
356 		    !strcmp(hw_vendor, str))
357 			return true;
358 		break;
359 	case DMI_BOARD_NAME:
360 		if (hw_prod != NULL &&
361 		    !strcmp(hw_prod, str))
362 			return true;
363 		break;
364 #endif
365 	case DMI_NONE:
366 	default:
367 		return false;
368 	}
369 
370 	return false;
371 }
372 
373 static bool
374 dmi_found(const struct dmi_system_id *dsi)
375 {
376 	int i, slot;
377 
378 	for (i = 0; i < nitems(dsi->matches); i++) {
379 		slot = dsi->matches[i].slot;
380 		if (slot == DMI_NONE)
381 			break;
382 		if (!dmi_match(slot, dsi->matches[i].substr))
383 			return false;
384 	}
385 
386 	return true;
387 }
388 
389 const struct dmi_system_id *
390 dmi_first_match(const struct dmi_system_id *sysid)
391 {
392 	const struct dmi_system_id *dsi;
393 
394 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
395 		if (dmi_found(dsi))
396 			return dsi;
397 	}
398 
399 	return NULL;
400 }
401 
402 #if NBIOS > 0
403 extern char smbios_bios_date[];
404 #endif
405 
406 const char *
407 dmi_get_system_info(int slot)
408 {
409 	WARN_ON(slot != DMI_BIOS_DATE);
410 #if NBIOS > 0
411 	if (slot == DMI_BIOS_DATE)
412 		return smbios_bios_date;
413 #endif
414 	return NULL;
415 }
416 
417 int
418 dmi_check_system(const struct dmi_system_id *sysid)
419 {
420 	const struct dmi_system_id *dsi;
421 	int num = 0;
422 
423 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
424 		if (dmi_found(dsi)) {
425 			num++;
426 			if (dsi->callback && dsi->callback(dsi))
427 				break;
428 		}
429 	}
430 	return (num);
431 }
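
/*
 * Example (sketch only, not compiled): a quirk table for dmi_check_system().
 * The vendor/product strings and example_callback() are hypothetical; the
 * .slot/.substr fields are the ones dmi_found() inspects, and an all-zero
 * entry terminates the table.
 *
 *	static const struct dmi_system_id example_quirks[] = {
 *		{
 *			.callback = example_callback,
 *			.matches = {
 *				{ .slot = DMI_SYS_VENDOR, .substr = "Vendor" },
 *				{ .slot = DMI_PRODUCT_NAME, .substr = "Model" },
 *			},
 *		},
 *		{ }
 *	};
 *
 *	if (dmi_check_system(example_quirks))
 *		(at least one entry matched; its callback has run)
 */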
432 
433 struct vm_page *
434 alloc_pages(unsigned int gfp_mask, unsigned int order)
435 {
436 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
437 	struct uvm_constraint_range *constraint = &no_constraint;
438 	struct pglist mlist;
439 
440 	if (gfp_mask & M_CANFAIL)
441 		flags |= UVM_PLA_FAILOK;
442 	if (gfp_mask & M_ZERO)
443 		flags |= UVM_PLA_ZERO;
444 	if (gfp_mask & __GFP_DMA32)
445 		constraint = &dma_constraint;
446 
447 	TAILQ_INIT(&mlist);
448 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
449 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
450 		return NULL;
451 	return TAILQ_FIRST(&mlist);
452 }
453 
454 void
455 __free_pages(struct vm_page *page, unsigned int order)
456 {
457 	struct pglist mlist;
458 	int i;
459 
460 	TAILQ_INIT(&mlist);
461 	for (i = 0; i < (1 << order); i++)
462 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
463 	uvm_pglistfree(&mlist);
464 }
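
/*
 * Example (sketch only, not compiled): allocating and freeing a physically
 * contiguous group of pages.  In this shim the gfp mask is interpreted as
 * the kernel malloc(9) flags tested above, so M_WAITOK/M_ZERO behave as
 * expected.
 *
 *	struct vm_page *pg;
 *
 *	pg = alloc_pages(M_WAITOK | M_ZERO, 2);		(1 << 2 = 4 pages)
 *	if (pg != NULL) {
 *		...
 *		__free_pages(pg, 2);
 *	}
 */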
465 
466 void
467 __pagevec_release(struct pagevec *pvec)
468 {
469 	struct pglist mlist;
470 	int i;
471 
472 	TAILQ_INIT(&mlist);
473 	for (i = 0; i < pvec->nr; i++)
474 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
475 	uvm_pglistfree(&mlist);
476 	pagevec_reinit(pvec);
477 }
478 
479 void *
480 kmap(struct vm_page *pg)
481 {
482 	vaddr_t va;
483 
484 #if defined (__HAVE_PMAP_DIRECT)
485 	va = pmap_map_direct(pg);
486 #else
487 	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
488 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
489 	pmap_update(pmap_kernel());
490 #endif
491 	return (void *)va;
492 }
493 
494 void
495 kunmap_va(void *addr)
496 {
497 	vaddr_t va = (vaddr_t)addr;
498 
499 #if defined (__HAVE_PMAP_DIRECT)
500 	pmap_unmap_direct(va);
501 #else
502 	pmap_kremove(va, PAGE_SIZE);
503 	pmap_update(pmap_kernel());
504 	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
505 #endif
506 }
507 
508 void *
509 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
510      pgprot_t prot)
511 {
512 	vaddr_t va;
513 	paddr_t pa;
514 	int i;
515 
516 	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
517 	if (va == 0)
518 		return NULL;
519 	for (i = 0; i < npages; i++) {
520 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
521 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
522 		    PROT_READ | PROT_WRITE,
523 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
524 		pmap_update(pmap_kernel());
525 	}
526 
527 	return (void *)va;
528 }
529 
530 void
531 vunmap(void *addr, size_t size)
532 {
533 	vaddr_t va = (vaddr_t)addr;
534 
535 	pmap_remove(pmap_kernel(), va, va + size);
536 	pmap_update(pmap_kernel());
537 	uvm_km_free(kernel_map, va, size);
538 }
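
/*
 * Example (sketch only, not compiled): temporary single-page and multi-page
 * mappings.  Note that vmap() ORs "prot" into the physical address, so it
 * should be 0 or a pmap flag such as PMAP_NOCACHE, not a protection value.
 *
 *	void *va = kmap(pg);
 *	memset(va, 0, PAGE_SIZE);
 *	kunmap_va(va);
 *
 *	va = vmap(pages, npages, 0, 0);
 *	...
 *	vunmap(va, npages * PAGE_SIZE);
 */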
539 
540 void
541 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
542     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
543 {
544 	const uint8_t *cbuf = buf;
545 	int i;
546 
547 	for (i = 0; i < len; i++) {
548 		if ((i % rowsize) == 0)
549 			printf("%s", prefix_str);
550 		printf("%02x", cbuf[i]);
551 		if ((i % rowsize) == (rowsize - 1))
552 			printf("\n");
553 		else
554 			printf(" ");
555 	}
556 }
557 
558 void *
559 memchr_inv(const void *s, int c, size_t n)
560 {
561 	if (n != 0) {
562 		const unsigned char *p = s;
563 
564 		do {
565 			if (*p++ != (unsigned char)c)
566 				return ((void *)(p - 1));
567 		} while (--n != 0);
568 	}
569 	return (NULL);
570 }
571 
572 int
573 panic_cmp(struct rb_node *a, struct rb_node *b)
574 {
575 	panic(__func__);
576 }
577 
578 #undef RB_ROOT
579 #define RB_ROOT(head)	(head)->rbh_root
580 
581 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
582 
583 /*
584  * This is a fairly minimal implementation of the Linux "idr" API.  It
585  * probably isn't very efficient, and definitely isn't RCU safe.  The
586  * pre-load buffer is global instead of per-cpu; we rely on the kernel
587  * lock to make this work.  Randomizing the IDs to make them harder
588  * to guess is planned, but is currently disabled (see idr_alloc()).
589  */
590 
591 int idr_cmp(struct idr_entry *, struct idr_entry *);
592 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
593 
594 struct pool idr_pool;
595 struct idr_entry *idr_entry_cache;
596 
597 void
598 idr_init(struct idr *idr)
599 {
600 	SPLAY_INIT(&idr->tree);
601 }
602 
603 void
604 idr_destroy(struct idr *idr)
605 {
606 	struct idr_entry *id;
607 
608 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
609 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
610 		pool_put(&idr_pool, id);
611 	}
612 }
613 
614 void
615 idr_preload(unsigned int gfp_mask)
616 {
617 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
618 
619 	KERNEL_ASSERT_LOCKED();
620 
621 	if (idr_entry_cache == NULL)
622 		idr_entry_cache = pool_get(&idr_pool, flags);
623 }
624 
625 int
626 idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
627 {
628 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
629 	struct idr_entry *id;
630 	int begin;
631 
632 	KERNEL_ASSERT_LOCKED();
633 
634 	if (idr_entry_cache) {
635 		id = idr_entry_cache;
636 		idr_entry_cache = NULL;
637 	} else {
638 		id = pool_get(&idr_pool, flags);
639 		if (id == NULL)
640 			return -ENOMEM;
641 	}
642 
643 	if (end <= 0)
644 		end = INT_MAX;
645 
646 #ifdef notyet
647 	id->id = begin = start + arc4random_uniform(end - start);
648 #else
649 	id->id = begin = start;
650 #endif
651 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
652 		if (id->id == end)
653 			id->id = start;
654 		else
655 			id->id++;
656 		if (id->id == begin) {
657 			pool_put(&idr_pool, id);
658 			return -ENOSPC;
659 		}
660 	}
661 	id->ptr = ptr;
662 	return id->id;
663 }
664 
665 void *
666 idr_replace(struct idr *idr, void *ptr, unsigned long id)
667 {
668 	struct idr_entry find, *res;
669 	void *old;
670 
671 	find.id = id;
672 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
673 	if (res == NULL)
674 		return ERR_PTR(-ENOENT);
675 	old = res->ptr;
676 	res->ptr = ptr;
677 	return old;
678 }
679 
680 void *
681 idr_remove(struct idr *idr, unsigned long id)
682 {
683 	struct idr_entry find, *res;
684 	void *ptr = NULL;
685 
686 	find.id = id;
687 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
688 	if (res) {
689 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
690 		ptr = res->ptr;
691 		pool_put(&idr_pool, res);
692 	}
693 	return ptr;
694 }
695 
696 void *
697 idr_find(struct idr *idr, unsigned long id)
698 {
699 	struct idr_entry find, *res;
700 
701 	find.id = id;
702 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
703 	if (res == NULL)
704 		return NULL;
705 	return res->ptr;
706 }
707 
708 void *
709 idr_get_next(struct idr *idr, int *id)
710 {
711 	struct idr_entry *res;
712 
713 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
714 		if (res->id >= *id) {
715 			*id = res->id;
716 			return res->ptr;
717 		}
718 	}
719 
720 	return NULL;
721 }
722 
723 int
724 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
725 {
726 	struct idr_entry *id;
727 	int ret;
728 
729 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
730 		ret = func(id->id, id->ptr, data);
731 		if (ret)
732 			return ret;
733 	}
734 
735 	return 0;
736 }
737 
738 int
739 idr_cmp(struct idr_entry *a, struct idr_entry *b)
740 {
741 	return (a->id < b->id ? -1 : a->id > b->id);
742 }
743 
744 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
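
/*
 * Example (sketch only, not compiled): typical use of the idr shim.  "obj"
 * is hypothetical and GFP_KERNEL is assumed to be the usual sleeping
 * allocation flag; an end of 0 means "no upper limit" (INT_MAX).
 *
 *	struct idr objects;
 *	int handle;
 *
 *	idr_init(&objects);
 *	idr_preload(GFP_KERNEL);		(optional pre-allocation)
 *	handle = idr_alloc(&objects, obj, 1, 0, GFP_KERNEL);
 *	if (handle >= 0) {
 *		KASSERT(idr_find(&objects, handle) == obj);
 *		idr_remove(&objects, handle);
 *	}
 *	idr_destroy(&objects);
 */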
745 
746 void
747 ida_init(struct ida *ida)
748 {
749 	idr_init(&ida->idr);
750 }
751 
752 void
753 ida_destroy(struct ida *ida)
754 {
755 	idr_destroy(&ida->idr);
756 }
757 
758 int
759 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
760     gfp_t gfp_mask)
761 {
762 	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
763 }
764 
765 void
766 ida_simple_remove(struct ida *ida, unsigned int id)
767 {
768 	idr_remove(&ida->idr, id);
769 }
770 
771 int
772 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
773 {
774 	return (a->id < b->id ? -1 : a->id > b->id);
775 }
776 
777 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
778 struct pool xa_pool;
779 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
780 
781 void
782 xa_init_flags(struct xarray *xa, gfp_t flags)
783 {
784 	static int initialized;
785 
786 	if (!initialized) {
787 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_TTY, 0,
788 		    "xapl", NULL);
789 		initialized = 1;
790 	}
791 	SPLAY_INIT(&xa->xa_tree);
	xa->xa_flags = flags;
792 	SPLAY_INIT(&xa->xa_tree);
793 
794 void
795 xa_destroy(struct xarray *xa)
796 {
797 	struct xarray_entry *id;
798 
799 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
800 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
801 		pool_put(&xa_pool, id);
802 	}
803 }
804 
805 int
806 xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
807 {
808 	struct xarray_entry *xid;
809 	int flags = (gfp & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
810 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
811 	int begin;
812 
813 	xid = pool_get(&xa_pool, flags);
814 	if (xid == NULL)
815 		return -ENOMEM;
816 
817 	if (limit <= 0)
818 		limit = INT_MAX;
819 
820 	xid->id = begin = start;
821 
822 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
823 		if (xid->id == limit)
824 			xid->id = start;
825 		else
826 			xid->id++;
827 		if (xid->id == begin) {
828 			pool_put(&xa_pool, xid);
829 			return -EBUSY;
830 		}
831 	}
832 	xid->ptr = entry;
833 	*id = xid->id;
834 	return 0;
835 }
836 
837 void *
838 xa_erase(struct xarray *xa, unsigned long index)
839 {
840 	struct xarray_entry find, *res;
841 	void *ptr = NULL;
842 
843 	find.id = index;
844 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
845 	if (res) {
846 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
847 		ptr = res->ptr;
848 		pool_put(&xa_pool, res);
849 	}
850 	return ptr;
851 }
852 
853 void *
854 xa_load(struct xarray *xa, unsigned long index)
855 {
856 	struct xarray_entry find, *res;
857 
858 	find.id = index;
859 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
860 	if (res == NULL)
861 		return NULL;
862 	return res->ptr;
863 }
864 
865 void *
866 xa_get_next(struct xarray *xa, unsigned long *index)
867 {
868 	struct xarray_entry *res;
869 
870 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
871 		if (res->id >= *index) {
872 			*index = res->id;
873 			return res->ptr;
874 		}
875 	}
876 
877 	return NULL;
878 }
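
/*
 * Example (sketch only, not compiled): typical use of the xarray shim in
 * allocating mode.  "obj" is hypothetical and GFP_KERNEL is assumed to be
 * the usual sleeping allocation flag; XA_FLAGS_ALLOC1 makes IDs start at 1.
 *
 *	struct xarray xa;
 *	u32 id;
 *
 *	xa_init_flags(&xa, XA_FLAGS_ALLOC1);
 *	if (xa_alloc(&xa, &id, obj, 0, GFP_KERNEL) == 0) {
 *		KASSERT(xa_load(&xa, id) == obj);
 *		xa_erase(&xa, id);
 *	}
 *	xa_destroy(&xa);
 */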
879 
880 int
881 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
882 {
883 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
884 	    M_DRM, gfp_mask);
885 	if (table->sgl == NULL)
886 		return -ENOMEM;
887 	table->nents = table->orig_nents = nents;
888 	return 0;
889 }
890 
891 void
892 sg_free_table(struct sg_table *table)
893 {
894 	free(table->sgl, M_DRM,
895 	    table->orig_nents * sizeof(struct scatterlist));
896 }
897 
898 size_t
899 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
900     const void *buf, size_t buflen)
901 {
902 	panic("%s", __func__);
903 }
904 
905 int
906 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
907 {
908 	void *cmd = NULL;
909 	int cmdlen = 0;
910 	int err, ret = 0;
911 	int op;
912 
913 	iic_acquire_bus(&adap->ic, 0);
914 
915 	while (num > 2) {
916 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
917 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
918 		    msgs->buf, msgs->len, 0);
919 		if (err) {
920 			ret = -err;
921 			goto fail;
922 		}
923 		msgs++;
924 		num--;
925 		ret++;
926 	}
927 
928 	if (num > 1) {
929 		cmd = msgs->buf;
930 		cmdlen = msgs->len;
931 		msgs++;
932 		num--;
933 		ret++;
934 	}
935 
936 	op = (msgs->flags & I2C_M_RD) ?
937 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
938 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
939 	    msgs->buf, msgs->len, 0);
940 	if (err) {
941 		ret = -err;
942 		goto fail;
943 	}
944 	msgs++;
945 	ret++;
946 
947 fail:
948 	iic_release_bus(&adap->ic, 0);
949 
950 	return ret;
951 }
952 
953 int
954 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
955 {
956 	int ret;
957 
958 	if (adap->lock_ops)
959 		adap->lock_ops->lock_bus(adap, 0);
960 
961 	if (adap->algo)
962 		ret = adap->algo->master_xfer(adap, msgs, num);
963 	else
964 		ret = i2c_master_xfer(adap, msgs, num);
965 
966 	if (adap->lock_ops)
967 		adap->lock_ops->unlock_bus(adap, 0);
968 
969 	return ret;
970 }
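
/*
 * Example (sketch only, not compiled): a register read as a write/read pair,
 * the common case handled by the cmd/cmdlen logic in i2c_master_xfer().
 * The 0x50 slave address is hypothetical.
 *
 *	uint8_t reg = 0x00, val;
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
 *	};
 *
 *	if (i2c_transfer(adap, msgs, nitems(msgs)) == nitems(msgs))
 *		(val now holds the register contents)
 */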
971 
972 int
973 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
974 {
975 	struct i2c_algo_bit_data *algo = adap->algo_data;
976 	struct i2c_adapter bb;
977 
978 	memset(&bb, 0, sizeof(bb));
979 	bb.ic = algo->ic;
980 	bb.retries = adap->retries;
981 	return i2c_master_xfer(&bb, msgs, num);
982 }
983 
984 uint32_t
985 i2c_bb_functionality(struct i2c_adapter *adap)
986 {
987 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
988 }
989 
990 struct i2c_algorithm i2c_bit_algo = {
991 	.master_xfer = i2c_bb_master_xfer,
992 	.functionality = i2c_bb_functionality
993 };
994 
995 int
996 i2c_bit_add_bus(struct i2c_adapter *adap)
997 {
998 	adap->algo = &i2c_bit_algo;
999 	adap->retries = 3;
1000 
1001 	return 0;
1002 }
1003 
1004 #if defined(__amd64__) || defined(__i386__)
1005 
1006 /*
1007  * This is a minimal implementation of the Linux vga_get/vga_put
1008  * interface.  In all likelihood, it will only work for inteldrm(4) as
1009  * it assumes that if there is another active VGA device in the
1010  * system, it is sitting behind a PCI bridge.
1011  */
1012 
1013 extern int pci_enumerate_bus(struct pci_softc *,
1014     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1015 
1016 pcitag_t vga_bridge_tag;
1017 int vga_bridge_disabled;
1018 
1019 int
1020 vga_disable_bridge(struct pci_attach_args *pa)
1021 {
1022 	pcireg_t bhlc, bc;
1023 
1024 	if (pa->pa_domain != 0)
1025 		return 0;
1026 
1027 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1028 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1029 		return 0;
1030 
1031 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1032 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1033 		return 0;
1034 	bc &= ~PPB_BC_VGA_ENABLE;
1035 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1036 
1037 	vga_bridge_tag = pa->pa_tag;
1038 	vga_bridge_disabled = 1;
1039 
1040 	return 1;
1041 }
1042 
1043 void
1044 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1045 {
1046 	KASSERT(pdev->pci->sc_bridgetag == NULL);
1047 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1048 }
1049 
1050 void
1051 vga_put(struct pci_dev *pdev, int rsrc)
1052 {
1053 	pcireg_t bc;
1054 
1055 	if (!vga_bridge_disabled)
1056 		return;
1057 
1058 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1059 	bc |= PPB_BC_VGA_ENABLE;
1060 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1061 
1062 	vga_bridge_disabled = 0;
1063 }
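
/*
 * Example (sketch only, not compiled): bracketing legacy VGA access.  The
 * "rsrc" argument is ignored by this implementation, so 0 is fine.
 *
 *	vga_get_uninterruptible(pdev, 0);
 *	...	(poke legacy VGA resources)
 *	vga_put(pdev, 0);
 */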
1064 
1065 #endif
1066 
1067 /*
1068  * ACPI types and interfaces.
1069  */
1070 
1071 #ifdef __HAVE_ACPI
1072 #include "acpi.h"
1073 #endif
1074 
1075 #if NACPI > 0
1076 
1077 #include <dev/acpi/acpireg.h>
1078 #include <dev/acpi/acpivar.h>
1079 #include <dev/acpi/amltypes.h>
1080 #include <dev/acpi/dsdt.h>
1081 
1082 acpi_status
1083 acpi_get_table(const char *sig, int instance,
1084     struct acpi_table_header **hdr)
1085 {
1086 	struct acpi_softc *sc = acpi_softc;
1087 	struct acpi_q *entry;
1088 
1089 	KASSERT(instance == 1);
1090 
1091 	if (sc == NULL)
1092 		return AE_NOT_FOUND;
1093 
1094 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1095 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1096 			*hdr = entry->q_table;
1097 			return 0;
1098 		}
1099 	}
1100 
1101 	return AE_NOT_FOUND;
1102 }
1103 
1104 acpi_status
1105 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1106 {
1107 	node = aml_searchname(node, name);
1108 	if (node == NULL)
1109 		return AE_NOT_FOUND;
1110 
1111 	*rnode = node;
1112 	return 0;
1113 }
1114 
1115 acpi_status
1116 acpi_get_name(acpi_handle node, int type,  struct acpi_buffer *buffer)
1117 {
1118 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1119 	KASSERT(type == ACPI_FULL_PATHNAME);
1120 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1121 	return 0;
1122 }
1123 
1124 acpi_status
1125 acpi_evaluate_object(acpi_handle node, const char *name,
1126     struct acpi_object_list *params, struct acpi_buffer *result)
1127 {
1128 	struct aml_value args[4], res;
1129 	union acpi_object *obj;
1130 	uint8_t *data;
1131 	int i;
1132 
1133 	KASSERT(params->count <= nitems(args));
1134 
1135 	for (i = 0; i < params->count; i++) {
1136 		args[i].type = params->pointer[i].type;
1137 		switch (args[i].type) {
1138 		case AML_OBJTYPE_INTEGER:
1139 			args[i].v_integer = params->pointer[i].integer.value;
1140 			break;
1141 		case AML_OBJTYPE_BUFFER:
1142 			args[i].length = params->pointer[i].buffer.length;
1143 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1144 			break;
1145 		default:
1146 			printf("%s: arg type 0x%02x", __func__, args[i].type);
1147 			return AE_BAD_PARAMETER;
1148 		}
1149 	}
1150 
1151 	if (name) {
1152 		node = aml_searchname(node, name);
1153 		if (node == NULL)
1154 			return AE_NOT_FOUND;
1155 	}
1156 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1157 		aml_freevalue(&res);
1158 		return AE_ERROR;
1159 	}
1160 
1161 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1162 
1163 	result->length = sizeof(union acpi_object);
1164 	switch (res.type) {
1165 	case AML_OBJTYPE_BUFFER:
1166 		result->length += res.length;
1167 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1168 		obj = (union acpi_object *)result->pointer;
1169 		data = (uint8_t *)(obj + 1);
1170 		obj->type = res.type;
1171 		obj->buffer.length = res.length;
1172 		obj->buffer.pointer = data;
1173 		memcpy(data, res.v_buffer, res.length);
1174 		break;
1175 	default:
1176 		printf("%s: return type 0x%02x", __func__, res.type);
1177 		aml_freevalue(&res);
1178 		return AE_ERROR;
1179 	}
1180 
1181 	aml_freevalue(&res);
1182 	return 0;
1183 }
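
/*
 * Example (sketch only, not compiled): evaluating a method with one integer
 * argument and a buffer result.  "handle" and the method name "_EXM" are
 * hypothetical; the result buffer must request ACPI_ALLOCATE_BUFFER and is
 * freed by the caller with the M_DRM malloc type used above.
 *
 *	union acpi_object arg, *res;
 *	struct acpi_object_list params = { .count = 1, .pointer = &arg };
 *	struct acpi_buffer result = { .length = ACPI_ALLOCATE_BUFFER };
 *
 *	arg.type = AML_OBJTYPE_INTEGER;
 *	arg.integer.value = 1;
 *	if (acpi_evaluate_object(handle, "_EXM", &params, &result) == 0) {
 *		res = result.pointer;
 *		...	(res->buffer.pointer, res->buffer.length)
 *		free(result.pointer, M_DRM, result.length);
 *	}
 */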
1184 
1185 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1186 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1187 
1188 int
1189 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1190 {
1191 	struct acpi_bus_event event;
1192 	struct notifier_block *nb;
1193 
1194 	event.device_class = ACPI_VIDEO_CLASS;
1195 	event.type = notify;
1196 
1197 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1198 		nb->notifier_call(nb, 0, &event);
1199 	return 0;
1200 }
1201 
1202 int
1203 register_acpi_notifier(struct notifier_block *nb)
1204 {
1205 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1206 	return 0;
1207 }
1208 
1209 int
1210 unregister_acpi_notifier(struct notifier_block *nb)
1211 {
1212 	struct notifier_block *tmp;
1213 
1214 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1215 		if (tmp == nb) {
1216 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1217 			    notifier_block, link);
1218 			return 0;
1219 		}
1220 	}
1221 
1222 	return -ENOENT;
1223 }
1224 
1225 const char *
1226 acpi_format_exception(acpi_status status)
1227 {
1228 	switch (status) {
1229 	case AE_NOT_FOUND:
1230 		return "not found";
1231 	case AE_BAD_PARAMETER:
1232 		return "bad parameter";
1233 	default:
1234 		return "unknown";
1235 	}
1236 }
1237 
1238 #endif
1239 
1240 void
1241 backlight_do_update_status(void *arg)
1242 {
1243 	backlight_update_status(arg);
1244 }
1245 
1246 struct backlight_device *
1247 backlight_device_register(const char *name, void *kdev, void *data,
1248     const struct backlight_ops *ops, struct backlight_properties *props)
1249 {
1250 	struct backlight_device *bd;
1251 
1252 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1253 	bd->ops = ops;
1254 	bd->props = *props;
1255 	bd->data = data;
1256 
1257 	task_set(&bd->task, backlight_do_update_status, bd);
1258 
1259 	return bd;
1260 }
1261 
1262 void
1263 backlight_device_unregister(struct backlight_device *bd)
1264 {
1265 	free(bd, M_DRM, sizeof(*bd));
1266 }
1267 
1268 void
1269 backlight_schedule_update_status(struct backlight_device *bd)
1270 {
1271 	task_add(systq, &bd->task);
1272 }
1273 
1274 inline int
1275 backlight_enable(struct backlight_device *bd)
1276 {
1277 	if (bd == NULL)
1278 		return 0;
1279 
1280 	bd->props.power = FB_BLANK_UNBLANK;
1281 
1282 	return bd->ops->update_status(bd);
1283 }
1284 
1285 inline int
1286 backlight_disable(struct backlight_device *bd)
1287 {
1288 	if (bd == NULL)
1289 		return 0;
1290 
1291 	bd->props.power = FB_BLANK_POWERDOWN;
1292 
1293 	return bd->ops->update_status(bd);
1294 }
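
/*
 * Example (sketch only, not compiled): registering a backlight device.  The
 * ops structure, softc and property fields other than "power" are
 * hypothetical and follow the usual Linux layout.
 *
 *	static int
 *	example_update_status(struct backlight_device *bd)
 *	{
 *		...	(program the panel from bd->props)
 *		return 0;
 *	}
 *
 *	static const struct backlight_ops example_ops = {
 *		.update_status = example_update_status,
 *	};
 *
 *	struct backlight_properties props = { .power = FB_BLANK_UNBLANK };
 *
 *	bd = backlight_device_register("example", NULL, sc, &example_ops,
 *	    &props);
 *	backlight_schedule_update_status(bd);
 *	...
 *	backlight_device_unregister(bd);
 */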
1295 
1296 void
1297 drm_sysfs_hotplug_event(struct drm_device *dev)
1298 {
1299 	KNOTE(&dev->note, NOTE_CHANGE);
1300 }
1301 
1302 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1303 
1304 uint64_t
1305 dma_fence_context_alloc(unsigned int num)
1306 {
1307 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1308 }
1309 
1310 struct default_wait_cb {
1311 	struct dma_fence_cb base;
1312 	struct proc *proc;
1313 };
1314 
1315 static void
1316 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1317 {
1318 	struct default_wait_cb *wait =
1319 	    container_of(cb, struct default_wait_cb, base);
1320 	wake_up_process(wait->proc);
1321 }
1322 
1323 long
1324 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1325 {
1326 	long ret = timeout ? timeout : 1;
1327 	int err;
1328 	struct default_wait_cb cb;
1329 	bool was_set;
1330 
1331 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1332 		return ret;
1333 
1334 	mtx_enter(fence->lock);
1335 
1336 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1337 	    &fence->flags);
1338 
1339 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1340 		goto out;
1341 
1342 	if (!was_set && fence->ops->enable_signaling) {
1343 		if (!fence->ops->enable_signaling(fence)) {
1344 			dma_fence_signal_locked(fence);
1345 			goto out;
1346 		}
1347 	}
1348 
1349 	if (timeout == 0) {
1350 		ret = 0;
1351 		goto out;
1352 	}
1353 
1354 	cb.base.func = dma_fence_default_wait_cb;
1355 	cb.proc = curproc;
1356 	list_add(&cb.base.node, &fence->cb_list);
1357 
1358 	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1359 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0, "dmafence",
1360 		    timeout);
1361 		if (err == EINTR || err == ERESTART) {
1362 			ret = -ERESTARTSYS;
1363 			break;
1364 		} else if (err == EWOULDBLOCK) {
1365 			ret = 0;
1366 			break;
1367 		}
1368 	}
1369 
1370 	if (!list_empty(&cb.base.node))
1371 		list_del(&cb.base.node);
1372 out:
1373 	mtx_leave(fence->lock);
1374 
1375 	return ret;
1376 }
1377 
1378 static bool
1379 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1380     uint32_t *idx)
1381 {
1382 	int i;
1383 
1384 	for (i = 0; i < count; ++i) {
1385 		struct dma_fence *fence = fences[i];
1386 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1387 			if (idx)
1388 				*idx = i;
1389 			return true;
1390 		}
1391 	}
1392 	return false;
1393 }
1394 
1395 long
1396 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
1397     bool intr, long timeout, uint32_t *idx)
1398 {
1399 	struct default_wait_cb *cb;
1400 	int i, err;
1401 	int ret = timeout;
1402 
1403 	if (timeout == 0) {
1404 		for (i = 0; i < count; i++) {
1405 			if (dma_fence_is_signaled(fences[i])) {
1406 				if (idx)
1407 					*idx = i;
1408 				return 1;
1409 			}
1410 		}
1411 		return 0;
1412 	}
1413 
1414 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1415 	if (cb == NULL)
1416 		return -ENOMEM;
1417 
1418 	for (i = 0; i < count; i++) {
1419 		struct dma_fence *fence = fences[i];
1420 		cb[i].proc = curproc;
1421 		if (dma_fence_add_callback(fence, &cb[i].base,
1422 		    dma_fence_default_wait_cb)) {
1423 			if (idx)
1424 				*idx = i;
1425 			goto cb_cleanup;
1426 		}
1427 	}
1428 
1429 	while (ret > 0) {
1430 		if (dma_fence_test_signaled_any(fences, count, idx))
1431 			break;
1432 
1433 		err = tsleep(curproc, intr ? PCATCH : 0,
1434 		    "dfwat", timeout);
1435 		if (err == EINTR || err == ERESTART) {
1436 			ret = -ERESTARTSYS;
1437 			break;
1438 		} else if (err == EWOULDBLOCK) {
1439 			ret = 0;
1440 			break;
1441 		}
1442 	}
1443 
1444 cb_cleanup:
1445 	while (i-- > 0)
1446 		dma_fence_remove_callback(fences[i], &cb[i].base);
1447 	free(cb, M_DRM, count * sizeof(*cb));
1448 	return ret;
1449 }
1450 
1451 static struct dma_fence dma_fence_stub;
1452 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
1453 
1454 static const char *
1455 dma_fence_stub_get_name(struct dma_fence *fence)
1456 {
1457 	return "stub";
1458 }
1459 
1460 static const struct dma_fence_ops dma_fence_stub_ops = {
1461 	.get_driver_name = dma_fence_stub_get_name,
1462 	.get_timeline_name = dma_fence_stub_get_name,
1463 };
1464 
1465 struct dma_fence *
1466 dma_fence_get_stub(void)
1467 {
1468 	mtx_enter(&dma_fence_stub_mtx);
1469 	if (dma_fence_stub.ops == NULL) {
1470 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
1471 		    &dma_fence_stub_mtx, 0, 0);
1472 		dma_fence_signal_locked(&dma_fence_stub);
1473 	}
1474 	mtx_leave(&dma_fence_stub_mtx);
1475 
1476 	return dma_fence_get(&dma_fence_stub);
1477 }
1478 
1479 static const char *
1480 dma_fence_array_get_driver_name(struct dma_fence *fence)
1481 {
1482 	return "dma_fence_array";
1483 }
1484 
1485 static const char *
1486 dma_fence_array_get_timeline_name(struct dma_fence *fence)
1487 {
1488 	return "unbound";
1489 }
1490 
1491 static void
1492 irq_dma_fence_array_work(struct irq_work *wrk)
1493 {
1494 	struct dma_fence_array *dfa = container_of(wrk, typeof(*dfa), work);
1495 
1496 	dma_fence_signal(&dfa->base);
1497 	dma_fence_put(&dfa->base);
1498 }
1499 
1500 static void
1501 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
1502 {
1503 	struct dma_fence_array_cb *array_cb =
1504 	    container_of(cb, struct dma_fence_array_cb, cb);
1505 	struct dma_fence_array *dfa = array_cb->array;
1506 
1507 	if (atomic_dec_and_test(&dfa->num_pending))
1508 		irq_work_queue(&dfa->work);
1509 	else
1510 		dma_fence_put(&dfa->base);
1511 }
1512 
1513 static bool
1514 dma_fence_array_enable_signaling(struct dma_fence *fence)
1515 {
1516 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1517 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
1518 	int i;
1519 
1520 	for (i = 0; i < dfa->num_fences; ++i) {
1521 		cb[i].array = dfa;
1522 		dma_fence_get(&dfa->base);
1523 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
1524 		    dma_fence_array_cb_func)) {
1525 			dma_fence_put(&dfa->base);
1526 			if (atomic_dec_and_test(&dfa->num_pending))
1527 				return false;
1528 		}
1529 	}
1530 
1531 	return true;
1532 }
1533 
1534 static bool dma_fence_array_signaled(struct dma_fence *fence)
1535 {
1536 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1537 
1538 	return atomic_read(&dfa->num_pending) <= 0;
1539 }
1540 
1541 static void dma_fence_array_release(struct dma_fence *fence)
1542 {
1543 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1544 	int i;
1545 
1546 	for (i = 0; i < dfa->num_fences; ++i)
1547 		dma_fence_put(dfa->fences[i]);
1548 
1549 	free(dfa->fences, M_DRM, 0);
1550 	dma_fence_free(fence);
1551 }
1552 
1553 struct dma_fence_array *
1554 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
1555     unsigned seqno, bool signal_on_any)
1556 {
1557 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
1558 	    (num_fences * sizeof(struct dma_fence_array_cb)),
1559 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1560 	if (dfa == NULL)
1561 		return NULL;
1562 
1563 	mtx_init(&dfa->lock, IPL_TTY);
1564 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
1565 	    context, seqno);
1566 	init_irq_work(&dfa->work, irq_dma_fence_array_work);
1567 
1568 	dfa->num_fences = num_fences;
1569 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
1570 	dfa->fences = fences;
1571 
1572 	return dfa;
1573 }
1574 
1575 const struct dma_fence_ops dma_fence_array_ops = {
1576 	.get_driver_name = dma_fence_array_get_driver_name,
1577 	.get_timeline_name = dma_fence_array_get_timeline_name,
1578 	.enable_signaling = dma_fence_array_enable_signaling,
1579 	.signaled = dma_fence_array_signaled,
1580 	.release = dma_fence_array_release,
1581 };
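
/*
 * Example (sketch only, not compiled): building a fence array.  "n" and the
 * fences themselves are hypothetical.  On success the array takes ownership
 * of the fences[] allocation and drops the individual references in
 * dma_fence_array_release().
 *
 *	struct dma_fence **fences;
 *	struct dma_fence_array *dfa;
 *
 *	fences = mallocarray(n, sizeof(*fences), M_DRM, M_WAITOK);
 *	...	(store n fence references in fences[])
 *	dfa = dma_fence_array_create(n, fences, dma_fence_context_alloc(1),
 *	    1, false);
 *	if (dfa != NULL) {
 *		...
 *		dma_fence_put(&dfa->base);
 *	}
 */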
1582 
1583 int
1584 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
1585 {
1586 	return (ENXIO);
1587 }
1588 
1589 int
1590 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
1591 {
1592 	return (ENXIO);
1593 }
1594 
1595 int
1596 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
1597 {
1598 	return (ENOTTY);
1599 }
1600 
1601 int
1602 dmabuf_poll(struct file *fp, int events, struct proc *p)
1603 {
1604 	return (0);
1605 }
1606 
1607 int
1608 dmabuf_kqfilter(struct file *fp, struct knote *kn)
1609 {
1610 	return (EINVAL);
1611 }
1612 
1613 int
1614 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
1615 {
1616 	struct dma_buf *dmabuf = fp->f_data;
1617 
1618 	memset(st, 0, sizeof(*st));
1619 	st->st_size = dmabuf->size;
1620 	st->st_mode = S_IFIFO;	/* XXX */
1621 	return (0);
1622 }
1623 
1624 int
1625 dmabuf_close(struct file *fp, struct proc *p)
1626 {
1627 	struct dma_buf *dmabuf = fp->f_data;
1628 
1629 	fp->f_data = NULL;
1630 	KERNEL_LOCK();
1631 	dmabuf->ops->release(dmabuf);
1632 	KERNEL_UNLOCK();
1633 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
1634 	return (0);
1635 }
1636 
1637 int
1638 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
1639 {
1640 	struct dma_buf *dmabuf = fp->f_data;
1641 	off_t newoff;
1642 
1643 	if (*offset != 0)
1644 		return (EINVAL);
1645 
1646 	switch (whence) {
1647 	case SEEK_SET:
1648 		newoff = 0;
1649 		break;
1650 	case SEEK_END:
1651 		newoff = dmabuf->size;
1652 		break;
1653 	default:
1654 		return (EINVAL);
1655 	}
1656 	mtx_enter(&fp->f_mtx);
1657 	fp->f_offset = newoff;
1658 	mtx_leave(&fp->f_mtx);
1659 	*offset = newoff;
1660 	return (0);
1661 }
1662 
1663 const struct fileops dmabufops = {
1664 	.fo_read	= dmabuf_read,
1665 	.fo_write	= dmabuf_write,
1666 	.fo_ioctl	= dmabuf_ioctl,
1667 	.fo_poll	= dmabuf_poll,
1668 	.fo_kqfilter	= dmabuf_kqfilter,
1669 	.fo_stat	= dmabuf_stat,
1670 	.fo_close	= dmabuf_close,
1671 	.fo_seek	= dmabuf_seek,
1672 };
1673 
1674 struct dma_buf *
1675 dma_buf_export(const struct dma_buf_export_info *info)
1676 {
1677 	struct proc *p = curproc;
1678 	struct dma_buf *dmabuf;
1679 	struct file *fp;
1680 
1681 	fp = fnew(p);
1682 	if (fp == NULL)
1683 		return ERR_PTR(-ENFILE);
1684 	fp->f_type = DTYPE_DMABUF;
1685 	fp->f_ops = &dmabufops;
1686 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
1687 	dmabuf->priv = info->priv;
1688 	dmabuf->ops = info->ops;
1689 	dmabuf->size = info->size;
1690 	dmabuf->file = fp;
1691 	fp->f_data = dmabuf;
1692 	INIT_LIST_HEAD(&dmabuf->attachments);
1693 	return dmabuf;
1694 }
1695 
1696 struct dma_buf *
1697 dma_buf_get(int fd)
1698 {
1699 	struct proc *p = curproc;
1700 	struct filedesc *fdp = p->p_fd;
1701 	struct file *fp;
1702 
1703 	if ((fp = fd_getfile(fdp, fd)) == NULL)
1704 		return ERR_PTR(-EBADF);
1705 
1706 	if (fp->f_type != DTYPE_DMABUF) {
1707 		FRELE(fp, p);
1708 		return ERR_PTR(-EINVAL);
1709 	}
1710 
1711 	return fp->f_data;
1712 }
1713 
1714 void
1715 dma_buf_put(struct dma_buf *dmabuf)
1716 {
1717 	KASSERT(dmabuf);
1718 	KASSERT(dmabuf->file);
1719 
1720 	FRELE(dmabuf->file, curproc);
1721 }
1722 
1723 int
1724 dma_buf_fd(struct dma_buf *dmabuf, int flags)
1725 {
1726 	struct proc *p = curproc;
1727 	struct filedesc *fdp = p->p_fd;
1728 	struct file *fp = dmabuf->file;
1729 	int fd, cloexec, error;
1730 
1731 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
1732 
1733 	fdplock(fdp);
1734 restart:
1735 	if ((error = fdalloc(p, 0, &fd)) != 0) {
1736 		if (error == ENOSPC) {
1737 			fdexpand(p);
1738 			goto restart;
1739 		}
1740 		fdpunlock(fdp);
1741 		return -error;
1742 	}
1743 
1744 	fdinsert(fdp, fd, cloexec, fp);
1745 	fdpunlock(fdp);
1746 
1747 	return fd;
1748 }
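
/*
 * Example (sketch only, not compiled): exporting an object as a dma-buf and
 * handing it to userland.  example_dmabuf_ops and "obj" are hypothetical.
 *
 *	struct dma_buf_export_info info = {
 *		.ops = &example_dmabuf_ops,
 *		.size = obj->size,
 *		.priv = obj,
 *	};
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	dmabuf = dma_buf_export(&info);
 *	if (!IS_ERR(dmabuf))
 *		fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 */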
1749 
1750 void
1751 get_dma_buf(struct dma_buf *dmabuf)
1752 {
1753 	FREF(dmabuf->file);
1754 }
1755 
1756 enum pci_bus_speed
1757 pcie_get_speed_cap(struct pci_dev *pdev)
1758 {
1759 	pci_chipset_tag_t	pc = pdev->pc;
1760 	pcitag_t		tag = pdev->tag;
1761 	int			pos;
1762 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
1763 	pcireg_t		id;
1764 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
1765 	int			bus, device, function;
1766 
1767 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1768 	    &pos, NULL))
1769 		return PCI_SPEED_UNKNOWN;
1770 
1771 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1772 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1773 
1774 	/* we've been informed that VIA and ServerWorks don't make the cut */
1775 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
1776 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
1777 		return PCI_SPEED_UNKNOWN;
1778 
1779 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1780 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
1781 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
1782 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
1783 
1784 	lnkcap &= 0x0f;
1785 	lnkcap2 &= 0xfe;
1786 
1787 	if (lnkcap2) { /* PCIE GEN 3.0 */
1788 		if (lnkcap2 & 0x02)
1789 			cap = PCIE_SPEED_2_5GT;
1790 		if (lnkcap2 & 0x04)
1791 			cap = PCIE_SPEED_5_0GT;
1792 		if (lnkcap2 & 0x08)
1793 			cap = PCIE_SPEED_8_0GT;
1794 		if (lnkcap2 & 0x10)
1795 			cap = PCIE_SPEED_16_0GT;
1796 	} else {
1797 		if (lnkcap & 0x01)
1798 			cap = PCIE_SPEED_2_5GT;
1799 		if (lnkcap & 0x02)
1800 			cap = PCIE_SPEED_5_0GT;
1801 	}
1802 
1803 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
1804 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
1805 	    lnkcap2);
1806 	return cap;
1807 }
1808 
1809 enum pcie_link_width
1810 pcie_get_width_cap(struct pci_dev *pdev)
1811 {
1812 	pci_chipset_tag_t	pc = pdev->pc;
1813 	pcitag_t		tag = pdev->tag;
1814 	int			pos;
1815 	pcireg_t		lnkcap = 0;
1816 	pcireg_t		id;
1817 	int			bus, device, function;
1818 
1819 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1820 	    &pos, NULL))
1821 		return PCIE_LNK_WIDTH_UNKNOWN;
1822 
1823 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1824 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1825 
1826 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1827 
1828 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
1829 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
1830 
1831 	if (lnkcap)
1832 		return (lnkcap & 0x3f0) >> 4;
1833 	return PCIE_LNK_WIDTH_UNKNOWN;
1834 }
1835 
1836 int
1837 default_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1838     int sync, void *key)
1839 {
1840 	wakeup(wqe);
1841 	if (wqe->proc)
1842 		wake_up_process(wqe->proc);
1843 	return 0;
1844 }
1845 
1846 int
1847 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1848     int sync, void *key)
1849 {
1850 	default_wake_function(wqe, mode, sync, key);
1851 	list_del_init(&wqe->entry);
1852 	return 0;
1853 }
1854 
1855 static wait_queue_head_t bit_waitq;
1856 wait_queue_head_t var_waitq;
1857 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
1858 
1859 int
1860 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1861 {
1862 	int err;
1863 
1864 	if (!test_bit(bit, word))
1865 		return 0;
1866 
1867 	mtx_enter(&wait_bit_mtx);
1868 	while (test_bit(bit, word)) {
1869 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
1870 		    INFSLP);
1871 		if (err) {
1872 			mtx_leave(&wait_bit_mtx);
1873 			return 1;
1874 		}
1875 	}
1876 	mtx_leave(&wait_bit_mtx);
1877 	return 0;
1878 }
1879 
1880 int
1881 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
1882 {
1883 	int err;
1884 
1885 	if (!test_bit(bit, word))
1886 		return 0;
1887 
1888 	mtx_enter(&wait_bit_mtx);
1889 	while (test_bit(bit, word)) {
1890 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
1891 		if (err) {
1892 			mtx_leave(&wait_bit_mtx);
1893 			return 1;
1894 		}
1895 	}
1896 	mtx_leave(&wait_bit_mtx);
1897 	return 0;
1898 }
1899 
1900 void
1901 wake_up_bit(void *word, int bit)
1902 {
1903 	mtx_enter(&wait_bit_mtx);
1904 	wakeup(word);
1905 	mtx_leave(&wait_bit_mtx);
1906 }
1907 
1908 void
1909 clear_and_wake_up_bit(int bit, void *word)
1910 {
1911 	clear_bit(bit, word);
1912 	wake_up_bit(word, bit);
1913 }
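
/*
 * Example (sketch only, not compiled): waiting on a flag bit.  "sc->flags"
 * (an unsigned long) and EXAMPLE_BUSY are hypothetical.  The mode argument
 * is OR'd into the msleep(9) priority, so 0 gives an uninterruptible wait
 * and PCATCH an interruptible one.
 *
 *	(waiter)
 *	if (wait_on_bit(&sc->flags, EXAMPLE_BUSY, 0))
 *		return;		(sleep failed)
 *
 *	(completing side)
 *	clear_and_wake_up_bit(EXAMPLE_BUSY, &sc->flags);
 */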
1914 
1915 wait_queue_head_t *
1916 bit_waitqueue(void *word, int bit)
1917 {
1918 	/* XXX hash table of wait queues? */
1919 	return &bit_waitq;
1920 }
1921 
1922 struct workqueue_struct *system_wq;
1923 struct workqueue_struct *system_highpri_wq;
1924 struct workqueue_struct *system_unbound_wq;
1925 struct workqueue_struct *system_long_wq;
1926 struct taskq *taskletq;
1927 
1928 void
1929 drm_linux_init(void)
1930 {
1931 	system_wq = (struct workqueue_struct *)
1932 	    taskq_create("drmwq", 4, IPL_HIGH, 0);
1933 	system_highpri_wq = (struct workqueue_struct *)
1934 	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
1935 	system_unbound_wq = (struct workqueue_struct *)
1936 	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
1937 	system_long_wq = (struct workqueue_struct *)
1938 	    taskq_create("drmlwq", 4, IPL_HIGH, 0);
1939 
1940 	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
1941 
1942 	init_waitqueue_head(&bit_waitq);
1943 	init_waitqueue_head(&var_waitq);
1944 
1945 	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
1946 	    "idrpl", NULL);
1947 }
1948 
1949 void
1950 drm_linux_exit(void)
1951 {
1952 	pool_destroy(&idr_pool);
1953 
1954 	taskq_destroy(taskletq);
1955 
1956 	taskq_destroy((struct taskq *)system_long_wq);
1957 	taskq_destroy((struct taskq *)system_unbound_wq);
1958 	taskq_destroy((struct taskq *)system_highpri_wq);
1959 	taskq_destroy((struct taskq *)system_wq);
1960 }
1961 
1962 #define PCIE_ECAP_RESIZE_BAR	0x15
1963 #define RBCAP0			0x04
1964 #define RBCTRL0			0x08
1965 #define RBCTRL_BARINDEX_MASK	0x07
1966 #define RBCTRL_BARSIZE_MASK	0x1f00
1967 #define RBCTRL_BARSIZE_SHIFT	8
1968 
1969 /* size in MB is 1 << nsize */
1970 int
1971 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
1972 {
1973 	pcireg_t	reg;
1974 	uint32_t	offset, capid;
1975 
1976 	KASSERT(bar == 0);
1977 
1978 	offset = PCI_PCIE_ECAP;
1979 
1980 	/* search PCI Express Extended Capabilities */
1981 	do {
1982 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
1983 		capid = PCI_PCIE_ECAP_ID(reg);
1984 		if (capid == PCIE_ECAP_RESIZE_BAR)
1985 			break;
1986 		offset = PCI_PCIE_ECAP_NEXT(reg);
1987 	} while (capid != 0);
1988 
1989 	if (capid == 0) {
1990 		printf("%s: could not find resize bar cap!\n", __func__);
1991 		return -ENOTSUP;
1992 	}
1993 
1994 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
1995 
1996 	if ((reg & (1 << (nsize + 4))) == 0) {
1997 		printf("%s size not supported\n", __func__);
1998 		return -ENOTSUP;
1999 	}
2000 
2001 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2002 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2003 		printf("%s BAR index not 0\n", __func__);
2004 		return -EINVAL;
2005 	}
2006 
2007 	reg &= ~RBCTRL_BARSIZE_MASK;
2008 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2009 
2010 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2011 
2012 	return 0;
2013 }
2014 
2015 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2016 
2017 int
2018 register_shrinker(struct shrinker *shrinker)
2019 {
2020 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2021 	return 0;
2022 }
2023 
2024 void
2025 unregister_shrinker(struct shrinker *shrinker)
2026 {
2027 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2028 }
2029 
2030 void
2031 drmbackoff(long npages)
2032 {
2033 	struct shrink_control sc;
2034 	struct shrinker *shrinker;
2035 	u_long ret;
2036 
2037 	shrinker = TAILQ_FIRST(&shrinkers);
2038 	while (shrinker && npages > 0) {
2039 		sc.nr_to_scan = npages;
2040 		ret = shrinker->scan_objects(shrinker, &sc);
2041 		npages -= ret;
2042 		shrinker = TAILQ_NEXT(shrinker, next);
2043 	}
2044 }
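
/*
 * Example (sketch only, not compiled): a shrinker as used by drmbackoff().
 * example_scan() and example_trim_caches() are hypothetical; the scan hook
 * should free up to sc->nr_to_scan pages and return the number freed.
 *
 *	static unsigned long
 *	example_scan(struct shrinker *shrinker, struct shrink_control *sc)
 *	{
 *		return example_trim_caches(sc->nr_to_scan);
 *	}
 *
 *	static struct shrinker example_shrinker = {
 *		.scan_objects = example_scan,
 *	};
 *
 *	register_shrinker(&example_shrinker);
 *	...
 *	unregister_shrinker(&example_shrinker);
 */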
2045 
2046 void *
2047 bitmap_zalloc(u_int n, gfp_t flags)
2048 {
2049 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2050 }
2051 
2052 void
2053 bitmap_free(void *p)
2054 {
2055 	kfree(p);
2056 }
2057 
2058 int
2059 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2060 {
2061 	if (atomic_add_unless(v, -1, 1))
2062 		return 0;
2063 
2064 	rw_enter_write(lock);
2065 	if (atomic_dec_return(v) == 0)
2066 		return 1;
2067 	rw_exit_write(lock);
2068 	return 0;
2069 }
2070 
2071 int
2072 printk(const char *fmt, ...)
2073 {
2074 	int ret, level;
2075 	va_list ap;
2076 
2077 	if (fmt != NULL && *fmt == '\001') {
2078 		level = fmt[1];
2079 #ifndef DRMDEBUG
2080 		if (level >= KERN_INFO[1] && level <= '9')
2081 			return 0;
2082 #endif
2083 		fmt += 2;
2084 	}
2085 
2086 	va_start(ap, fmt);
2087 	ret = vprintf(fmt, ap);
2088 	va_end(ap);
2089 
2090 	return ret;
2091 }
2092 
2093 #define START(node) ((node)->start)
2094 #define LAST(node) ((node)->last)
2095 
2096 struct interval_tree_node *
2097 interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
2098     unsigned long last)
2099 {
2100 	struct interval_tree_node *node;
2101 	struct rb_node *rb;
2102 
2103 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
2104 		node = rb_entry(rb, typeof(*node), rb);
2105 		if (LAST(node) >= start && START(node) <= last)
2106 			return node;
2107 	}
2108 	return NULL;
2109 }
2110 
2111 void
2112 interval_tree_remove(struct interval_tree_node *node,
2113     struct rb_root_cached *root)
2114 {
2115 	rb_erase_cached(&node->rb, root);
2116 }
2117 
2118 void
2119 interval_tree_insert(struct interval_tree_node *node,
2120     struct rb_root_cached *root)
2121 {
2122 	struct rb_node **iter = &root->rb_root.rb_node;
2123 	struct rb_node *parent = NULL;
2124 	struct interval_tree_node *iter_node;
2125 
2126 	while (*iter) {
2127 		parent = *iter;
2128 		iter_node = rb_entry(*iter, struct interval_tree_node, rb);
2129 
2130 		if (node->start < iter_node->start)
2131 			iter = &(*iter)->rb_left;
2132 		else
2133 			iter = &(*iter)->rb_right;
2134 	}
2135 
2136 	rb_link_node(&node->rb, parent, iter);
2137 	rb_insert_color_cached(&node->rb, root, false);
2138 }
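
/*
 * Example (sketch only, not compiled): inserting and querying an interval.
 * RB_ROOT_CACHED is assumed to be the usual Linux-style empty-tree
 * initializer from the rbtree shim.
 *
 *	struct rb_root_cached root = RB_ROOT_CACHED;
 *	struct interval_tree_node node = { .start = 0x1000, .last = 0x1fff };
 *
 *	interval_tree_insert(&node, &root);
 *	KASSERT(interval_tree_iter_first(&root, 0x1800, 0x2000) == &node);
 *	interval_tree_remove(&node, &root);
 */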
2139