xref: /openbsd-src/sys/dev/pci/drm/drm_linux.c (revision 505ee9ea3b177e2387d907a91ca7da069f3f14d8)
1 /*	$OpenBSD: drm_linux.c,v 1.61 2020/07/02 11:01:21 jsg Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/param.h>
21 #include <sys/event.h>
22 #include <sys/filedesc.h>
23 #include <sys/kthread.h>
24 #include <sys/stat.h>
25 #include <sys/unistd.h>
26 #include <sys/proc.h>
27 #include <sys/pool.h>
28 #include <sys/fcntl.h>
29 
30 #include <dev/pci/ppbreg.h>
31 
32 #include <linux/dma-buf.h>
33 #include <linux/mod_devicetable.h>
34 #include <linux/acpi.h>
35 #include <linux/pagevec.h>
36 #include <linux/dma-fence-array.h>
37 #include <linux/interrupt.h>
38 #include <linux/err.h>
39 #include <linux/idr.h>
40 #include <linux/scatterlist.h>
41 #include <linux/i2c.h>
42 #include <linux/pci.h>
43 #include <linux/notifier.h>
44 #include <linux/backlight.h>
45 #include <linux/shrinker.h>
46 #include <linux/fb.h>
47 #include <linux/xarray.h>
48 
49 #include <drm/drm_device.h>
50 #include <drm/drm_print.h>
51 
52 #if defined(__amd64__) || defined(__i386__)
53 #include "bios.h"
54 #endif
55 
56 void
57 tasklet_run(void *arg)
58 {
59 	struct tasklet_struct *ts = arg;
60 
61 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
62 	if (tasklet_trylock(ts)) {
63 		if (!atomic_read(&ts->count))
64 			ts->func(ts->data);
65 		tasklet_unlock(ts);
66 	}
67 }
68 
69 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
70 volatile struct proc *sch_proc;
71 volatile void *sch_ident;
72 int sch_priority;
73 
74 void
75 set_current_state(int state)
76 {
77 	if (sch_ident != curproc)
78 		mtx_enter(&sch_mtx);
79 	MUTEX_ASSERT_LOCKED(&sch_mtx);
80 	sch_ident = sch_proc = curproc;
81 	sch_priority = state;
82 }
83 
84 void
85 __set_current_state(int state)
86 {
87 	KASSERT(state == TASK_RUNNING);
88 	if (sch_ident == curproc) {
89 		MUTEX_ASSERT_LOCKED(&sch_mtx);
90 		sch_ident = NULL;
91 		mtx_leave(&sch_mtx);
92 	}
93 }
94 
95 void
96 schedule(void)
97 {
98 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
99 }
100 
101 long
102 schedule_timeout(long timeout)
103 {
104 	struct sleep_state sls;
105 	long deadline;
106 	int wait, spl;
107 
108 	MUTEX_ASSERT_LOCKED(&sch_mtx);
109 	KASSERT(!cold);
110 
111 	sleep_setup(&sls, sch_ident, sch_priority, "schto");
112 	if (timeout != MAX_SCHEDULE_TIMEOUT)
113 		sleep_setup_timeout(&sls, timeout);
114 
115 	wait = (sch_proc == curproc && timeout > 0);
116 
117 	spl = MUTEX_OLDIPL(&sch_mtx);
118 	MUTEX_OLDIPL(&sch_mtx) = splsched();
119 	mtx_leave(&sch_mtx);
120 
121 	sleep_setup_signal(&sls);
122 
123 	if (timeout != MAX_SCHEDULE_TIMEOUT)
124 		deadline = ticks + timeout;
125 	sleep_finish_all(&sls, wait);
126 	if (timeout != MAX_SCHEDULE_TIMEOUT)
127 		timeout = deadline - ticks;
128 
129 	mtx_enter(&sch_mtx);
130 	MUTEX_OLDIPL(&sch_mtx) = spl;
131 	sch_ident = curproc;
132 
133 	return timeout > 0 ? timeout : 0;
134 }
135 
136 long
137 schedule_timeout_uninterruptible(long timeout)
138 {
139 	tsleep(curproc, PWAIT, "schtou", timeout);
140 	return 0;
141 }
142 
143 int
144 wake_up_process(struct proc *p)
145 {
146 	atomic_cas_ptr(&sch_proc, p, NULL);
147 	return wakeup_proc(p, NULL);
148 }
149 
150 void
151 flush_workqueue(struct workqueue_struct *wq)
152 {
153 	if (cold)
154 		return;
155 
156 	taskq_barrier((struct taskq *)wq);
157 }
158 
159 bool
160 flush_work(struct work_struct *work)
161 {
162 	if (cold)
163 		return false;
164 
165 	taskq_barrier(work->tq);
166 	return false;
167 }
168 
169 bool
170 flush_delayed_work(struct delayed_work *dwork)
171 {
172 	bool ret = false;
173 
174 	if (cold)
175 		return false;
176 
177 	while (timeout_pending(&dwork->to)) {
178 		tsleep(dwork, PWAIT, "fldwto", 1);
179 		ret = true;
180 	}
181 
182 	taskq_barrier(dwork->tq ? dwork->tq : (struct taskq *)system_wq);
183 	return ret;
184 }
185 
186 struct kthread {
187 	int (*func)(void *);
188 	void *data;
189 	struct proc *proc;
190 	volatile u_int flags;
191 #define KTHREAD_SHOULDSTOP	0x0000001
192 #define KTHREAD_STOPPED		0x0000002
193 #define KTHREAD_SHOULDPARK	0x0000004
194 #define KTHREAD_PARKED		0x0000008
195 	LIST_ENTRY(kthread) next;
196 };
197 
198 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
199 
200 void
201 kthread_func(void *arg)
202 {
203 	struct kthread *thread = arg;
204 	int ret;
205 
206 	ret = thread->func(thread->data);
207 	thread->flags |= KTHREAD_STOPPED;
208 	kthread_exit(ret);
209 }
210 
211 struct proc *
212 kthread_run(int (*func)(void *), void *data, const char *name)
213 {
214 	struct kthread *thread;
215 
216 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
217 	thread->func = func;
218 	thread->data = data;
219 	thread->flags = 0;
220 
221 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
222 		free(thread, M_DRM, sizeof(*thread));
223 		return ERR_PTR(-ENOMEM);
224 	}
225 
226 	LIST_INSERT_HEAD(&kthread_list, thread, next);
227 	return thread->proc;
228 }
229 
230 struct kthread *
231 kthread_lookup(struct proc *p)
232 {
233 	struct kthread *thread;
234 
235 	LIST_FOREACH(thread, &kthread_list, next) {
236 		if (thread->proc == p)
237 			break;
238 	}
239 	KASSERT(thread);
240 
241 	return thread;
242 }
243 
244 int
245 kthread_should_park(void)
246 {
247 	struct kthread *thread = kthread_lookup(curproc);
248 	return (thread->flags & KTHREAD_SHOULDPARK);
249 }
250 
251 void
252 kthread_parkme(void)
253 {
254 	struct kthread *thread = kthread_lookup(curproc);
255 
256 	while (thread->flags & KTHREAD_SHOULDPARK) {
257 		thread->flags |= KTHREAD_PARKED;
258 		wakeup(thread);
259 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
260 		thread->flags &= ~KTHREAD_PARKED;
261 	}
262 }
263 
264 void
265 kthread_park(struct proc *p)
266 {
267 	struct kthread *thread = kthread_lookup(p);
268 
269 	while ((thread->flags & KTHREAD_PARKED) == 0) {
270 		thread->flags |= KTHREAD_SHOULDPARK;
271 		wake_up_process(thread->proc);
272 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
273 	}
274 }
275 
276 void
277 kthread_unpark(struct proc *p)
278 {
279 	struct kthread *thread = kthread_lookup(p);
280 
281 	thread->flags &= ~KTHREAD_SHOULDPARK;
282 	wakeup(thread);
283 }
284 
285 int
286 kthread_should_stop(void)
287 {
288 	struct kthread *thread = kthread_lookup(curproc);
289 	return (thread->flags & KTHREAD_SHOULDSTOP);
290 }
291 
292 void
293 kthread_stop(struct proc *p)
294 {
295 	struct kthread *thread = kthread_lookup(p);
296 
297 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
298 		thread->flags |= KTHREAD_SHOULDSTOP;
299 		wake_up_process(thread->proc);
300 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
301 	}
302 	LIST_REMOVE(thread, next);
303 	free(thread, M_DRM, sizeof(*thread));
304 }
305 
306 int64_t
307 timeval_to_ns(const struct timeval *tv)
308 {
309 	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
310 		tv->tv_usec * NSEC_PER_USEC;
311 }
312 
313 struct timeval
314 ns_to_timeval(const int64_t nsec)
315 {
316 	struct timeval tv;
317 	int32_t rem;
318 
319 	if (nsec == 0) {
320 		tv.tv_sec = 0;
321 		tv.tv_usec = 0;
322 		return (tv);
323 	}
324 
325 	tv.tv_sec = nsec / NSEC_PER_SEC;
326 	rem = nsec % NSEC_PER_SEC;
327 	if (rem < 0) {
328 		tv.tv_sec--;
329 		rem += NSEC_PER_SEC;
330 	}
331 	tv.tv_usec = rem / 1000;
332 	return (tv);
333 }
334 
335 int64_t
336 timeval_to_ms(const struct timeval *tv)
337 {
338 	return ((int64_t)tv->tv_sec * 1000) + (tv->tv_usec / 1000);
339 }
340 
341 int64_t
342 timeval_to_us(const struct timeval *tv)
343 {
344 	return ((int64_t)tv->tv_sec * 1000000) + tv->tv_usec;
345 }
346 
347 extern char *hw_vendor, *hw_prod, *hw_ver;
348 
349 #if NBIOS > 0
350 extern char smbios_board_vendor[];
351 extern char smbios_board_prod[];
352 extern char smbios_board_serial[];
353 #endif
354 
355 bool
356 dmi_match(int slot, const char *str)
357 {
358 	switch (slot) {
359 	case DMI_SYS_VENDOR:
360 		if (hw_vendor != NULL &&
361 		    !strcmp(hw_vendor, str))
362 			return true;
363 		break;
364 	case DMI_PRODUCT_NAME:
365 		if (hw_prod != NULL &&
366 		    !strcmp(hw_prod, str))
367 			return true;
368 		break;
369 	case DMI_PRODUCT_VERSION:
370 		if (hw_ver != NULL &&
371 		    !strcmp(hw_ver, str))
372 			return true;
373 		break;
374 #if NBIOS > 0
375 	case DMI_BOARD_VENDOR:
376 		if (strcmp(smbios_board_vendor, str) == 0)
377 			return true;
378 		break;
379 	case DMI_BOARD_NAME:
380 		if (strcmp(smbios_board_prod, str) == 0)
381 			return true;
382 		break;
383 	case DMI_BOARD_SERIAL:
384 		if (strcmp(smbios_board_serial, str) == 0)
385 			return true;
386 		break;
387 #else
388 	case DMI_BOARD_VENDOR:
389 		if (hw_vendor != NULL &&
390 		    !strcmp(hw_vendor, str))
391 			return true;
392 		break;
393 	case DMI_BOARD_NAME:
394 		if (hw_prod != NULL &&
395 		    !strcmp(hw_prod, str))
396 			return true;
397 		break;
398 #endif
399 	case DMI_NONE:
400 	default:
401 		return false;
402 	}
403 
404 	return false;
405 }
406 
407 static bool
408 dmi_found(const struct dmi_system_id *dsi)
409 {
410 	int i, slot;
411 
412 	for (i = 0; i < nitems(dsi->matches); i++) {
413 		slot = dsi->matches[i].slot;
414 		if (slot == DMI_NONE)
415 			break;
416 		if (!dmi_match(slot, dsi->matches[i].substr))
417 			return false;
418 	}
419 
420 	return true;
421 }
422 
423 const struct dmi_system_id *
424 dmi_first_match(const struct dmi_system_id *sysid)
425 {
426 	const struct dmi_system_id *dsi;
427 
428 	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
429 		if (dmi_found(dsi))
430 			return dsi;
431 	}
432 
433 	return NULL;
434 }
435 
436 #if NBIOS > 0
437 extern char smbios_bios_date[];
438 #endif
439 
440 const char *
441 dmi_get_system_info(int slot)
442 {
443 	WARN_ON(slot != DMI_BIOS_DATE);
444 #if NBIOS > 0
445 	if (slot == DMI_BIOS_DATE)
446 		return smbios_bios_date;
447 #endif
448 	return NULL;
449 }
450 
451 int
452 dmi_check_system(const struct dmi_system_id *sysid)
453 {
454 	const struct dmi_system_id *dsi;
455 	int num = 0;
456 
457 	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
458 		if (dmi_found(dsi)) {
459 			num++;
460 			if (dsi->callback && dsi->callback(dsi))
461 				break;
462 		}
463 	}
464 	return (num);
465 }
466 
467 struct vm_page *
468 alloc_pages(unsigned int gfp_mask, unsigned int order)
469 {
470 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
471 	struct uvm_constraint_range *constraint = &no_constraint;
472 	struct pglist mlist;
473 
474 	if (gfp_mask & M_CANFAIL)
475 		flags |= UVM_PLA_FAILOK;
476 	if (gfp_mask & M_ZERO)
477 		flags |= UVM_PLA_ZERO;
478 	if (gfp_mask & __GFP_DMA32)
479 		constraint = &dma_constraint;
480 
481 	TAILQ_INIT(&mlist);
482 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
483 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
484 		return NULL;
485 	return TAILQ_FIRST(&mlist);
486 }
487 
488 void
489 __free_pages(struct vm_page *page, unsigned int order)
490 {
491 	struct pglist mlist;
492 	int i;
493 
494 	TAILQ_INIT(&mlist);
495 	for (i = 0; i < (1 << order); i++)
496 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
497 	uvm_pglistfree(&mlist);
498 }
499 
500 void
501 __pagevec_release(struct pagevec *pvec)
502 {
503 	struct pglist mlist;
504 	int i;
505 
506 	TAILQ_INIT(&mlist);
507 	for (i = 0; i < pvec->nr; i++)
508 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
509 	uvm_pglistfree(&mlist);
510 	pagevec_reinit(pvec);
511 }
512 
513 void *
514 kmap(struct vm_page *pg)
515 {
516 	vaddr_t va;
517 
518 #if defined (__HAVE_PMAP_DIRECT)
519 	va = pmap_map_direct(pg);
520 #else
521 	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
522 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
523 	pmap_update(pmap_kernel());
524 #endif
525 	return (void *)va;
526 }
527 
528 void
529 kunmap_va(void *addr)
530 {
531 	vaddr_t va = (vaddr_t)addr;
532 
533 #if defined (__HAVE_PMAP_DIRECT)
534 	pmap_unmap_direct(va);
535 #else
536 	pmap_kremove(va, PAGE_SIZE);
537 	pmap_update(pmap_kernel());
538 	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
539 #endif
540 }
541 
542 void *
543 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
544      pgprot_t prot)
545 {
546 	vaddr_t va;
547 	paddr_t pa;
548 	int i;
549 
550 	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
551 	if (va == 0)
552 		return NULL;
553 	for (i = 0; i < npages; i++) {
554 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
555 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
556 		    PROT_READ | PROT_WRITE,
557 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
558 		pmap_update(pmap_kernel());
559 	}
560 
561 	return (void *)va;
562 }
563 
564 void
565 vunmap(void *addr, size_t size)
566 {
567 	vaddr_t va = (vaddr_t)addr;
568 
569 	pmap_remove(pmap_kernel(), va, va + size);
570 	pmap_update(pmap_kernel());
571 	uvm_km_free(kernel_map, va, size);
572 }
573 
574 void
575 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
576     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
577 {
578 	const uint8_t *cbuf = buf;
579 	int i;
580 
581 	for (i = 0; i < len; i++) {
582 		if ((i % rowsize) == 0)
583 			printf("%s", prefix_str);
584 		printf("%02x", cbuf[i]);
585 		if ((i % rowsize) == (rowsize - 1))
586 			printf("\n");
587 		else
588 			printf(" ");
589 	}
590 }
591 
592 void *
593 memchr_inv(const void *s, int c, size_t n)
594 {
595 	if (n != 0) {
596 		const unsigned char *p = s;
597 
598 		do {
599 			if (*p++ != (unsigned char)c)
600 				return ((void *)(p - 1));
601 		} while (--n != 0);
602 	}
603 	return (NULL);
604 }
605 
606 int
607 panic_cmp(struct rb_node *a, struct rb_node *b)
608 {
609 	panic(__func__);
610 }
611 
612 #undef RB_ROOT
613 #define RB_ROOT(head)	(head)->rbh_root
614 
615 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
616 
617 /*
618  * This is a fairly minimal implementation of the Linux "idr" API.  It
619  * probably isn't very efficient, and definitely isn't RCU safe.  The
620  * pre-load buffer is global instead of per-cpu; we rely on the kernel
621  * lock to make this work.  We do randomize our IDs in order to make
622  * them harder to guess.
623  */
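
/*
 * Illustrative usage sketch, kept out of the build with #if 0: how a
 * caller might exercise this idr shim under the kernel lock that the
 * comment above relies on.  The idr_example() name and the GFP_KERNEL
 * flag are illustrative assumptions, not part of this file.
 */
#if 0
static int
idr_example(void *ptr)
{
	struct idr idr;
	int id;

	idr_init(&idr);
	/* start ids at 1; end <= 0 is treated as INT_MAX by idr_alloc() */
	id = idr_alloc(&idr, ptr, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;
	KASSERT(idr_find(&idr, id) == ptr);
	idr_remove(&idr, id);
	idr_destroy(&idr);
	return 0;
}
#endif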
624 
625 int idr_cmp(struct idr_entry *, struct idr_entry *);
626 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
627 
628 struct pool idr_pool;
629 struct idr_entry *idr_entry_cache;
630 
631 void
632 idr_init(struct idr *idr)
633 {
634 	static int initialized;
635 
636 	if (!initialized) {
637 		pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
638 		    "idrpl", NULL);
639 		initialized = 1;
640 	}
641 	SPLAY_INIT(&idr->tree);
642 }
643 
644 void
645 idr_destroy(struct idr *idr)
646 {
647 	struct idr_entry *id;
648 
649 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
650 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
651 		pool_put(&idr_pool, id);
652 	}
653 }
654 
655 void
656 idr_preload(unsigned int gfp_mask)
657 {
658 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
659 
660 	KERNEL_ASSERT_LOCKED();
661 
662 	if (idr_entry_cache == NULL)
663 		idr_entry_cache = pool_get(&idr_pool, flags);
664 }
665 
666 int
667 idr_alloc(struct idr *idr, void *ptr, int start, int end,
668     unsigned int gfp_mask)
669 {
670 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
671 	struct idr_entry *id;
672 	int begin;
673 
674 	KERNEL_ASSERT_LOCKED();
675 
676 	if (idr_entry_cache) {
677 		id = idr_entry_cache;
678 		idr_entry_cache = NULL;
679 	} else {
680 		id = pool_get(&idr_pool, flags);
681 		if (id == NULL)
682 			return -ENOMEM;
683 	}
684 
685 	if (end <= 0)
686 		end = INT_MAX;
687 
688 #ifdef notyet
689 	id->id = begin = start + arc4random_uniform(end - start);
690 #else
691 	id->id = begin = start;
692 #endif
693 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
694 		if (++id->id == end)
695 			id->id = start;
696 		if (id->id == begin) {
697 			pool_put(&idr_pool, id);
698 			return -ENOSPC;
699 		}
700 	}
701 	id->ptr = ptr;
702 	return id->id;
703 }
704 
705 void *
706 idr_replace(struct idr *idr, void *ptr, int id)
707 {
708 	struct idr_entry find, *res;
709 	void *old;
710 
711 	find.id = id;
712 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
713 	if (res == NULL)
714 		return ERR_PTR(-ENOENT);
715 	old = res->ptr;
716 	res->ptr = ptr;
717 	return old;
718 }
719 
720 void *
721 idr_remove(struct idr *idr, int id)
722 {
723 	struct idr_entry find, *res;
724 	void *ptr = NULL;
725 
726 	find.id = id;
727 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
728 	if (res) {
729 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
730 		ptr = res->ptr;
731 		pool_put(&idr_pool, res);
732 	}
733 	return ptr;
734 }
735 
736 void *
737 idr_find(struct idr *idr, int id)
738 {
739 	struct idr_entry find, *res;
740 
741 	find.id = id;
742 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
743 	if (res == NULL)
744 		return NULL;
745 	return res->ptr;
746 }
747 
748 void *
749 idr_get_next(struct idr *idr, int *id)
750 {
751 	struct idr_entry *res;
752 
753 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
754 		if (res->id >= *id) {
755 			*id = res->id;
756 			return res->ptr;
757 		}
758 	}
759 
760 	return NULL;
761 }
762 
763 int
764 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
765 {
766 	struct idr_entry *id;
767 	int ret;
768 
769 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
770 		ret = func(id->id, id->ptr, data);
771 		if (ret)
772 			return ret;
773 	}
774 
775 	return 0;
776 }
777 
778 int
779 idr_cmp(struct idr_entry *a, struct idr_entry *b)
780 {
781 	return (a->id < b->id ? -1 : a->id > b->id);
782 }
783 
784 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
785 
786 void
787 ida_init(struct ida *ida)
788 {
789 	ida->counter = 0;
790 }
791 
792 void
793 ida_destroy(struct ida *ida)
794 {
795 }
796 
797 void
798 ida_remove(struct ida *ida, int id)
799 {
800 }
801 
802 int
803 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
804     int flags)
805 {
806 	if (end <= 0)
807 		end = INT_MAX;
808 
809 	if (start > ida->counter)
810 		ida->counter = start;
811 
812 	if (ida->counter >= end)
813 		return -ENOSPC;
814 
815 	return ida->counter++;
816 }
817 
818 void
819 ida_simple_remove(struct ida *ida, int id)
820 {
821 }
822 
823 int
824 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
825 {
826 	return (a->id < b->id ? -1 : a->id > b->id);
827 }
828 
829 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
830 struct pool xa_pool;
831 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
832 
833 void
834 xa_init_flags(struct xarray *xa, gfp_t flags)
835 {
836 	static int initialized;
837 
838 	if (!initialized) {
839 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_TTY, 0,
840 		    "xapl", NULL);
841 		initialized = 1;
842 	}
843 	SPLAY_INIT(&xa->xa_tree);
844 }
845 
846 void
847 xa_destroy(struct xarray *xa)
848 {
849 	struct xarray_entry *id;
850 
851 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
852 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
853 		pool_put(&xa_pool, id);
854 	}
855 }
856 
857 int
858 xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
859 {
860 	struct xarray_entry *xid;
861 	int flags = (gfp & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
862 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
863 	int begin;
864 
865 	xid = pool_get(&xa_pool, flags);
866 	if (xid == NULL)
867 		return -ENOMEM;
868 
869 	if (limit <= 0)
870 		limit = INT_MAX;
871 
872 	xid->id = begin = start;
873 
874 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
875 		if (++xid->id == limit)
876 			xid->id = start;
877 		if (xid->id == begin) {
878 			pool_put(&xa_pool, xid);
879 			return -EBUSY;
880 		}
881 	}
882 	xid->ptr = entry;
883 	*id = xid->id;
884 	return 0;
885 }
886 
887 void *
888 xa_erase(struct xarray *xa, unsigned long index)
889 {
890 	struct xarray_entry find, *res;
891 	void *ptr = NULL;
892 
893 	find.id = index;
894 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
895 	if (res) {
896 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
897 		ptr = res->ptr;
898 		pool_put(&xa_pool, res);
899 	}
900 	return ptr;
901 }
902 
903 void *
904 xa_load(struct xarray *xa, unsigned long index)
905 {
906 	struct xarray_entry find, *res;
907 
908 	find.id = index;
909 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
910 	if (res == NULL)
911 		return NULL;
912 	return res->ptr;
913 }
914 
915 void *
916 xa_get_next(struct xarray *xa, unsigned long *index)
917 {
918 	struct xarray_entry *res;
919 
920 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
921 		if (res->id >= *index) {
922 			*index = res->id;
923 			return res->ptr;
924 		}
925 	}
926 
927 	return NULL;
928 }
929 
930 int
931 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
932 {
933 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
934 	    M_DRM, gfp_mask);
935 	if (table->sgl == NULL)
936 		return -ENOMEM;
937 	table->nents = table->orig_nents = nents;
938 	return 0;
939 }
940 
941 void
942 sg_free_table(struct sg_table *table)
943 {
944 	free(table->sgl, M_DRM,
945 	    table->orig_nents * sizeof(struct scatterlist));
946 }
947 
948 size_t
949 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
950     const void *buf, size_t buflen)
951 {
952 	panic("%s", __func__);
953 }
954 
955 int
956 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
957 {
958 	void *cmd = NULL;
959 	int cmdlen = 0;
960 	int err, ret = 0;
961 	int op;
962 
963 	iic_acquire_bus(&adap->ic, 0);
964 
965 	while (num > 2) {
966 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
967 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
968 		    msgs->buf, msgs->len, 0);
969 		if (err) {
970 			ret = -err;
971 			goto fail;
972 		}
973 		msgs++;
974 		num--;
975 		ret++;
976 	}
977 
978 	if (num > 1) {
979 		cmd = msgs->buf;
980 		cmdlen = msgs->len;
981 		msgs++;
982 		num--;
983 		ret++;
984 	}
985 
986 	op = (msgs->flags & I2C_M_RD) ?
987 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
988 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
989 	    msgs->buf, msgs->len, 0);
990 	if (err) {
991 		ret = -err;
992 		goto fail;
993 	}
994 	msgs++;
995 	ret++;
996 
997 fail:
998 	iic_release_bus(&adap->ic, 0);
999 
1000 	return ret;
1001 }
1002 
1003 int
1004 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1005 {
1006 	int ret;
1007 
1008 	if (adap->lock_ops)
1009 		adap->lock_ops->lock_bus(adap, 0);
1010 
1011 	if (adap->algo)
1012 		ret = adap->algo->master_xfer(adap, msgs, num);
1013 	else
1014 		ret = i2c_master_xfer(adap, msgs, num);
1015 
1016 	if (adap->lock_ops)
1017 		adap->lock_ops->unlock_bus(adap, 0);
1018 
1019 	return ret;
1020 }
1021 
1022 int
1023 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1024 {
1025 	struct i2c_algo_bit_data *algo = adap->algo_data;
1026 	struct i2c_adapter bb;
1027 
1028 	memset(&bb, 0, sizeof(bb));
1029 	bb.ic = algo->ic;
1030 	bb.retries = adap->retries;
1031 	return i2c_master_xfer(&bb, msgs, num);
1032 }
1033 
1034 uint32_t
1035 i2c_bb_functionality(struct i2c_adapter *adap)
1036 {
1037 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1038 }
1039 
1040 struct i2c_algorithm i2c_bit_algo = {
1041 	.master_xfer = i2c_bb_master_xfer,
1042 	.functionality = i2c_bb_functionality
1043 };
1044 
1045 int
1046 i2c_bit_add_bus(struct i2c_adapter *adap)
1047 {
1048 	adap->algo = &i2c_bit_algo;
1049 	adap->retries = 3;
1050 
1051 	return 0;
1052 }
1053 
1054 #if defined(__amd64__) || defined(__i386__)
1055 
1056 /*
1057  * This is a minimal implementation of the Linux vga_get/vga_put
1058  * interface.  In all likelihood, it will only work for inteldrm(4) as
1059  * it assumes that if there is another active VGA device in the
1060  * system, it is sitting behind a PCI bridge.
1061  */
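
/*
 * Usage sketch (placeholder names): since this implementation ignores
 * the rsrc argument and simply turns off VGA forwarding on any bridge
 * it finds, a caller just brackets its legacy VGA access:
 *
 *	vga_get_uninterruptible(pdev, 0);
 *	... touch legacy VGA resources ...
 *	vga_put(pdev, 0);
 */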
1062 
1063 extern int pci_enumerate_bus(struct pci_softc *,
1064     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1065 
1066 pcitag_t vga_bridge_tag;
1067 int vga_bridge_disabled;
1068 
1069 int
1070 vga_disable_bridge(struct pci_attach_args *pa)
1071 {
1072 	pcireg_t bhlc, bc;
1073 
1074 	if (pa->pa_domain != 0)
1075 		return 0;
1076 
1077 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1078 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1079 		return 0;
1080 
1081 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1082 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1083 		return 0;
1084 	bc &= ~PPB_BC_VGA_ENABLE;
1085 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1086 
1087 	vga_bridge_tag = pa->pa_tag;
1088 	vga_bridge_disabled = 1;
1089 
1090 	return 1;
1091 }
1092 
1093 void
1094 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1095 {
1096 	KASSERT(pdev->pci->sc_bridgetag == NULL);
1097 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1098 }
1099 
1100 void
1101 vga_put(struct pci_dev *pdev, int rsrc)
1102 {
1103 	pcireg_t bc;
1104 
1105 	if (!vga_bridge_disabled)
1106 		return;
1107 
1108 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1109 	bc |= PPB_BC_VGA_ENABLE;
1110 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1111 
1112 	vga_bridge_disabled = 0;
1113 }
1114 
1115 #endif
1116 
1117 /*
1118  * ACPI types and interfaces.
1119  */
1120 
1121 #ifdef __HAVE_ACPI
1122 #include "acpi.h"
1123 #endif
1124 
1125 #if NACPI > 0
1126 
1127 #include <dev/acpi/acpireg.h>
1128 #include <dev/acpi/acpivar.h>
1129 #include <dev/acpi/amltypes.h>
1130 #include <dev/acpi/dsdt.h>
1131 
1132 acpi_status
1133 acpi_get_table(const char *sig, int instance,
1134     struct acpi_table_header **hdr)
1135 {
1136 	struct acpi_softc *sc = acpi_softc;
1137 	struct acpi_q *entry;
1138 
1139 	KASSERT(instance == 1);
1140 
1141 	if (sc == NULL)
1142 		return AE_NOT_FOUND;
1143 
1144 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1145 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1146 			*hdr = entry->q_table;
1147 			return 0;
1148 		}
1149 	}
1150 
1151 	return AE_NOT_FOUND;
1152 }
1153 
1154 acpi_status
1155 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1156 {
1157 	node = aml_searchname(node, name);
1158 	if (node == NULL)
1159 		return AE_NOT_FOUND;
1160 
1161 	*rnode = node;
1162 	return 0;
1163 }
1164 
1165 acpi_status
1166 acpi_get_name(acpi_handle node, int type, struct acpi_buffer *buffer)
1167 {
1168 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1169 	KASSERT(type == ACPI_FULL_PATHNAME);
1170 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1171 	return 0;
1172 }
1173 
1174 acpi_status
1175 acpi_evaluate_object(acpi_handle node, const char *name,
1176     struct acpi_object_list *params, struct acpi_buffer *result)
1177 {
1178 	struct aml_value args[4], res;
1179 	union acpi_object *obj;
1180 	uint8_t *data;
1181 	int i;
1182 
1183 	KASSERT(params->count <= nitems(args));
1184 
1185 	for (i = 0; i < params->count; i++) {
1186 		args[i].type = params->pointer[i].type;
1187 		switch (args[i].type) {
1188 		case AML_OBJTYPE_INTEGER:
1189 			args[i].v_integer = params->pointer[i].integer.value;
1190 			break;
1191 		case AML_OBJTYPE_BUFFER:
1192 			args[i].length = params->pointer[i].buffer.length;
1193 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1194 			break;
1195 		default:
1196 			printf("%s: arg type 0x%02x", __func__, args[i].type);
1197 			return AE_BAD_PARAMETER;
1198 		}
1199 	}
1200 
1201 	if (name) {
1202 		node = aml_searchname(node, name);
1203 		if (node == NULL)
1204 			return AE_NOT_FOUND;
1205 	}
1206 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1207 		aml_freevalue(&res);
1208 		return AE_ERROR;
1209 	}
1210 
1211 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1212 
1213 	result->length = sizeof(union acpi_object);
1214 	switch (res.type) {
1215 	case AML_OBJTYPE_BUFFER:
1216 		result->length += res.length;
1217 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1218 		obj = (union acpi_object *)result->pointer;
1219 		data = (uint8_t *)(obj + 1);
1220 		obj->type = res.type;
1221 		obj->buffer.length = res.length;
1222 		obj->buffer.pointer = data;
1223 		memcpy(data, res.v_buffer, res.length);
1224 		break;
1225 	default:
1226 		printf("%s: return type 0x%02x", __func__, res.type);
1227 		aml_freevalue(&res);
1228 		return AE_ERROR;
1229 	}
1230 
1231 	aml_freevalue(&res);
1232 	return 0;
1233 }
1234 
1235 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1236 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1237 
1238 int
1239 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1240 {
1241 	struct acpi_bus_event event;
1242 	struct notifier_block *nb;
1243 
1244 	event.device_class = ACPI_VIDEO_CLASS;
1245 	event.type = notify;
1246 
1247 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1248 		nb->notifier_call(nb, 0, &event);
1249 	return 0;
1250 }
1251 
1252 int
1253 register_acpi_notifier(struct notifier_block *nb)
1254 {
1255 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1256 	return 0;
1257 }
1258 
1259 int
1260 unregister_acpi_notifier(struct notifier_block *nb)
1261 {
1262 	struct notifier_block *tmp;
1263 
1264 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1265 		if (tmp == nb) {
1266 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1267 			    notifier_block, link);
1268 			return 0;
1269 		}
1270 	}
1271 
1272 	return -ENOENT;
1273 }
1274 
1275 const char *
1276 acpi_format_exception(acpi_status status)
1277 {
1278 	switch (status) {
1279 	case AE_NOT_FOUND:
1280 		return "not found";
1281 	case AE_BAD_PARAMETER:
1282 		return "bad parameter";
1283 	default:
1284 		return "unknown";
1285 	}
1286 }
1287 
1288 #endif
1289 
1290 void
1291 backlight_do_update_status(void *arg)
1292 {
1293 	backlight_update_status(arg);
1294 }
1295 
1296 struct backlight_device *
1297 backlight_device_register(const char *name, void *kdev, void *data,
1298     const struct backlight_ops *ops, struct backlight_properties *props)
1299 {
1300 	struct backlight_device *bd;
1301 
1302 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1303 	bd->ops = ops;
1304 	bd->props = *props;
1305 	bd->data = data;
1306 
1307 	task_set(&bd->task, backlight_do_update_status, bd);
1308 
1309 	return bd;
1310 }
1311 
1312 void
1313 backlight_device_unregister(struct backlight_device *bd)
1314 {
1315 	free(bd, M_DRM, sizeof(*bd));
1316 }
1317 
1318 void
1319 backlight_schedule_update_status(struct backlight_device *bd)
1320 {
1321 	task_add(systq, &bd->task);
1322 }
1323 
1324 inline int
1325 backlight_enable(struct backlight_device *bd)
1326 {
1327 	if (bd == NULL)
1328 		return 0;
1329 
1330 	bd->props.power = FB_BLANK_UNBLANK;
1331 
1332 	return bd->ops->update_status(bd);
1333 }
1334 
1335 inline int
1336 backlight_disable(struct backlight_device *bd)
1337 {
1338 	if (bd == NULL)
1339 		return 0;
1340 
1341 	bd->props.power = FB_BLANK_POWERDOWN;
1342 
1343 	return bd->ops->update_status(bd);
1344 }
1345 
1346 void
1347 drm_sysfs_hotplug_event(struct drm_device *dev)
1348 {
1349 	KNOTE(&dev->note, NOTE_CHANGE);
1350 }
1351 
1352 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1353 
1354 uint64_t
1355 dma_fence_context_alloc(unsigned int num)
1356 {
1357 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1358 }
1359 
1360 struct default_wait_cb {
1361 	struct dma_fence_cb base;
1362 	struct proc *proc;
1363 };
1364 
1365 static void
1366 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1367 {
1368 	struct default_wait_cb *wait =
1369 	    container_of(cb, struct default_wait_cb, base);
1370 	wake_up_process(wait->proc);
1371 }
1372 
1373 long
1374 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1375 {
1376 	long ret = timeout ? timeout : 1;
1377 	int err;
1378 	struct default_wait_cb cb;
1379 	bool was_set;
1380 
1381 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1382 		return ret;
1383 
1384 	mtx_enter(fence->lock);
1385 
1386 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1387 	    &fence->flags);
1388 
1389 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1390 		goto out;
1391 
1392 	if (!was_set && fence->ops->enable_signaling) {
1393 		if (!fence->ops->enable_signaling(fence)) {
1394 			dma_fence_signal_locked(fence);
1395 			goto out;
1396 		}
1397 	}
1398 
1399 	if (timeout == 0) {
1400 		ret = 0;
1401 		goto out;
1402 	}
1403 
1404 	cb.base.func = dma_fence_default_wait_cb;
1405 	cb.proc = curproc;
1406 	list_add(&cb.base.node, &fence->cb_list);
1407 
1408 	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1409 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0, "dmafence",
1410 		    timeout);
1411 		if (err == EINTR || err == ERESTART) {
1412 			ret = -ERESTARTSYS;
1413 			break;
1414 		} else if (err == EWOULDBLOCK) {
1415 			ret = 0;
1416 			break;
1417 		}
1418 	}
1419 
1420 	if (!list_empty(&cb.base.node))
1421 		list_del(&cb.base.node);
1422 out:
1423 	mtx_leave(fence->lock);
1424 
1425 	return ret;
1426 }
1427 
1428 static bool
1429 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1430     uint32_t *idx)
1431 {
1432 	int i;
1433 
1434 	for (i = 0; i < count; ++i) {
1435 		struct dma_fence *fence = fences[i];
1436 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1437 			if (idx)
1438 				*idx = i;
1439 			return true;
1440 		}
1441 	}
1442 	return false;
1443 }
1444 
1445 long
1446 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
1447     bool intr, long timeout, uint32_t *idx)
1448 {
1449 	struct default_wait_cb *cb;
1450 	int i, err;
1451 	int ret = timeout;
1452 
1453 	if (timeout == 0) {
1454 		for (i = 0; i < count; i++) {
1455 			if (dma_fence_is_signaled(fences[i])) {
1456 				if (idx)
1457 					*idx = i;
1458 				return 1;
1459 			}
1460 		}
1461 		return 0;
1462 	}
1463 
1464 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1465 	if (cb == NULL)
1466 		return -ENOMEM;
1467 
1468 	for (i = 0; i < count; i++) {
1469 		struct dma_fence *fence = fences[i];
1470 		cb[i].proc = curproc;
1471 		if (dma_fence_add_callback(fence, &cb[i].base,
1472 		    dma_fence_default_wait_cb)) {
1473 			if (idx)
1474 				*idx = i;
1475 			goto cb_cleanup;
1476 		}
1477 	}
1478 
1479 	while (ret > 0) {
1480 		if (dma_fence_test_signaled_any(fences, count, idx))
1481 			break;
1482 
1483 		err = tsleep(curproc, intr ? PCATCH : 0,
1484 		    "dfwat", timeout);
1485 		if (err == EINTR || err == ERESTART) {
1486 			ret = -ERESTARTSYS;
1487 			break;
1488 		} else if (err == EWOULDBLOCK) {
1489 			ret = 0;
1490 			break;
1491 		}
1492 	}
1493 
1494 cb_cleanup:
1495 	while (i-- > 0)
1496 		dma_fence_remove_callback(fences[i], &cb[i].base);
1497 	free(cb, M_DRM, count * sizeof(*cb));
1498 	return ret;
1499 }
1500 
1501 static struct dma_fence dma_fence_stub;
1502 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
1503 
1504 static const char *
1505 dma_fence_stub_get_name(struct dma_fence *fence)
1506 {
1507 	return "stub";
1508 }
1509 
1510 static const struct dma_fence_ops dma_fence_stub_ops = {
1511 	.get_driver_name = dma_fence_stub_get_name,
1512 	.get_timeline_name = dma_fence_stub_get_name,
1513 };
1514 
1515 struct dma_fence *
1516 dma_fence_get_stub(void)
1517 {
1518 	mtx_enter(&dma_fence_stub_mtx);
1519 	if (dma_fence_stub.ops == NULL) {
1520 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
1521 		    &dma_fence_stub_mtx, 0, 0);
1522 		dma_fence_signal_locked(&dma_fence_stub);
1523 	}
1524 	mtx_leave(&dma_fence_stub_mtx);
1525 
1526 	return dma_fence_get(&dma_fence_stub);
1527 }
1528 
1529 static const char *
1530 dma_fence_array_get_driver_name(struct dma_fence *fence)
1531 {
1532 	return "dma_fence_array";
1533 }
1534 
1535 static const char *
1536 dma_fence_array_get_timeline_name(struct dma_fence *fence)
1537 {
1538 	return "unbound";
1539 }
1540 
1541 static void
1542 irq_dma_fence_array_work(struct irq_work *wrk)
1543 {
1544 	struct dma_fence_array *dfa = container_of(wrk, typeof(*dfa), work);
1545 
1546 	dma_fence_signal(&dfa->base);
1547 	dma_fence_put(&dfa->base);
1548 }
1549 
1550 static void
1551 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
1552 {
1553 	struct dma_fence_array_cb *array_cb =
1554 	    container_of(cb, struct dma_fence_array_cb, cb);
1555 	struct dma_fence_array *dfa = array_cb->array;
1556 
1557 	if (atomic_dec_and_test(&dfa->num_pending))
1558 		irq_work_queue(&dfa->work);
1559 	else
1560 		dma_fence_put(&dfa->base);
1561 }
1562 
1563 static bool
1564 dma_fence_array_enable_signaling(struct dma_fence *fence)
1565 {
1566 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1567 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
1568 	int i;
1569 
1570 	for (i = 0; i < dfa->num_fences; ++i) {
1571 		cb[i].array = dfa;
1572 		dma_fence_get(&dfa->base);
1573 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
1574 		    dma_fence_array_cb_func)) {
1575 			dma_fence_put(&dfa->base);
1576 			if (atomic_dec_and_test(&dfa->num_pending))
1577 				return false;
1578 		}
1579 	}
1580 
1581 	return true;
1582 }
1583 
1584 static bool dma_fence_array_signaled(struct dma_fence *fence)
1585 {
1586 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1587 
1588 	return atomic_read(&dfa->num_pending) <= 0;
1589 }
1590 
1591 static void dma_fence_array_release(struct dma_fence *fence)
1592 {
1593 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1594 	int i;
1595 
1596 	for (i = 0; i < dfa->num_fences; ++i)
1597 		dma_fence_put(dfa->fences[i]);
1598 
1599 	free(dfa->fences, M_DRM, 0);
1600 	dma_fence_free(fence);
1601 }
1602 
1603 struct dma_fence_array *
1604 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
1605     unsigned seqno, bool signal_on_any)
1606 {
1607 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
1608 	    (num_fences * sizeof(struct dma_fence_array_cb)),
1609 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1610 	if (dfa == NULL)
1611 		return NULL;
1612 
1613 	mtx_init(&dfa->lock, IPL_TTY);
1614 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
1615 	    context, seqno);
1616 	init_irq_work(&dfa->work, irq_dma_fence_array_work);
1617 
1618 	dfa->num_fences = num_fences;
1619 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
1620 	dfa->fences = fences;
1621 
1622 	return dfa;
1623 }
1624 
1625 const struct dma_fence_ops dma_fence_array_ops = {
1626 	.get_driver_name = dma_fence_array_get_driver_name,
1627 	.get_timeline_name = dma_fence_array_get_timeline_name,
1628 	.enable_signaling = dma_fence_array_enable_signaling,
1629 	.signaled = dma_fence_array_signaled,
1630 	.release = dma_fence_array_release,
1631 };
1632 
1633 int
1634 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
1635 {
1636 	return (ENXIO);
1637 }
1638 
1639 int
1640 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
1641 {
1642 	return (ENXIO);
1643 }
1644 
1645 int
1646 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
1647 {
1648 	return (ENOTTY);
1649 }
1650 
1651 int
1652 dmabuf_poll(struct file *fp, int events, struct proc *p)
1653 {
1654 	return (0);
1655 }
1656 
1657 int
1658 dmabuf_kqfilter(struct file *fp, struct knote *kn)
1659 {
1660 	return (EINVAL);
1661 }
1662 
1663 int
1664 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
1665 {
1666 	struct dma_buf *dmabuf = fp->f_data;
1667 
1668 	memset(st, 0, sizeof(*st));
1669 	st->st_size = dmabuf->size;
1670 	st->st_mode = S_IFIFO;	/* XXX */
1671 	return (0);
1672 }
1673 
1674 int
1675 dmabuf_close(struct file *fp, struct proc *p)
1676 {
1677 	struct dma_buf *dmabuf = fp->f_data;
1678 
1679 	fp->f_data = NULL;
1680 	KERNEL_LOCK();
1681 	dmabuf->ops->release(dmabuf);
1682 	KERNEL_UNLOCK();
1683 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
1684 	return (0);
1685 }
1686 
1687 int
1688 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
1689 {
1690 	struct dma_buf *dmabuf = fp->f_data;
1691 	off_t newoff;
1692 
1693 	if (*offset != 0)
1694 		return (EINVAL);
1695 
1696 	switch (whence) {
1697 	case SEEK_SET:
1698 		newoff = 0;
1699 		break;
1700 	case SEEK_END:
1701 		newoff = dmabuf->size;
1702 		break;
1703 	default:
1704 		return (EINVAL);
1705 	}
1706 	mtx_enter(&fp->f_mtx);
1707 	fp->f_offset = newoff;
1708 	mtx_leave(&fp->f_mtx);
1709 	*offset = newoff;
1710 	return (0);
1711 }
1712 
1713 const struct fileops dmabufops = {
1714 	.fo_read	= dmabuf_read,
1715 	.fo_write	= dmabuf_write,
1716 	.fo_ioctl	= dmabuf_ioctl,
1717 	.fo_poll	= dmabuf_poll,
1718 	.fo_kqfilter	= dmabuf_kqfilter,
1719 	.fo_stat	= dmabuf_stat,
1720 	.fo_close	= dmabuf_close,
1721 	.fo_seek	= dmabuf_seek,
1722 };
1723 
1724 struct dma_buf *
1725 dma_buf_export(const struct dma_buf_export_info *info)
1726 {
1727 	struct proc *p = curproc;
1728 	struct dma_buf *dmabuf;
1729 	struct file *fp;
1730 
1731 	fp = fnew(p);
1732 	if (fp == NULL)
1733 		return ERR_PTR(-ENFILE);
1734 	fp->f_type = DTYPE_DMABUF;
1735 	fp->f_ops = &dmabufops;
1736 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
1737 	dmabuf->priv = info->priv;
1738 	dmabuf->ops = info->ops;
1739 	dmabuf->size = info->size;
1740 	dmabuf->file = fp;
1741 	fp->f_data = dmabuf;
1742 	INIT_LIST_HEAD(&dmabuf->attachments);
1743 	return dmabuf;
1744 }
1745 
1746 struct dma_buf *
1747 dma_buf_get(int fd)
1748 {
1749 	struct proc *p = curproc;
1750 	struct filedesc *fdp = p->p_fd;
1751 	struct file *fp;
1752 
1753 	if ((fp = fd_getfile(fdp, fd)) == NULL)
1754 		return ERR_PTR(-EBADF);
1755 
1756 	if (fp->f_type != DTYPE_DMABUF) {
1757 		FRELE(fp, p);
1758 		return ERR_PTR(-EINVAL);
1759 	}
1760 
1761 	return fp->f_data;
1762 }
1763 
1764 void
1765 dma_buf_put(struct dma_buf *dmabuf)
1766 {
1767 	KASSERT(dmabuf);
1768 	KASSERT(dmabuf->file);
1769 
1770 	FRELE(dmabuf->file, curproc);
1771 }
1772 
1773 int
1774 dma_buf_fd(struct dma_buf *dmabuf, int flags)
1775 {
1776 	struct proc *p = curproc;
1777 	struct filedesc *fdp = p->p_fd;
1778 	struct file *fp = dmabuf->file;
1779 	int fd, cloexec, error;
1780 
1781 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
1782 
1783 	fdplock(fdp);
1784 restart:
1785 	if ((error = fdalloc(p, 0, &fd)) != 0) {
1786 		if (error == ENOSPC) {
1787 			fdexpand(p);
1788 			goto restart;
1789 		}
1790 		fdpunlock(fdp);
1791 		return -error;
1792 	}
1793 
1794 	fdinsert(fdp, fd, cloexec, fp);
1795 	fdpunlock(fdp);
1796 
1797 	return fd;
1798 }
1799 
1800 void
1801 get_dma_buf(struct dma_buf *dmabuf)
1802 {
1803 	FREF(dmabuf->file);
1804 }
1805 
1806 enum pci_bus_speed
1807 pcie_get_speed_cap(struct pci_dev *pdev)
1808 {
1809 	pci_chipset_tag_t	pc = pdev->pc;
1810 	pcitag_t		tag = pdev->tag;
1811 	int			pos;
1812 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
1813 	pcireg_t		id;
1814 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
1815 	int			bus, device, function;
1816 
1817 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1818 	    &pos, NULL))
1819 		return PCI_SPEED_UNKNOWN;
1820 
1821 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1822 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1823 
1824 	/* we've been informed that VIA and ServerWorks don't make the cut */
1825 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
1826 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
1827 		return PCI_SPEED_UNKNOWN;
1828 
1829 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1830 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
1831 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
1832 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
1833 
1834 	lnkcap &= 0x0f;
1835 	lnkcap2 &= 0xfe;
1836 
1837 	if (lnkcap2) { /* PCIE GEN 3.0 */
1838 		if (lnkcap2 & 0x02)
1839 			cap = PCIE_SPEED_2_5GT;
1840 		if (lnkcap2 & 0x04)
1841 			cap = PCIE_SPEED_5_0GT;
1842 		if (lnkcap2 & 0x08)
1843 			cap = PCIE_SPEED_8_0GT;
1844 		if (lnkcap2 & 0x10)
1845 			cap = PCIE_SPEED_16_0GT;
1846 	} else {
1847 		if (lnkcap & 0x01)
1848 			cap = PCIE_SPEED_2_5GT;
1849 		if (lnkcap & 0x02)
1850 			cap = PCIE_SPEED_5_0GT;
1851 	}
1852 
1853 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
1854 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
1855 	    lnkcap2);
1856 	return cap;
1857 }
1858 
1859 enum pcie_link_width
1860 pcie_get_width_cap(struct pci_dev *pdev)
1861 {
1862 	pci_chipset_tag_t	pc = pdev->pc;
1863 	pcitag_t		tag = pdev->tag;
1864 	int			pos;
1865 	pcireg_t		lnkcap = 0;
1866 	pcireg_t		id;
1867 	int			bus, device, function;
1868 
1869 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1870 	    &pos, NULL))
1871 		return PCIE_LNK_WIDTH_UNKNOWN;
1872 
1873 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1874 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1875 
1876 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1877 
1878 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
1879 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
1880 
1881 	if (lnkcap)
1882 		return (lnkcap & 0x3f0) >> 4;
1883 	return PCIE_LNK_WIDTH_UNKNOWN;
1884 }
1885 
1886 int
1887 default_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1888     int sync, void *key)
1889 {
1890 	wakeup(wqe);
1891 	if (wqe->proc)
1892 		wake_up_process(wqe->proc);
1893 	return 0;
1894 }
1895 
1896 int
1897 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1898     int sync, void *key)
1899 {
1900 	default_wake_function(wqe, mode, sync, key);
1901 	list_del_init(&wqe->entry);
1902 	return 0;
1903 }
1904 
1905 static wait_queue_head_t bit_waitq;
1906 wait_queue_head_t var_waitq;
1907 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
1908 
1909 int
1910 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1911 {
1912 	int err;
1913 
1914 	if (!test_bit(bit, word))
1915 		return 0;
1916 
1917 	mtx_enter(&wait_bit_mtx);
1918 	while (test_bit(bit, word)) {
1919 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
1920 		    INFSLP);
1921 		if (err) {
1922 			mtx_leave(&wait_bit_mtx);
1923 			return 1;
1924 		}
1925 	}
1926 	mtx_leave(&wait_bit_mtx);
1927 	return 0;
1928 }
1929 
1930 int
1931 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
1932 {
1933 	int err;
1934 
1935 	if (!test_bit(bit, word))
1936 		return 0;
1937 
1938 	mtx_enter(&wait_bit_mtx);
1939 	while (test_bit(bit, word)) {
1940 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
1941 		if (err) {
1942 			mtx_leave(&wait_bit_mtx);
1943 			return 1;
1944 		}
1945 	}
1946 	mtx_leave(&wait_bit_mtx);
1947 	return 0;
1948 }
1949 
1950 void
1951 wake_up_bit(void *word, int bit)
1952 {
1953 	mtx_enter(&wait_bit_mtx);
1954 	wakeup(word);
1955 	mtx_leave(&wait_bit_mtx);
1956 }
1957 
1958 void
1959 clear_and_wake_up_bit(int bit, void *word)
1960 {
1961 	clear_bit(bit, word);
1962 	wake_up_bit(word, bit);
1963 }
1964 
1965 wait_queue_head_t *
1966 bit_waitqueue(void *word, int bit)
1967 {
1968 	/* XXX hash table of wait queues? */
1969 	return &bit_waitq;
1970 }
1971 
1972 struct workqueue_struct *system_wq;
1973 struct workqueue_struct *system_highpri_wq;
1974 struct workqueue_struct *system_unbound_wq;
1975 struct workqueue_struct *system_long_wq;
1976 struct taskq *taskletq;
1977 
1978 void
1979 drm_linux_init(void)
1980 {
1981 	if (system_wq == NULL) {
1982 		system_wq = (struct workqueue_struct *)
1983 		    taskq_create("drmwq", 4, IPL_HIGH, 0);
1984 	}
1985 	if (system_highpri_wq == NULL) {
1986 		system_highpri_wq = (struct workqueue_struct *)
1987 		    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
1988 	}
1989 	if (system_unbound_wq == NULL) {
1990 		system_unbound_wq = (struct workqueue_struct *)
1991 		    taskq_create("drmubwq", 4, IPL_HIGH, 0);
1992 	}
1993 	if (system_long_wq == NULL) {
1994 		system_long_wq = (struct workqueue_struct *)
1995 		    taskq_create("drmlwq", 4, IPL_HIGH, 0);
1996 	}
1997 
1998 	if (taskletq == NULL)
1999 		taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
2000 
2001 	init_waitqueue_head(&bit_waitq);
2002 	init_waitqueue_head(&var_waitq);
2003 }
2004 
2005 #define PCIE_ECAP_RESIZE_BAR	0x15
2006 #define RBCAP0			0x04
2007 #define RBCTRL0			0x08
2008 #define RBCTRL_BARINDEX_MASK	0x07
2009 #define RBCTRL_BARSIZE_MASK	0x1f00
2010 #define RBCTRL_BARSIZE_SHIFT	8
2011 
2012 /* size in MB is 1 << nsize, e.g. nsize = 8 requests a 256MB BAR (sketch below) */
2013 int
2014 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
2015 {
2016 	pcireg_t	reg;
2017 	uint32_t	offset, capid;
2018 
2019 	KASSERT(bar == 0);
2020 
2021 	offset = PCI_PCIE_ECAP;
2022 
2023 	/* search PCI Express Extended Capabilities */
2024 	do {
2025 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
2026 		capid = PCI_PCIE_ECAP_ID(reg);
2027 		if (capid == PCIE_ECAP_RESIZE_BAR)
2028 			break;
2029 		offset = PCI_PCIE_ECAP_NEXT(reg);
2030 	} while (capid != 0);
2031 
2032 	if (capid == 0) {
2033 		printf("%s: could not find resize bar cap!\n", __func__);
2034 		return -ENOTSUP;
2035 	}
2036 
2037 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
2038 
2039 	if ((reg & (1 << (nsize + 4))) == 0) {
2040 		printf("%s size not supported\n", __func__);
2041 		return -ENOTSUP;
2042 	}
2043 
2044 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2045 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2046 		printf("%s BAR index not 0\n", __func__);
2047 		return -EINVAL;
2048 	}
2049 
2050 	reg &= ~RBCTRL_BARSIZE_MASK;
2051 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2052 
2053 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2054 
2055 	return 0;
2056 }
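
/*
 * Illustrative sketch, disabled from the build: resizing BAR0 of a
 * hypothetical pdev to 256MB, i.e. nsize = 8 since the size in MB is
 * 1 << nsize.  resize_bar0_example() is a placeholder name; the helper
 * above returns 0 on success or a negative errno.
 */
#if 0
static int
resize_bar0_example(struct pci_dev *pdev)
{
	/* 1 << 8 = 256MB; only BAR 0 is supported here */
	return pci_resize_resource(pdev, 0, 8);
}
#endif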
2057 
2058 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2059 
2060 int
2061 register_shrinker(struct shrinker *shrinker)
2062 {
2063 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2064 	return 0;
2065 }
2066 
2067 void
2068 unregister_shrinker(struct shrinker *shrinker)
2069 {
2070 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2071 }
2072 
2073 void
2074 drmbackoff(long npages)
2075 {
2076 	struct shrink_control sc;
2077 	struct shrinker *shrinker;
2078 	u_long ret;
2079 
2080 	shrinker = TAILQ_FIRST(&shrinkers);
2081 	while (shrinker && npages > 0) {
2082 		sc.nr_to_scan = npages;
2083 		ret = shrinker->scan_objects(shrinker, &sc);
2084 		npages -= ret;
2085 		shrinker = TAILQ_NEXT(shrinker, next);
2086 	}
2087 }
2088 
2089 void *
2090 bitmap_zalloc(u_int n, gfp_t flags)
2091 {
2092 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2093 }
2094 
2095 void
2096 bitmap_free(void *p)
2097 {
2098 	kfree(p);
2099 }
2100 
2101 int
2102 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2103 {
2104 	if (atomic_add_unless(v, -1, 1))
2105 		return 0;
2106 
2107 	rw_enter_write(lock);
2108 	if (atomic_dec_return(v) == 0)
2109 		return 1;
2110 	rw_exit_write(lock);
2111 	return 0;
2112 }
2113 
2114 int
2115 printk(const char *fmt, ...)
2116 {
2117 	int ret, level;
2118 	va_list ap;
2119 
2120 	if (fmt != NULL && *fmt == '\001') {
2121 		level = fmt[1];
2122 #ifndef DRMDEBUG
2123 		if (level >= KERN_INFO[1] && level <= '9')
2124 			return 0;
2125 #endif
2126 		fmt += 2;
2127 	}
2128 
2129 	va_start(ap, fmt);
2130 	ret = vprintf(fmt, ap);
2131 	va_end(ap);
2132 
2133 	return ret;
2134 }
2135