1 /*	$OpenBSD: drm_linux.c,v 1.115 2024/07/13 15:38:21 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30 
31 #include <dev/pci/ppbreg.h>
32 
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/dma-fence-chain.h>
39 #include <linux/interrupt.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/scatterlist.h>
43 #include <linux/i2c.h>
44 #include <linux/pci.h>
45 #include <linux/notifier.h>
46 #include <linux/backlight.h>
47 #include <linux/shrinker.h>
48 #include <linux/fb.h>
49 #include <linux/xarray.h>
50 #include <linux/interval_tree.h>
51 #include <linux/kthread.h>
52 #include <linux/processor.h>
53 #include <linux/sync_file.h>
54 #include <linux/suspend.h>
55 
56 #include <drm/drm_device.h>
57 #include <drm/drm_connector.h>
58 #include <drm/drm_print.h>
59 
60 #if defined(__amd64__) || defined(__i386__)
61 #include "bios.h"
62 #endif
63 
64 /* allowed to sleep */
65 void
66 tasklet_unlock_wait(struct tasklet_struct *ts)
67 {
68 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
69 		cpu_relax();
70 }
71 
72 /* must not sleep */
73 void
74 tasklet_unlock_spin_wait(struct tasklet_struct *ts)
75 {
76 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
77 		cpu_relax();
78 }
79 
80 void
81 tasklet_run(void *arg)
82 {
83 	struct tasklet_struct *ts = arg;
84 
85 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
86 	if (tasklet_trylock(ts)) {
87 		if (!atomic_read(&ts->count)) {
88 			if (ts->use_callback)
89 				ts->callback(ts);
90 			else
91 				ts->func(ts->data);
92 		}
93 		tasklet_unlock(ts);
94 	}
95 }
96 
97 /* 32 bit powerpc lacks 64 bit atomics */
98 #if defined(__powerpc__) && !defined(__powerpc64__)
99 struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
100 #endif
101 
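/*
 * Minimal emulation of the Linux task state machinery: set_current_state()
 * puts curproc on the sleep queue via sleep_setup() and the sleep is later
 * finished by schedule_timeout(); __set_current_state(TASK_RUNNING) takes
 * the thread back off the sleep queue without sleeping.
 */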
102 void
103 set_current_state(int state)
104 {
105 	int prio = state;
106 
107 	KASSERT(state != TASK_RUNNING);
108 	/* check if already on the sleep list */
109 	if (curproc->p_wchan != NULL)
110 		return;
111 	sleep_setup(curproc, prio, "schto");
112 }
113 
114 void
115 __set_current_state(int state)
116 {
117 	struct proc *p = curproc;
118 
119 	KASSERT(state == TASK_RUNNING);
120 	SCHED_LOCK();
121 	unsleep(p);
122 	p->p_stat = SONPROC;
123 	atomic_clearbits_int(&p->p_flag, P_WSLEEP);
124 	SCHED_UNLOCK();
125 }
126 
127 void
128 schedule(void)
129 {
130 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
131 }
132 
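/*
 * Finish the sleep prepared by set_current_state().  A timeout of
 * MAX_SCHEDULE_TIMEOUT means sleep until woken; otherwise the number of
 * jiffies left (or 0) is returned, as in Linux.
 */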
133 long
134 schedule_timeout(long timeout)
135 {
136 	unsigned long deadline;
137 	int timo = 0;
138 
139 	KASSERT(!cold);
140 
141 	if (timeout != MAX_SCHEDULE_TIMEOUT) {
142 		timo = timeout;
143 		deadline = jiffies + timeout;
144 	}
145 	sleep_finish(timo, timeout > 0);
146 	if (timeout != MAX_SCHEDULE_TIMEOUT)
147 		timeout = deadline - jiffies;
148 
149 	return timeout > 0 ? timeout : 0;
150 }
151 
152 long
153 schedule_timeout_uninterruptible(long timeout)
154 {
155 	tsleep(curproc, PWAIT, "schtou", timeout);
156 	return 0;
157 }
158 
159 int
160 wake_up_process(struct proc *p)
161 {
162 	int rv;
163 
164 	SCHED_LOCK();
165 	rv = wakeup_proc(p, 0);
166 	SCHED_UNLOCK();
167 	return rv;
168 }
169 
170 int
171 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
172     int sync, void *key)
173 {
174 	if (wqe->private)
175 		wake_up_process(wqe->private);
176 	list_del_init(&wqe->entry);
177 	return 0;
178 }
179 
180 void
181 prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
182 {
183 	mtx_enter(&wqh->lock);
184 	if (list_empty(&wqe->entry))
185 		__add_wait_queue(wqh, wqe);
186 	mtx_leave(&wqh->lock);
187 
188 	set_current_state(state);
189 }
190 
191 void
192 finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
193 {
194 	__set_current_state(TASK_RUNNING);
195 
196 	mtx_enter(&wqh->lock);
197 	if (!list_empty(&wqe->entry))
198 		list_del_init(&wqe->entry);
199 	mtx_leave(&wqh->lock);
200 }
201 
202 void
203 flush_workqueue(struct workqueue_struct *wq)
204 {
205 	if (cold)
206 		return;
207 
208 	if (wq)
209 		taskq_barrier((struct taskq *)wq);
210 }
211 
212 bool
213 flush_work(struct work_struct *work)
214 {
215 	if (cold)
216 		return false;
217 
218 	if (work->tq)
219 		taskq_barrier(work->tq);
220 	return false;
221 }
222 
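/*
 * Wait for a delayed work item: let a pending timeout fire first, then run
 * a taskq barrier so an already queued task has finished.  Returns true if
 * we had to wait for the timeout.
 */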
223 bool
224 flush_delayed_work(struct delayed_work *dwork)
225 {
226 	bool ret = false;
227 
228 	if (cold)
229 		return false;
230 
231 	while (timeout_pending(&dwork->to)) {
232 		tsleep(dwork, PWAIT, "fldwto", 1);
233 		ret = true;
234 	}
235 
236 	if (dwork->tq)
237 		taskq_barrier(dwork->tq);
238 	return ret;
239 }
240 
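/*
 * Book-keeping for Linux-style kthreads: every thread created through
 * kthread_run() gets a struct kthread on kthread_list so that the
 * park/stop primitives below can find it again from its struct proc.
 */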
241 struct kthread {
242 	int (*func)(void *);
243 	void *data;
244 	struct proc *proc;
245 	volatile u_int flags;
246 #define KTHREAD_SHOULDSTOP	0x0000001
247 #define KTHREAD_STOPPED		0x0000002
248 #define KTHREAD_SHOULDPARK	0x0000004
249 #define KTHREAD_PARKED		0x0000008
250 	LIST_ENTRY(kthread) next;
251 };
252 
253 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
254 
255 void
256 kthread_func(void *arg)
257 {
258 	struct kthread *thread = arg;
259 	int ret;
260 
261 	ret = thread->func(thread->data);
262 	thread->flags |= KTHREAD_STOPPED;
263 	wakeup(thread);
264 	kthread_exit(ret);
265 }
266 
267 struct proc *
268 kthread_run(int (*func)(void *), void *data, const char *name)
269 {
270 	struct kthread *thread;
271 
272 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
273 	thread->func = func;
274 	thread->data = data;
275 	thread->flags = 0;
276 
277 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
278 		free(thread, M_DRM, sizeof(*thread));
279 		return ERR_PTR(-ENOMEM);
280 	}
281 
282 	LIST_INSERT_HEAD(&kthread_list, thread, next);
283 	return thread->proc;
284 }
285 
286 struct kthread_worker *
287 kthread_create_worker(unsigned int flags, const char *fmt, ...)
288 {
289 	char name[MAXCOMLEN+1];
290 	va_list ap;
291 
292 	struct kthread_worker *w = malloc(sizeof(*w), M_DRM, M_WAITOK);
293 	va_start(ap, fmt);
294 	vsnprintf(name, sizeof(name), fmt, ap);
295 	va_end(ap);
296 	w->tq = taskq_create(name, 1, IPL_HIGH, 0);
297 
298 	return w;
299 }
300 
301 void
302 kthread_destroy_worker(struct kthread_worker *worker)
303 {
304 	taskq_destroy(worker->tq);
305 	free(worker, M_DRM, sizeof(*worker));
306 }
308 
309 void
310 kthread_init_work(struct kthread_work *work, void (*func)(struct kthread_work *))
311 {
312 	work->tq = NULL;
313 	task_set(&work->task, (void (*)(void *))func, work);
314 }
315 
316 bool
317 kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
318 {
319 	work->tq = worker->tq;
320 	return task_add(work->tq, &work->task);
321 }
322 
323 bool
324 kthread_cancel_work_sync(struct kthread_work *work)
325 {
326 	return task_del(work->tq, &work->task);
327 }
328 
329 void
330 kthread_flush_work(struct kthread_work *work)
331 {
332 	if (cold)
333 		return;
334 
335 	if (work->tq)
336 		taskq_barrier(work->tq);
337 }
338 
339 void
340 kthread_flush_worker(struct kthread_worker *worker)
341 {
342 	if (cold)
343 		return;
344 
345 	if (worker->tq)
346 		taskq_barrier(worker->tq);
347 }
348 
349 struct kthread *
350 kthread_lookup(struct proc *p)
351 {
352 	struct kthread *thread;
353 
354 	LIST_FOREACH(thread, &kthread_list, next) {
355 		if (thread->proc == p)
356 			break;
357 	}
358 	KASSERT(thread);
359 
360 	return thread;
361 }
362 
363 int
364 kthread_should_park(void)
365 {
366 	struct kthread *thread = kthread_lookup(curproc);
367 	return (thread->flags & KTHREAD_SHOULDPARK);
368 }
369 
370 void
371 kthread_parkme(void)
372 {
373 	struct kthread *thread = kthread_lookup(curproc);
374 
375 	while (thread->flags & KTHREAD_SHOULDPARK) {
376 		thread->flags |= KTHREAD_PARKED;
377 		wakeup(thread);
378 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
379 		thread->flags &= ~KTHREAD_PARKED;
380 	}
381 }
382 
383 void
384 kthread_park(struct proc *p)
385 {
386 	struct kthread *thread = kthread_lookup(p);
387 
388 	while ((thread->flags & KTHREAD_PARKED) == 0) {
389 		thread->flags |= KTHREAD_SHOULDPARK;
390 		wake_up_process(thread->proc);
391 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
392 	}
393 }
394 
395 void
396 kthread_unpark(struct proc *p)
397 {
398 	struct kthread *thread = kthread_lookup(p);
399 
400 	thread->flags &= ~KTHREAD_SHOULDPARK;
401 	wakeup(thread);
402 }
403 
404 int
405 kthread_should_stop(void)
406 {
407 	struct kthread *thread = kthread_lookup(curproc);
408 	return (thread->flags & KTHREAD_SHOULDSTOP);
409 }
410 
411 void
412 kthread_stop(struct proc *p)
413 {
414 	struct kthread *thread = kthread_lookup(p);
415 
416 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
417 		thread->flags |= KTHREAD_SHOULDSTOP;
418 		kthread_unpark(p);
419 		wake_up_process(thread->proc);
420 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
421 	}
422 	LIST_REMOVE(thread, next);
423 	free(thread, M_DRM, sizeof(*thread));
424 }
425 
426 #if NBIOS > 0
427 extern char smbios_board_vendor[];
428 extern char smbios_board_prod[];
429 extern char smbios_board_serial[];
430 #endif
431 
432 bool
433 dmi_match(int slot, const char *str)
434 {
435 	switch (slot) {
436 	case DMI_SYS_VENDOR:
437 		if (hw_vendor != NULL &&
438 		    !strcmp(hw_vendor, str))
439 			return true;
440 		break;
441 	case DMI_PRODUCT_NAME:
442 		if (hw_prod != NULL &&
443 		    !strcmp(hw_prod, str))
444 			return true;
445 		break;
446 	case DMI_PRODUCT_VERSION:
447 		if (hw_ver != NULL &&
448 		    !strcmp(hw_ver, str))
449 			return true;
450 		break;
451 #if NBIOS > 0
452 	case DMI_BOARD_VENDOR:
453 		if (strcmp(smbios_board_vendor, str) == 0)
454 			return true;
455 		break;
456 	case DMI_BOARD_NAME:
457 		if (strcmp(smbios_board_prod, str) == 0)
458 			return true;
459 		break;
460 	case DMI_BOARD_SERIAL:
461 		if (strcmp(smbios_board_serial, str) == 0)
462 			return true;
463 		break;
464 #else
465 	case DMI_BOARD_VENDOR:
466 		if (hw_vendor != NULL &&
467 		    !strcmp(hw_vendor, str))
468 			return true;
469 		break;
470 	case DMI_BOARD_NAME:
471 		if (hw_prod != NULL &&
472 		    !strcmp(hw_prod, str))
473 			return true;
474 		break;
475 #endif
476 	case DMI_NONE:
477 	default:
478 		return false;
479 	}
480 
481 	return false;
482 }
483 
484 static bool
485 dmi_found(const struct dmi_system_id *dsi)
486 {
487 	int i, slot;
488 
489 	for (i = 0; i < nitems(dsi->matches); i++) {
490 		slot = dsi->matches[i].slot;
491 		if (slot == DMI_NONE)
492 			break;
493 		if (!dmi_match(slot, dsi->matches[i].substr))
494 			return false;
495 	}
496 
497 	return true;
498 }
499 
500 const struct dmi_system_id *
501 dmi_first_match(const struct dmi_system_id *sysid)
502 {
503 	const struct dmi_system_id *dsi;
504 
505 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
506 		if (dmi_found(dsi))
507 			return dsi;
508 	}
509 
510 	return NULL;
511 }
512 
513 #if NBIOS > 0
514 extern char smbios_bios_date[];
515 extern char smbios_bios_version[];
516 #endif
517 
518 const char *
519 dmi_get_system_info(int slot)
520 {
521 #if NBIOS > 0
522 	switch (slot) {
523 	case DMI_BIOS_DATE:
524 		return smbios_bios_date;
525 	case DMI_BIOS_VERSION:
526 		return smbios_bios_version;
527 	default:
528 		printf("%s slot %d not handled\n", __func__, slot);
529 	}
530 #endif
531 	return NULL;
532 }
533 
534 int
535 dmi_check_system(const struct dmi_system_id *sysid)
536 {
537 	const struct dmi_system_id *dsi;
538 	int num = 0;
539 
540 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
541 		if (dmi_found(dsi)) {
542 			num++;
543 			if (dsi->callback && dsi->callback(dsi))
544 				break;
545 		}
546 	}
547 	return (num);
548 }
549 
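/*
 * Allocate 2^order physically contiguous pages with uvm_pglistalloc(),
 * mapping __GFP_DMA32 onto dma_constraint and M_ZERO/M_CANFAIL onto the
 * corresponding UVM_PLA flags.
 */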
550 struct vm_page *
551 alloc_pages(unsigned int gfp_mask, unsigned int order)
552 {
553 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
554 	struct uvm_constraint_range *constraint = &no_constraint;
555 	struct pglist mlist;
556 
557 	if (gfp_mask & M_CANFAIL)
558 		flags |= UVM_PLA_FAILOK;
559 	if (gfp_mask & M_ZERO)
560 		flags |= UVM_PLA_ZERO;
561 	if (gfp_mask & __GFP_DMA32)
562 		constraint = &dma_constraint;
563 
564 	TAILQ_INIT(&mlist);
565 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
566 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
567 		return NULL;
568 	return TAILQ_FIRST(&mlist);
569 }
570 
571 void
572 __free_pages(struct vm_page *page, unsigned int order)
573 {
574 	struct pglist mlist;
575 	int i;
576 
577 	TAILQ_INIT(&mlist);
578 	for (i = 0; i < (1 << order); i++)
579 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
580 	uvm_pglistfree(&mlist);
581 }
582 
583 void
584 __pagevec_release(struct pagevec *pvec)
585 {
586 	struct pglist mlist;
587 	int i;
588 
589 	TAILQ_INIT(&mlist);
590 	for (i = 0; i < pvec->nr; i++)
591 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
592 	uvm_pglistfree(&mlist);
593 	pagevec_reinit(pvec);
594 }
595 
596 static struct kmem_va_mode kv_physwait = {
597 	.kv_map = &phys_map,
598 	.kv_wait = 1,
599 };
600 
601 void *
602 kmap(struct vm_page *pg)
603 {
604 	vaddr_t va;
605 
606 #if defined (__HAVE_PMAP_DIRECT)
607 	va = pmap_map_direct(pg);
608 #else
609 	va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
610 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
611 	pmap_update(pmap_kernel());
612 #endif
613 	return (void *)va;
614 }
615 
616 void
617 kunmap_va(void *addr)
618 {
619 	vaddr_t va = (vaddr_t)addr;
620 
621 #if defined (__HAVE_PMAP_DIRECT)
622 	pmap_unmap_direct(va);
623 #else
624 	pmap_kremove(va, PAGE_SIZE);
625 	pmap_update(pmap_kernel());
626 	km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
627 #endif
628 }
629 
630 vaddr_t kmap_atomic_va;
631 int kmap_atomic_inuse;
632 
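/*
 * kmap_atomic() emulation: a single reserved VA (kmap_atomic_va) is reused
 * for every mapping, so only one atomic mapping may be live at a time;
 * kmap_atomic_inuse asserts that.
 */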
633 void *
634 kmap_atomic_prot(struct vm_page *pg, pgprot_t prot)
635 {
636 	KASSERT(!kmap_atomic_inuse);
637 
638 	kmap_atomic_inuse = 1;
639 	pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot,
640 	    PROT_READ | PROT_WRITE);
641 	return (void *)kmap_atomic_va;
642 }
643 
644 void
645 kunmap_atomic(void *addr)
646 {
647 	KASSERT(kmap_atomic_inuse);
648 
649 	pmap_kremove(kmap_atomic_va, PAGE_SIZE);
650 	kmap_atomic_inuse = 0;
651 }
652 
653 void *
654 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
655      pgprot_t prot)
656 {
657 	vaddr_t va;
658 	paddr_t pa;
659 	int i;
660 
661 	va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
662 	    &kd_nowait);
663 	if (va == 0)
664 		return NULL;
665 	for (i = 0; i < npages; i++) {
666 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
667 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
668 		    PROT_READ | PROT_WRITE,
669 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
670 		pmap_update(pmap_kernel());
671 	}
672 
673 	return (void *)va;
674 }
675 
676 void *
677 vmap_pfn(unsigned long *pfns, unsigned int npfn, pgprot_t prot)
678 {
679 	vaddr_t va;
680 	paddr_t pa;
681 	int i;
682 
683 	va = (vaddr_t)km_alloc(PAGE_SIZE * npfn, &kv_any, &kp_none,
684 	    &kd_nowait);
685 	if (va == 0)
686 		return NULL;
687 	for (i = 0; i < npfn; i++) {
688 		pa = round_page(pfns[i]) | prot;
689 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
690 		    PROT_READ | PROT_WRITE,
691 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
692 		pmap_update(pmap_kernel());
693 	}
694 
695 	return (void *)va;
696 }
697 
698 void
699 vunmap(void *addr, size_t size)
700 {
701 	vaddr_t va = (vaddr_t)addr;
702 
703 	pmap_remove(pmap_kernel(), va, va + size);
704 	pmap_update(pmap_kernel());
705 	km_free((void *)va, size, &kv_any, &kp_none);
706 }
707 
708 bool
709 is_vmalloc_addr(const void *p)
710 {
711 	vaddr_t min, max, addr;
712 
713 	min = vm_map_min(kernel_map);
714 	max = vm_map_max(kernel_map);
715 	addr = (vaddr_t)p;
716 
717 	if (addr >= min && addr <= max)
718 		return true;
719 	else
720 		return false;
721 }
722 
723 void
724 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
725     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
726 {
727 	const uint8_t *cbuf = buf;
728 	int i;
729 
730 	for (i = 0; i < len; i++) {
731 		if ((i % rowsize) == 0)
732 			printf("%s", prefix_str);
733 		printf("%02x", cbuf[i]);
734 		if ((i % rowsize) == (rowsize - 1))
735 			printf("\n");
736 		else
737 			printf(" ");
738 	}
739 }
740 
741 void *
742 memchr_inv(const void *s, int c, size_t n)
743 {
744 	if (n != 0) {
745 		const unsigned char *p = s;
746 
747 		do {
748 			if (*p++ != (unsigned char)c)
749 				return ((void *)(p - 1));
750 		} while (--n != 0);
751 	}
752 	return (NULL);
753 }
754 
755 int
756 panic_cmp(struct rb_node *a, struct rb_node *b)
757 {
758 	panic(__func__);
759 }
760 
761 #undef RB_ROOT
762 #define RB_ROOT(head)	(head)->rbh_root
763 
764 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
765 
766 /*
767  * This is a fairly minimal implementation of the Linux "idr" API.  It
768  * probably isn't very efficient, and definitely isn't RCU safe.  The
769  * pre-load buffer is global instead of per-cpu; we rely on the kernel
770  * lock to make this work.  Randomizing the IDs to make them harder
771  * to guess is not enabled yet (see the "notyet" block in idr_alloc()).
772  */
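/*
 * A minimal usage sketch (the caller-side names are made up):
 *
 *	struct idr ids;
 *	int handle;
 *
 *	idr_init(&ids);
 *	handle = idr_alloc(&ids, obj, 1, 0, GFP_KERNEL);
 *	...
 *	obj = idr_find(&ids, handle);
 *	idr_remove(&ids, handle);
 *	idr_destroy(&ids);
 */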
773 
774 int idr_cmp(struct idr_entry *, struct idr_entry *);
775 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
776 
777 struct pool idr_pool;
778 struct idr_entry *idr_entry_cache;
779 
780 void
781 idr_init(struct idr *idr)
782 {
783 	SPLAY_INIT(&idr->tree);
784 }
785 
786 void
787 idr_destroy(struct idr *idr)
788 {
789 	struct idr_entry *id;
790 
791 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
792 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
793 		pool_put(&idr_pool, id);
794 	}
795 }
796 
797 void
798 idr_preload(unsigned int gfp_mask)
799 {
800 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
801 
802 	KERNEL_ASSERT_LOCKED();
803 
804 	if (idr_entry_cache == NULL)
805 		idr_entry_cache = pool_get(&idr_pool, flags);
806 }
807 
808 int
809 idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
810 {
811 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
812 	struct idr_entry *id;
813 	int begin;
814 
815 	KERNEL_ASSERT_LOCKED();
816 
817 	if (idr_entry_cache) {
818 		id = idr_entry_cache;
819 		idr_entry_cache = NULL;
820 	} else {
821 		id = pool_get(&idr_pool, flags);
822 		if (id == NULL)
823 			return -ENOMEM;
824 	}
825 
826 	if (end <= 0)
827 		end = INT_MAX;
828 
829 #ifdef notyet
830 	id->id = begin = start + arc4random_uniform(end - start);
831 #else
832 	id->id = begin = start;
833 #endif
834 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
835 		if (id->id == end)
836 			id->id = start;
837 		else
838 			id->id++;
839 		if (id->id == begin) {
840 			pool_put(&idr_pool, id);
841 			return -ENOSPC;
842 		}
843 	}
844 	id->ptr = ptr;
845 	return id->id;
846 }
847 
848 void *
849 idr_replace(struct idr *idr, void *ptr, unsigned long id)
850 {
851 	struct idr_entry find, *res;
852 	void *old;
853 
854 	find.id = id;
855 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
856 	if (res == NULL)
857 		return ERR_PTR(-ENOENT);
858 	old = res->ptr;
859 	res->ptr = ptr;
860 	return old;
861 }
862 
863 void *
864 idr_remove(struct idr *idr, unsigned long id)
865 {
866 	struct idr_entry find, *res;
867 	void *ptr = NULL;
868 
869 	find.id = id;
870 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
871 	if (res) {
872 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
873 		ptr = res->ptr;
874 		pool_put(&idr_pool, res);
875 	}
876 	return ptr;
877 }
878 
879 void *
880 idr_find(struct idr *idr, unsigned long id)
881 {
882 	struct idr_entry find, *res;
883 
884 	find.id = id;
885 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
886 	if (res == NULL)
887 		return NULL;
888 	return res->ptr;
889 }
890 
891 void *
892 idr_get_next(struct idr *idr, int *id)
893 {
894 	struct idr_entry *res;
895 
896 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
897 		if (res->id >= *id) {
898 			*id = res->id;
899 			return res->ptr;
900 		}
901 	}
902 
903 	return NULL;
904 }
905 
906 int
907 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
908 {
909 	struct idr_entry *id;
910 	int ret;
911 
912 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
913 		ret = func(id->id, id->ptr, data);
914 		if (ret)
915 			return ret;
916 	}
917 
918 	return 0;
919 }
920 
921 int
922 idr_cmp(struct idr_entry *a, struct idr_entry *b)
923 {
924 	return (a->id < b->id ? -1 : a->id > b->id);
925 }
926 
927 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
928 
929 void
930 ida_init(struct ida *ida)
931 {
932 	idr_init(&ida->idr);
933 }
934 
935 void
936 ida_destroy(struct ida *ida)
937 {
938 	idr_destroy(&ida->idr);
939 }
940 
941 int
942 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
943     gfp_t gfp_mask)
944 {
945 	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
946 }
947 
948 void
949 ida_simple_remove(struct ida *ida, unsigned int id)
950 {
951 	idr_remove(&ida->idr, id);
952 }
953 
954 int
955 ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
956 {
957 	return idr_alloc(&ida->idr, NULL, min, INT_MAX, gfp);
958 }
959 
960 int
961 ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
962 {
963 	return idr_alloc(&ida->idr, NULL, 0, max - 1, gfp);
964 }
965 
966 void
967 ida_free(struct ida *ida, unsigned int id)
968 {
969 	idr_remove(&ida->idr, id);
970 }
971 
972 int
973 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
974 {
975 	return (a->id < b->id ? -1 : a->id > b->id);
976 }
977 
978 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
979 struct pool xa_pool;
980 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
981 
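/*
 * The Linux XArray API is backed here by a splay tree of xarray_entry
 * nodes allocated from xa_pool; the pool itself is initialized lazily on
 * the first xa_init_flags() call.
 */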
982 void
983 xa_init_flags(struct xarray *xa, gfp_t flags)
984 {
985 	static int initialized;
986 
987 	if (!initialized) {
988 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_NONE, 0,
989 		    "xapl", NULL);
990 		initialized = 1;
991 	}
992 	SPLAY_INIT(&xa->xa_tree);
993 	if (flags & XA_FLAGS_LOCK_IRQ)
994 		mtx_init(&xa->xa_lock, IPL_TTY);
995 	else
996 		mtx_init(&xa->xa_lock, IPL_NONE);
997 }
998 
999 void
1000 xa_destroy(struct xarray *xa)
1001 {
1002 	struct xarray_entry *id;
1003 
1004 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
1005 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
1006 		pool_put(&xa_pool, id);
1007 	}
1008 }
1009 
1010 /* Don't wrap ids. */
1011 int
1012 __xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
1013 {
1014 	struct xarray_entry *xid;
1015 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
1016 	int begin;
1017 
1018 	if (gfp & GFP_NOWAIT) {
1019 		xid = pool_get(&xa_pool, PR_NOWAIT);
1020 	} else {
1021 		mtx_leave(&xa->xa_lock);
1022 		xid = pool_get(&xa_pool, PR_WAITOK);
1023 		mtx_enter(&xa->xa_lock);
1024 	}
1025 
1026 	if (xid == NULL)
1027 		return -ENOMEM;
1028 
1029 	if (limit <= 0)
1030 		limit = INT_MAX;
1031 
1032 	xid->id = begin = start;
1033 
1034 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
1035 		if (xid->id == limit)
1036 			xid->id = start;
1037 		else
1038 			xid->id++;
1039 		if (xid->id == begin) {
1040 			pool_put(&xa_pool, xid);
1041 			return -EBUSY;
1042 		}
1043 	}
1044 	xid->ptr = entry;
1045 	*id = xid->id;
1046 	return 0;
1047 }
1048 
1049 /*
1050  * Wrap ids and store next id.
1051  * We walk the entire tree, so wrapping needs no special casing.
1052  * The only caller of this (i915_drm_client.c) doesn't use the next id.
1053  */
1054 int
1055 __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, int limit, u32 *next,
1056     gfp_t gfp)
1057 {
1058 	int r = __xa_alloc(xa, id, entry, limit, gfp);
1059 	*next = *id + 1;
1060 	return r;
1061 }
1062 
1063 void *
1064 __xa_erase(struct xarray *xa, unsigned long index)
1065 {
1066 	struct xarray_entry find, *res;
1067 	void *ptr = NULL;
1068 
1069 	find.id = index;
1070 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1071 	if (res) {
1072 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
1073 		ptr = res->ptr;
1074 		pool_put(&xa_pool, res);
1075 	}
1076 	return ptr;
1077 }
1078 
1079 void *
1080 __xa_load(struct xarray *xa, unsigned long index)
1081 {
1082 	struct xarray_entry find, *res;
1083 
1084 	find.id = index;
1085 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1086 	if (res == NULL)
1087 		return NULL;
1088 	return res->ptr;
1089 }
1090 
1091 void *
1092 __xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1093 {
1094 	struct xarray_entry find, *res;
1095 	void *prev;
1096 
1097 	if (entry == NULL)
1098 		return __xa_erase(xa, index);
1099 
1100 	find.id = index;
1101 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1102 	if (res != NULL) {
1103 		/* index exists */
1104 		/* XXX Multislot entry updates not implemented yet */
1105 		prev = res->ptr;
1106 		res->ptr = entry;
1107 		return prev;
1108 	}
1109 
1110 	/* index not found, add new */
1111 	if (gfp & GFP_NOWAIT) {
1112 		res = pool_get(&xa_pool, PR_NOWAIT);
1113 	} else {
1114 		mtx_leave(&xa->xa_lock);
1115 		res = pool_get(&xa_pool, PR_WAITOK);
1116 		mtx_enter(&xa->xa_lock);
1117 	}
1118 	if (res == NULL)
1119 		return XA_ERROR(-ENOMEM);
1120 	res->id = index;
1121 	res->ptr = entry;
1122 	if (SPLAY_INSERT(xarray_tree, &xa->xa_tree, res) != NULL)
1123 		return XA_ERROR(-EINVAL);
1124 	return NULL; /* no prev entry at index */
1125 }
1126 
1127 void *
1128 xa_get_next(struct xarray *xa, unsigned long *index)
1129 {
1130 	struct xarray_entry *res;
1131 
1132 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
1133 		if (res->id >= *index) {
1134 			*index = res->id;
1135 			return res->ptr;
1136 		}
1137 	}
1138 
1139 	return NULL;
1140 }
1141 
1142 int
1143 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
1144 {
1145 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
1146 	    M_DRM, gfp_mask | M_ZERO);
1147 	if (table->sgl == NULL)
1148 		return -ENOMEM;
1149 	table->nents = table->orig_nents = nents;
1150 	sg_mark_end(&table->sgl[nents - 1]);
1151 	return 0;
1152 }
1153 
1154 void
1155 sg_free_table(struct sg_table *table)
1156 {
1157 	free(table->sgl, M_DRM,
1158 	    table->orig_nents * sizeof(struct scatterlist));
1159 	table->orig_nents = 0;
1160 	table->sgl = NULL;
1161 }
1162 
1163 size_t
1164 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
1165     const void *buf, size_t buflen)
1166 {
1167 	panic("%s", __func__);
1168 }
1169 
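/*
 * Translate an array of Linux struct i2c_msg into iic_exec() operations:
 * all but the last two messages are issued as standalone transfers, a
 * second-to-last message (if any) becomes the command/register phase of
 * the final transfer, and the last message is issued with a STOP.
 */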
1170 int
1171 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1172 {
1173 	void *cmd = NULL;
1174 	int cmdlen = 0;
1175 	int err, ret = 0;
1176 	int op;
1177 
1178 	iic_acquire_bus(&adap->ic, 0);
1179 
1180 	while (num > 2) {
1181 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
1182 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
1183 		    msgs->buf, msgs->len, 0);
1184 		if (err) {
1185 			ret = -err;
1186 			goto fail;
1187 		}
1188 		msgs++;
1189 		num--;
1190 		ret++;
1191 	}
1192 
1193 	if (num > 1) {
1194 		cmd = msgs->buf;
1195 		cmdlen = msgs->len;
1196 		msgs++;
1197 		num--;
1198 		ret++;
1199 	}
1200 
1201 	op = (msgs->flags & I2C_M_RD) ?
1202 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
1203 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
1204 	    msgs->buf, msgs->len, 0);
1205 	if (err) {
1206 		ret = -err;
1207 		goto fail;
1208 	}
1209 	msgs++;
1210 	ret++;
1211 
1212 fail:
1213 	iic_release_bus(&adap->ic, 0);
1214 
1215 	return ret;
1216 }
1217 
1218 int
1219 __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1220 {
1221 	int ret, retries;
1222 
1223 	retries = adap->retries;
1224 retry:
1225 	if (adap->algo)
1226 		ret = adap->algo->master_xfer(adap, msgs, num);
1227 	else
1228 		ret = i2c_master_xfer(adap, msgs, num);
1229 	if (ret == -EAGAIN && retries > 0) {
1230 		retries--;
1231 		goto retry;
1232 	}
1233 
1234 	return ret;
1235 }
1236 
1237 int
1238 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1239 {
1240 	int ret;
1241 
1242 	if (adap->lock_ops)
1243 		adap->lock_ops->lock_bus(adap, 0);
1244 
1245 	ret = __i2c_transfer(adap, msgs, num);
1246 
1247 	if (adap->lock_ops)
1248 		adap->lock_ops->unlock_bus(adap, 0);
1249 
1250 	return ret;
1251 }
1252 
1253 int
1254 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1255 {
1256 	struct i2c_algo_bit_data *algo = adap->algo_data;
1257 	struct i2c_adapter bb;
1258 
1259 	memset(&bb, 0, sizeof(bb));
1260 	bb.ic = algo->ic;
1261 	bb.retries = adap->retries;
1262 	return i2c_master_xfer(&bb, msgs, num);
1263 }
1264 
1265 uint32_t
1266 i2c_bb_functionality(struct i2c_adapter *adap)
1267 {
1268 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1269 }
1270 
1271 struct i2c_algorithm i2c_bit_algo = {
1272 	.master_xfer = i2c_bb_master_xfer,
1273 	.functionality = i2c_bb_functionality
1274 };
1275 
1276 int
1277 i2c_bit_add_bus(struct i2c_adapter *adap)
1278 {
1279 	adap->algo = &i2c_bit_algo;
1280 	adap->retries = 3;
1281 
1282 	return 0;
1283 }
1284 
1285 #if defined(__amd64__) || defined(__i386__)
1286 
1287 /*
1288  * This is a minimal implementation of the Linux vga_get/vga_put
1289  * interface.  In all likelihood, it will only work for inteldrm(4) as
1290  * it assumes that if there is another active VGA device in the
1291  * system, it is sitting behind a PCI bridge.
1292  */
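/*
 * Expected call pattern (an assumption based on the Linux API): the driver
 * brackets accesses to legacy VGA resources with vga_get_uninterruptible()
 * and vga_put(), which here amounts to clearing and restoring
 * PPB_BC_VGA_ENABLE on the bridge found by vga_disable_bridge().
 */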
1293 
1294 extern int pci_enumerate_bus(struct pci_softc *,
1295     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1296 
1297 pcitag_t vga_bridge_tag;
1298 int vga_bridge_disabled;
1299 
1300 int
1301 vga_disable_bridge(struct pci_attach_args *pa)
1302 {
1303 	pcireg_t bhlc, bc;
1304 
1305 	if (pa->pa_domain != 0)
1306 		return 0;
1307 
1308 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1309 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1310 		return 0;
1311 
1312 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1313 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1314 		return 0;
1315 	bc &= ~PPB_BC_VGA_ENABLE;
1316 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1317 
1318 	vga_bridge_tag = pa->pa_tag;
1319 	vga_bridge_disabled = 1;
1320 
1321 	return 1;
1322 }
1323 
1324 void
1325 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1326 {
1327 	if (pdev->pci->sc_bridgetag != NULL)
1328 		return;
1329 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1330 }
1331 
1332 void
1333 vga_put(struct pci_dev *pdev, int rsrc)
1334 {
1335 	pcireg_t bc;
1336 
1337 	if (!vga_bridge_disabled)
1338 		return;
1339 
1340 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1341 	bc |= PPB_BC_VGA_ENABLE;
1342 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1343 
1344 	vga_bridge_disabled = 0;
1345 }
1346 
1347 #endif
1348 
1349 suspend_state_t pm_suspend_target_state;
1350 
1351 /*
1352  * ACPI types and interfaces.
1353  */
1354 
1355 #ifdef __HAVE_ACPI
1356 #include "acpi.h"
1357 #endif
1358 
1359 #if NACPI > 0
1360 
1361 #include <dev/acpi/acpireg.h>
1362 #include <dev/acpi/acpivar.h>
1363 #include <dev/acpi/amltypes.h>
1364 #include <dev/acpi/dsdt.h>
1365 
1366 struct acpi_fadt acpi_gbl_FADT;
1367 
1368 acpi_status
1369 acpi_get_table(const char *sig, int instance,
1370     struct acpi_table_header **hdr)
1371 {
1372 	struct acpi_softc *sc = acpi_softc;
1373 	struct acpi_q *entry;
1374 
1375 	KASSERT(instance == 1);
1376 
1377 	if (sc == NULL)
1378 		return AE_NOT_FOUND;
1379 
1380 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1381 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1382 			*hdr = entry->q_table;
1383 			return 0;
1384 		}
1385 	}
1386 
1387 	return AE_NOT_FOUND;
1388 }
1389 
1390 void
1391 acpi_put_table(struct acpi_table_header *hdr)
1392 {
1393 }
1394 
1395 acpi_status
1396 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1397 {
1398 	node = aml_searchname(node, name);
1399 	if (node == NULL)
1400 		return AE_NOT_FOUND;
1401 
1402 	*rnode = node;
1403 	return 0;
1404 }
1405 
1406 acpi_status
1407 acpi_get_name(acpi_handle node, int type,  struct acpi_buffer *buffer)
1408 {
1409 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1410 	KASSERT(type == ACPI_FULL_PATHNAME);
1411 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1412 	return 0;
1413 }
1414 
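/*
 * Evaluate an AML node: only integer and buffer arguments are converted,
 * and only buffer results are returned.  The result is placed in a
 * malloc(9)'d union acpi_object that the caller is expected to free.
 */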
1415 acpi_status
1416 acpi_evaluate_object(acpi_handle node, const char *name,
1417     struct acpi_object_list *params, struct acpi_buffer *result)
1418 {
1419 	struct aml_value args[4], res;
1420 	union acpi_object *obj;
1421 	uint8_t *data;
1422 	int i;
1423 
1424 	KASSERT(params->count <= nitems(args));
1425 
1426 	for (i = 0; i < params->count; i++) {
1427 		args[i].type = params->pointer[i].type;
1428 		switch (args[i].type) {
1429 		case AML_OBJTYPE_INTEGER:
1430 			args[i].v_integer = params->pointer[i].integer.value;
1431 			break;
1432 		case AML_OBJTYPE_BUFFER:
1433 			args[i].length = params->pointer[i].buffer.length;
1434 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1435 			break;
1436 		default:
1437 			printf("%s: arg type 0x%02x", __func__, args[i].type);
1438 			return AE_BAD_PARAMETER;
1439 		}
1440 	}
1441 
1442 	if (name) {
1443 		node = aml_searchname(node, name);
1444 		if (node == NULL)
1445 			return AE_NOT_FOUND;
1446 	}
1447 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1448 		aml_freevalue(&res);
1449 		return AE_ERROR;
1450 	}
1451 
1452 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1453 
1454 	result->length = sizeof(union acpi_object);
1455 	switch (res.type) {
1456 	case AML_OBJTYPE_BUFFER:
1457 		result->length += res.length;
1458 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1459 		obj = (union acpi_object *)result->pointer;
1460 		data = (uint8_t *)(obj + 1);
1461 		obj->type = res.type;
1462 		obj->buffer.length = res.length;
1463 		obj->buffer.pointer = data;
1464 		memcpy(data, res.v_buffer, res.length);
1465 		break;
1466 	default:
1467 		printf("%s: return type 0x%02x", __func__, res.type);
1468 		aml_freevalue(&res);
1469 		return AE_ERROR;
1470 	}
1471 
1472 	aml_freevalue(&res);
1473 	return 0;
1474 }
1475 
1476 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1477 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1478 
1479 int
1480 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1481 {
1482 	struct acpi_bus_event event;
1483 	struct notifier_block *nb;
1484 
1485 	event.device_class = ACPI_VIDEO_CLASS;
1486 	event.type = notify;
1487 
1488 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1489 		nb->notifier_call(nb, 0, &event);
1490 	return 0;
1491 }
1492 
1493 int
1494 register_acpi_notifier(struct notifier_block *nb)
1495 {
1496 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1497 	return 0;
1498 }
1499 
1500 int
1501 unregister_acpi_notifier(struct notifier_block *nb)
1502 {
1503 	struct notifier_block *tmp;
1504 
1505 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1506 		if (tmp == nb) {
1507 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1508 			    notifier_block, link);
1509 			return 0;
1510 		}
1511 	}
1512 
1513 	return -ENOENT;
1514 }
1515 
1516 const char *
1517 acpi_format_exception(acpi_status status)
1518 {
1519 	switch (status) {
1520 	case AE_NOT_FOUND:
1521 		return "not found";
1522 	case AE_BAD_PARAMETER:
1523 		return "bad parameter";
1524 	default:
1525 		return "unknown";
1526 	}
1527 }
1528 
1529 int
1530 acpi_target_system_state(void)
1531 {
1532 	return acpi_softc->sc_state;
1533 }
1534 
1535 #endif
1536 
1537 SLIST_HEAD(,backlight_device) backlight_device_list =
1538     SLIST_HEAD_INITIALIZER(backlight_device_list);
1539 
1540 void
1541 backlight_do_update_status(void *arg)
1542 {
1543 	backlight_update_status(arg);
1544 }
1545 
1546 struct backlight_device *
1547 backlight_device_register(const char *name, void *kdev, void *data,
1548     const struct backlight_ops *ops, const struct backlight_properties *props)
1549 {
1550 	struct backlight_device *bd;
1551 
1552 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1553 	bd->ops = ops;
1554 	bd->props = *props;
1555 	bd->data = data;
1556 
1557 	task_set(&bd->task, backlight_do_update_status, bd);
1558 
1559 	SLIST_INSERT_HEAD(&backlight_device_list, bd, next);
1560 	bd->name = name;
1561 
1562 	return bd;
1563 }
1564 
1565 void
1566 backlight_device_unregister(struct backlight_device *bd)
1567 {
1568 	SLIST_REMOVE(&backlight_device_list, bd, backlight_device, next);
1569 	free(bd, M_DRM, sizeof(*bd));
1570 }
1571 
1572 void
1573 backlight_schedule_update_status(struct backlight_device *bd)
1574 {
1575 	task_add(systq, &bd->task);
1576 }
1577 
1578 int
1579 backlight_enable(struct backlight_device *bd)
1580 {
1581 	if (bd == NULL)
1582 		return 0;
1583 
1584 	bd->props.power = FB_BLANK_UNBLANK;
1585 
1586 	return bd->ops->update_status(bd);
1587 }
1588 
1589 int
1590 backlight_disable(struct backlight_device *bd)
1591 {
1592 	if (bd == NULL)
1593 		return 0;
1594 
1595 	bd->props.power = FB_BLANK_POWERDOWN;
1596 
1597 	return bd->ops->update_status(bd);
1598 }
1599 
1600 struct backlight_device *
1601 backlight_device_get_by_name(const char *name)
1602 {
1603 	struct backlight_device *bd;
1604 
1605 	SLIST_FOREACH(bd, &backlight_device_list, next) {
1606 		if (strcmp(name, bd->name) == 0)
1607 			return bd;
1608 	}
1609 
1610 	return NULL;
1611 }
1612 
1613 struct drvdata {
1614 	struct device *dev;
1615 	void *data;
1616 	SLIST_ENTRY(drvdata) next;
1617 };
1618 
1619 SLIST_HEAD(,drvdata) drvdata_list = SLIST_HEAD_INITIALIZER(drvdata_list);
1620 
1621 void
1622 dev_set_drvdata(struct device *dev, void *data)
1623 {
1624 	struct drvdata *drvdata;
1625 
1626 	SLIST_FOREACH(drvdata, &drvdata_list, next) {
1627 		if (drvdata->dev == dev) {
1628 			drvdata->data = data;
1629 			return;
1630 		}
1631 	}
1632 
1633 	if (data == NULL)
1634 		return;
1635 
1636 	drvdata = malloc(sizeof(*drvdata), M_DRM, M_WAITOK);
1637 	drvdata->dev = dev;
1638 	drvdata->data = data;
1639 
1640 	SLIST_INSERT_HEAD(&drvdata_list, drvdata, next);
1641 }
1642 
1643 void *
1644 dev_get_drvdata(struct device *dev)
1645 {
1646 	struct drvdata *drvdata;
1647 
1648 	SLIST_FOREACH(drvdata, &drvdata_list, next) {
1649 		if (drvdata->dev == dev)
1650 			return drvdata->data;
1651 	}
1652 
1653 	return NULL;
1654 }
1655 
1656 void
1657 drm_sysfs_hotplug_event(struct drm_device *dev)
1658 {
1659 	knote_locked(&dev->note, NOTE_CHANGE);
1660 }
1661 
1662 void
1663 drm_sysfs_connector_hotplug_event(struct drm_connector *connector)
1664 {
1665 	knote_locked(&connector->dev->note, NOTE_CHANGE);
1666 }
1667 
1668 void
1669 drm_sysfs_connector_status_event(struct drm_connector *connector,
1670     struct drm_property *property)
1671 {
1672 	STUB();
1673 }
1674 
1675 void
1676 drm_sysfs_connector_property_event(struct drm_connector *connector,
1677     struct drm_property *property)
1678 {
1679 	STUB();
1680 }
1681 
1682 struct dma_fence *
1683 dma_fence_get(struct dma_fence *fence)
1684 {
1685 	if (fence)
1686 		kref_get(&fence->refcount);
1687 	return fence;
1688 }
1689 
1690 struct dma_fence *
1691 dma_fence_get_rcu(struct dma_fence *fence)
1692 {
1693 	if (fence)
1694 		kref_get(&fence->refcount);
1695 	return fence;
1696 }
1697 
1698 struct dma_fence *
1699 dma_fence_get_rcu_safe(struct dma_fence **dfp)
1700 {
1701 	struct dma_fence *fence;
1702 	if (dfp == NULL)
1703 		return NULL;
1704 	fence = *dfp;
1705 	if (fence)
1706 		kref_get(&fence->refcount);
1707 	return fence;
1708 }
1709 
1710 void
1711 dma_fence_release(struct kref *ref)
1712 {
1713 	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
1714 	if (fence->ops && fence->ops->release)
1715 		fence->ops->release(fence);
1716 	else
1717 		free(fence, M_DRM, 0);
1718 }
1719 
1720 void
1721 dma_fence_put(struct dma_fence *fence)
1722 {
1723 	if (fence)
1724 		kref_put(&fence->refcount, dma_fence_release);
1725 }
1726 
1727 int
1728 dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp)
1729 {
1730 	struct dma_fence_cb *cur, *tmp;
1731 	struct list_head cb_list;
1732 
1733 	if (fence == NULL)
1734 		return -EINVAL;
1735 
1736 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1737 		return -EINVAL;
1738 
1739 	list_replace(&fence->cb_list, &cb_list);
1740 
1741 	fence->timestamp = timestamp;
1742 	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
1743 
1744 	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
1745 		INIT_LIST_HEAD(&cur->node);
1746 		cur->func(fence, cur);
1747 	}
1748 
1749 	return 0;
1750 }
1751 
1752 int
1753 dma_fence_signal(struct dma_fence *fence)
1754 {
1755 	int r;
1756 
1757 	if (fence == NULL)
1758 		return -EINVAL;
1759 
1760 	mtx_enter(fence->lock);
1761 	r = dma_fence_signal_timestamp_locked(fence, ktime_get());
1762 	mtx_leave(fence->lock);
1763 
1764 	return r;
1765 }
1766 
1767 int
1768 dma_fence_signal_locked(struct dma_fence *fence)
1769 {
1770 	if (fence == NULL)
1771 		return -EINVAL;
1772 
1773 	return dma_fence_signal_timestamp_locked(fence, ktime_get());
1774 }
1775 
1776 int
1777 dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
1778 {
1779 	int r;
1780 
1781 	if (fence == NULL)
1782 		return -EINVAL;
1783 
1784 	mtx_enter(fence->lock);
1785 	r = dma_fence_signal_timestamp_locked(fence, timestamp);
1786 	mtx_leave(fence->lock);
1787 
1788 	return r;
1789 }
1790 
1791 bool
1792 dma_fence_is_signaled(struct dma_fence *fence)
1793 {
1794 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1795 		return true;
1796 
1797 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1798 		dma_fence_signal(fence);
1799 		return true;
1800 	}
1801 
1802 	return false;
1803 }
1804 
1805 bool
1806 dma_fence_is_signaled_locked(struct dma_fence *fence)
1807 {
1808 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1809 		return true;
1810 
1811 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1812 		dma_fence_signal_locked(fence);
1813 		return true;
1814 	}
1815 
1816 	return false;
1817 }
1818 
1819 ktime_t
1820 dma_fence_timestamp(struct dma_fence *fence)
1821 {
1822 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1823 		while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
1824 			CPU_BUSY_CYCLE();
1825 		return fence->timestamp;
1826 	} else {
1827 		return ktime_get();
1828 	}
1829 }
1830 
1831 long
1832 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
1833 {
1834 	if (timeout < 0)
1835 		return -EINVAL;
1836 
1837 	if (fence->ops->wait)
1838 		return fence->ops->wait(fence, intr, timeout);
1839 	else
1840 		return dma_fence_default_wait(fence, intr, timeout);
1841 }
1842 
1843 long
1844 dma_fence_wait(struct dma_fence *fence, bool intr)
1845 {
1846 	long ret;
1847 
1848 	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
1849 	if (ret < 0)
1850 		return ret;
1851 
1852 	return 0;
1853 }
1854 
1855 void
1856 dma_fence_enable_sw_signaling(struct dma_fence *fence)
1857 {
1858 	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
1859 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
1860 	    fence->ops->enable_signaling) {
1861 		mtx_enter(fence->lock);
1862 		if (!fence->ops->enable_signaling(fence))
1863 			dma_fence_signal_locked(fence);
1864 		mtx_leave(fence->lock);
1865 	}
1866 }
1867 
1868 void
1869 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
1870     struct mutex *lock, uint64_t context, uint64_t seqno)
1871 {
1872 	fence->ops = ops;
1873 	fence->lock = lock;
1874 	fence->context = context;
1875 	fence->seqno = seqno;
1876 	fence->flags = 0;
1877 	fence->error = 0;
1878 	kref_init(&fence->refcount);
1879 	INIT_LIST_HEAD(&fence->cb_list);
1880 }
1881 
1882 int
1883 dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
1884     dma_fence_func_t func)
1885 {
1886 	int ret = 0;
1887 	bool was_set;
1888 
1889 	if (WARN_ON(!fence || !func))
1890 		return -EINVAL;
1891 
1892 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1893 		INIT_LIST_HEAD(&cb->node);
1894 		return -ENOENT;
1895 	}
1896 
1897 	mtx_enter(fence->lock);
1898 
1899 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
1900 
1901 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1902 		ret = -ENOENT;
1903 	else if (!was_set && fence->ops->enable_signaling) {
1904 		if (!fence->ops->enable_signaling(fence)) {
1905 			dma_fence_signal_locked(fence);
1906 			ret = -ENOENT;
1907 		}
1908 	}
1909 
1910 	if (!ret) {
1911 		cb->func = func;
1912 		list_add_tail(&cb->node, &fence->cb_list);
1913 	} else
1914 		INIT_LIST_HEAD(&cb->node);
1915 	mtx_leave(fence->lock);
1916 
1917 	return ret;
1918 }
1919 
1920 bool
1921 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
1922 {
1923 	bool ret;
1924 
1925 	mtx_enter(fence->lock);
1926 
1927 	ret = !list_empty(&cb->node);
1928 	if (ret)
1929 		list_del_init(&cb->node);
1930 
1931 	mtx_leave(fence->lock);
1932 
1933 	return ret;
1934 }
1935 
1936 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1937 
1938 uint64_t
1939 dma_fence_context_alloc(unsigned int num)
1940 {
1941 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1942 }
1943 
1944 struct default_wait_cb {
1945 	struct dma_fence_cb base;
1946 	struct proc *proc;
1947 };
1948 
1949 static void
1950 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1951 {
1952 	struct default_wait_cb *wait =
1953 	    container_of(cb, struct default_wait_cb, base);
1954 	wake_up_process(wait->proc);
1955 }
1956 
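/*
 * Default wait: register a callback that calls wake_up_process() on the
 * sleeping proc, then msleep() on the fence lock until the fence signals,
 * the timeout expires or the sleep is interrupted (-ERESTARTSYS).
 */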
1957 long
1958 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1959 {
1960 	long ret = timeout ? timeout : 1;
1961 	unsigned long end;
1962 	int err;
1963 	struct default_wait_cb cb;
1964 	bool was_set;
1965 
1966 	KASSERT(timeout <= INT_MAX);
1967 
1968 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1969 		return ret;
1970 
1971 	mtx_enter(fence->lock);
1972 
1973 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1974 	    &fence->flags);
1975 
1976 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1977 		goto out;
1978 
1979 	if (!was_set && fence->ops->enable_signaling) {
1980 		if (!fence->ops->enable_signaling(fence)) {
1981 			dma_fence_signal_locked(fence);
1982 			goto out;
1983 		}
1984 	}
1985 
1986 	if (timeout == 0) {
1987 		ret = 0;
1988 		goto out;
1989 	}
1990 
1991 	cb.base.func = dma_fence_default_wait_cb;
1992 	cb.proc = curproc;
1993 	list_add(&cb.base.node, &fence->cb_list);
1994 
1995 	end = jiffies + timeout;
1996 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1997 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1998 			break;
1999 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
2000 		    "dmafence", ret);
2001 		if (err == EINTR || err == ERESTART) {
2002 			ret = -ERESTARTSYS;
2003 			break;
2004 		}
2005 	}
2006 
2007 	if (!list_empty(&cb.base.node))
2008 		list_del(&cb.base.node);
2009 out:
2010 	mtx_leave(fence->lock);
2011 
2012 	return ret;
2013 }
2014 
2015 static bool
2016 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
2017     uint32_t *idx)
2018 {
2019 	int i;
2020 
2021 	for (i = 0; i < count; ++i) {
2022 		struct dma_fence *fence = fences[i];
2023 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
2024 			if (idx)
2025 				*idx = i;
2026 			return true;
2027 		}
2028 	}
2029 	return false;
2030 }
2031 
2032 long
2033 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
2034     bool intr, long timeout, uint32_t *idx)
2035 {
2036 	struct default_wait_cb *cb;
2037 	long ret = timeout;
2038 	unsigned long end;
2039 	int i, err;
2040 
2041 	KASSERT(timeout <= INT_MAX);
2042 
2043 	if (timeout == 0) {
2044 		for (i = 0; i < count; i++) {
2045 			if (dma_fence_is_signaled(fences[i])) {
2046 				if (idx)
2047 					*idx = i;
2048 				return 1;
2049 			}
2050 		}
2051 		return 0;
2052 	}
2053 
2054 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
2055 	if (cb == NULL)
2056 		return -ENOMEM;
2057 
2058 	for (i = 0; i < count; i++) {
2059 		struct dma_fence *fence = fences[i];
2060 		cb[i].proc = curproc;
2061 		if (dma_fence_add_callback(fence, &cb[i].base,
2062 		    dma_fence_default_wait_cb)) {
2063 			if (idx)
2064 				*idx = i;
2065 			goto cb_cleanup;
2066 		}
2067 	}
2068 
2069 	end = jiffies + timeout;
2070 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
2071 		if (dma_fence_test_signaled_any(fences, count, idx))
2072 			break;
2073 		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
2074 		if (err == EINTR || err == ERESTART) {
2075 			ret = -ERESTARTSYS;
2076 			break;
2077 		}
2078 	}
2079 
2080 cb_cleanup:
2081 	while (i-- > 0)
2082 		dma_fence_remove_callback(fences[i], &cb[i].base);
2083 	free(cb, M_DRM, count * sizeof(*cb));
2084 	return ret;
2085 }
2086 
2087 void
2088 dma_fence_set_deadline(struct dma_fence *f, ktime_t t)
2089 {
2090 	if (f->ops->set_deadline == NULL)
2091 		return;
2092 	if (dma_fence_is_signaled(f) == false)
2093 		f->ops->set_deadline(f, t);
2094 }
2095 
2096 static struct dma_fence dma_fence_stub;
2097 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
2098 
2099 static const char *
2100 dma_fence_stub_get_name(struct dma_fence *fence)
2101 {
2102 	return "stub";
2103 }
2104 
2105 static const struct dma_fence_ops dma_fence_stub_ops = {
2106 	.get_driver_name = dma_fence_stub_get_name,
2107 	.get_timeline_name = dma_fence_stub_get_name,
2108 };
2109 
2110 struct dma_fence *
2111 dma_fence_get_stub(void)
2112 {
2113 	mtx_enter(&dma_fence_stub_mtx);
2114 	if (dma_fence_stub.ops == NULL) {
2115 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
2116 		    &dma_fence_stub_mtx, 0, 0);
2117 		dma_fence_signal_locked(&dma_fence_stub);
2118 	}
2119 	mtx_leave(&dma_fence_stub_mtx);
2120 
2121 	return dma_fence_get(&dma_fence_stub);
2122 }
2123 
2124 struct dma_fence *
2125 dma_fence_allocate_private_stub(ktime_t ts)
2126 {
2127 	struct dma_fence *f = malloc(sizeof(*f), M_DRM,
2128 	    M_ZERO | M_WAITOK | M_CANFAIL);
2129 	if (f == NULL)
2130 		return NULL;
2131 	dma_fence_init(f, &dma_fence_stub_ops, &dma_fence_stub_mtx, 0, 0);
2132 	dma_fence_signal_timestamp(f, ts);
2133 	return f;
2134 }
2135 
2136 static const char *
2137 dma_fence_array_get_driver_name(struct dma_fence *fence)
2138 {
2139 	return "dma_fence_array";
2140 }
2141 
2142 static const char *
2143 dma_fence_array_get_timeline_name(struct dma_fence *fence)
2144 {
2145 	return "unbound";
2146 }
2147 
2148 static void
2149 irq_dma_fence_array_work(void *arg)
2150 {
2151 	struct dma_fence_array *dfa = (struct dma_fence_array *)arg;
2152 	dma_fence_signal(&dfa->base);
2153 	dma_fence_put(&dfa->base);
2154 }
2155 
2156 static void
2157 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
2158 {
2159 	struct dma_fence_array_cb *array_cb =
2160 	    container_of(cb, struct dma_fence_array_cb, cb);
2161 	struct dma_fence_array *dfa = array_cb->array;
2162 
2163 	if (atomic_dec_and_test(&dfa->num_pending))
2164 		timeout_add(&dfa->to, 1);
2165 	else
2166 		dma_fence_put(&dfa->base);
2167 }
2168 
2169 static bool
2170 dma_fence_array_enable_signaling(struct dma_fence *fence)
2171 {
2172 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2173 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
2174 	int i;
2175 
2176 	for (i = 0; i < dfa->num_fences; ++i) {
2177 		cb[i].array = dfa;
2178 		dma_fence_get(&dfa->base);
2179 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
2180 		    dma_fence_array_cb_func)) {
2181 			dma_fence_put(&dfa->base);
2182 			if (atomic_dec_and_test(&dfa->num_pending))
2183 				return false;
2184 		}
2185 	}
2186 
2187 	return true;
2188 }
2189 
2190 static bool
2191 dma_fence_array_signaled(struct dma_fence *fence)
2192 {
2193 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2194 
2195 	return atomic_read(&dfa->num_pending) <= 0;
2196 }
2197 
2198 static void
2199 dma_fence_array_release(struct dma_fence *fence)
2200 {
2201 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2202 	int i;
2203 
2204 	for (i = 0; i < dfa->num_fences; ++i)
2205 		dma_fence_put(dfa->fences[i]);
2206 
2207 	free(dfa->fences, M_DRM, 0);
2208 	dma_fence_free(fence);
2209 }
2210 
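/*
 * A dma_fence_array signals its base fence once num_pending of its
 * component fences have signaled (or any one of them, with signal_on_any).
 * The actual signalling is deferred to a timeout(9) handler so it does not
 * run from a component fence's callback context.
 */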
2211 struct dma_fence_array *
2212 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
2213     unsigned seqno, bool signal_on_any)
2214 {
2215 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
2216 	    (num_fences * sizeof(struct dma_fence_array_cb)),
2217 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
2218 	if (dfa == NULL)
2219 		return NULL;
2220 
2221 	mtx_init(&dfa->lock, IPL_TTY);
2222 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
2223 	    context, seqno);
2224 	timeout_set(&dfa->to, irq_dma_fence_array_work, dfa);
2225 
2226 	dfa->num_fences = num_fences;
2227 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
2228 	dfa->fences = fences;
2229 
2230 	return dfa;
2231 }
2232 
2233 struct dma_fence *
2234 dma_fence_array_first(struct dma_fence *f)
2235 {
2236 	struct dma_fence_array *dfa;
2237 
2238 	if (f == NULL)
2239 		return NULL;
2240 
2241 	if ((dfa = to_dma_fence_array(f)) == NULL)
2242 		return f;
2243 
2244 	if (dfa->num_fences > 0)
2245 		return dfa->fences[0];
2246 
2247 	return NULL;
2248 }
2249 
2250 struct dma_fence *
2251 dma_fence_array_next(struct dma_fence *f, unsigned int i)
2252 {
2253 	struct dma_fence_array *dfa;
2254 
2255 	if (f == NULL)
2256 		return NULL;
2257 
2258 	if ((dfa = to_dma_fence_array(f)) == NULL)
2259 		return NULL;
2260 
2261 	if (i < dfa->num_fences)
2262 		return dfa->fences[i];
2263 
2264 	return NULL;
2265 }
2266 
2267 const struct dma_fence_ops dma_fence_array_ops = {
2268 	.get_driver_name = dma_fence_array_get_driver_name,
2269 	.get_timeline_name = dma_fence_array_get_timeline_name,
2270 	.enable_signaling = dma_fence_array_enable_signaling,
2271 	.signaled = dma_fence_array_signaled,
2272 	.release = dma_fence_array_release,
2273 };
2274 
2275 int
2276 dma_fence_chain_find_seqno(struct dma_fence **df, uint64_t seqno)
2277 {
2278 	struct dma_fence_chain *chain;
2279 	struct dma_fence *fence;
2280 
2281 	if (seqno == 0)
2282 		return 0;
2283 
2284 	if ((chain = to_dma_fence_chain(*df)) == NULL)
2285 		return -EINVAL;
2286 
2287 	fence = &chain->base;
2288 	if (fence->seqno < seqno)
2289 		return -EINVAL;
2290 
2291 	dma_fence_chain_for_each(*df, fence) {
2292 		if ((*df)->context != fence->context)
2293 			break;
2294 
2295 		chain = to_dma_fence_chain(*df);
2296 		if (chain->prev_seqno < seqno)
2297 			break;
2298 	}
2299 	dma_fence_put(fence);
2300 
2301 	return 0;
2302 }
2303 
2304 void
2305 dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev,
2306     struct dma_fence *fence, uint64_t seqno)
2307 {
2308 	uint64_t context;
2309 
2310 	chain->fence = fence;
2311 	chain->prev = prev;
2312 	mtx_init(&chain->lock, IPL_TTY);
2313 
2314 	/* if prev is a chain */
2315 	if (to_dma_fence_chain(prev) != NULL) {
2316 		if (__dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
2317 			chain->prev_seqno = prev->seqno;
2318 			context = prev->context;
2319 		} else {
2320 			chain->prev_seqno = 0;
2321 			context = dma_fence_context_alloc(1);
2322 			seqno = prev->seqno;
2323 		}
2324 	} else {
2325 		chain->prev_seqno = 0;
2326 		context = dma_fence_context_alloc(1);
2327 	}
2328 
2329 	dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock,
2330 	    context, seqno);
2331 }
2332 
2333 static const char *
2334 dma_fence_chain_get_driver_name(struct dma_fence *fence)
2335 {
2336 	return "dma_fence_chain";
2337 }
2338 
2339 static const char *
2340 dma_fence_chain_get_timeline_name(struct dma_fence *fence)
2341 {
2342 	return "unbound";
2343 }
2344 
2345 static bool dma_fence_chain_enable_signaling(struct dma_fence *);
2346 
2347 static void
2348 dma_fence_chain_timo(void *arg)
2349 {
2350 	struct dma_fence_chain *chain = (struct dma_fence_chain *)arg;
2351 
2352 	if (dma_fence_chain_enable_signaling(&chain->base) == false)
2353 		dma_fence_signal(&chain->base);
2354 	dma_fence_put(&chain->base);
2355 }
2356 
2357 static void
2358 dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
2359 {
2360 	struct dma_fence_chain *chain =
2361 	    container_of(cb, struct dma_fence_chain, cb);
2362 	timeout_set(&chain->to, dma_fence_chain_timo, chain);
2363 	timeout_add(&chain->to, 1);
2364 	dma_fence_put(f);
2365 }
2366 
2367 static bool
2368 dma_fence_chain_enable_signaling(struct dma_fence *fence)
2369 {
2370 	struct dma_fence_chain *chain, *h;
2371 	struct dma_fence *f;
2372 
2373 	h = to_dma_fence_chain(fence);
2374 	dma_fence_get(&h->base);
2375 	dma_fence_chain_for_each(fence, &h->base) {
2376 		chain = to_dma_fence_chain(fence);
2377 		if (chain == NULL)
2378 			f = fence;
2379 		else
2380 			f = chain->fence;
2381 
2382 		dma_fence_get(f);
2383 		if (!dma_fence_add_callback(f, &h->cb, dma_fence_chain_cb)) {
2384 			dma_fence_put(fence);
2385 			return true;
2386 		}
2387 		dma_fence_put(f);
2388 	}
2389 	dma_fence_put(&h->base);
2390 	return false;
2391 }
2392 
2393 static bool
2394 dma_fence_chain_signaled(struct dma_fence *fence)
2395 {
2396 	struct dma_fence_chain *chain;
2397 	struct dma_fence *f;
2398 
2399 	dma_fence_chain_for_each(fence, fence) {
2400 		chain = to_dma_fence_chain(fence);
2401 		if (chain == NULL)
2402 			f = fence;
2403 		else
2404 			f = chain->fence;
2405 
2406 		if (dma_fence_is_signaled(f) == false) {
2407 			dma_fence_put(fence);
2408 			return false;
2409 		}
2410 	}
2411 	return true;
2412 }
2413 
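/*
 * Release a chain link.  Predecessors that are only referenced by this
 * link are unlinked iteratively rather than recursively, so freeing a
 * long chain does not recurse.
 */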
2414 static void
2415 dma_fence_chain_release(struct dma_fence *fence)
2416 {
2417 	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
2418 	struct dma_fence_chain *prev_chain;
2419 	struct dma_fence *prev;
2420 
2421 	for (prev = chain->prev; prev != NULL; prev = chain->prev) {
2422 		if (kref_read(&prev->refcount) > 1)
2423 			break;
2424 		if ((prev_chain = to_dma_fence_chain(prev)) == NULL)
2425 			break;
2426 		chain->prev = prev_chain->prev;
2427 		prev_chain->prev = NULL;
2428 		dma_fence_put(prev);
2429 	}
2430 	dma_fence_put(prev);
2431 	dma_fence_put(chain->fence);
2432 	dma_fence_free(fence);
2433 }
2434 
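/*
 * Drop the reference on fence and return a referenced pointer to the
 * previous link, pruning links whose fences have already signalled.
 */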
2435 struct dma_fence *
2436 dma_fence_chain_walk(struct dma_fence *fence)
2437 {
2438 	struct dma_fence_chain *chain = to_dma_fence_chain(fence), *prev_chain;
2439 	struct dma_fence *prev, *new_prev, *tmp;
2440 
2441 	if (chain == NULL) {
2442 		dma_fence_put(fence);
2443 		return NULL;
2444 	}
2445 
2446 	while ((prev = dma_fence_get(chain->prev)) != NULL) {
2447 		prev_chain = to_dma_fence_chain(prev);
2448 		if (prev_chain != NULL) {
2449 			if (!dma_fence_is_signaled(prev_chain->fence))
2450 				break;
2451 			new_prev = dma_fence_get(prev_chain->prev);
2452 		} else {
2453 			if (!dma_fence_is_signaled(prev))
2454 				break;
2455 			new_prev = NULL;
2456 		}
2457 		tmp = atomic_cas_ptr(&chain->prev, prev, new_prev);
2458 		dma_fence_put(tmp == prev ? prev : new_prev);
2459 		dma_fence_put(prev);
2460 	}
2461 
2462 	dma_fence_put(fence);
2463 	return prev;
2464 }
2465 
2466 const struct dma_fence_ops dma_fence_chain_ops = {
2467 	.get_driver_name = dma_fence_chain_get_driver_name,
2468 	.get_timeline_name = dma_fence_chain_get_timeline_name,
2469 	.enable_signaling = dma_fence_chain_enable_signaling,
2470 	.signaled = dma_fence_chain_signaled,
2471 	.release = dma_fence_chain_release,
2472 	.use_64bit_seqno = true,
2473 };
2474 
2475 bool
2476 dma_fence_is_container(struct dma_fence *fence)
2477 {
2478 	return (fence->ops == &dma_fence_chain_ops) ||
2479 	    (fence->ops == &dma_fence_array_ops);
2480 }
2481 
2482 int
2483 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
2484 {
2485 	return (ENXIO);
2486 }
2487 
2488 int
2489 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
2490 {
2491 	return (ENXIO);
2492 }
2493 
2494 int
2495 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2496 {
2497 	return (ENOTTY);
2498 }
2499 
2500 int
2501 dmabuf_kqfilter(struct file *fp, struct knote *kn)
2502 {
2503 	return (EINVAL);
2504 }
2505 
2506 int
2507 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
2508 {
2509 	struct dma_buf *dmabuf = fp->f_data;
2510 
2511 	memset(st, 0, sizeof(*st));
2512 	st->st_size = dmabuf->size;
2513 	st->st_mode = S_IFIFO;	/* XXX */
2514 	return (0);
2515 }
2516 
2517 int
2518 dmabuf_close(struct file *fp, struct proc *p)
2519 {
2520 	struct dma_buf *dmabuf = fp->f_data;
2521 
2522 	fp->f_data = NULL;
2523 	KERNEL_LOCK();
2524 	dmabuf->ops->release(dmabuf);
2525 	KERNEL_UNLOCK();
2526 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
2527 	return (0);
2528 }
2529 
2530 int
2531 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2532 {
2533 	struct dma_buf *dmabuf = fp->f_data;
2534 	off_t newoff;
2535 
2536 	if (*offset != 0)
2537 		return (EINVAL);
2538 
2539 	switch (whence) {
2540 	case SEEK_SET:
2541 		newoff = 0;
2542 		break;
2543 	case SEEK_END:
2544 		newoff = dmabuf->size;
2545 		break;
2546 	default:
2547 		return (EINVAL);
2548 	}
2549 	mtx_enter(&fp->f_mtx);
2550 	fp->f_offset = newoff;
2551 	mtx_leave(&fp->f_mtx);
2552 	*offset = newoff;
2553 	return (0);
2554 }
2555 
2556 const struct fileops dmabufops = {
2557 	.fo_read	= dmabuf_read,
2558 	.fo_write	= dmabuf_write,
2559 	.fo_ioctl	= dmabuf_ioctl,
2560 	.fo_kqfilter	= dmabuf_kqfilter,
2561 	.fo_stat	= dmabuf_stat,
2562 	.fo_close	= dmabuf_close,
2563 	.fo_seek	= dmabuf_seek,
2564 };
2565 
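/*
 * Wrap an exported buffer in an anonymous file using dmabufops; a file
 * descriptor is only installed later, by dma_buf_fd().
 */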
2566 struct dma_buf *
2567 dma_buf_export(const struct dma_buf_export_info *info)
2568 {
2569 	struct proc *p = curproc;
2570 	struct dma_buf *dmabuf;
2571 	struct file *fp;
2572 
2573 	fp = fnew(p);
2574 	if (fp == NULL)
2575 		return ERR_PTR(-ENFILE);
2576 	fp->f_type = DTYPE_DMABUF;
2577 	fp->f_ops = &dmabufops;
2578 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
2579 	dmabuf->priv = info->priv;
2580 	dmabuf->ops = info->ops;
2581 	dmabuf->size = info->size;
2582 	dmabuf->file = fp;
2583 	fp->f_data = dmabuf;
2584 	INIT_LIST_HEAD(&dmabuf->attachments);
2585 	return dmabuf;
2586 }
2587 
2588 struct dma_buf *
2589 dma_buf_get(int fd)
2590 {
2591 	struct proc *p = curproc;
2592 	struct filedesc *fdp = p->p_fd;
2593 	struct file *fp;
2594 
2595 	if ((fp = fd_getfile(fdp, fd)) == NULL)
2596 		return ERR_PTR(-EBADF);
2597 
2598 	if (fp->f_type != DTYPE_DMABUF) {
2599 		FRELE(fp, p);
2600 		return ERR_PTR(-EINVAL);
2601 	}
2602 
2603 	return fp->f_data;
2604 }
2605 
2606 void
2607 dma_buf_put(struct dma_buf *dmabuf)
2608 {
2609 	KASSERT(dmabuf);
2610 	KASSERT(dmabuf->file);
2611 
2612 	FRELE(dmabuf->file, curproc);
2613 }
2614 
2615 int
2616 dma_buf_fd(struct dma_buf *dmabuf, int flags)
2617 {
2618 	struct proc *p = curproc;
2619 	struct filedesc *fdp = p->p_fd;
2620 	struct file *fp = dmabuf->file;
2621 	int fd, cloexec, error;
2622 
2623 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
2624 
2625 	fdplock(fdp);
2626 restart:
2627 	if ((error = fdalloc(p, 0, &fd)) != 0) {
2628 		if (error == ENOSPC) {
2629 			fdexpand(p);
2630 			goto restart;
2631 		}
2632 		fdpunlock(fdp);
2633 		return -error;
2634 	}
2635 
2636 	fdinsert(fdp, fd, cloexec, fp);
2637 	fdpunlock(fdp);
2638 
2639 	return fd;
2640 }
2641 
2642 void
2643 get_dma_buf(struct dma_buf *dmabuf)
2644 {
2645 	FREF(dmabuf->file);
2646 }
2647 
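/*
 * Report the fastest link speed the device claims to support.  The
 * Link Capabilities 2 register is only consulted on capability version
 * 2 or later; otherwise the legacy LCAP speed bits are used.
 */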
2648 enum pci_bus_speed
2649 pcie_get_speed_cap(struct pci_dev *pdev)
2650 {
2651 	pci_chipset_tag_t	pc;
2652 	pcitag_t		tag;
2653 	int			pos;
2654 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
2655 	pcireg_t		id;
2656 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
2657 	int			bus, device, function;
2658 
2659 	if (pdev == NULL)
2660 		return PCI_SPEED_UNKNOWN;
2661 
2662 	pc = pdev->pc;
2663 	tag = pdev->tag;
2664 
2665 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2666 	    &pos, NULL))
2667 		return PCI_SPEED_UNKNOWN;
2668 
2669 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2670 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2671 
2672 	/* we've been informed VIA and ServerWorks (RCC) don't make the cut */
2673 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
2674 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
2675 		return PCI_SPEED_UNKNOWN;
2676 
2677 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2678 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
2679 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
2680 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
2681 
2682 	lnkcap &= 0x0f;
2683 	lnkcap2 &= 0xfe;
2684 
2685 	if (lnkcap2) { /* PCIE GEN 3.0 */
2686 		if (lnkcap2 & 0x02)
2687 			cap = PCIE_SPEED_2_5GT;
2688 		if (lnkcap2 & 0x04)
2689 			cap = PCIE_SPEED_5_0GT;
2690 		if (lnkcap2 & 0x08)
2691 			cap = PCIE_SPEED_8_0GT;
2692 		if (lnkcap2 & 0x10)
2693 			cap = PCIE_SPEED_16_0GT;
2694 		if (lnkcap2 & 0x20)
2695 			cap = PCIE_SPEED_32_0GT;
2696 		if (lnkcap2 & 0x40)
2697 			cap = PCIE_SPEED_64_0GT;
2698 	} else {
2699 		if (lnkcap & 0x01)
2700 			cap = PCIE_SPEED_2_5GT;
2701 		if (lnkcap & 0x02)
2702 			cap = PCIE_SPEED_5_0GT;
2703 	}
2704 
2705 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
2706 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
2707 	    lnkcap2);
2708 	return cap;
2709 }
2710 
2711 enum pcie_link_width
2712 pcie_get_width_cap(struct pci_dev *pdev)
2713 {
2714 	pci_chipset_tag_t	pc = pdev->pc;
2715 	pcitag_t		tag = pdev->tag;
2716 	int			pos;
2717 	pcireg_t		lnkcap = 0;
2718 	pcireg_t		id;
2719 	int			bus, device, function;
2720 
2721 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2722 	    &pos, NULL))
2723 		return PCIE_LNK_WIDTH_UNKNOWN;
2724 
2725 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2726 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2727 
2728 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2729 
2730 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
2731 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
2732 
2733 	if (lnkcap)
2734 		return (lnkcap & 0x3f0) >> 4;
2735 	return PCIE_LNK_WIDTH_UNKNOWN;
2736 }
2737 
2738 bool
2739 pcie_aspm_enabled(struct pci_dev *pdev)
2740 {
2741 	pci_chipset_tag_t	pc = pdev->pc;
2742 	pcitag_t		tag = pdev->tag;
2743 	int			pos;
2744 	pcireg_t		lcsr;
2745 
2746 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2747 	    &pos, NULL))
2748 		return false;
2749 
2750 	lcsr = pci_conf_read(pc, tag, pos + PCI_PCIE_LCSR);
2751 	if ((lcsr & (PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1)) != 0)
2752 		return true;
2753 
2754 	return false;
2755 }
2756 
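/*
 * Minimal wait_on_bit() emulation: a single mutex and wait channel
 * stand in for Linux's hashed bit wait queues.
 */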
2757 static wait_queue_head_t bit_waitq;
2758 wait_queue_head_t var_waitq;
2759 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
2760 
2761 int
2762 wait_on_bit(unsigned long *word, int bit, unsigned mode)
2763 {
2764 	int err;
2765 
2766 	if (!test_bit(bit, word))
2767 		return 0;
2768 
2769 	mtx_enter(&wait_bit_mtx);
2770 	while (test_bit(bit, word)) {
2771 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
2772 		    INFSLP);
2773 		if (err) {
2774 			mtx_leave(&wait_bit_mtx);
2775 			return 1;
2776 		}
2777 	}
2778 	mtx_leave(&wait_bit_mtx);
2779 	return 0;
2780 }
2781 
2782 int
2783 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
2784 {
2785 	int err;
2786 
2787 	if (!test_bit(bit, word))
2788 		return 0;
2789 
2790 	mtx_enter(&wait_bit_mtx);
2791 	while (test_bit(bit, word)) {
2792 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
2793 		if (err) {
2794 			mtx_leave(&wait_bit_mtx);
2795 			return 1;
2796 		}
2797 	}
2798 	mtx_leave(&wait_bit_mtx);
2799 	return 0;
2800 }
2801 
2802 void
2803 wake_up_bit(void *word, int bit)
2804 {
2805 	mtx_enter(&wait_bit_mtx);
2806 	wakeup(word);
2807 	mtx_leave(&wait_bit_mtx);
2808 }
2809 
2810 void
2811 clear_and_wake_up_bit(int bit, void *word)
2812 {
2813 	clear_bit(bit, word);
2814 	wake_up_bit(word, bit);
2815 }
2816 
2817 wait_queue_head_t *
2818 bit_waitqueue(void *word, int bit)
2819 {
2820 	/* XXX hash table of wait queues? */
2821 	return &bit_waitq;
2822 }
2823 
2824 wait_queue_head_t *
2825 __var_waitqueue(void *p)
2826 {
2827 	/* XXX hash table of wait queues? */
2828 	return &bit_waitq;
2829 }
2830 
2831 struct workqueue_struct *system_wq;
2832 struct workqueue_struct *system_highpri_wq;
2833 struct workqueue_struct *system_unbound_wq;
2834 struct workqueue_struct *system_long_wq;
2835 struct taskq *taskletq;
2836 
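/*
 * Set up the taskqs backing the Linux system workqueues and tasklets,
 * the wait queues, the idr pool and the kmap_atomic VA.
 */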
2837 void
2838 drm_linux_init(void)
2839 {
2840 	system_wq = (struct workqueue_struct *)
2841 	    taskq_create("drmwq", 4, IPL_HIGH, 0);
2842 	system_highpri_wq = (struct workqueue_struct *)
2843 	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
2844 	system_unbound_wq = (struct workqueue_struct *)
2845 	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
2846 	system_long_wq = (struct workqueue_struct *)
2847 	    taskq_create("drmlwq", 4, IPL_HIGH, 0);
2848 
2849 	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
2850 
2851 	init_waitqueue_head(&bit_waitq);
2852 	init_waitqueue_head(&var_waitq);
2853 
2854 	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
2855 	    "idrpl", NULL);
2856 
2857 	kmap_atomic_va =
2858 	    (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
2859 
2860 #if NACPI > 0
2861 	if (acpi_softc) {
2862 		memcpy(&acpi_gbl_FADT, acpi_softc->sc_fadt,
2863 		    sizeof(acpi_gbl_FADT));
2864 	}
2865 #endif
2866 }
2867 
2868 void
2869 drm_linux_exit(void)
2870 {
2871 	pool_destroy(&idr_pool);
2872 
2873 	taskq_destroy(taskletq);
2874 
2875 	taskq_destroy((struct taskq *)system_long_wq);
2876 	taskq_destroy((struct taskq *)system_unbound_wq);
2877 	taskq_destroy((struct taskq *)system_highpri_wq);
2878 	taskq_destroy((struct taskq *)system_wq);
2879 }
2880 
2881 #define PCIE_ECAP_RESIZE_BAR	0x15
2882 #define RBCAP0			0x04
2883 #define RBCTRL0			0x08
2884 #define RBCTRL_BARINDEX_MASK	0x07
2885 #define RBCTRL_BARSIZE_MASK	0x1f00
2886 #define RBCTRL_BARSIZE_SHIFT	8
2887 
2888 /* size in MB is 1 << nsize */
2889 int
2890 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
2891 {
2892 	pcireg_t	reg;
2893 	uint32_t	offset, capid;
2894 
2895 	KASSERT(bar == 0);
2896 
2897 	offset = PCI_PCIE_ECAP;
2898 
2899 	/* search PCI Express Extended Capabilities */
2900 	do {
2901 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
2902 		capid = PCI_PCIE_ECAP_ID(reg);
2903 		if (capid == PCIE_ECAP_RESIZE_BAR)
2904 			break;
2905 		offset = PCI_PCIE_ECAP_NEXT(reg);
2906 	} while (capid != 0);
2907 
2908 	if (capid == 0) {
2909 		printf("%s: could not find resize bar cap!\n", __func__);
2910 		return -ENOTSUP;
2911 	}
2912 
2913 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
2914 
2915 	if ((reg & (1 << (nsize + 4))) == 0) {
2916 		printf("%s: size not supported\n", __func__);
2917 		return -ENOTSUP;
2918 	}
2919 
2920 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2921 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2922 		printf("%s: BAR index not 0\n", __func__);
2923 		return -EINVAL;
2924 	}
2925 
2926 	reg &= ~RBCTRL_BARSIZE_MASK;
2927 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2928 
2929 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2930 
2931 	return 0;
2932 }
2933 
2934 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2935 
2936 int
2937 register_shrinker(struct shrinker *shrinker, const char *format, ...)
2938 {
2939 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2940 	return 0;
2941 }
2942 
2943 void
2944 unregister_shrinker(struct shrinker *shrinker)
2945 {
2946 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2947 }
2948 
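/*
 * Shed memory under pressure: ask each registered shrinker to scan
 * objects until roughly npages pages have been released.
 */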
2949 void
2950 drmbackoff(long npages)
2951 {
2952 	struct shrink_control sc;
2953 	struct shrinker *shrinker;
2954 	u_long ret;
2955 
2956 	shrinker = TAILQ_FIRST(&shrinkers);
2957 	while (shrinker && npages > 0) {
2958 		sc.nr_to_scan = npages;
2959 		ret = shrinker->scan_objects(shrinker, &sc);
2960 		npages -= ret;
2961 		shrinker = TAILQ_NEXT(shrinker, next);
2962 	}
2963 }
2964 
2965 void *
2966 bitmap_zalloc(u_int n, gfp_t flags)
2967 {
2968 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2969 }
2970 
2971 void
2972 bitmap_free(void *p)
2973 {
2974 	kfree(p);
2975 }
2976 
2977 int
2978 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2979 {
2980 	if (atomic_add_unless(v, -1, 1))
2981 		return 0;
2982 
2983 	rw_enter_write(lock);
2984 	if (atomic_dec_return(v) == 0)
2985 		return 1;
2986 	rw_exit_write(lock);
2987 	return 0;
2988 }
2989 
2990 int
2991 printk(const char *fmt, ...)
2992 {
2993 	int ret, level;
2994 	va_list ap;
2995 
2996 	if (fmt != NULL && *fmt == '\001') {
2997 		level = fmt[1];
2998 #ifndef DRMDEBUG
2999 		if (level >= KERN_INFO[1] && level <= '9')
3000 			return 0;
3001 #endif
3002 		fmt += 2;
3003 	}
3004 
3005 	va_start(ap, fmt);
3006 	ret = vprintf(fmt, ap);
3007 	va_end(ap);
3008 
3009 	return ret;
3010 }
3011 
3012 #define START(node) ((node)->start)
3013 #define LAST(node) ((node)->last)
3014 
3015 struct interval_tree_node *
3016 interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
3017     unsigned long last)
3018 {
3019 	struct interval_tree_node *node;
3020 	struct rb_node *rb;
3021 
3022 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
3023 		node = rb_entry(rb, typeof(*node), rb);
3024 		if (LAST(node) >= start && START(node) <= last)
3025 			return node;
3026 	}
3027 	return NULL;
3028 }
3029 
3030 void
3031 interval_tree_remove(struct interval_tree_node *node,
3032     struct rb_root_cached *root)
3033 {
3034 	rb_erase_cached(&node->rb, root);
3035 }
3036 
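/*
 * Nodes are keyed on start only, with no augmented max-end tracking;
 * interval_tree_iter_first() above compensates with a linear scan.
 */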
3037 void
3038 interval_tree_insert(struct interval_tree_node *node,
3039     struct rb_root_cached *root)
3040 {
3041 	struct rb_node **iter = &root->rb_root.rb_node;
3042 	struct rb_node *parent = NULL;
3043 	struct interval_tree_node *iter_node;
3044 
3045 	while (*iter) {
3046 		parent = *iter;
3047 		iter_node = rb_entry(*iter, struct interval_tree_node, rb);
3048 
3049 		if (node->start < iter_node->start)
3050 			iter = &(*iter)->rb_left;
3051 		else
3052 			iter = &(*iter)->rb_right;
3053 	}
3054 
3055 	rb_link_node(&node->rb, parent, iter);
3056 	rb_insert_color_cached(&node->rb, root, false);
3057 }
3058 
3059 int
3060 syncfile_read(struct file *fp, struct uio *uio, int fflags)
3061 {
3062 	return ENXIO;
3063 }
3064 
3065 int
3066 syncfile_write(struct file *fp, struct uio *uio, int fflags)
3067 {
3068 	return ENXIO;
3069 }
3070 
3071 int
3072 syncfile_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
3073 {
3074 	return ENOTTY;
3075 }
3076 
3077 int
3078 syncfile_kqfilter(struct file *fp, struct knote *kn)
3079 {
3080 	return EINVAL;
3081 }
3082 
3083 int
3084 syncfile_stat(struct file *fp, struct stat *st, struct proc *p)
3085 {
3086 	memset(st, 0, sizeof(*st));
3087 	st->st_mode = S_IFIFO;	/* XXX */
3088 	return 0;
3089 }
3090 
3091 int
3092 syncfile_close(struct file *fp, struct proc *p)
3093 {
3094 	struct sync_file *sf = fp->f_data;
3095 
3096 	dma_fence_put(sf->fence);
3097 	fp->f_data = NULL;
3098 	free(sf, M_DRM, sizeof(struct sync_file));
3099 	return 0;
3100 }
3101 
3102 int
3103 syncfile_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
3104 {
3105 	off_t newoff;
3106 
3107 	if (*offset != 0)
3108 		return EINVAL;
3109 
3110 	switch (whence) {
3111 	case SEEK_SET:
3112 		newoff = 0;
3113 		break;
3114 	case SEEK_END:
3115 		newoff = 0;
3116 		break;
3117 	default:
3118 		return EINVAL;
3119 	}
3120 	mtx_enter(&fp->f_mtx);
3121 	fp->f_offset = newoff;
3122 	mtx_leave(&fp->f_mtx);
3123 	*offset = newoff;
3124 	return 0;
3125 }
3126 
3127 const struct fileops syncfileops = {
3128 	.fo_read	= syncfile_read,
3129 	.fo_write	= syncfile_write,
3130 	.fo_ioctl	= syncfile_ioctl,
3131 	.fo_kqfilter	= syncfile_kqfilter,
3132 	.fo_stat	= syncfile_stat,
3133 	.fo_close	= syncfile_close,
3134 	.fo_seek	= syncfile_seek,
3135 };
3136 
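/*
 * Only sync_file descriptors pass through these fd helpers; dma-buf
 * file descriptors are installed by dma_buf_fd() above.
 */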
3137 void
3138 fd_install(int fd, struct file *fp)
3139 {
3140 	struct proc *p = curproc;
3141 	struct filedesc *fdp = p->p_fd;
3142 
3143 	if (fp->f_type != DTYPE_SYNC)
3144 		return;
3145 
3146 	fdplock(fdp);
3147 	/* all callers use get_unused_fd_flags(O_CLOEXEC) */
3148 	fdinsert(fdp, fd, UF_EXCLOSE, fp);
3149 	fdpunlock(fdp);
3150 }
3151 
3152 void
3153 fput(struct file *fp)
3154 {
3155 	if (fp->f_type != DTYPE_SYNC)
3156 		return;
3157 
3158 	FRELE(fp, curproc);
3159 }
3160 
3161 int
3162 get_unused_fd_flags(unsigned int flags)
3163 {
3164 	struct proc *p = curproc;
3165 	struct filedesc *fdp = p->p_fd;
3166 	int error, fd;
3167 
3168 	KASSERT((flags & O_CLOEXEC) != 0);
3169 
3170 	fdplock(fdp);
3171 retryalloc:
3172 	if ((error = fdalloc(p, 0, &fd)) != 0) {
3173 		if (error == ENOSPC) {
3174 			fdexpand(p);
3175 			goto retryalloc;
3176 		}
3177 		fdpunlock(fdp);
3178 		return -1;
3179 	}
3180 	fdpunlock(fdp);
3181 
3182 	return fd;
3183 }
3184 
3185 void
3186 put_unused_fd(int fd)
3187 {
3188 	struct filedesc *fdp = curproc->p_fd;
3189 
3190 	fdplock(fdp);
3191 	fdremove(fdp, fd);
3192 	fdpunlock(fdp);
3193 }
3194 
3195 struct dma_fence *
3196 sync_file_get_fence(int fd)
3197 {
3198 	struct proc *p = curproc;
3199 	struct filedesc *fdp = p->p_fd;
3200 	struct file *fp;
3201 	struct sync_file *sf;
3202 	struct dma_fence *f;
3203 
3204 	if ((fp = fd_getfile(fdp, fd)) == NULL)
3205 		return NULL;
3206 
3207 	if (fp->f_type != DTYPE_SYNC) {
3208 		FRELE(fp, p);
3209 		return NULL;
3210 	}
3211 	sf = fp->f_data;
3212 	f = dma_fence_get(sf->fence);
3213 	FRELE(sf->file, p);
3214 	return f;
3215 }
3216 
3217 struct sync_file *
3218 sync_file_create(struct dma_fence *fence)
3219 {
3220 	struct proc *p = curproc;
3221 	struct sync_file *sf;
3222 	struct file *fp;
3223 
3224 	fp = fnew(p);
3225 	if (fp == NULL)
3226 		return NULL;
3227 	fp->f_type = DTYPE_SYNC;
3228 	fp->f_ops = &syncfileops;
3229 	sf = malloc(sizeof(struct sync_file), M_DRM, M_WAITOK | M_ZERO);
3230 	sf->file = fp;
3231 	sf->fence = dma_fence_get(fence);
3232 	fp->f_data = sf;
3233 	return sf;
3234 }
3235 
3236 bool
3237 drm_firmware_drivers_only(void)
3238 {
3239 	return false;
3240 }
3241 
3242 
3243 void *
3244 memremap(phys_addr_t phys_addr, size_t size, int flags)
3245 {
3246 	STUB();
3247 	return NULL;
3248 }
3249 
3250 void
3251 memunmap(void *addr)
3252 {
3253 	STUB();
3254 }
3255 
3256 #include <linux/platform_device.h>
3257 
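/*
 * Map a Linux struct device back to a bus_dma tag: use the drm
 * instance's tag if the device belongs to one, otherwise fall back to
 * the platform device's tag.
 */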
3258 bus_dma_tag_t
3259 dma_tag_lookup(struct device *dev)
3260 {
3261 	extern struct cfdriver drm_cd;
3262 	struct drm_device *drm;
3263 	int i;
3264 
3265 	for (i = 0; i < drm_cd.cd_ndevs; i++) {
3266 		drm = drm_cd.cd_devs[i];
3267 		if (drm && drm->dev == dev)
3268 			return drm->dmat;
3269 	}
3270 
3271 	return ((struct platform_device *)dev)->dmat;
3272 }
3273 
3274 LIST_HEAD(, drm_dmamem) dmamem_list = LIST_HEAD_INITIALIZER(dmamem_list);
3275 
3276 void *
3277 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
3278     int gfp)
3279 {
3280 	bus_dma_tag_t dmat = dma_tag_lookup(dev);
3281 	struct drm_dmamem *mem;
3282 
3283 	mem = drm_dmamem_alloc(dmat, size, PAGE_SIZE, 1, size,
3284 	    BUS_DMA_COHERENT, 0);
3285 	if (mem == NULL)
3286 		return NULL;
3287 	*dma_handle = mem->map->dm_segs[0].ds_addr;
3288 	LIST_INSERT_HEAD(&dmamem_list, mem, next);
3289 	return mem->kva;
3290 }
3291 
3292 void
3293 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
3294     dma_addr_t dma_handle)
3295 {
3296 	bus_dma_tag_t dmat = dma_tag_lookup(dev);
3297 	struct drm_dmamem *mem;
3298 
3299 	LIST_FOREACH(mem, &dmamem_list, next) {
3300 		if (mem->kva == cpu_addr)
3301 			break;
3302 	}
3303 	KASSERT(mem);
3304 	KASSERT(mem->size == size);
3305 	KASSERT(mem->map->dm_segs[0].ds_addr == dma_handle);
3306 
3307 	LIST_REMOVE(mem, next);
3308 	drm_dmamem_free(dmat, mem);
3309 }
3310 
3311 int
3312 dma_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
3313     dma_addr_t dma_addr, size_t size)
3314 {
3315 	paddr_t pa;
3316 	int ret;
3317 
3318 	if (!pmap_extract(pmap_kernel(), (vaddr_t)cpu_addr, &pa))
3319 		return -EINVAL;
3320 
3321 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
3322 	if (ret)
3323 		return ret;
3324 
3325 	sg_set_page(sgt->sgl, PHYS_TO_VM_PAGE(pa), size, 0);
3326 	return 0;
3327 }
3328 
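/*
 * Map a raw physical address range for device access; returns
 * DMA_MAPPING_ERROR if the map cannot be created or loaded.
 */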
3329 dma_addr_t
3330 dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
3331     enum dma_data_direction dir, u_long attr)
3332 {
3333 	bus_dma_tag_t dmat = dma_tag_lookup(dev);
3334 	bus_dmamap_t map;
3335 	bus_dma_segment_t seg;
3336 
3337 	if (bus_dmamap_create(dmat, size, 1, size, 0,
3338 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &map))
3339 		return DMA_MAPPING_ERROR;
3340 	seg.ds_addr = phys_addr;
3341 	seg.ds_len = size;
3342 	if (bus_dmamap_load_raw(dmat, map, &seg, 1, size, BUS_DMA_WAITOK)) {
3343 		bus_dmamap_destroy(dmat, map);
3344 		return DMA_MAPPING_ERROR;
3345 	}
3346 
3347 	return map->dm_segs[0].ds_addr;
3348 }
3349 
3350 #ifdef BUS_DMA_FIXED
3351 
3352 #include <linux/iommu.h>
3353 
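/*
 * Load the (single segment) sg table at a caller-chosen device address
 * using BUS_DMA_FIXED.
 */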
3354 size_t
3355 iommu_map_sgtable(struct iommu_domain *domain, u_long iova,
3356     struct sg_table *sgt, int prot)
3357 {
3358 	bus_dma_segment_t seg;
3359 	int error;
3360 
3361 	error = bus_dmamap_create(domain->dmat, sgt->sgl->length, 1,
3362 	    sgt->sgl->length, 0, BUS_DMA_WAITOK, &sgt->dmamap);
3363 	if (error)
3364 		return -ENOMEM;
3365 
3366 	sgt->dmamap->dm_segs[0].ds_addr = iova;
3367 	sgt->dmamap->dm_segs[0].ds_len = sgt->sgl->length;
3368 	sgt->dmamap->dm_nsegs = 1;
3369 	seg.ds_addr = VM_PAGE_TO_PHYS(sgt->sgl->__page);
3370 	seg.ds_len = sgt->sgl->length;
3371 	error = bus_dmamap_load_raw(domain->dmat, sgt->dmamap, &seg, 1,
3372 	    sgt->sgl->length, BUS_DMA_WAITOK | BUS_DMA_FIXED);
3373 	if (error)
3374 		return -ENOMEM;
3375 
3376 	return sg_dma_len(sgt->sgl);
3377 }
3378 
3379 size_t
3380 iommu_unmap(struct iommu_domain *domain, u_long iova, size_t size)
3381 {
3382 	STUB();
3383 	return 0;
3384 }
3385 
3386 struct iommu_domain *
3387 iommu_get_domain_for_dev(struct device *dev)
3388 {
3389 	STUB();
3390 	return NULL;
3391 }
3392 
3393 phys_addr_t
3394 iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
3395 {
3396 	STUB();
3397 	return 0;
3398 }
3399 
3400 struct iommu_domain *
3401 iommu_domain_alloc(struct bus_type *type)
3402 {
3403 	return malloc(sizeof(struct iommu_domain), M_DEVBUF, M_WAITOK | M_ZERO);
3404 }
3405 
3406 int
3407 iommu_attach_device(struct iommu_domain *domain, struct device *dev)
3408 {
3409 	struct platform_device *pdev = (struct platform_device *)dev;
3410 
3411 	domain->dmat = pdev->dmat;
3412 	return 0;
3413 }
3414 
3415 #endif
3416 
3417 #include <linux/component.h>
3418 
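/*
 * Small component framework: components register themselves here and
 * are bound to an aggregate device when the master is added.
 */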
3419 struct component {
3420 	struct device *dev;
3421 	struct device *adev;
3422 	const struct component_ops *ops;
3423 	SLIST_ENTRY(component) next;
3424 };
3425 
3426 SLIST_HEAD(,component) component_list = SLIST_HEAD_INITIALIZER(component_list);
3427 
3428 int
3429 component_add(struct device *dev, const struct component_ops *ops)
3430 {
3431 	struct component *component;
3432 
3433 	component = malloc(sizeof(*component), M_DEVBUF, M_WAITOK | M_ZERO);
3434 	component->dev = dev;
3435 	component->ops = ops;
3436 	SLIST_INSERT_HEAD(&component_list, component, next);
3437 	return 0;
3438 }
3439 
3440 int
3441 component_add_typed(struct device *dev, const struct component_ops *ops,
3442 	int type)
3443 {
3444 	return component_add(dev, ops);
3445 }
3446 
3447 int
3448 component_bind_all(struct device *dev, void *data)
3449 {
3450 	struct component *component;
3451 	int ret = 0;
3452 
3453 	SLIST_FOREACH(component, &component_list, next) {
3454 		if (component->adev == dev) {
3455 			ret = component->ops->bind(component->dev, NULL, data);
3456 			if (ret)
3457 				break;
3458 		}
3459 	}
3460 
3461 	return ret;
3462 }
3463 
3464 struct component_match_entry {
3465 	int (*compare)(struct device *, void *);
3466 	void *data;
3467 };
3468 
3469 struct component_match {
3470 	struct component_match_entry match[4];
3471 	int nmatches;
3472 };
3473 
3474 int
3475 component_master_add_with_match(struct device *dev,
3476     const struct component_master_ops *ops, struct component_match *match)
3477 {
3478 	struct component *component;
3479 	int found = 0;
3480 	int i, ret;
3481 
3482 	SLIST_FOREACH(component, &component_list, next) {
3483 		for (i = 0; i < match->nmatches; i++) {
3484 			struct component_match_entry *m = &match->match[i];
3485 			if (m->compare(component->dev, m->data)) {
3486 				component->adev = dev;
3487 				found = 1;
3488 				break;
3489 			}
3490 		}
3491 	}
3492 
3493 	if (found) {
3494 		ret = ops->bind(dev);
3495 		if (ret)
3496 			return ret;
3497 	}
3498 
3499 	return 0;
3500 }
3501 
3502 #ifdef __HAVE_FDT
3503 
3504 #include <linux/platform_device.h>
3505 #include <dev/ofw/openfirm.h>
3506 #include <dev/ofw/fdt.h>
3507 #include <machine/fdt.h>
3508 
3509 LIST_HEAD(, platform_device) pdev_list = LIST_HEAD_INITIALIZER(pdev_list);
3510 
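/*
 * Fill in the platform device from the FDT attach args and remember it
 * for of_find_device_by_node().
 */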
3511 void
3512 platform_device_register(struct platform_device *pdev)
3513 {
3514 	int i;
3515 
3516 	pdev->num_resources = pdev->faa->fa_nreg;
3517 	if (pdev->faa->fa_nreg > 0) {
3518 		pdev->resource = mallocarray(pdev->faa->fa_nreg,
3519 		    sizeof(*pdev->resource), M_DEVBUF, M_WAITOK | M_ZERO);
3520 		for (i = 0; i < pdev->faa->fa_nreg; i++) {
3521 			pdev->resource[i].start = pdev->faa->fa_reg[i].addr;
3522 			pdev->resource[i].end = pdev->faa->fa_reg[i].addr +
3523 			    pdev->faa->fa_reg[i].size - 1;
3524 		}
3525 	}
3526 
3527 	pdev->parent = pdev->dev.dv_parent;
3528 	pdev->node = pdev->faa->fa_node;
3529 	pdev->iot = pdev->faa->fa_iot;
3530 	pdev->dmat = pdev->faa->fa_dmat;
3531 	LIST_INSERT_HEAD(&pdev_list, pdev, next);
3532 }
3533 
3534 
3535 struct resource *
3536 platform_get_resource(struct platform_device *pdev, u_int type, u_int num)
3537 {
3538 	KASSERT(num < pdev->num_resources);
3539 	return &pdev->resource[num];
3540 }
3541 
3542 void __iomem *
3543 devm_platform_ioremap_resource_byname(struct platform_device *pdev,
3544 				      const char *name)
3545 {
3546 	bus_space_handle_t ioh;
3547 	int err, idx;
3548 
3549 	idx = OF_getindex(pdev->node, name, "reg-names");
3550 	if (idx == -1 || idx >= pdev->num_resources)
3551 		return ERR_PTR(-EINVAL);
3552 
3553 	err = bus_space_map(pdev->iot, pdev->resource[idx].start,
3554 	    pdev->resource[idx].end - pdev->resource[idx].start + 1,
3555 	    BUS_SPACE_MAP_LINEAR, &ioh);
3556 	if (err)
3557 		return ERR_PTR(-err);
3558 
3559 	return bus_space_vaddr(pdev->iot, ioh);
3560 }
3561 
3562 #include <dev/ofw/ofw_clock.h>
3563 #include <linux/clk.h>
3564 
3565 struct clk *
3566 devm_clk_get(struct device *dev, const char *name)
3567 {
3568 	struct platform_device *pdev = (struct platform_device *)dev;
3569 	struct clk *clk;
3570 
3571 	clk = malloc(sizeof(*clk), M_DEVBUF, M_WAITOK);
3572 	clk->freq = clock_get_frequency(pdev->node, name);
3573 	return clk;
3574 }
3575 
3576 u_long
3577 clk_get_rate(struct clk *clk)
3578 {
3579 	return clk->freq;
3580 }
3581 
3582 #include <linux/gpio/consumer.h>
3583 #include <dev/ofw/ofw_gpio.h>
3584 
3585 struct gpio_desc {
3586 	uint32_t gpios[4];
3587 };
3588 
3589 struct gpio_desc *
3590 devm_gpiod_get_optional(struct device *dev, const char *name, int flags)
3591 {
3592 	struct platform_device *pdev = (struct platform_device *)dev;
3593 	struct gpio_desc *desc;
3594 	char fullname[128];
3595 	int len;
3596 
3597 	snprintf(fullname, sizeof(fullname), "%s-gpios", name);
3598 
3599 	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);
3600 	len = OF_getpropintarray(pdev->node, fullname, desc->gpios,
3601 	     sizeof(desc->gpios));
3602 	KASSERT(len <= sizeof(desc->gpios));
3603 	if (len < 0) {
3604 		free(desc, M_DEVBUF, sizeof(*desc));
3605 		return NULL;
3606 	}
3607 
3608 	switch (flags) {
3609 	case GPIOD_IN:
3610 		gpio_controller_config_pin(desc->gpios, GPIO_CONFIG_INPUT);
3611 		break;
3612 	case GPIOD_OUT_HIGH:
3613 		gpio_controller_config_pin(desc->gpios, GPIO_CONFIG_OUTPUT);
3614 		gpio_controller_set_pin(desc->gpios, 1);
3615 		break;
3616 	default:
3617 		panic("%s: unimplemented flags 0x%x", __func__, flags);
3618 	}
3619 
3620 	return desc;
3621 }
3622 
3623 int
3624 gpiod_get_value_cansleep(const struct gpio_desc *desc)
3625 {
3626 	return gpio_controller_get_pin(((struct gpio_desc *)desc)->gpios);
3627 }
3628 
3629 struct phy {
3630 	int node;
3631 	const char *name;
3632 };
3633 
3634 struct phy *
3635 devm_phy_optional_get(struct device *dev, const char *name)
3636 {
3637 	struct platform_device *pdev = (struct platform_device *)dev;
3638 	struct phy *phy;
3639 	int idx;
3640 
3641 	idx = OF_getindex(pdev->node, name, "phy-names");
3642 	if (idx == -1)
3643 		return NULL;
3644 
3645 	phy = malloc(sizeof(*phy), M_DEVBUF, M_WAITOK);
3646 	phy->node = pdev->node;
3647 	phy->name = name;
3648 
3649 	return phy;
3650 }
3651 
3652 struct bus_type platform_bus_type;
3653 
3654 #include <dev/ofw/ofw_misc.h>
3655 
3656 #include <linux/of.h>
3657 #include <linux/platform_device.h>
3658 
3659 struct device_node *
3660 __of_devnode(void *arg)
3661 {
3662 	struct device *dev = container_of(arg, struct device, of_node);
3663 	struct platform_device *pdev = (struct platform_device *)dev;
3664 
3665 	return (struct device_node *)(uintptr_t)pdev->node;
3666 }
3667 
3668 int
3669 __of_device_is_compatible(struct device_node *np, const char *compatible)
3670 {
3671 	return OF_is_compatible((uintptr_t)np, compatible);
3672 }
3673 
3674 int
3675 __of_property_present(struct device_node *np, const char *propname)
3676 {
3677 	return OF_getpropbool((uintptr_t)np, (char *)propname);
3678 }
3679 
3680 int
3681 __of_property_read_variable_u32_array(struct device_node *np,
3682     const char *propname, uint32_t *out_values, size_t sz_min, size_t sz_max)
3683 {
3684 	int len;
3685 
3686 	len = OF_getpropintarray((uintptr_t)np, (char *)propname, out_values,
3687 	    sz_max * sizeof(*out_values));
3688 	if (len < 0)
3689 		return -EINVAL;
3690 	if (len == 0)
3691 		return -ENODATA;
3692 	if (len < sz_min * sizeof(*out_values) ||
3693 	    len > sz_max * sizeof(*out_values))
3694 		return -EOVERFLOW;
3695 	if (sz_min == 1 && sz_max == 1)
3696 		return 0;
3697 	return len / sizeof(*out_values);
3698 }
3699 
3700 int
3701 __of_property_read_variable_u64_array(struct device_node *np,
3702     const char *propname, uint64_t *out_values, size_t sz_min, size_t sz_max)
3703 {
3704 	int len;
3705 
3706 	len = OF_getpropint64array((uintptr_t)np, (char *)propname, out_values,
3707 	    sz_max * sizeof(*out_values));
3708 	if (len < 0)
3709 		return -EINVAL;
3710 	if (len == 0)
3711 		return -ENODATA;
3712 	if (len < sz_min * sizeof(*out_values) ||
3713 	    len > sz_max * sizeof(*out_values))
3714 		return -EOVERFLOW;
3715 	if (sz_min == 1 && sz_max == 1)
3716 		return 0;
3717 	return len / sizeof(*out_values);
3718 }
3719 
3720 int
3721 __of_property_match_string(struct device_node *np,
3722     const char *propname, const char *str)
3723 {
3724 	int idx;
3725 
3726 	idx = OF_getindex((uintptr_t)np, str, propname);
3727 	if (idx == -1)
3728 		return -ENODATA;
3729 	return idx;
3730 }
3731 
3732 struct device_node *
3733 __of_parse_phandle(struct device_node *np, const char *propname, int idx)
3734 {
3735 	uint32_t phandles[16] = {};
3736 	int len, node;
3737 
3738 	len = OF_getpropintarray((uintptr_t)np, (char *)propname, phandles,
3739 	    sizeof(phandles));
3740 	if (len < (idx + 1) * sizeof(uint32_t))
3741 		return NULL;
3742 
3743 	node = OF_getnodebyphandle(phandles[idx]);
3744 	if (node == 0)
3745 		return NULL;
3746 
3747 	return (struct device_node *)(uintptr_t)node;
3748 }
3749 
3750 int
3751 __of_parse_phandle_with_args(struct device_node *np, const char *propname,
3752     const char *cellsname, int idx, struct of_phandle_args *args)
3753 {
3754 	uint32_t phandles[16] = {};
3755 	int i, len, node;
3756 
3757 	len = OF_getpropintarray((uintptr_t)np, (char *)propname, phandles,
3758 	    sizeof(phandles));
3759 	if (len < (idx + 1) * sizeof(uint32_t))
3760 		return -ENOENT;
3761 
3762 	node = OF_getnodebyphandle(phandles[idx]);
3763 	if (node == 0)
3764 		return -ENOENT;
3765 
3766 	args->np = (struct device_node *)(uintptr_t)node;
3767 	args->args_count = OF_getpropint(node, (char *)cellsname, 0);
3768 	for (i = 0; i < args->args_count; i++)
3769 		args->args[i] = phandles[i + 1];
3770 
3771 	return 0;
3772 }
3773 
3774 int
3775 of_address_to_resource(struct device_node *np, int idx, struct resource *res)
3776 {
3777 	uint64_t reg[16] = {};
3778 	int len;
3779 
3780 	KASSERT(idx < 8);
3781 
3782 	len = OF_getpropint64array((uintptr_t)np, "reg", reg, sizeof(reg));
3783 	if (len < 0 || idx >= (len / (2 * sizeof(uint64_t))))
3784 		return -EINVAL;
3785 
3786 	res->start = reg[2 * idx];
3787 	res->end = reg[2 * idx] + reg[2 * idx + 1] - 1;
3788 
3789 	return 0;
3790 }
3791 
3792 static int
3793 next_node(int node)
3794 {
3795 	int peer = OF_peer(node);
3796 
3797 	while (node && !peer) {
3798 		node = OF_parent(node);
3799 		if (node)
3800 			peer = OF_peer(node);
3801 	}
3802 
3803 	return peer;
3804 }
3805 
3806 static int
3807 find_matching_node(int node, const struct of_device_id *id)
3808 {
3809 	int child, match;
3810 	int i;
3811 
3812 	for (child = OF_child(node); child; child = OF_peer(child)) {
3813 		match = find_matching_node(child, id);
3814 		if (match)
3815 			return match;
3816 	}
3817 
3818 	for (i = 0; id[i].compatible; i++) {
3819 		if (OF_is_compatible(node, id[i].compatible))
3820 			return node;
3821 	}
3822 
3823 	return 0;
3824 }
3825 
3826 struct device_node *
3827 __matching_node(struct device_node *np, const struct of_device_id *id)
3828 {
3829 	int node = OF_peer(0);
3830 	int match;
3831 
3832 	if (np)
3833 		node = next_node((uintptr_t)np);
3834 	while (node) {
3835 		match = find_matching_node(node, id);
3836 		if (match)
3837 			return (struct device_node *)(uintptr_t)match;
3838 		node = next_node(node);
3839 	}
3840 
3841 	return NULL;
3842 }
3843 
3844 struct platform_device *
3845 of_platform_device_create(struct device_node *np, const char *bus_id,
3846     struct device *parent)
3847 {
3848 	struct platform_device *pdev;
3849 
3850 	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
3851 	pdev->node = (intptr_t)np;
3852 	pdev->parent = parent;
3853 
3854 	LIST_INSERT_HEAD(&pdev_list, pdev, next);
3855 
3856 	return pdev;
3857 }
3858 
3859 struct platform_device *
3860 of_find_device_by_node(struct device_node *np)
3861 {
3862 	struct platform_device *pdev;
3863 
3864 	LIST_FOREACH(pdev, &pdev_list, next) {
3865 		if (pdev->node == (intptr_t)np)
3866 			return pdev;
3867 	}
3868 
3869 	return NULL;
3870 }
3871 
3872 int
3873 of_device_is_available(struct device_node *np)
3874 {
3875 	char status[32];
3876 
3877 	if (OF_getprop((uintptr_t)np, "status", status, sizeof(status)) > 0 &&
3878 	    strcmp(status, "disabled") == 0)
3879 		return 0;
3880 
3881 	return 1;
3882 }
3883 
3884 int
3885 of_dma_configure(struct device *dev, struct device_node *np, int force_dma)
3886 {
3887 	struct platform_device *pdev = (struct platform_device *)dev;
3888 	bus_dma_tag_t dmat = dma_tag_lookup(pdev->parent);
3889 
3890 	pdev->dmat = iommu_device_map(pdev->node, dmat);
3891 	return 0;
3892 }
3893 
3894 struct device_node *
3895 __of_get_compatible_child(void *p, const char *compat)
3896 {
3897 	struct device *dev = container_of(p, struct device, of_node);
3898 	struct platform_device *pdev = (struct platform_device *)dev;
3899 	int child;
3900 
3901 	for (child = OF_child(pdev->node); child; child = OF_peer(child)) {
3902 		if (OF_is_compatible(child, compat))
3903 			return (struct device_node *)(uintptr_t)child;
3904 	}
3905 	return NULL;
3906 }
3907 
3908 struct device_node *
3909 __of_get_child_by_name(void *p, const char *name)
3910 {
3911 	struct device *dev = container_of(p, struct device, of_node);
3912 	struct platform_device *pdev = (struct platform_device *)dev;
3913 	int child;
3914 
3915 	child = OF_getnodebyname(pdev->node, name);
3916 	if (child == 0)
3917 		return NULL;
3918 	return (struct device_node *)(uintptr_t)child;
3919 }
3920 
3921 int
3922 component_compare_of(struct device *dev, void *data)
3923 {
3924 	struct platform_device *pdev = (struct platform_device *)dev;
3925 
3926 	return (pdev->node == (intptr_t)data);
3927 }
3928 
3929 void
3930 drm_of_component_match_add(struct device *master,
3931 			   struct component_match **matchptr,
3932 			   int (*compare)(struct device *, void *),
3933 			   struct device_node *np)
3934 {
3935 	struct component_match *match = *matchptr;
3936 
3937 	if (match == NULL) {
3938 		match = malloc(sizeof(struct component_match),
3939 		    M_DEVBUF, M_WAITOK | M_ZERO);
3940 		*matchptr = match;
3941 	}
3942 
3943 	KASSERT(match->nmatches < nitems(match->match));
3944 	match->match[match->nmatches].compare = compare;
3945 	match->match[match->nmatches].data = np;
3946 	match->nmatches++;
3947 }
3948 
3949 #endif
3950