1 /*	$OpenBSD: drm_linux.c,v 1.56 2020/01/16 16:35:03 mpi Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <drm/drmP.h>
20 #include <dev/pci/ppbreg.h>
21 #include <sys/event.h>
22 #include <sys/filedesc.h>
23 #include <sys/kthread.h>
24 #include <sys/stat.h>
25 #include <sys/unistd.h>
26 #include <linux/dma-buf.h>
27 #include <linux/mod_devicetable.h>
28 #include <linux/acpi.h>
29 #include <linux/pagevec.h>
30 #include <linux/dma-fence-array.h>
31 
32 #if defined(__amd64__) || defined(__i386__)
33 #include "bios.h"
34 #endif
35 
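/*
 * Taskq callback that runs a scheduled tasklet: clear the SCHED bit
 * and invoke the handler unless the tasklet is currently locked or
 * disabled (count != 0).
 */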
36 void
37 tasklet_run(void *arg)
38 {
39 	struct tasklet_struct *ts = arg;
40 
41 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
42 	if (tasklet_trylock(ts)) {
43 		if (!atomic_read(&ts->count))
44 			ts->func(ts->data);
45 		tasklet_unlock(ts);
46 	}
47 }
48 
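/*
 * Minimal emulation of the Linux set_current_state()/schedule()/
 * wake_up_process() idiom.  set_current_state() records the current
 * proc under sch_mtx, schedule_timeout() sleeps on sch_ident and
 * wake_up_process() clears sch_proc and wakes the given proc.
 */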
49 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
50 volatile struct proc *sch_proc;
51 volatile void *sch_ident;
52 int sch_priority;
53 
54 void
55 set_current_state(int state)
56 {
57 	if (sch_ident != curproc)
58 		mtx_enter(&sch_mtx);
59 	MUTEX_ASSERT_LOCKED(&sch_mtx);
60 	sch_ident = sch_proc = curproc;
61 	sch_priority = state;
62 }
63 
64 void
65 __set_current_state(int state)
66 {
67 	KASSERT(state == TASK_RUNNING);
68 	if (sch_ident == curproc) {
69 		MUTEX_ASSERT_LOCKED(&sch_mtx);
70 		sch_ident = NULL;
71 		mtx_leave(&sch_mtx);
72 	}
73 }
74 
75 void
76 schedule(void)
77 {
78 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
79 }
80 
81 long
82 schedule_timeout(long timeout)
83 {
84 	struct sleep_state sls;
85 	long deadline;
86 	int wait, spl;
87 
88 	MUTEX_ASSERT_LOCKED(&sch_mtx);
89 	KASSERT(!cold);
90 
91 	sleep_setup(&sls, sch_ident, sch_priority, "schto");
92 	if (timeout != MAX_SCHEDULE_TIMEOUT)
93 		sleep_setup_timeout(&sls, timeout);
94 	sleep_setup_signal(&sls);
95 
96 	wait = (sch_proc == curproc && timeout > 0);
97 
98 	spl = MUTEX_OLDIPL(&sch_mtx);
99 	MUTEX_OLDIPL(&sch_mtx) = splsched();
100 	mtx_leave(&sch_mtx);
101 
102 	if (timeout != MAX_SCHEDULE_TIMEOUT)
103 		deadline = ticks + timeout;
104 	sleep_finish_all(&sls, wait);
105 	if (timeout != MAX_SCHEDULE_TIMEOUT)
106 		timeout = deadline - ticks;
107 
108 	mtx_enter(&sch_mtx);
109 	MUTEX_OLDIPL(&sch_mtx) = spl;
110 	sch_ident = curproc;
111 
112 	return timeout > 0 ? timeout : 0;
113 }
114 
115 int
116 wake_up_process(struct proc *p)
117 {
118 	atomic_cas_ptr(&sch_proc, p, NULL);
119 	return wakeup_proc(p, NULL);
120 }
121 
122 void
123 flush_workqueue(struct workqueue_struct *wq)
124 {
125 	if (cold)
126 		return;
127 
128 	taskq_barrier((struct taskq *)wq);
129 }
130 
131 bool
132 flush_work(struct work_struct *work)
133 {
134 	if (cold)
135 		return false;
136 
137 	taskq_barrier(work->tq);
138 	return false;
139 }
140 
141 bool
142 flush_delayed_work(struct delayed_work *dwork)
143 {
144 	bool ret = false;
145 
146 	if (cold)
147 		return false;
148 
149 	while (timeout_pending(&dwork->to)) {
150 		tsleep(dwork, PWAIT, "fldwto", 1);
151 		ret = true;
152 	}
153 
154 	taskq_barrier(dwork->tq ? dwork->tq : (struct taskq *)system_wq);
155 	return ret;
156 }
157 
158 struct kthread {
159 	int (*func)(void *);
160 	void *data;
161 	struct proc *proc;
162 	volatile u_int flags;
163 #define KTHREAD_SHOULDSTOP	0x0000001
164 #define KTHREAD_STOPPED		0x0000002
165 #define KTHREAD_SHOULDPARK	0x0000004
166 #define KTHREAD_PARKED		0x0000008
167 	LIST_ENTRY(kthread) next;
168 };
169 
170 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
171 
172 void
173 kthread_func(void *arg)
174 {
175 	struct kthread *thread = arg;
176 	int ret;
177 
178 	ret = thread->func(thread->data);
179 	thread->flags |= KTHREAD_STOPPED;
180 	kthread_exit(ret);
181 }
182 
183 struct proc *
184 kthread_run(int (*func)(void *), void *data, const char *name)
185 {
186 	struct kthread *thread;
187 
188 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
189 	thread->func = func;
190 	thread->data = data;
191 	thread->flags = 0;
192 
193 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
194 		free(thread, M_DRM, sizeof(*thread));
195 		return ERR_PTR(-ENOMEM);
196 	}
197 
198 	LIST_INSERT_HEAD(&kthread_list, thread, next);
199 	return thread->proc;
200 }
201 
202 struct kthread *
203 kthread_lookup(struct proc *p)
204 {
205 	struct kthread *thread;
206 
207 	LIST_FOREACH(thread, &kthread_list, next) {
208 		if (thread->proc == p)
209 			break;
210 	}
211 	KASSERT(thread);
212 
213 	return thread;
214 }
215 
216 int
217 kthread_should_park(void)
218 {
219 	struct kthread *thread = kthread_lookup(curproc);
220 	return (thread->flags & KTHREAD_SHOULDPARK);
221 }
222 
223 void
224 kthread_parkme(void)
225 {
226 	struct kthread *thread = kthread_lookup(curproc);
227 
228 	while (thread->flags & KTHREAD_SHOULDPARK) {
229 		thread->flags |= KTHREAD_PARKED;
230 		wakeup(thread);
231 		tsleep_nsec(thread, PPAUSE | PCATCH, "parkme", INFSLP);
232 		thread->flags &= ~KTHREAD_PARKED;
233 	}
234 }
235 
236 void
237 kthread_park(struct proc *p)
238 {
239 	struct kthread *thread = kthread_lookup(p);
240 
241 	while ((thread->flags & KTHREAD_PARKED) == 0) {
242 		thread->flags |= KTHREAD_SHOULDPARK;
243 		wake_up_process(thread->proc);
244 		tsleep_nsec(thread, PPAUSE | PCATCH, "park", INFSLP);
245 	}
246 }
247 
248 void
249 kthread_unpark(struct proc *p)
250 {
251 	struct kthread *thread = kthread_lookup(p);
252 
253 	thread->flags &= ~KTHREAD_SHOULDPARK;
254 	wakeup(thread);
255 }
256 
257 int
258 kthread_should_stop(void)
259 {
260 	struct kthread *thread = kthread_lookup(curproc);
261 	return (thread->flags & KTHREAD_SHOULDSTOP);
262 }
263 
264 void
265 kthread_stop(struct proc *p)
266 {
267 	struct kthread *thread = kthread_lookup(p);
268 
269 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
270 		thread->flags |= KTHREAD_SHOULDSTOP;
271 		wake_up_process(thread->proc);
272 		tsleep_nsec(thread, PPAUSE | PCATCH, "stop", INFSLP);
273 	}
274 	LIST_REMOVE(thread, next);
275 	free(thread, M_DRM, sizeof(*thread));
276 }
277 
278 struct timespec
279 ns_to_timespec(const int64_t nsec)
280 {
281 	struct timespec ts;
282 	int32_t rem;
283 
284 	if (nsec == 0) {
285 		ts.tv_sec = 0;
286 		ts.tv_nsec = 0;
287 		return (ts);
288 	}
289 
290 	ts.tv_sec = nsec / NSEC_PER_SEC;
291 	rem = nsec % NSEC_PER_SEC;
292 	if (rem < 0) {
293 		ts.tv_sec--;
294 		rem += NSEC_PER_SEC;
295 	}
296 	ts.tv_nsec = rem;
297 	return (ts);
298 }
299 
300 int64_t
301 timeval_to_ns(const struct timeval *tv)
302 {
303 	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
304 		tv->tv_usec * NSEC_PER_USEC;
305 }
306 
307 struct timeval
308 ns_to_timeval(const int64_t nsec)
309 {
310 	struct timeval tv;
311 	int32_t rem;
312 
313 	if (nsec == 0) {
314 		tv.tv_sec = 0;
315 		tv.tv_usec = 0;
316 		return (tv);
317 	}
318 
319 	tv.tv_sec = nsec / NSEC_PER_SEC;
320 	rem = nsec % NSEC_PER_SEC;
321 	if (rem < 0) {
322 		tv.tv_sec--;
323 		rem += NSEC_PER_SEC;
324 	}
325 	tv.tv_usec = rem / 1000;
326 	return (tv);
327 }
328 
329 int64_t
330 timeval_to_ms(const struct timeval *tv)
331 {
332 	return ((int64_t)tv->tv_sec * 1000) + (tv->tv_usec / 1000);
333 }
334 
335 int64_t
336 timeval_to_us(const struct timeval *tv)
337 {
338 	return ((int64_t)tv->tv_sec * 1000000) + tv->tv_usec;
339 }
340 
341 extern char *hw_vendor, *hw_prod, *hw_ver;
342 
343 #if NBIOS > 0
344 extern char smbios_board_vendor[];
345 extern char smbios_board_prod[];
346 extern char smbios_board_serial[];
347 #endif
348 
349 bool
350 dmi_match(int slot, const char *str)
351 {
352 	switch (slot) {
353 	case DMI_SYS_VENDOR:
354 		if (hw_vendor != NULL &&
355 		    !strcmp(hw_vendor, str))
356 			return true;
357 		break;
358 	case DMI_PRODUCT_NAME:
359 		if (hw_prod != NULL &&
360 		    !strcmp(hw_prod, str))
361 			return true;
362 		break;
363 	case DMI_PRODUCT_VERSION:
364 		if (hw_ver != NULL &&
365 		    !strcmp(hw_ver, str))
366 			return true;
367 		break;
368 #if NBIOS > 0
369 	case DMI_BOARD_VENDOR:
370 		if (strcmp(smbios_board_vendor, str) == 0)
371 			return true;
372 		break;
373 	case DMI_BOARD_NAME:
374 		if (strcmp(smbios_board_prod, str) == 0)
375 			return true;
376 		break;
377 	case DMI_BOARD_SERIAL:
378 		if (strcmp(smbios_board_serial, str) == 0)
379 			return true;
380 		break;
381 #else
382 	case DMI_BOARD_VENDOR:
383 		if (hw_vendor != NULL &&
384 		    !strcmp(hw_vendor, str))
385 			return true;
386 		break;
387 	case DMI_BOARD_NAME:
388 		if (hw_prod != NULL &&
389 		    !strcmp(hw_prod, str))
390 			return true;
391 		break;
392 #endif
393 	case DMI_NONE:
394 	default:
395 		return false;
396 	}
397 
398 	return false;
399 }
400 
401 static bool
402 dmi_found(const struct dmi_system_id *dsi)
403 {
404 	int i, slot;
405 
406 	for (i = 0; i < nitems(dsi->matches); i++) {
407 		slot = dsi->matches[i].slot;
408 		if (slot == DMI_NONE)
409 			break;
410 		if (!dmi_match(slot, dsi->matches[i].substr))
411 			return false;
412 	}
413 
414 	return true;
415 }
416 
417 const struct dmi_system_id *
418 dmi_first_match(const struct dmi_system_id *sysid)
419 {
420 	const struct dmi_system_id *dsi;
421 
422 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
423 		if (dmi_found(dsi))
424 			return dsi;
425 	}
426 
427 	return NULL;
428 }
429 
430 #if NBIOS > 0
431 extern char smbios_bios_date[];
432 #endif
433 
434 const char *
435 dmi_get_system_info(int slot)
436 {
437 	WARN_ON(slot != DMI_BIOS_DATE);
438 #if NBIOS > 0
439 	if (slot == DMI_BIOS_DATE)
440 		return smbios_bios_date;
441 #endif
442 	return NULL;
443 }
444 
445 int
446 dmi_check_system(const struct dmi_system_id *sysid)
447 {
448 	const struct dmi_system_id *dsi;
449 	int num = 0;
450 
451 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
452 		if (dmi_found(dsi)) {
453 			num++;
454 			if (dsi->callback && dsi->callback(dsi))
455 				break;
456 		}
457 	}
458 	return (num);
459 }
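
/*
 * Example quirk table for dmi_check_system()/dmi_first_match(), as a
 * sketch (the callback and match strings are hypothetical):
 *
 *	static const struct dmi_system_id quirks[] = {
 *		{
 *			.callback = some_quirk_cb,
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "Vendor"),
 *				DMI_MATCH(DMI_PRODUCT_NAME, "Product"),
 *			},
 *		},
 *		{ }
 *	};
 *
 *	dmi_check_system(quirks);
 */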
460 
461 struct vm_page *
462 alloc_pages(unsigned int gfp_mask, unsigned int order)
463 {
464 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
465 	struct uvm_constraint_range *constraint = &no_constraint;
466 	struct pglist mlist;
467 
468 	if (gfp_mask & M_CANFAIL)
469 		flags |= UVM_PLA_FAILOK;
470 	if (gfp_mask & M_ZERO)
471 		flags |= UVM_PLA_ZERO;
472 	if (gfp_mask & __GFP_DMA32)
473 		constraint = &dma_constraint;
474 
475 	TAILQ_INIT(&mlist);
476 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
477 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
478 		return NULL;
479 	return TAILQ_FIRST(&mlist);
480 }
481 
482 void
483 __free_pages(struct vm_page *page, unsigned int order)
484 {
485 	struct pglist mlist;
486 	int i;
487 
488 	TAILQ_INIT(&mlist);
489 	for (i = 0; i < (1 << order); i++)
490 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
491 	uvm_pglistfree(&mlist);
492 }
493 
494 void
495 __pagevec_release(struct pagevec *pvec)
496 {
497 	struct pglist mlist;
498 	int i;
499 
500 	TAILQ_INIT(&mlist);
501 	for (i = 0; i < pvec->nr; i++)
502 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
503 	uvm_pglistfree(&mlist);
504 	pagevec_reinit(pvec);
505 }
506 
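/*
 * Map a single page into kernel virtual address space, either via the
 * direct map where the architecture provides one or via a temporary
 * mapping in phys_map.
 */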
507 void *
508 kmap(struct vm_page *pg)
509 {
510 	vaddr_t va;
511 
512 #if defined (__HAVE_PMAP_DIRECT)
513 	va = pmap_map_direct(pg);
514 #else
515 	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
516 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
517 	pmap_update(pmap_kernel());
518 #endif
519 	return (void *)va;
520 }
521 
522 void
523 kunmap(void *addr)
524 {
525 	vaddr_t va = (vaddr_t)addr;
526 
527 #if defined (__HAVE_PMAP_DIRECT)
528 	pmap_unmap_direct(va);
529 #else
530 	pmap_kremove(va, PAGE_SIZE);
531 	pmap_update(pmap_kernel());
532 	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
533 #endif
534 }
535 
536 void *
537 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
538      pgprot_t prot)
539 {
540 	vaddr_t va;
541 	paddr_t pa;
542 	int i;
543 
544 	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
545 	if (va == 0)
546 		return NULL;
547 	for (i = 0; i < npages; i++) {
548 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
549 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
550 		    PROT_READ | PROT_WRITE,
551 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
552 		pmap_update(pmap_kernel());
553 	}
554 
555 	return (void *)va;
556 }
557 
558 void
559 vunmap(void *addr, size_t size)
560 {
561 	vaddr_t va = (vaddr_t)addr;
562 
563 	pmap_remove(pmap_kernel(), va, va + size);
564 	pmap_update(pmap_kernel());
565 	uvm_km_free(kernel_map, va, size);
566 }
567 
568 void
569 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
570     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
571 {
572 	const uint8_t *cbuf = buf;
573 	int i;
574 
575 	for (i = 0; i < len; i++) {
576 		if ((i % rowsize) == 0)
577 			printf("%s", prefix_str);
578 		printf("%02x", cbuf[i]);
579 		if ((i % rowsize) == (rowsize - 1))
580 			printf("\n");
581 		else
582 			printf(" ");
583 	}
584 }
585 
586 void *
587 memchr_inv(const void *s, int c, size_t n)
588 {
589 	if (n != 0) {
590 		const unsigned char *p = s;
591 
592 		do {
593 			if (*p++ != (unsigned char)c)
594 				return ((void *)(p - 1));
595 		} while (--n != 0);
596 	}
597 	return (NULL);
598 }
599 
600 int
601 panic_cmp(struct rb_node *a, struct rb_node *b)
602 {
603 	panic(__func__);
604 }
605 
606 #undef RB_ROOT
607 #define RB_ROOT(head)	(head)->rbh_root
608 
609 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
610 
611 /*
612  * This is a fairly minimal implementation of the Linux "idr" API.  It
613  * probably isn't very efficient, and definitely isn't RCU safe.  The
614  * pre-load buffer is global instead of per-cpu; we rely on the kernel
615  * lock to make this work.  ID randomization (to make IDs harder to
616  * guess) is prepared below but currently disabled ("notyet").
617  */
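
/*
 * Typical usage, as a sketch (error handling omitted; obj is the
 * caller's pointer and a wait-ok allocation flag such as GFP_KERNEL
 * is assumed):
 *
 *	struct idr idr;
 *	int handle;
 *
 *	idr_init(&idr);
 *	handle = idr_alloc(&idr, obj, 1, 0, GFP_KERNEL);
 *	obj = idr_find(&idr, handle);
 *	idr_remove(&idr, handle);
 *	idr_destroy(&idr);
 */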
618 
619 int idr_cmp(struct idr_entry *, struct idr_entry *);
620 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
621 
622 struct pool idr_pool;
623 struct idr_entry *idr_entry_cache;
624 
625 void
626 idr_init(struct idr *idr)
627 {
628 	static int initialized;
629 
630 	if (!initialized) {
631 		pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
632 		    "idrpl", NULL);
633 		initialized = 1;
634 	}
635 	SPLAY_INIT(&idr->tree);
636 }
637 
638 void
639 idr_destroy(struct idr *idr)
640 {
641 	struct idr_entry *id;
642 
643 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
644 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
645 		pool_put(&idr_pool, id);
646 	}
647 }
648 
649 void
650 idr_preload(unsigned int gfp_mask)
651 {
652 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
653 
654 	KERNEL_ASSERT_LOCKED();
655 
656 	if (idr_entry_cache == NULL)
657 		idr_entry_cache = pool_get(&idr_pool, flags);
658 }
659 
660 int
661 idr_alloc(struct idr *idr, void *ptr, int start, int end,
662     unsigned int gfp_mask)
663 {
664 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
665 	struct idr_entry *id;
666 	int begin;
667 
668 	KERNEL_ASSERT_LOCKED();
669 
670 	if (idr_entry_cache) {
671 		id = idr_entry_cache;
672 		idr_entry_cache = NULL;
673 	} else {
674 		id = pool_get(&idr_pool, flags);
675 		if (id == NULL)
676 			return -ENOMEM;
677 	}
678 
679 	if (end <= 0)
680 		end = INT_MAX;
681 
682 #ifdef notyet
683 	id->id = begin = start + arc4random_uniform(end - start);
684 #else
685 	id->id = begin = start;
686 #endif
687 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
688 		if (++id->id == end)
689 			id->id = start;
690 		if (id->id == begin) {
691 			pool_put(&idr_pool, id);
692 			return -ENOSPC;
693 		}
694 	}
695 	id->ptr = ptr;
696 	return id->id;
697 }
698 
699 void *
700 idr_replace(struct idr *idr, void *ptr, int id)
701 {
702 	struct idr_entry find, *res;
703 	void *old;
704 
705 	find.id = id;
706 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
707 	if (res == NULL)
708 		return ERR_PTR(-ENOENT);
709 	old = res->ptr;
710 	res->ptr = ptr;
711 	return old;
712 }
713 
714 void *
715 idr_remove(struct idr *idr, int id)
716 {
717 	struct idr_entry find, *res;
718 	void *ptr = NULL;
719 
720 	find.id = id;
721 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
722 	if (res) {
723 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
724 		ptr = res->ptr;
725 		pool_put(&idr_pool, res);
726 	}
727 	return ptr;
728 }
729 
730 void *
731 idr_find(struct idr *idr, int id)
732 {
733 	struct idr_entry find, *res;
734 
735 	find.id = id;
736 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
737 	if (res == NULL)
738 		return NULL;
739 	return res->ptr;
740 }
741 
742 void *
743 idr_get_next(struct idr *idr, int *id)
744 {
745 	struct idr_entry *res;
746 
747 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
748 		if (res->id >= *id) {
749 			*id = res->id;
750 			return res->ptr;
751 		}
752 	}
753 
754 	return NULL;
755 }
756 
757 int
758 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
759 {
760 	struct idr_entry *id;
761 	int ret;
762 
763 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
764 		ret = func(id->id, id->ptr, data);
765 		if (ret)
766 			return ret;
767 	}
768 
769 	return 0;
770 }
771 
772 int
773 idr_cmp(struct idr_entry *a, struct idr_entry *b)
774 {
775 	return (a->id < b->id ? -1 : a->id > b->id);
776 }
777 
778 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
779 
780 void
781 ida_init(struct ida *ida)
782 {
783 	ida->counter = 0;
784 }
785 
786 void
787 ida_destroy(struct ida *ida)
788 {
789 }
790 
791 void
792 ida_remove(struct ida *ida, int id)
793 {
794 }
795 
796 int
797 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
798     int flags)
799 {
800 	if (end <= 0)
801 		end = INT_MAX;
802 
803 	if (start > ida->counter)
804 		ida->counter = start;
805 
806 	if (ida->counter >= end)
807 		return -ENOSPC;
808 
809 	return ida->counter++;
810 }
811 
812 void
813 ida_simple_remove(struct ida *ida, int id)
814 {
815 }
816 
817 int
818 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
819 {
820 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
821 	    M_DRM, gfp_mask);
822 	if (table->sgl == NULL)
823 		return -ENOMEM;
824 	table->nents = table->orig_nents = nents;
825 	return 0;
826 }
827 
828 void
829 sg_free_table(struct sg_table *table)
830 {
831 	free(table->sgl, M_DRM,
832 	    table->orig_nents * sizeof(struct scatterlist));
833 }
834 
835 size_t
836 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
837     const void *buf, size_t buflen)
838 {
839 	panic("%s", __func__);
840 }
841 
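/*
 * Translate a Linux-style i2c_msg array into iic_exec(9) calls: all
 * but the last two messages are issued as plain read/write ops; when
 * two messages remain, the first is used as the command phase of the
 * final op, which is issued with a STOP.
 */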
842 int
843 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
844 {
845 	void *cmd = NULL;
846 	int cmdlen = 0;
847 	int err, ret = 0;
848 	int op;
849 
850 	iic_acquire_bus(&adap->ic, 0);
851 
852 	while (num > 2) {
853 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
854 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
855 		    msgs->buf, msgs->len, 0);
856 		if (err) {
857 			ret = -err;
858 			goto fail;
859 		}
860 		msgs++;
861 		num--;
862 		ret++;
863 	}
864 
865 	if (num > 1) {
866 		cmd = msgs->buf;
867 		cmdlen = msgs->len;
868 		msgs++;
869 		num--;
870 		ret++;
871 	}
872 
873 	op = (msgs->flags & I2C_M_RD) ?
874 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
875 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
876 	    msgs->buf, msgs->len, 0);
877 	if (err) {
878 		ret = -err;
879 		goto fail;
880 	}
881 	msgs++;
882 	ret++;
883 
884 fail:
885 	iic_release_bus(&adap->ic, 0);
886 
887 	return ret;
888 }
889 
890 int
891 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
892 {
893 	if (adap->algo)
894 		return adap->algo->master_xfer(adap, msgs, num);
895 
896 	return i2c_master_xfer(adap, msgs, num);
897 }
898 
899 int
900 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
901 {
902 	struct i2c_algo_bit_data *algo = adap->algo_data;
903 	struct i2c_adapter bb;
904 
905 	memset(&bb, 0, sizeof(bb));
906 	bb.ic = algo->ic;
907 	bb.retries = adap->retries;
908 	return i2c_master_xfer(&bb, msgs, num);
909 }
910 
911 uint32_t
912 i2c_bb_functionality(struct i2c_adapter *adap)
913 {
914 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
915 }
916 
917 struct i2c_algorithm i2c_bit_algo = {
918 	.master_xfer = i2c_bb_master_xfer,
919 	.functionality = i2c_bb_functionality
920 };
921 
922 int
923 i2c_bit_add_bus(struct i2c_adapter *adap)
924 {
925 	adap->algo = &i2c_bit_algo;
926 	adap->retries = 3;
927 
928 	return 0;
929 }
930 
931 #if defined(__amd64__) || defined(__i386__)
932 
933 /*
934  * This is a minimal implementation of the Linux vga_get/vga_put
935  * interface.  In all likelihood, it will only work for inteldrm(4) as
936  * it assumes that if there is another active VGA device in the
937  * system, it is sitting behind a PCI bridge.
938  */
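
/*
 * Expected call pattern, as a sketch (the rsrc argument is ignored
 * here; VGA_RSRC_LEGACY_IO merely stands in for whatever the caller
 * passes):
 *
 *	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 *	... touch legacy VGA resources ...
 *	vga_put(pdev, VGA_RSRC_LEGACY_IO);
 */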
939 
940 extern int pci_enumerate_bus(struct pci_softc *,
941     int (*)(struct pci_attach_args *), struct pci_attach_args *);
942 
943 pcitag_t vga_bridge_tag;
944 int vga_bridge_disabled;
945 
946 int
947 vga_disable_bridge(struct pci_attach_args *pa)
948 {
949 	pcireg_t bhlc, bc;
950 
951 	if (pa->pa_domain != 0)
952 		return 0;
953 
954 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
955 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
956 		return 0;
957 
958 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
959 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
960 		return 0;
961 	bc &= ~PPB_BC_VGA_ENABLE;
962 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
963 
964 	vga_bridge_tag = pa->pa_tag;
965 	vga_bridge_disabled = 1;
966 
967 	return 1;
968 }
969 
970 void
971 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
972 {
973 	KASSERT(pdev->pci->sc_bridgetag == NULL);
974 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
975 }
976 
977 void
978 vga_put(struct pci_dev *pdev, int rsrc)
979 {
980 	pcireg_t bc;
981 
982 	if (!vga_bridge_disabled)
983 		return;
984 
985 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
986 	bc |= PPB_BC_VGA_ENABLE;
987 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
988 
989 	vga_bridge_disabled = 0;
990 }
991 
992 #endif
993 
994 /*
995  * ACPI types and interfaces.
996  */
997 
998 #ifdef __HAVE_ACPI
999 #include "acpi.h"
1000 #endif
1001 
1002 #if NACPI > 0
1003 
1004 #include <dev/acpi/acpireg.h>
1005 #include <dev/acpi/acpivar.h>
1006 #include <dev/acpi/amltypes.h>
1007 #include <dev/acpi/dsdt.h>
1008 
1009 acpi_status
1010 acpi_get_table(const char *sig, int instance,
1011     struct acpi_table_header **hdr)
1012 {
1013 	struct acpi_softc *sc = acpi_softc;
1014 	struct acpi_q *entry;
1015 
1016 	KASSERT(instance == 1);
1017 
1018 	if (sc == NULL)
1019 		return AE_NOT_FOUND;
1020 
1021 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1022 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1023 			*hdr = entry->q_table;
1024 			return 0;
1025 		}
1026 	}
1027 
1028 	return AE_NOT_FOUND;
1029 }
1030 
1031 acpi_status
1032 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1033 {
1034 	node = aml_searchname(node, name);
1035 	if (node == NULL)
1036 		return AE_NOT_FOUND;
1037 
1038 	*rnode = node;
1039 	return 0;
1040 }
1041 
1042 acpi_status
1043 acpi_get_name(acpi_handle node, int type,  struct acpi_buffer *buffer)
1044 {
1045 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1046 	KASSERT(type == ACPI_FULL_PATHNAME);
1047 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1048 	return 0;
1049 }
1050 
1051 acpi_status
1052 acpi_evaluate_object(acpi_handle node, const char *name,
1053     struct acpi_object_list *params, struct acpi_buffer *result)
1054 {
1055 	struct aml_value args[4], res;
1056 	union acpi_object *obj;
1057 	uint8_t *data;
1058 	int i;
1059 
1060 	KASSERT(params->count <= nitems(args));
1061 
1062 	for (i = 0; i < params->count; i++) {
1063 		args[i].type = params->pointer[i].type;
1064 		switch (args[i].type) {
1065 		case AML_OBJTYPE_INTEGER:
1066 			args[i].v_integer = params->pointer[i].integer.value;
1067 			break;
1068 		case AML_OBJTYPE_BUFFER:
1069 			args[i].length = params->pointer[i].buffer.length;
1070 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1071 			break;
1072 		default:
1073 			printf("%s: arg type 0x%02x\n", __func__, args[i].type);
1074 			return AE_BAD_PARAMETER;
1075 		}
1076 	}
1077 
1078 	if (name) {
1079 		node = aml_searchname(node, name);
1080 		if (node == NULL)
1081 			return AE_NOT_FOUND;
1082 	}
1083 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1084 		aml_freevalue(&res);
1085 		return AE_ERROR;
1086 	}
1087 
1088 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1089 
1090 	result->length = sizeof(union acpi_object);
1091 	switch (res.type) {
1092 	case AML_OBJTYPE_BUFFER:
1093 		result->length += res.length;
1094 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1095 		obj = (union acpi_object *)result->pointer;
1096 		data = (uint8_t *)(obj + 1);
1097 		obj->type = res.type;
1098 		obj->buffer.length = res.length;
1099 		obj->buffer.pointer = data;
1100 		memcpy(data, res.v_buffer, res.length);
1101 		break;
1102 	default:
1103 		printf("%s: return type 0x%02x\n", __func__, res.type);
1104 		aml_freevalue(&res);
1105 		return AE_ERROR;
1106 	}
1107 
1108 	aml_freevalue(&res);
1109 	return 0;
1110 }
1111 
1112 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1113 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1114 
1115 int
1116 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1117 {
1118 	struct acpi_bus_event event;
1119 	struct notifier_block *nb;
1120 
1121 	event.device_class = ACPI_VIDEO_CLASS;
1122 	event.type = notify;
1123 
1124 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1125 		nb->notifier_call(nb, 0, &event);
1126 	return 0;
1127 }
1128 
1129 int
1130 register_acpi_notifier(struct notifier_block *nb)
1131 {
1132 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1133 	return 0;
1134 }
1135 
1136 int
1137 unregister_acpi_notifier(struct notifier_block *nb)
1138 {
1139 	struct notifier_block *tmp;
1140 
1141 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1142 		if (tmp == nb) {
1143 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1144 			    notifier_block, link);
1145 			return 0;
1146 		}
1147 	}
1148 
1149 	return -ENOENT;
1150 }
1151 
1152 const char *
1153 acpi_format_exception(acpi_status status)
1154 {
1155 	switch (status) {
1156 	case AE_NOT_FOUND:
1157 		return "not found";
1158 	case AE_BAD_PARAMETER:
1159 		return "bad parameter";
1160 	default:
1161 		return "unknown";
1162 	}
1163 }
1164 
1165 #endif
1166 
1167 void
1168 backlight_do_update_status(void *arg)
1169 {
1170 	backlight_update_status(arg);
1171 }
1172 
1173 struct backlight_device *
1174 backlight_device_register(const char *name, void *kdev, void *data,
1175     const struct backlight_ops *ops, struct backlight_properties *props)
1176 {
1177 	struct backlight_device *bd;
1178 
1179 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1180 	bd->ops = ops;
1181 	bd->props = *props;
1182 	bd->data = data;
1183 
1184 	task_set(&bd->task, backlight_do_update_status, bd);
1185 
1186 	return bd;
1187 }
1188 
1189 void
1190 backlight_device_unregister(struct backlight_device *bd)
1191 {
1192 	free(bd, M_DRM, sizeof(*bd));
1193 }
1194 
1195 void
1196 backlight_schedule_update_status(struct backlight_device *bd)
1197 {
1198 	task_add(systq, &bd->task);
1199 }
1200 
1201 void
1202 drm_sysfs_hotplug_event(struct drm_device *dev)
1203 {
1204 	KNOTE(&dev->note, NOTE_CHANGE);
1205 }
1206 
1207 unsigned int drm_fence_count;
1208 
1209 unsigned int
1210 dma_fence_context_alloc(unsigned int num)
1211 {
1212 	return __sync_add_and_fetch(&drm_fence_count, num) - num;
1213 }
1214 
1215 struct default_wait_cb {
1216 	struct dma_fence_cb base;
1217 	struct proc *proc;
1218 };
1219 
1220 static void
1221 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1222 {
1223 	struct default_wait_cb *wait =
1224 	    container_of(cb, struct default_wait_cb, base);
1225 	wake_up_process(wait->proc);
1226 }
1227 
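/*
 * Default fence wait: enable signaling if necessary, hook a callback
 * that wakes the current proc and sleep until the fence signals, the
 * timeout expires or, if intr is set, a signal is caught.  Returns a
 * positive value when the fence signaled, 0 on timeout and
 * -ERESTARTSYS when interrupted.
 */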
1228 long
1229 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1230 {
1231 	long ret = timeout ? timeout : 1;
1232 	int err;
1233 	struct default_wait_cb cb;
1234 	bool was_set;
1235 
1236 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1237 		return ret;
1238 
1239 	mtx_enter(fence->lock);
1240 
1241 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1242 	    &fence->flags);
1243 
1244 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1245 		goto out;
1246 
1247 	if (!was_set && fence->ops->enable_signaling) {
1248 		if (!fence->ops->enable_signaling(fence)) {
1249 			dma_fence_signal_locked(fence);
1250 			goto out;
1251 		}
1252 	}
1253 
1254 	if (timeout == 0) {
1255 		ret = 0;
1256 		goto out;
1257 	}
1258 
1259 	cb.base.func = dma_fence_default_wait_cb;
1260 	cb.proc = curproc;
1261 	list_add(&cb.base.node, &fence->cb_list);
1262 
1263 	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1264 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0, "dmafence",
1265 		    timeout);
1266 		if (err == EINTR || err == ERESTART) {
1267 			ret = -ERESTARTSYS;
1268 			break;
1269 		} else if (err == EWOULDBLOCK) {
1270 			ret = 0;
1271 			break;
1272 		}
1273 	}
1274 
1275 	if (!list_empty(&cb.base.node))
1276 		list_del(&cb.base.node);
1277 out:
1278 	mtx_leave(fence->lock);
1279 
1280 	return ret;
1281 }
1282 
1283 static bool
1284 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1285     uint32_t *idx)
1286 {
1287 	int i;
1288 
1289 	for (i = 0; i < count; ++i) {
1290 		struct dma_fence *fence = fences[i];
1291 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1292 			if (idx)
1293 				*idx = i;
1294 			return true;
1295 		}
1296 	}
1297 	return false;
1298 }
1299 
1300 long
1301 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
1302     bool intr, long timeout, uint32_t *idx)
1303 {
1304 	struct default_wait_cb *cb;
1305 	int i, err;
1306 	long ret = timeout;
1307 
1308 	if (timeout == 0) {
1309 		for (i = 0; i < count; i++) {
1310 			if (dma_fence_is_signaled(fences[i])) {
1311 				if (idx)
1312 					*idx = i;
1313 				return 1;
1314 			}
1315 		}
1316 		return 0;
1317 	}
1318 
1319 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1320 	if (cb == NULL)
1321 		return -ENOMEM;
1322 
1323 	for (i = 0; i < count; i++) {
1324 		struct dma_fence *fence = fences[i];
1325 		cb[i].proc = curproc;
1326 		if (dma_fence_add_callback(fence, &cb[i].base,
1327 		    dma_fence_default_wait_cb)) {
1328 			if (idx)
1329 				*idx = i;
1330 			goto cb_cleanup;
1331 		}
1332 	}
1333 
1334 	while (ret > 0) {
1335 		if (dma_fence_test_signaled_any(fences, count, idx))
1336 			break;
1337 
1338 		err = tsleep(curproc, intr ? PCATCH : 0,
1339 		    "dfwat", timeout);
1340 		if (err == EINTR || err == ERESTART) {
1341 			ret = -ERESTARTSYS;
1342 			break;
1343 		} else if (err == EWOULDBLOCK) {
1344 			ret = 0;
1345 			break;
1346 		}
1347 	}
1348 
1349 cb_cleanup:
1350 	while (i-- > 0)
1351 		dma_fence_remove_callback(fences[i], &cb[i].base);
1352 	free(cb, M_DRM, count * sizeof(*cb));
1353 	return ret;
1354 }
1355 
1356 static const char *
1357 dma_fence_array_get_driver_name(struct dma_fence *fence)
1358 {
1359 	return "dma_fence_array";
1360 }
1361 
1362 static const char *
1363 dma_fence_array_get_timeline_name(struct dma_fence *fence)
1364 {
1365 	return "unbound";
1366 }
1367 
1368 static void
1369 irq_dma_fence_array_work(struct irq_work *wrk)
1370 {
1371 	struct dma_fence_array *dfa = container_of(wrk, typeof(*dfa), work);
1372 
1373 	dma_fence_signal(&dfa->base);
1374 	dma_fence_put(&dfa->base);
1375 }
1376 
1377 static void
1378 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
1379 {
1380 	struct dma_fence_array_cb *array_cb =
1381 	    container_of(cb, struct dma_fence_array_cb, cb);
1382 	struct dma_fence_array *dfa = array_cb->array;
1383 
1384 	if (atomic_dec_and_test(&dfa->num_pending))
1385 		irq_work_queue(&dfa->work);
1386 	else
1387 		dma_fence_put(&dfa->base);
1388 }
1389 
1390 static bool
1391 dma_fence_array_enable_signaling(struct dma_fence *fence)
1392 {
1393 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1394 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
1395 	int i;
1396 
1397 	for (i = 0; i < dfa->num_fences; ++i) {
1398 		cb[i].array = dfa;
1399 		dma_fence_get(&dfa->base);
1400 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
1401 		    dma_fence_array_cb_func)) {
1402 			dma_fence_put(&dfa->base);
1403 			if (atomic_dec_and_test(&dfa->num_pending))
1404 				return false;
1405 		}
1406 	}
1407 
1408 	return true;
1409 }
1410 
1411 static bool dma_fence_array_signaled(struct dma_fence *fence)
1412 {
1413 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1414 
1415 	return atomic_read(&dfa->num_pending) <= 0;
1416 }
1417 
1418 static void dma_fence_array_release(struct dma_fence *fence)
1419 {
1420 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1421 	int i;
1422 
1423 	for (i = 0; i < dfa->num_fences; ++i)
1424 		dma_fence_put(dfa->fences[i]);
1425 
1426 	free(dfa->fences, M_DRM, 0);
1427 	dma_fence_free(fence);
1428 }
1429 
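/*
 * Create a fence that signals once all of the given fences have
 * signaled, or once any one of them has when signal_on_any is set.
 * The array takes ownership of the fences argument and drops the
 * individual references on release.
 */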
1430 struct dma_fence_array *
1431 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
1432     unsigned seqno, bool signal_on_any)
1433 {
1434 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
1435 	    (num_fences * sizeof(struct dma_fence_array_cb)),
1436 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1437 	if (dfa == NULL)
1438 		return NULL;
1439 
1440 	mtx_init(&dfa->lock, IPL_TTY);
1441 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
1442 	    context, seqno);
1443 	init_irq_work(&dfa->work, irq_dma_fence_array_work);
1444 
1445 	dfa->num_fences = num_fences;
1446 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
1447 	dfa->fences = fences;
1448 
1449 	return dfa;
1450 }
1451 
1452 const struct dma_fence_ops dma_fence_array_ops = {
1453 	.get_driver_name = dma_fence_array_get_driver_name,
1454 	.get_timeline_name = dma_fence_array_get_timeline_name,
1455 	.enable_signaling = dma_fence_array_enable_signaling,
1456 	.signaled = dma_fence_array_signaled,
1457 	.release = dma_fence_array_release,
1458 };
1459 
1460 int
1461 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
1462 {
1463 	return (ENXIO);
1464 }
1465 
1466 int
1467 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
1468 {
1469 	return (ENXIO);
1470 }
1471 
1472 int
1473 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
1474 {
1475 	return (ENOTTY);
1476 }
1477 
1478 int
1479 dmabuf_poll(struct file *fp, int events, struct proc *p)
1480 {
1481 	return (0);
1482 }
1483 
1484 int
1485 dmabuf_kqfilter(struct file *fp, struct knote *kn)
1486 {
1487 	return (EINVAL);
1488 }
1489 
1490 int
1491 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
1492 {
1493 	struct dma_buf *dmabuf = fp->f_data;
1494 
1495 	memset(st, 0, sizeof(*st));
1496 	st->st_size = dmabuf->size;
1497 	st->st_mode = S_IFIFO;	/* XXX */
1498 	return (0);
1499 }
1500 
1501 int
1502 dmabuf_close(struct file *fp, struct proc *p)
1503 {
1504 	struct dma_buf *dmabuf = fp->f_data;
1505 
1506 	fp->f_data = NULL;
1507 	KERNEL_LOCK();
1508 	dmabuf->ops->release(dmabuf);
1509 	KERNEL_UNLOCK();
1510 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
1511 	return (0);
1512 }
1513 
1514 int
1515 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
1516 {
1517 	struct dma_buf *dmabuf = fp->f_data;
1518 	off_t newoff;
1519 
1520 	if (*offset != 0)
1521 		return (EINVAL);
1522 
1523 	switch (whence) {
1524 	case SEEK_SET:
1525 		newoff = 0;
1526 		break;
1527 	case SEEK_END:
1528 		newoff = dmabuf->size;
1529 		break;
1530 	default:
1531 		return (EINVAL);
1532 	}
1533 	mtx_enter(&fp->f_mtx);
1534 	fp->f_offset = newoff;
1535 	mtx_leave(&fp->f_mtx);
1536 	*offset = newoff;
1537 	return (0);
1538 }
1539 
1540 const struct fileops dmabufops = {
1541 	.fo_read	= dmabuf_read,
1542 	.fo_write	= dmabuf_write,
1543 	.fo_ioctl	= dmabuf_ioctl,
1544 	.fo_poll	= dmabuf_poll,
1545 	.fo_kqfilter	= dmabuf_kqfilter,
1546 	.fo_stat	= dmabuf_stat,
1547 	.fo_close	= dmabuf_close,
1548 	.fo_seek	= dmabuf_seek,
1549 };
1550 
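/*
 * Wrap a new struct dma_buf in a struct file of type DTYPE_DMABUF
 * backed by the dmabufops above; dma_buf_fd() later installs that
 * file in the caller's descriptor table.
 */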
1551 struct dma_buf *
1552 dma_buf_export(const struct dma_buf_export_info *info)
1553 {
1554 	struct proc *p = curproc;
1555 	struct dma_buf *dmabuf;
1556 	struct file *fp;
1557 
1558 	fp = fnew(p);
1559 	if (fp == NULL)
1560 		return ERR_PTR(-ENFILE);
1561 	fp->f_type = DTYPE_DMABUF;
1562 	fp->f_ops = &dmabufops;
1563 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
1564 	dmabuf->priv = info->priv;
1565 	dmabuf->ops = info->ops;
1566 	dmabuf->size = info->size;
1567 	dmabuf->file = fp;
1568 	fp->f_data = dmabuf;
1569 	return dmabuf;
1570 }
1571 
1572 struct dma_buf *
1573 dma_buf_get(int fd)
1574 {
1575 	struct proc *p = curproc;
1576 	struct filedesc *fdp = p->p_fd;
1577 	struct file *fp;
1578 
1579 	if ((fp = fd_getfile(fdp, fd)) == NULL)
1580 		return ERR_PTR(-EBADF);
1581 
1582 	if (fp->f_type != DTYPE_DMABUF) {
1583 		FRELE(fp, p);
1584 		return ERR_PTR(-EINVAL);
1585 	}
1586 
1587 	return fp->f_data;
1588 }
1589 
1590 void
1591 dma_buf_put(struct dma_buf *dmabuf)
1592 {
1593 	KASSERT(dmabuf);
1594 	KASSERT(dmabuf->file);
1595 
1596 	FRELE(dmabuf->file, curproc);
1597 }
1598 
1599 int
1600 dma_buf_fd(struct dma_buf *dmabuf, int flags)
1601 {
1602 	struct proc *p = curproc;
1603 	struct filedesc *fdp = p->p_fd;
1604 	struct file *fp = dmabuf->file;
1605 	int fd, cloexec, error;
1606 
1607 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
1608 
1609 	fdplock(fdp);
1610 restart:
1611 	if ((error = fdalloc(p, 0, &fd)) != 0) {
1612 		if (error == ENOSPC) {
1613 			fdexpand(p);
1614 			goto restart;
1615 		}
1616 		fdpunlock(fdp);
1617 		return -error;
1618 	}
1619 
1620 	fdinsert(fdp, fd, cloexec, fp);
1621 	fdpunlock(fdp);
1622 
1623 	return fd;
1624 }
1625 
1626 void
1627 get_dma_buf(struct dma_buf *dmabuf)
1628 {
1629 	FREF(dmabuf->file);
1630 }
1631 
1632 enum pci_bus_speed
1633 pcie_get_speed_cap(struct pci_dev *pdev)
1634 {
1635 	pci_chipset_tag_t	pc = pdev->pc;
1636 	pcitag_t		tag = pdev->tag;
1637 	int			pos;
1638 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
1639 	pcireg_t		id;
1640 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
1641 	int			bus, device, function;
1642 
1643 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1644 	    &pos, NULL))
1645 		return PCI_SPEED_UNKNOWN;
1646 
1647 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1648 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1649 
1650 	/* we've been informed VIA and ServerWorks (RCC) don't make the cut */
1651 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
1652 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
1653 		return PCI_SPEED_UNKNOWN;
1654 
1655 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1656 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
1657 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
1658 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
1659 
1660 	lnkcap &= 0x0f;
1661 	lnkcap2 &= 0xfe;
1662 
1663 	if (lnkcap2) { /* PCIE GEN 3.0 */
1664 		if (lnkcap2 & 0x02)
1665 			cap = PCIE_SPEED_2_5GT;
1666 		if (lnkcap2 & 0x04)
1667 			cap = PCIE_SPEED_5_0GT;
1668 		if (lnkcap2 & 0x08)
1669 			cap = PCIE_SPEED_8_0GT;
1670 		if (lnkcap2 & 0x10)
1671 			cap = PCIE_SPEED_16_0GT;
1672 	} else {
1673 		if (lnkcap & 0x01)
1674 			cap = PCIE_SPEED_2_5GT;
1675 		if (lnkcap & 0x02)
1676 			cap = PCIE_SPEED_5_0GT;
1677 	}
1678 
1679 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
1680 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
1681 	    lnkcap2);
1682 	return cap;
1683 }
1684 
1685 enum pcie_link_width
1686 pcie_get_width_cap(struct pci_dev *pdev)
1687 {
1688 	pci_chipset_tag_t	pc = pdev->pc;
1689 	pcitag_t		tag = pdev->tag;
1690 	int			pos;
1691 	pcireg_t		lnkcap = 0;
1692 	pcireg_t		id;
1693 	int			bus, device, function;
1694 
1695 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1696 	    &pos, NULL))
1697 		return PCIE_LNK_WIDTH_UNKNOWN;
1698 
1699 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1700 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1701 
1702 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1703 
1704 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
1705 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
1706 
1707 	if (lnkcap)
1708 		return (lnkcap & 0x3f0) >> 4;
1709 	return PCIE_LNK_WIDTH_UNKNOWN;
1710 }
1711 
1712 int
1713 default_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1714     int sync, void *key)
1715 {
1716 	wakeup(wqe);
1717 	if (wqe->proc)
1718 		wake_up_process(wqe->proc);
1719 	return 0;
1720 }
1721 
1722 int
1723 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1724     int sync, void *key)
1725 {
1726 	default_wake_function(wqe, mode, sync, key);
1727 	list_del_init(&wqe->entry);
1728 	return 0;
1729 }
1730 
1731 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
1732 
1733 int
1734 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1735 {
1736 	int err;
1737 
1738 	if (!test_bit(bit, word))
1739 		return 0;
1740 
1741 	mtx_enter(&wait_bit_mtx);
1742 	while (test_bit(bit, word)) {
1743 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
1744 		    INFSLP);
1745 		if (err) {
1746 			mtx_leave(&wait_bit_mtx);
1747 			return 1;
1748 		}
1749 	}
1750 	mtx_leave(&wait_bit_mtx);
1751 	return 0;
1752 }
1753 
1754 int
1755 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
1756 {
1757 	int err;
1758 
1759 	if (!test_bit(bit, word))
1760 		return 0;
1761 
1762 	mtx_enter(&wait_bit_mtx);
1763 	while (test_bit(bit, word)) {
1764 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
1765 		if (err) {
1766 			mtx_leave(&wait_bit_mtx);
1767 			return 1;
1768 		}
1769 	}
1770 	mtx_leave(&wait_bit_mtx);
1771 	return 0;
1772 }
1773 
1774 void
1775 wake_up_bit(void *word, int bit)
1776 {
1777 	mtx_enter(&wait_bit_mtx);
1778 	wakeup(word);
1779 	mtx_leave(&wait_bit_mtx);
1780 }
1781 
1782 struct workqueue_struct *system_wq;
1783 struct workqueue_struct *system_unbound_wq;
1784 struct workqueue_struct *system_long_wq;
1785 struct taskq *taskletq;
1786 
1787 void
1788 drm_linux_init(void)
1789 {
1790 	if (system_wq == NULL) {
1791 		system_wq = (struct workqueue_struct *)
1792 		    taskq_create("drmwq", 4, IPL_HIGH, 0);
1793 	}
1794 	if (system_unbound_wq == NULL) {
1795 		system_unbound_wq = (struct workqueue_struct *)
1796 		    taskq_create("drmubwq", 4, IPL_HIGH, 0);
1797 	}
1798 	if (system_long_wq == NULL) {
1799 		system_long_wq = (struct workqueue_struct *)
1800 		    taskq_create("drmlwq", 4, IPL_HIGH, 0);
1801 	}
1802 
1803 	if (taskletq == NULL)
1804 		taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
1805 }
1806 
1807 #define PCIE_ECAP_RESIZE_BAR	0x15
1808 #define RBCAP0			0x04
1809 #define RBCTRL0			0x08
1810 #define RBCTRL_BARINDEX_MASK	0x07
1811 #define RBCTRL_BARSIZE_MASK	0x1f00
1812 #define RBCTRL_BARSIZE_SHIFT	8
1813 
1814 /* size in MB is 1 << nsize */
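/* e.g. nsize == 8 requests a 1 << 8 == 256 MB BAR */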
1815 int
1816 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
1817 {
1818 	pcireg_t	reg;
1819 	uint32_t	offset, capid;
1820 
1821 	KASSERT(bar == 0);
1822 
1823 	offset = PCI_PCIE_ECAP;
1824 
1825 	/* search PCI Express Extended Capabilities */
1826 	do {
1827 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
1828 		capid = PCI_PCIE_ECAP_ID(reg);
1829 		if (capid == PCIE_ECAP_RESIZE_BAR)
1830 			break;
1831 		offset = PCI_PCIE_ECAP_NEXT(reg);
1832 	} while (capid != 0);
1833 
1834 	if (capid == 0) {
1835 		printf("%s: could not find resize bar cap!\n", __func__);
1836 		return -ENOTSUP;
1837 	}
1838 
1839 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
1840 
1841 	if ((reg & (1 << (nsize + 4))) == 0) {
1842 		printf("%s size not supported\n", __func__);
1843 		return -ENOTSUP;
1844 	}
1845 
1846 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
1847 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
1848 		printf("%s BAR index not 0\n", __func__);
1849 		return -EINVAL;
1850 	}
1851 
1852 	reg &= ~RBCTRL_BARSIZE_MASK;
1853 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
1854 
1855 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
1856 
1857 	return 0;
1858 }
1859 
1860 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
1861 
1862 int
1863 register_shrinker(struct shrinker *shrinker)
1864 {
1865 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
1866 	return 0;
1867 }
1868 
1869 void
1870 unregister_shrinker(struct shrinker *shrinker)
1871 {
1872 	TAILQ_REMOVE(&shrinkers, shrinker, next);
1873 }
1874 
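/*
 * Walk the registered shrinkers, asking each to scan and release
 * pages until npages have been freed or the list is exhausted.
 */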
1875 void
1876 drmbackoff(long npages)
1877 {
1878 	struct shrink_control sc;
1879 	struct shrinker *shrinker;
1880 	u_long ret;
1881 
1882 	shrinker = TAILQ_FIRST(&shrinkers);
1883 	while (shrinker && npages > 0) {
1884 		sc.nr_to_scan = npages;
1885 		ret = shrinker->scan_objects(shrinker, &sc);
1886 		npages -= ret;
1887 		shrinker = TAILQ_NEXT(shrinker, next);
1888 	}
1889 }
1890