1 /*	$OpenBSD: drm_linux.c,v 1.75 2021/01/08 23:02:09 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30 
31 #include <dev/pci/ppbreg.h>
32 
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/interrupt.h>
39 #include <linux/err.h>
40 #include <linux/idr.h>
41 #include <linux/scatterlist.h>
42 #include <linux/i2c.h>
43 #include <linux/pci.h>
44 #include <linux/notifier.h>
45 #include <linux/backlight.h>
46 #include <linux/shrinker.h>
47 #include <linux/fb.h>
48 #include <linux/xarray.h>
49 #include <linux/interval_tree.h>
50 
51 #include <drm/drm_device.h>
52 #include <drm/drm_print.h>
53 
54 #if defined(__amd64__) || defined(__i386__)
55 #include "bios.h"
56 #endif
57 
58 void
59 tasklet_run(void *arg)
60 {
61 	struct tasklet_struct *ts = arg;
62 
63 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
64 	if (tasklet_trylock(ts)) {
65 		if (!atomic_read(&ts->count))
66 			ts->func(ts->data);
67 		tasklet_unlock(ts);
68 	}
69 }
70 
71 /* 32-bit powerpc lacks 64-bit atomics */
72 #if defined(__powerpc__) && !defined(__powerpc64__)
73 struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
74 #endif
75 
76 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
77 volatile struct proc *sch_proc;
78 volatile void *sch_ident;
79 int sch_priority;
80 
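/*
 * Emulation of the Linux sleep/wakeup handshake: set_current_state()
 * takes sch_mtx and records curproc in sch_ident/sch_proc, and
 * schedule_timeout() only really blocks if sch_proc still points at
 * curproc, so a wake_up_process() that raced in after
 * set_current_state() (and cleared sch_proc) is not lost.
 */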
81 void
82 set_current_state(int state)
83 {
84 	if (sch_ident != curproc)
85 		mtx_enter(&sch_mtx);
86 	MUTEX_ASSERT_LOCKED(&sch_mtx);
87 	sch_ident = sch_proc = curproc;
88 	sch_priority = state;
89 }
90 
91 void
92 __set_current_state(int state)
93 {
94 	KASSERT(state == TASK_RUNNING);
95 	if (sch_ident == curproc) {
96 		MUTEX_ASSERT_LOCKED(&sch_mtx);
97 		sch_ident = NULL;
98 		mtx_leave(&sch_mtx);
99 	}
100 }
101 
102 void
103 schedule(void)
104 {
105 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
106 }
107 
108 long
109 schedule_timeout(long timeout)
110 {
111 	struct sleep_state sls;
112 	unsigned long deadline;
113 	int wait, spl;
114 
115 	MUTEX_ASSERT_LOCKED(&sch_mtx);
116 	KASSERT(!cold);
117 
118 	sleep_setup(&sls, sch_ident, sch_priority, "schto");
119 	if (timeout != MAX_SCHEDULE_TIMEOUT)
120 		sleep_setup_timeout(&sls, timeout);
121 
122 	wait = (sch_proc == curproc && timeout > 0);
123 
124 	spl = MUTEX_OLDIPL(&sch_mtx);
125 	MUTEX_OLDIPL(&sch_mtx) = splsched();
126 	mtx_leave(&sch_mtx);
127 
128 	sleep_setup_signal(&sls);
129 
130 	if (timeout != MAX_SCHEDULE_TIMEOUT)
131 		deadline = jiffies + timeout;
132 	sleep_finish_all(&sls, wait);
133 	if (timeout != MAX_SCHEDULE_TIMEOUT)
134 		timeout = deadline - jiffies;
135 
136 	mtx_enter(&sch_mtx);
137 	MUTEX_OLDIPL(&sch_mtx) = spl;
138 	sch_ident = curproc;
139 
140 	return timeout > 0 ? timeout : 0;
141 }
142 
143 long
144 schedule_timeout_uninterruptible(long timeout)
145 {
146 	tsleep(curproc, PWAIT, "schtou", timeout);
147 	return 0;
148 }
149 
150 int
151 wake_up_process(struct proc *p)
152 {
153 	atomic_cas_ptr(&sch_proc, p, NULL);
154 	return wakeup_proc(p, NULL);
155 }
156 
157 void
158 flush_workqueue(struct workqueue_struct *wq)
159 {
160 	if (cold)
161 		return;
162 
163 	taskq_barrier((struct taskq *)wq);
164 }
165 
166 bool
167 flush_work(struct work_struct *work)
168 {
169 	if (cold)
170 		return false;
171 
172 	taskq_barrier(work->tq);
173 	return false;
174 }
175 
176 bool
177 flush_delayed_work(struct delayed_work *dwork)
178 {
179 	bool ret = false;
180 
181 	if (cold)
182 		return false;
183 
184 	while (timeout_pending(&dwork->to)) {
185 		tsleep(dwork, PWAIT, "fldwto", 1);
186 		ret = true;
187 	}
188 
189 	taskq_barrier(dwork->tq ? dwork->tq : (struct taskq *)system_wq);
190 	return ret;
191 }
192 
193 struct kthread {
194 	int (*func)(void *);
195 	void *data;
196 	struct proc *proc;
197 	volatile u_int flags;
198 #define KTHREAD_SHOULDSTOP	0x0000001
199 #define KTHREAD_STOPPED		0x0000002
200 #define KTHREAD_SHOULDPARK	0x0000004
201 #define KTHREAD_PARKED		0x0000008
202 	LIST_ENTRY(kthread) next;
203 };
204 
205 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
206 
207 void
208 kthread_func(void *arg)
209 {
210 	struct kthread *thread = arg;
211 	int ret;
212 
213 	ret = thread->func(thread->data);
214 	thread->flags |= KTHREAD_STOPPED;
215 	wakeup(thread);
216 	kthread_exit(ret);
217 }
218 
219 struct proc *
220 kthread_run(int (*func)(void *), void *data, const char *name)
221 {
222 	struct kthread *thread;
223 
224 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
225 	thread->func = func;
226 	thread->data = data;
227 	thread->flags = 0;
228 
229 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
230 		free(thread, M_DRM, sizeof(*thread));
231 		return ERR_PTR(-ENOMEM);
232 	}
233 
234 	LIST_INSERT_HEAD(&kthread_list, thread, next);
235 	return thread->proc;
236 }
237 
238 struct kthread *
239 kthread_lookup(struct proc *p)
240 {
241 	struct kthread *thread;
242 
243 	LIST_FOREACH(thread, &kthread_list, next) {
244 		if (thread->proc == p)
245 			break;
246 	}
247 	KASSERT(thread);
248 
249 	return thread;
250 }
251 
252 int
253 kthread_should_park(void)
254 {
255 	struct kthread *thread = kthread_lookup(curproc);
256 	return (thread->flags & KTHREAD_SHOULDPARK);
257 }
258 
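/*
 * Parking handshake: kthread_park() sets KTHREAD_SHOULDPARK and keeps
 * waking the thread until it reaches kthread_parkme(), which marks
 * itself KTHREAD_PARKED, wakes the parker and sleeps until
 * kthread_unpark() clears KTHREAD_SHOULDPARK.
 */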
259 void
260 kthread_parkme(void)
261 {
262 	struct kthread *thread = kthread_lookup(curproc);
263 
264 	while (thread->flags & KTHREAD_SHOULDPARK) {
265 		thread->flags |= KTHREAD_PARKED;
266 		wakeup(thread);
267 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
268 		thread->flags &= ~KTHREAD_PARKED;
269 	}
270 }
271 
272 void
273 kthread_park(struct proc *p)
274 {
275 	struct kthread *thread = kthread_lookup(p);
276 
277 	while ((thread->flags & KTHREAD_PARKED) == 0) {
278 		thread->flags |= KTHREAD_SHOULDPARK;
279 		wake_up_process(thread->proc);
280 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
281 	}
282 }
283 
284 void
285 kthread_unpark(struct proc *p)
286 {
287 	struct kthread *thread = kthread_lookup(p);
288 
289 	thread->flags &= ~KTHREAD_SHOULDPARK;
290 	wakeup(thread);
291 }
292 
293 int
294 kthread_should_stop(void)
295 {
296 	struct kthread *thread = kthread_lookup(curproc);
297 	return (thread->flags & KTHREAD_SHOULDSTOP);
298 }
299 
300 void
301 kthread_stop(struct proc *p)
302 {
303 	struct kthread *thread = kthread_lookup(p);
304 
305 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
306 		thread->flags |= KTHREAD_SHOULDSTOP;
307 		kthread_unpark(p);
308 		wake_up_process(thread->proc);
309 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
310 	}
311 	LIST_REMOVE(thread, next);
312 	free(thread, M_DRM, sizeof(*thread));
313 }
314 
315 #if NBIOS > 0
316 extern char smbios_board_vendor[];
317 extern char smbios_board_prod[];
318 extern char smbios_board_serial[];
319 #endif
320 
321 bool
322 dmi_match(int slot, const char *str)
323 {
324 	switch (slot) {
325 	case DMI_SYS_VENDOR:
326 		if (hw_vendor != NULL &&
327 		    !strcmp(hw_vendor, str))
328 			return true;
329 		break;
330 	case DMI_PRODUCT_NAME:
331 		if (hw_prod != NULL &&
332 		    !strcmp(hw_prod, str))
333 			return true;
334 		break;
335 	case DMI_PRODUCT_VERSION:
336 		if (hw_ver != NULL &&
337 		    !strcmp(hw_ver, str))
338 			return true;
339 		break;
340 #if NBIOS > 0
341 	case DMI_BOARD_VENDOR:
342 		if (strcmp(smbios_board_vendor, str) == 0)
343 			return true;
344 		break;
345 	case DMI_BOARD_NAME:
346 		if (strcmp(smbios_board_prod, str) == 0)
347 			return true;
348 		break;
349 	case DMI_BOARD_SERIAL:
350 		if (strcmp(smbios_board_serial, str) == 0)
351 			return true;
352 		break;
353 #else
354 	case DMI_BOARD_VENDOR:
355 		if (hw_vendor != NULL &&
356 		    !strcmp(hw_vendor, str))
357 			return true;
358 		break;
359 	case DMI_BOARD_NAME:
360 		if (hw_prod != NULL &&
361 		    !strcmp(hw_prod, str))
362 			return true;
363 		break;
364 #endif
365 	case DMI_NONE:
366 	default:
367 		return false;
368 	}
369 
370 	return false;
371 }
372 
373 static bool
374 dmi_found(const struct dmi_system_id *dsi)
375 {
376 	int i, slot;
377 
378 	for (i = 0; i < nitems(dsi->matches); i++) {
379 		slot = dsi->matches[i].slot;
380 		if (slot == DMI_NONE)
381 			break;
382 		if (!dmi_match(slot, dsi->matches[i].substr))
383 			return false;
384 	}
385 
386 	return true;
387 }
388 
389 const struct dmi_system_id *
390 dmi_first_match(const struct dmi_system_id *sysid)
391 {
392 	const struct dmi_system_id *dsi;
393 
394 	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
395 		if (dmi_found(dsi))
396 			return dsi;
397 	}
398 
399 	return NULL;
400 }
401 
402 #if NBIOS > 0
403 extern char smbios_bios_date[];
404 #endif
405 
406 const char *
407 dmi_get_system_info(int slot)
408 {
409 	WARN_ON(slot != DMI_BIOS_DATE);
410 #if NBIOS > 0
411 	if (slot == DMI_BIOS_DATE)
412 		return smbios_bios_date;
413 #endif
414 	return NULL;
415 }
416 
417 int
418 dmi_check_system(const struct dmi_system_id *sysid)
419 {
420 	const struct dmi_system_id *dsi;
421 	int num = 0;
422 
423 	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
424 		if (dmi_found(dsi)) {
425 			num++;
426 			if (dsi->callback && dsi->callback(dsi))
427 				break;
428 		}
429 	}
430 	return (num);
431 }
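/*
 * A minimal sketch of how a caller might build the table these helpers
 * walk; the table name and the vendor/product strings are placeholders,
 * not taken from any driver:
 *
 *	static const struct dmi_system_id example_quirks[] = {
 *		{
 *			.matches = {
 *				{ .slot = DMI_SYS_VENDOR, .substr = "Vendor" },
 *				{ .slot = DMI_PRODUCT_NAME, .substr = "Model" },
 *			},
 *		},
 *		{ }	(empty entry: matches[0].slot == 0 ends the scan)
 *	};
 *
 *	if (dmi_check_system(example_quirks))
 *		...apply quirk...
 */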
432 
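/*
 * vmalloc()/vzalloc() allocations are tracked in an RB tree keyed by
 * address so that is_vmalloc_addr() and kvfree() can tell km_alloc()
 * memory apart from plain malloc(9) memory.
 */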
433 struct vmalloc_entry {
434 	const void	*addr;
435 	size_t		size;
436 	RBT_ENTRY(vmalloc_entry) vmalloc_node;
437 };
438 
439 struct pool vmalloc_pool;
440 RBT_HEAD(vmalloc_tree, vmalloc_entry) vmalloc_tree;
441 
442 RBT_PROTOTYPE(vmalloc_tree, vmalloc_entry, vmalloc_node, vmalloc_compare);
443 
444 static inline int
445 vmalloc_compare(const struct vmalloc_entry *a, const struct vmalloc_entry *b)
446 {
447 	vaddr_t va = (vaddr_t)a->addr;
448 	vaddr_t vb = (vaddr_t)b->addr;
449 
450 	return va < vb ? -1 : va > vb;
451 }
452 
453 RBT_GENERATE(vmalloc_tree, vmalloc_entry, vmalloc_node, vmalloc_compare);
454 
455 bool
456 is_vmalloc_addr(const void *addr)
457 {
458 	struct vmalloc_entry key;
459 	struct vmalloc_entry *entry;
460 
461 	key.addr = addr;
462 	entry = RBT_FIND(vmalloc_tree, &vmalloc_tree, &key);
463 	return (entry != NULL);
464 }
465 
466 void *
467 vmalloc(unsigned long size)
468 {
469 	struct vmalloc_entry *entry;
470 	void *addr;
471 
472 	size = round_page(size);
473 	addr = km_alloc(size, &kv_any, &kp_dirty, &kd_waitok);
474 	if (addr) {
475 		entry = pool_get(&vmalloc_pool, PR_WAITOK);
476 		entry->addr = addr;
477 		entry->size = size;
478 		RBT_INSERT(vmalloc_tree, &vmalloc_tree, entry);
479 	}
480 
481 	return addr;
482 }
483 
484 void *
485 vzalloc(unsigned long size)
486 {
487 	struct vmalloc_entry *entry;
488 	void *addr;
489 
490 	size = round_page(size);
491 	addr = km_alloc(size, &kv_any, &kp_zero, &kd_waitok);
492 	if (addr) {
493 		entry = pool_get(&vmalloc_pool, PR_WAITOK);
494 		entry->addr = addr;
495 		entry->size = size;
496 		RBT_INSERT(vmalloc_tree, &vmalloc_tree, entry);
497 	}
498 
499 	return addr;
500 }
501 
502 void
503 vfree(const void *addr)
504 {
505 	struct vmalloc_entry key;
506 	struct vmalloc_entry *entry;
507 
508 	key.addr = addr;
509 	entry = RBT_FIND(vmalloc_tree, &vmalloc_tree, &key);
510 	if (entry == NULL)
511 		panic("%s: non vmalloced addr %p", __func__, addr);
512 
513 	RBT_REMOVE(vmalloc_tree, &vmalloc_tree, entry);
514 	km_free((void *)addr, entry->size, &kv_any, &kp_dirty);
515 	pool_put(&vmalloc_pool, entry);
516 }
517 
518 void *
519 kvmalloc(size_t size, gfp_t flags)
520 {
521 	if ((flags & M_NOWAIT) || size < PAGE_SIZE)
522 		return malloc(size, M_DRM, flags);
523 	if (flags & M_ZERO)
524 		return vzalloc(size);
525 	else
526 		return vmalloc(size);
527 }
528 
529 void
530 kvfree(const void *addr)
531 {
532 	if (is_vmalloc_addr(addr))
533 		vfree(addr);
534 	else
535 		free((void *)addr, M_DRM, 0);
536 }
537 
538 struct vm_page *
539 alloc_pages(unsigned int gfp_mask, unsigned int order)
540 {
541 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
542 	struct uvm_constraint_range *constraint = &no_constraint;
543 	struct pglist mlist;
544 
545 	if (gfp_mask & M_CANFAIL)
546 		flags |= UVM_PLA_FAILOK;
547 	if (gfp_mask & M_ZERO)
548 		flags |= UVM_PLA_ZERO;
549 	if (gfp_mask & __GFP_DMA32)
550 		constraint = &dma_constraint;
551 
552 	TAILQ_INIT(&mlist);
553 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
554 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
555 		return NULL;
556 	return TAILQ_FIRST(&mlist);
557 }
558 
559 void
560 __free_pages(struct vm_page *page, unsigned int order)
561 {
562 	struct pglist mlist;
563 	int i;
564 
565 	TAILQ_INIT(&mlist);
566 	for (i = 0; i < (1 << order); i++)
567 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
568 	uvm_pglistfree(&mlist);
569 }
570 
571 void
572 __pagevec_release(struct pagevec *pvec)
573 {
574 	struct pglist mlist;
575 	int i;
576 
577 	TAILQ_INIT(&mlist);
578 	for (i = 0; i < pvec->nr; i++)
579 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
580 	uvm_pglistfree(&mlist);
581 	pagevec_reinit(pvec);
582 }
583 
584 void *
585 kmap(struct vm_page *pg)
586 {
587 	vaddr_t va;
588 
589 #if defined (__HAVE_PMAP_DIRECT)
590 	va = pmap_map_direct(pg);
591 #else
592 	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
593 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
594 	pmap_update(pmap_kernel());
595 #endif
596 	return (void *)va;
597 }
598 
599 void
600 kunmap_va(void *addr)
601 {
602 	vaddr_t va = (vaddr_t)addr;
603 
604 #if defined (__HAVE_PMAP_DIRECT)
605 	pmap_unmap_direct(va);
606 #else
607 	pmap_kremove(va, PAGE_SIZE);
608 	pmap_update(pmap_kernel());
609 	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
610 #endif
611 }
612 
613 void *
614 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
615      pgprot_t prot)
616 {
617 	vaddr_t va;
618 	paddr_t pa;
619 	int i;
620 
621 	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
622 	if (va == 0)
623 		return NULL;
624 	for (i = 0; i < npages; i++) {
625 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
626 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
627 		    PROT_READ | PROT_WRITE,
628 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
629 		pmap_update(pmap_kernel());
630 	}
631 
632 	return (void *)va;
633 }
634 
635 void
636 vunmap(void *addr, size_t size)
637 {
638 	vaddr_t va = (vaddr_t)addr;
639 
640 	pmap_remove(pmap_kernel(), va, va + size);
641 	pmap_update(pmap_kernel());
642 	uvm_km_free(kernel_map, va, size);
643 }
644 
645 void
646 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
647     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
648 {
649 	const uint8_t *cbuf = buf;
650 	int i;
651 
652 	for (i = 0; i < len; i++) {
653 		if ((i % rowsize) == 0)
654 			printf("%s", prefix_str);
655 		printf("%02x", cbuf[i]);
656 		if ((i % rowsize) == (rowsize - 1))
657 			printf("\n");
658 		else
659 			printf(" ");
660 	}
661 }
662 
663 void *
664 memchr_inv(const void *s, int c, size_t n)
665 {
666 	if (n != 0) {
667 		const unsigned char *p = s;
668 
669 		do {
670 			if (*p++ != (unsigned char)c)
671 				return ((void *)(p - 1));
672 		} while (--n != 0);
673 	}
674 	return (NULL);
675 }
676 
677 int
678 panic_cmp(struct rb_node *a, struct rb_node *b)
679 {
680 	panic(__func__);
681 }
682 
683 #undef RB_ROOT
684 #define RB_ROOT(head)	(head)->rbh_root
685 
686 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
687 
688 /*
689  * This is a fairly minimal implementation of the Linux "idr" API.  It
690  * probably isn't very efficient, and definitely isn't RCU safe.  The
691  * pre-load buffer is global instead of per-cpu; we rely on the kernel
692  * lock to make this work.  We do randomize our IDs in order to make
693  * them harder to guess.
694  */
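/*
 * A minimal usage sketch of this idr emulation; "example_idr",
 * "example_obj" and the error handling are illustrative assumptions,
 * not code from any driver:
 *
 *	struct idr example_idr;
 *	void *example_obj = ...;
 *	int id;
 *
 *	idr_init(&example_idr);
 *	id = idr_alloc(&example_idr, example_obj, 1, 0, GFP_KERNEL);
 *	if (id < 0)
 *		...handle -ENOMEM or -ENOSPC...
 *	KASSERT(idr_find(&example_idr, id) == example_obj);
 *	idr_remove(&example_idr, id);
 *	idr_destroy(&example_idr);
 */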
695 
696 int idr_cmp(struct idr_entry *, struct idr_entry *);
697 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
698 
699 struct pool idr_pool;
700 struct idr_entry *idr_entry_cache;
701 
702 void
703 idr_init(struct idr *idr)
704 {
705 	SPLAY_INIT(&idr->tree);
706 }
707 
708 void
709 idr_destroy(struct idr *idr)
710 {
711 	struct idr_entry *id;
712 
713 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
714 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
715 		pool_put(&idr_pool, id);
716 	}
717 }
718 
719 void
720 idr_preload(unsigned int gfp_mask)
721 {
722 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
723 
724 	KERNEL_ASSERT_LOCKED();
725 
726 	if (idr_entry_cache == NULL)
727 		idr_entry_cache = pool_get(&idr_pool, flags);
728 }
729 
730 int
731 idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
732 {
733 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
734 	struct idr_entry *id;
735 	int begin;
736 
737 	KERNEL_ASSERT_LOCKED();
738 
739 	if (idr_entry_cache) {
740 		id = idr_entry_cache;
741 		idr_entry_cache = NULL;
742 	} else {
743 		id = pool_get(&idr_pool, flags);
744 		if (id == NULL)
745 			return -ENOMEM;
746 	}
747 
748 	if (end <= 0)
749 		end = INT_MAX;
750 
751 #ifdef notyet
752 	id->id = begin = start + arc4random_uniform(end - start);
753 #else
754 	id->id = begin = start;
755 #endif
756 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
757 		if (id->id == end)
758 			id->id = start;
759 		else
760 			id->id++;
761 		if (id->id == begin) {
762 			pool_put(&idr_pool, id);
763 			return -ENOSPC;
764 		}
765 	}
766 	id->ptr = ptr;
767 	return id->id;
768 }
769 
770 void *
771 idr_replace(struct idr *idr, void *ptr, unsigned long id)
772 {
773 	struct idr_entry find, *res;
774 	void *old;
775 
776 	find.id = id;
777 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
778 	if (res == NULL)
779 		return ERR_PTR(-ENOENT);
780 	old = res->ptr;
781 	res->ptr = ptr;
782 	return old;
783 }
784 
785 void *
786 idr_remove(struct idr *idr, unsigned long id)
787 {
788 	struct idr_entry find, *res;
789 	void *ptr = NULL;
790 
791 	find.id = id;
792 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
793 	if (res) {
794 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
795 		ptr = res->ptr;
796 		pool_put(&idr_pool, res);
797 	}
798 	return ptr;
799 }
800 
801 void *
802 idr_find(struct idr *idr, unsigned long id)
803 {
804 	struct idr_entry find, *res;
805 
806 	find.id = id;
807 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
808 	if (res == NULL)
809 		return NULL;
810 	return res->ptr;
811 }
812 
813 void *
814 idr_get_next(struct idr *idr, int *id)
815 {
816 	struct idr_entry *res;
817 
818 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
819 		if (res->id >= *id) {
820 			*id = res->id;
821 			return res->ptr;
822 		}
823 	}
824 
825 	return NULL;
826 }
827 
828 int
829 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
830 {
831 	struct idr_entry *id;
832 	int ret;
833 
834 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
835 		ret = func(id->id, id->ptr, data);
836 		if (ret)
837 			return ret;
838 	}
839 
840 	return 0;
841 }
842 
843 int
844 idr_cmp(struct idr_entry *a, struct idr_entry *b)
845 {
846 	return (a->id < b->id ? -1 : a->id > b->id);
847 }
848 
849 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
850 
851 void
852 ida_init(struct ida *ida)
853 {
854 	idr_init(&ida->idr);
855 }
856 
857 void
858 ida_destroy(struct ida *ida)
859 {
860 	idr_destroy(&ida->idr);
861 }
862 
863 int
864 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
865     gfp_t gfp_mask)
866 {
867 	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
868 }
869 
870 void
871 ida_simple_remove(struct ida *ida, unsigned int id)
872 {
873 	idr_remove(&ida->idr, id);
874 }
875 
876 int
877 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
878 {
879 	return (a->id < b->id ? -1 : a->id > b->id);
880 }
881 
882 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
883 struct pool xa_pool;
884 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
885 
886 void
887 xa_init_flags(struct xarray *xa, gfp_t flags)
888 {
889 	static int initialized;
890 
891 	if (!initialized) {
892 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_TTY, 0,
893 		    "xapl", NULL);
894 		initialized = 1;
895 	}
896 	SPLAY_INIT(&xa->xa_tree);
897 }
898 
899 void
900 xa_destroy(struct xarray *xa)
901 {
902 	struct xarray_entry *id;
903 
904 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
905 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
906 		pool_put(&xa_pool, id);
907 	}
908 }
909 
910 int
911 xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
912 {
913 	struct xarray_entry *xid;
914 	int flags = (gfp & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
915 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
916 	int begin;
917 
918 	xid = pool_get(&xa_pool, flags);
919 	if (xid == NULL)
920 		return -ENOMEM;
921 
922 	if (limit <= 0)
923 		limit = INT_MAX;
924 
925 	xid->id = begin = start;
926 
927 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
928 		if (xid->id == limit)
929 			xid->id = start;
930 		else
931 			xid->id++;
932 		if (xid->id == begin) {
933 			pool_put(&xa_pool, xid);
934 			return -EBUSY;
935 		}
936 	}
937 	xid->ptr = entry;
938 	*id = xid->id;
939 	return 0;
940 }
941 
942 void *
943 xa_erase(struct xarray *xa, unsigned long index)
944 {
945 	struct xarray_entry find, *res;
946 	void *ptr = NULL;
947 
948 	find.id = index;
949 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
950 	if (res) {
951 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
952 		ptr = res->ptr;
953 		pool_put(&xa_pool, res);
954 	}
955 	return ptr;
956 }
957 
958 void *
959 xa_load(struct xarray *xa, unsigned long index)
960 {
961 	struct xarray_entry find, *res;
962 
963 	find.id = index;
964 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
965 	if (res == NULL)
966 		return NULL;
967 	return res->ptr;
968 }
969 
970 void *
971 xa_get_next(struct xarray *xa, unsigned long *index)
972 {
973 	struct xarray_entry *res;
974 
975 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
976 		if (res->id >= *index) {
977 			*index = res->id;
978 			return res->ptr;
979 		}
980 	}
981 
982 	return NULL;
983 }
984 
985 int
986 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
987 {
988 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
989 	    M_DRM, gfp_mask);
990 	if (table->sgl == NULL)
991 		return -ENOMEM;
992 	table->nents = table->orig_nents = nents;
993 	return 0;
994 }
995 
996 void
997 sg_free_table(struct sg_table *table)
998 {
999 	free(table->sgl, M_DRM,
1000 	    table->orig_nents * sizeof(struct scatterlist));
1001 	table->sgl = NULL;
1002 }
1003 
1004 size_t
1005 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
1006     const void *buf, size_t buflen)
1007 {
1008 	panic("%s", __func__);
1009 }
1010 
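/*
 * Translate a Linux i2c_msg array into iic_exec(9) operations: every
 * message except the last two is issued on its own, the second-to-last
 * (if any) is treated as the command/register phase, and the final
 * message is issued with a STOP condition.
 */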
1011 int
1012 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1013 {
1014 	void *cmd = NULL;
1015 	int cmdlen = 0;
1016 	int err, ret = 0;
1017 	int op;
1018 
1019 	iic_acquire_bus(&adap->ic, 0);
1020 
1021 	while (num > 2) {
1022 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
1023 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
1024 		    msgs->buf, msgs->len, 0);
1025 		if (err) {
1026 			ret = -err;
1027 			goto fail;
1028 		}
1029 		msgs++;
1030 		num--;
1031 		ret++;
1032 	}
1033 
1034 	if (num > 1) {
1035 		cmd = msgs->buf;
1036 		cmdlen = msgs->len;
1037 		msgs++;
1038 		num--;
1039 		ret++;
1040 	}
1041 
1042 	op = (msgs->flags & I2C_M_RD) ?
1043 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
1044 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
1045 	    msgs->buf, msgs->len, 0);
1046 	if (err) {
1047 		ret = -err;
1048 		goto fail;
1049 	}
1050 	msgs++;
1051 	ret++;
1052 
1053 fail:
1054 	iic_release_bus(&adap->ic, 0);
1055 
1056 	return ret;
1057 }
1058 
1059 int
1060 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1061 {
1062 	int ret;
1063 
1064 	if (adap->lock_ops)
1065 		adap->lock_ops->lock_bus(adap, 0);
1066 
1067 	if (adap->algo)
1068 		ret = adap->algo->master_xfer(adap, msgs, num);
1069 	else
1070 		ret = i2c_master_xfer(adap, msgs, num);
1071 
1072 	if (adap->lock_ops)
1073 		adap->lock_ops->unlock_bus(adap, 0);
1074 
1075 	return ret;
1076 }
1077 
1078 int
1079 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1080 {
1081 	struct i2c_algo_bit_data *algo = adap->algo_data;
1082 	struct i2c_adapter bb;
1083 
1084 	memset(&bb, 0, sizeof(bb));
1085 	bb.ic = algo->ic;
1086 	bb.retries = adap->retries;
1087 	return i2c_master_xfer(&bb, msgs, num);
1088 }
1089 
1090 uint32_t
1091 i2c_bb_functionality(struct i2c_adapter *adap)
1092 {
1093 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1094 }
1095 
1096 struct i2c_algorithm i2c_bit_algo = {
1097 	.master_xfer = i2c_bb_master_xfer,
1098 	.functionality = i2c_bb_functionality
1099 };
1100 
1101 int
1102 i2c_bit_add_bus(struct i2c_adapter *adap)
1103 {
1104 	adap->algo = &i2c_bit_algo;
1105 	adap->retries = 3;
1106 
1107 	return 0;
1108 }
1109 
1110 #if defined(__amd64__) || defined(__i386__)
1111 
1112 /*
1113  * This is a minimal implementation of the Linux vga_get/vga_put
1114  * interface.  In all likelihood, it will only work for inteldrm(4) as
1115  * it assumes that if there is another active VGA device in the
1116  * system, it is sitting behind a PCI bridge.
1117  */
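/*
 * Callers are expected to pair the two, e.g. (sketch; the rsrc argument
 * is ignored by this implementation):
 *
 *	vga_get_uninterruptible(pdev, rsrc);
 *	...touch legacy VGA resources...
 *	vga_put(pdev, rsrc);
 */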
1118 
1119 extern int pci_enumerate_bus(struct pci_softc *,
1120     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1121 
1122 pcitag_t vga_bridge_tag;
1123 int vga_bridge_disabled;
1124 
1125 int
1126 vga_disable_bridge(struct pci_attach_args *pa)
1127 {
1128 	pcireg_t bhlc, bc;
1129 
1130 	if (pa->pa_domain != 0)
1131 		return 0;
1132 
1133 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1134 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1135 		return 0;
1136 
1137 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1138 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1139 		return 0;
1140 	bc &= ~PPB_BC_VGA_ENABLE;
1141 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1142 
1143 	vga_bridge_tag = pa->pa_tag;
1144 	vga_bridge_disabled = 1;
1145 
1146 	return 1;
1147 }
1148 
1149 void
1150 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1151 {
1152 	KASSERT(pdev->pci->sc_bridgetag == NULL);
1153 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1154 }
1155 
1156 void
1157 vga_put(struct pci_dev *pdev, int rsrc)
1158 {
1159 	pcireg_t bc;
1160 
1161 	if (!vga_bridge_disabled)
1162 		return;
1163 
1164 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1165 	bc |= PPB_BC_VGA_ENABLE;
1166 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1167 
1168 	vga_bridge_disabled = 0;
1169 }
1170 
1171 #endif
1172 
1173 /*
1174  * ACPI types and interfaces.
1175  */
1176 
1177 #ifdef __HAVE_ACPI
1178 #include "acpi.h"
1179 #endif
1180 
1181 #if NACPI > 0
1182 
1183 #include <dev/acpi/acpireg.h>
1184 #include <dev/acpi/acpivar.h>
1185 #include <dev/acpi/amltypes.h>
1186 #include <dev/acpi/dsdt.h>
1187 
1188 acpi_status
1189 acpi_get_table(const char *sig, int instance,
1190     struct acpi_table_header **hdr)
1191 {
1192 	struct acpi_softc *sc = acpi_softc;
1193 	struct acpi_q *entry;
1194 
1195 	KASSERT(instance == 1);
1196 
1197 	if (sc == NULL)
1198 		return AE_NOT_FOUND;
1199 
1200 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1201 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1202 			*hdr = entry->q_table;
1203 			return 0;
1204 		}
1205 	}
1206 
1207 	return AE_NOT_FOUND;
1208 }
1209 
1210 acpi_status
1211 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1212 {
1213 	node = aml_searchname(node, name);
1214 	if (node == NULL)
1215 		return AE_NOT_FOUND;
1216 
1217 	*rnode = node;
1218 	return 0;
1219 }
1220 
1221 acpi_status
1222 acpi_get_name(acpi_handle node, int type, struct acpi_buffer *buffer)
1223 {
1224 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1225 	KASSERT(type == ACPI_FULL_PATHNAME);
1226 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1227 	return 0;
1228 }
1229 
1230 acpi_status
1231 acpi_evaluate_object(acpi_handle node, const char *name,
1232     struct acpi_object_list *params, struct acpi_buffer *result)
1233 {
1234 	struct aml_value args[4], res;
1235 	union acpi_object *obj;
1236 	uint8_t *data;
1237 	int i;
1238 
1239 	KASSERT(params->count <= nitems(args));
1240 
1241 	for (i = 0; i < params->count; i++) {
1242 		args[i].type = params->pointer[i].type;
1243 		switch (args[i].type) {
1244 		case AML_OBJTYPE_INTEGER:
1245 			args[i].v_integer = params->pointer[i].integer.value;
1246 			break;
1247 		case AML_OBJTYPE_BUFFER:
1248 			args[i].length = params->pointer[i].buffer.length;
1249 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1250 			break;
1251 		default:
1252 			printf("%s: arg type 0x%02x", __func__, args[i].type);
1253 			return AE_BAD_PARAMETER;
1254 		}
1255 	}
1256 
1257 	if (name) {
1258 		node = aml_searchname(node, name);
1259 		if (node == NULL)
1260 			return AE_NOT_FOUND;
1261 	}
1262 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1263 		aml_freevalue(&res);
1264 		return AE_ERROR;
1265 	}
1266 
1267 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1268 
1269 	result->length = sizeof(union acpi_object);
1270 	switch (res.type) {
1271 	case AML_OBJTYPE_BUFFER:
1272 		result->length += res.length;
1273 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1274 		obj = (union acpi_object *)result->pointer;
1275 		data = (uint8_t *)(obj + 1);
1276 		obj->type = res.type;
1277 		obj->buffer.length = res.length;
1278 		obj->buffer.pointer = data;
1279 		memcpy(data, res.v_buffer, res.length);
1280 		break;
1281 	default:
1282 		printf("%s: return type 0x%02x", __func__, res.type);
1283 		aml_freevalue(&res);
1284 		return AE_ERROR;
1285 	}
1286 
1287 	aml_freevalue(&res);
1288 	return 0;
1289 }
1290 
1291 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1292 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1293 
1294 int
1295 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1296 {
1297 	struct acpi_bus_event event;
1298 	struct notifier_block *nb;
1299 
1300 	event.device_class = ACPI_VIDEO_CLASS;
1301 	event.type = notify;
1302 
1303 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1304 		nb->notifier_call(nb, 0, &event);
1305 	return 0;
1306 }
1307 
1308 int
1309 register_acpi_notifier(struct notifier_block *nb)
1310 {
1311 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1312 	return 0;
1313 }
1314 
1315 int
1316 unregister_acpi_notifier(struct notifier_block *nb)
1317 {
1318 	struct notifier_block *tmp;
1319 
1320 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1321 		if (tmp == nb) {
1322 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1323 			    notifier_block, link);
1324 			return 0;
1325 		}
1326 	}
1327 
1328 	return -ENOENT;
1329 }
1330 
1331 const char *
1332 acpi_format_exception(acpi_status status)
1333 {
1334 	switch (status) {
1335 	case AE_NOT_FOUND:
1336 		return "not found";
1337 	case AE_BAD_PARAMETER:
1338 		return "bad parameter";
1339 	default:
1340 		return "unknown";
1341 	}
1342 }
1343 
1344 #endif
1345 
1346 void
1347 backlight_do_update_status(void *arg)
1348 {
1349 	backlight_update_status(arg);
1350 }
1351 
1352 struct backlight_device *
1353 backlight_device_register(const char *name, void *kdev, void *data,
1354     const struct backlight_ops *ops, struct backlight_properties *props)
1355 {
1356 	struct backlight_device *bd;
1357 
1358 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1359 	bd->ops = ops;
1360 	bd->props = *props;
1361 	bd->data = data;
1362 
1363 	task_set(&bd->task, backlight_do_update_status, bd);
1364 
1365 	return bd;
1366 }
1367 
1368 void
1369 backlight_device_unregister(struct backlight_device *bd)
1370 {
1371 	free(bd, M_DRM, sizeof(*bd));
1372 }
1373 
1374 void
1375 backlight_schedule_update_status(struct backlight_device *bd)
1376 {
1377 	task_add(systq, &bd->task);
1378 }
1379 
1380 inline int
1381 backlight_enable(struct backlight_device *bd)
1382 {
1383 	if (bd == NULL)
1384 		return 0;
1385 
1386 	bd->props.power = FB_BLANK_UNBLANK;
1387 
1388 	return bd->ops->update_status(bd);
1389 }
1390 
1391 inline int
1392 backlight_disable(struct backlight_device *bd)
1393 {
1394 	if (bd == NULL)
1395 		return 0;
1396 
1397 	bd->props.power = FB_BLANK_POWERDOWN;
1398 
1399 	return bd->ops->update_status(bd);
1400 }
1401 
1402 void
1403 drm_sysfs_hotplug_event(struct drm_device *dev)
1404 {
1405 	KNOTE(&dev->note, NOTE_CHANGE);
1406 }
1407 
1408 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1409 
1410 uint64_t
1411 dma_fence_context_alloc(unsigned int num)
1412 {
1413 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1414 }
1415 
1416 struct default_wait_cb {
1417 	struct dma_fence_cb base;
1418 	struct proc *proc;
1419 };
1420 
1421 static void
1422 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1423 {
1424 	struct default_wait_cb *wait =
1425 	    container_of(cb, struct default_wait_cb, base);
1426 	wake_up_process(wait->proc);
1427 }
1428 
1429 long
1430 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1431 {
1432 	long ret = timeout ? timeout : 1;
1433 	unsigned long end;
1434 	int err;
1435 	struct default_wait_cb cb;
1436 	bool was_set;
1437 
1438 	KASSERT(timeout <= INT_MAX);
1439 
1440 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1441 		return ret;
1442 
1443 	mtx_enter(fence->lock);
1444 
1445 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1446 	    &fence->flags);
1447 
1448 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1449 		goto out;
1450 
1451 	if (!was_set && fence->ops->enable_signaling) {
1452 		if (!fence->ops->enable_signaling(fence)) {
1453 			dma_fence_signal_locked(fence);
1454 			goto out;
1455 		}
1456 	}
1457 
1458 	if (timeout == 0) {
1459 		ret = 0;
1460 		goto out;
1461 	}
1462 
1463 	cb.base.func = dma_fence_default_wait_cb;
1464 	cb.proc = curproc;
1465 	list_add(&cb.base.node, &fence->cb_list);
1466 
1467 	end = jiffies + timeout;
1468 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1469 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1470 			break;
1471 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
1472 		    "dmafence", ret);
1473 		if (err == EINTR || err == ERESTART) {
1474 			ret = -ERESTARTSYS;
1475 			break;
1476 		}
1477 	}
1478 
1479 	if (!list_empty(&cb.base.node))
1480 		list_del(&cb.base.node);
1481 out:
1482 	mtx_leave(fence->lock);
1483 
1484 	return ret;
1485 }
1486 
1487 static bool
1488 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1489     uint32_t *idx)
1490 {
1491 	int i;
1492 
1493 	for (i = 0; i < count; ++i) {
1494 		struct dma_fence *fence = fences[i];
1495 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1496 			if (idx)
1497 				*idx = i;
1498 			return true;
1499 		}
1500 	}
1501 	return false;
1502 }
1503 
1504 long
1505 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
1506     bool intr, long timeout, uint32_t *idx)
1507 {
1508 	struct default_wait_cb *cb;
1509 	long ret = timeout;
1510 	unsigned long end;
1511 	int i, err;
1512 
1513 	KASSERT(timeout <= INT_MAX);
1514 
1515 	if (timeout == 0) {
1516 		for (i = 0; i < count; i++) {
1517 			if (dma_fence_is_signaled(fences[i])) {
1518 				if (idx)
1519 					*idx = i;
1520 				return 1;
1521 			}
1522 		}
1523 		return 0;
1524 	}
1525 
1526 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1527 	if (cb == NULL)
1528 		return -ENOMEM;
1529 
1530 	for (i = 0; i < count; i++) {
1531 		struct dma_fence *fence = fences[i];
1532 		cb[i].proc = curproc;
1533 		if (dma_fence_add_callback(fence, &cb[i].base,
1534 		    dma_fence_default_wait_cb)) {
1535 			if (idx)
1536 				*idx = i;
1537 			goto cb_cleanup;
1538 		}
1539 	}
1540 
1541 	end = jiffies + timeout;
1542 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1543 		if (dma_fence_test_signaled_any(fences, count, idx))
1544 			break;
1545 		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
1546 		if (err == EINTR || err == ERESTART) {
1547 			ret = -ERESTARTSYS;
1548 			break;
1549 		}
1550 	}
1551 
1552 cb_cleanup:
1553 	while (i-- > 0)
1554 		dma_fence_remove_callback(fences[i], &cb[i].base);
1555 	free(cb, M_DRM, count * sizeof(*cb));
1556 	return ret;
1557 }
1558 
1559 static struct dma_fence dma_fence_stub;
1560 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
1561 
1562 static const char *
1563 dma_fence_stub_get_name(struct dma_fence *fence)
1564 {
1565 	return "stub";
1566 }
1567 
1568 static const struct dma_fence_ops dma_fence_stub_ops = {
1569 	.get_driver_name = dma_fence_stub_get_name,
1570 	.get_timeline_name = dma_fence_stub_get_name,
1571 };
1572 
1573 struct dma_fence *
1574 dma_fence_get_stub(void)
1575 {
1576 	mtx_enter(&dma_fence_stub_mtx);
1577 	if (dma_fence_stub.ops == NULL) {
1578 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
1579 		    &dma_fence_stub_mtx, 0, 0);
1580 		dma_fence_signal_locked(&dma_fence_stub);
1581 	}
1582 	mtx_leave(&dma_fence_stub_mtx);
1583 
1584 	return dma_fence_get(&dma_fence_stub);
1585 }
1586 
1587 static const char *
1588 dma_fence_array_get_driver_name(struct dma_fence *fence)
1589 {
1590 	return "dma_fence_array";
1591 }
1592 
1593 static const char *
1594 dma_fence_array_get_timeline_name(struct dma_fence *fence)
1595 {
1596 	return "unbound";
1597 }
1598 
1599 static void
1600 irq_dma_fence_array_work(struct irq_work *wrk)
1601 {
1602 	struct dma_fence_array *dfa = container_of(wrk, typeof(*dfa), work);
1603 
1604 	dma_fence_signal(&dfa->base);
1605 	dma_fence_put(&dfa->base);
1606 }
1607 
1608 static void
1609 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
1610 {
1611 	struct dma_fence_array_cb *array_cb =
1612 	    container_of(cb, struct dma_fence_array_cb, cb);
1613 	struct dma_fence_array *dfa = array_cb->array;
1614 
1615 	if (atomic_dec_and_test(&dfa->num_pending))
1616 		irq_work_queue(&dfa->work);
1617 	else
1618 		dma_fence_put(&dfa->base);
1619 }
1620 
1621 static bool
1622 dma_fence_array_enable_signaling(struct dma_fence *fence)
1623 {
1624 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1625 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
1626 	int i;
1627 
1628 	for (i = 0; i < dfa->num_fences; ++i) {
1629 		cb[i].array = dfa;
1630 		dma_fence_get(&dfa->base);
1631 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
1632 		    dma_fence_array_cb_func)) {
1633 			dma_fence_put(&dfa->base);
1634 			if (atomic_dec_and_test(&dfa->num_pending))
1635 				return false;
1636 		}
1637 	}
1638 
1639 	return true;
1640 }
1641 
1642 static bool dma_fence_array_signaled(struct dma_fence *fence)
1643 {
1644 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1645 
1646 	return atomic_read(&dfa->num_pending) <= 0;
1647 }
1648 
1649 static void dma_fence_array_release(struct dma_fence *fence)
1650 {
1651 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1652 	int i;
1653 
1654 	for (i = 0; i < dfa->num_fences; ++i)
1655 		dma_fence_put(dfa->fences[i]);
1656 
1657 	free(dfa->fences, M_DRM, 0);
1658 	dma_fence_free(fence);
1659 }
1660 
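/*
 * With signal_on_any the array fence fires as soon as one backing fence
 * signals (num_pending starts at 1); otherwise it waits for all of them.
 * The per-fence callback storage is allocated inline right after the
 * dma_fence_array itself.
 */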
1661 struct dma_fence_array *
1662 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
1663     unsigned seqno, bool signal_on_any)
1664 {
1665 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
1666 	    (num_fences * sizeof(struct dma_fence_array_cb)),
1667 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1668 	if (dfa == NULL)
1669 		return NULL;
1670 
1671 	mtx_init(&dfa->lock, IPL_TTY);
1672 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
1673 	    context, seqno);
1674 	init_irq_work(&dfa->work, irq_dma_fence_array_work);
1675 
1676 	dfa->num_fences = num_fences;
1677 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
1678 	dfa->fences = fences;
1679 
1680 	return dfa;
1681 }
1682 
1683 const struct dma_fence_ops dma_fence_array_ops = {
1684 	.get_driver_name = dma_fence_array_get_driver_name,
1685 	.get_timeline_name = dma_fence_array_get_timeline_name,
1686 	.enable_signaling = dma_fence_array_enable_signaling,
1687 	.signaled = dma_fence_array_signaled,
1688 	.release = dma_fence_array_release,
1689 };
1690 
1691 int
1692 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
1693 {
1694 	return (ENXIO);
1695 }
1696 
1697 int
1698 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
1699 {
1700 	return (ENXIO);
1701 }
1702 
1703 int
1704 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
1705 {
1706 	return (ENOTTY);
1707 }
1708 
1709 int
1710 dmabuf_poll(struct file *fp, int events, struct proc *p)
1711 {
1712 	return (0);
1713 }
1714 
1715 int
1716 dmabuf_kqfilter(struct file *fp, struct knote *kn)
1717 {
1718 	return (EINVAL);
1719 }
1720 
1721 int
1722 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
1723 {
1724 	struct dma_buf *dmabuf = fp->f_data;
1725 
1726 	memset(st, 0, sizeof(*st));
1727 	st->st_size = dmabuf->size;
1728 	st->st_mode = S_IFIFO;	/* XXX */
1729 	return (0);
1730 }
1731 
1732 int
1733 dmabuf_close(struct file *fp, struct proc *p)
1734 {
1735 	struct dma_buf *dmabuf = fp->f_data;
1736 
1737 	fp->f_data = NULL;
1738 	KERNEL_LOCK();
1739 	dmabuf->ops->release(dmabuf);
1740 	KERNEL_UNLOCK();
1741 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
1742 	return (0);
1743 }
1744 
1745 int
1746 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
1747 {
1748 	struct dma_buf *dmabuf = fp->f_data;
1749 	off_t newoff;
1750 
1751 	if (*offset != 0)
1752 		return (EINVAL);
1753 
1754 	switch (whence) {
1755 	case SEEK_SET:
1756 		newoff = 0;
1757 		break;
1758 	case SEEK_END:
1759 		newoff = dmabuf->size;
1760 		break;
1761 	default:
1762 		return (EINVAL);
1763 	}
1764 	mtx_enter(&fp->f_mtx);
1765 	fp->f_offset = newoff;
1766 	mtx_leave(&fp->f_mtx);
1767 	*offset = newoff;
1768 	return (0);
1769 }
1770 
1771 const struct fileops dmabufops = {
1772 	.fo_read	= dmabuf_read,
1773 	.fo_write	= dmabuf_write,
1774 	.fo_ioctl	= dmabuf_ioctl,
1775 	.fo_poll	= dmabuf_poll,
1776 	.fo_kqfilter	= dmabuf_kqfilter,
1777 	.fo_stat	= dmabuf_stat,
1778 	.fo_close	= dmabuf_close,
1779 	.fo_seek	= dmabuf_seek,
1780 };
1781 
1782 struct dma_buf *
1783 dma_buf_export(const struct dma_buf_export_info *info)
1784 {
1785 	struct proc *p = curproc;
1786 	struct dma_buf *dmabuf;
1787 	struct file *fp;
1788 
1789 	fp = fnew(p);
1790 	if (fp == NULL)
1791 		return ERR_PTR(-ENFILE);
1792 	fp->f_type = DTYPE_DMABUF;
1793 	fp->f_ops = &dmabufops;
1794 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
1795 	dmabuf->priv = info->priv;
1796 	dmabuf->ops = info->ops;
1797 	dmabuf->size = info->size;
1798 	dmabuf->file = fp;
1799 	fp->f_data = dmabuf;
1800 	INIT_LIST_HEAD(&dmabuf->attachments);
1801 	return dmabuf;
1802 }
1803 
1804 struct dma_buf *
1805 dma_buf_get(int fd)
1806 {
1807 	struct proc *p = curproc;
1808 	struct filedesc *fdp = p->p_fd;
1809 	struct file *fp;
1810 
1811 	if ((fp = fd_getfile(fdp, fd)) == NULL)
1812 		return ERR_PTR(-EBADF);
1813 
1814 	if (fp->f_type != DTYPE_DMABUF) {
1815 		FRELE(fp, p);
1816 		return ERR_PTR(-EINVAL);
1817 	}
1818 
1819 	return fp->f_data;
1820 }
1821 
1822 void
1823 dma_buf_put(struct dma_buf *dmabuf)
1824 {
1825 	KASSERT(dmabuf);
1826 	KASSERT(dmabuf->file);
1827 
1828 	FRELE(dmabuf->file, curproc);
1829 }
1830 
1831 int
1832 dma_buf_fd(struct dma_buf *dmabuf, int flags)
1833 {
1834 	struct proc *p = curproc;
1835 	struct filedesc *fdp = p->p_fd;
1836 	struct file *fp = dmabuf->file;
1837 	int fd, cloexec, error;
1838 
1839 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
1840 
1841 	fdplock(fdp);
1842 restart:
1843 	if ((error = fdalloc(p, 0, &fd)) != 0) {
1844 		if (error == ENOSPC) {
1845 			fdexpand(p);
1846 			goto restart;
1847 		}
1848 		fdpunlock(fdp);
1849 		return -error;
1850 	}
1851 
1852 	fdinsert(fdp, fd, cloexec, fp);
1853 	fdpunlock(fdp);
1854 
1855 	return fd;
1856 }
1857 
1858 void
1859 get_dma_buf(struct dma_buf *dmabuf)
1860 {
1861 	FREF(dmabuf->file);
1862 }
1863 
1864 enum pci_bus_speed
1865 pcie_get_speed_cap(struct pci_dev *pdev)
1866 {
1867 	pci_chipset_tag_t	pc = pdev->pc;
1868 	pcitag_t		tag = pdev->tag;
1869 	int			pos;
1870 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
1871 	pcireg_t		id;
1872 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
1873 	int			bus, device, function;
1874 
1875 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1876 	    &pos, NULL))
1877 		return PCI_SPEED_UNKNOWN;
1878 
1879 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1880 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1881 
1882 	/* we've been informed VIA and ServerWorks don't make the cut */
1883 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
1884 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
1885 		return PCI_SPEED_UNKNOWN;
1886 
1887 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1888 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
1889 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
1890 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
1891 
1892 	lnkcap &= 0x0f;
1893 	lnkcap2 &= 0xfe;
1894 
1895 	if (lnkcap2) { /* PCIE GEN 3.0 */
1896 		if (lnkcap2 & 0x02)
1897 			cap = PCIE_SPEED_2_5GT;
1898 		if (lnkcap2 & 0x04)
1899 			cap = PCIE_SPEED_5_0GT;
1900 		if (lnkcap2 & 0x08)
1901 			cap = PCIE_SPEED_8_0GT;
1902 		if (lnkcap2 & 0x10)
1903 			cap = PCIE_SPEED_16_0GT;
1904 	} else {
1905 		if (lnkcap & 0x01)
1906 			cap = PCIE_SPEED_2_5GT;
1907 		if (lnkcap & 0x02)
1908 			cap = PCIE_SPEED_5_0GT;
1909 	}
1910 
1911 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
1912 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
1913 	    lnkcap2);
1914 	return cap;
1915 }
1916 
1917 enum pcie_link_width
1918 pcie_get_width_cap(struct pci_dev *pdev)
1919 {
1920 	pci_chipset_tag_t	pc = pdev->pc;
1921 	pcitag_t		tag = pdev->tag;
1922 	int			pos;
1923 	pcireg_t		lnkcap = 0;
1924 	pcireg_t		id;
1925 	int			bus, device, function;
1926 
1927 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1928 	    &pos, NULL))
1929 		return PCIE_LNK_WIDTH_UNKNOWN;
1930 
1931 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1932 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1933 
1934 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1935 
1936 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
1937 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
1938 
1939 	if (lnkcap)
1940 		return (lnkcap & 0x3f0) >> 4;
1941 	return PCIE_LNK_WIDTH_UNKNOWN;
1942 }
1943 
1944 int
1945 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1946     int sync, void *key)
1947 {
1948 	wakeup(wqe);
1949 	if (wqe->proc)
1950 		wake_up_process(wqe->proc);
1951 	list_del_init(&wqe->entry);
1952 	return 0;
1953 }
1954 
1955 static wait_queue_head_t bit_waitq;
1956 wait_queue_head_t var_waitq;
1957 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
1958 
1959 int
1960 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1961 {
1962 	int err;
1963 
1964 	if (!test_bit(bit, word))
1965 		return 0;
1966 
1967 	mtx_enter(&wait_bit_mtx);
1968 	while (test_bit(bit, word)) {
1969 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
1970 		    INFSLP);
1971 		if (err) {
1972 			mtx_leave(&wait_bit_mtx);
1973 			return 1;
1974 		}
1975 	}
1976 	mtx_leave(&wait_bit_mtx);
1977 	return 0;
1978 }
1979 
1980 int
1981 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
1982 {
1983 	int err;
1984 
1985 	if (!test_bit(bit, word))
1986 		return 0;
1987 
1988 	mtx_enter(&wait_bit_mtx);
1989 	while (test_bit(bit, word)) {
1990 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
1991 		if (err) {
1992 			mtx_leave(&wait_bit_mtx);
1993 			return 1;
1994 		}
1995 	}
1996 	mtx_leave(&wait_bit_mtx);
1997 	return 0;
1998 }
1999 
2000 void
2001 wake_up_bit(void *word, int bit)
2002 {
2003 	mtx_enter(&wait_bit_mtx);
2004 	wakeup(word);
2005 	mtx_leave(&wait_bit_mtx);
2006 }
2007 
2008 void
2009 clear_and_wake_up_bit(int bit, void *word)
2010 {
2011 	clear_bit(bit, word);
2012 	wake_up_bit(word, bit);
2013 }
2014 
2015 wait_queue_head_t *
2016 bit_waitqueue(void *word, int bit)
2017 {
2018 	/* XXX hash table of wait queues? */
2019 	return &bit_waitq;
2020 }
2021 
2022 struct workqueue_struct *system_wq;
2023 struct workqueue_struct *system_highpri_wq;
2024 struct workqueue_struct *system_unbound_wq;
2025 struct workqueue_struct *system_long_wq;
2026 struct taskq *taskletq;
2027 
2028 void
2029 drm_linux_init(void)
2030 {
2031 	system_wq = (struct workqueue_struct *)
2032 	    taskq_create("drmwq", 4, IPL_HIGH, 0);
2033 	system_highpri_wq = (struct workqueue_struct *)
2034 	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
2035 	system_unbound_wq = (struct workqueue_struct *)
2036 	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
2037 	system_long_wq = (struct workqueue_struct *)
2038 	    taskq_create("drmlwq", 4, IPL_HIGH, 0);
2039 
2040 	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
2041 
2042 	init_waitqueue_head(&bit_waitq);
2043 	init_waitqueue_head(&var_waitq);
2044 
2045 	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
2046 	    "idrpl", NULL);
2047 
2048 	pool_init(&vmalloc_pool, sizeof(struct vmalloc_entry), 0, IPL_NONE, 0,
2049 	    "vmallocpl", NULL);
2050 	RBT_INIT(vmalloc_tree, &vmalloc_tree);
2051 }
2052 
2053 void
2054 drm_linux_exit(void)
2055 {
2056 	pool_destroy(&idr_pool);
2057 
2058 	taskq_destroy(taskletq);
2059 
2060 	taskq_destroy((struct taskq *)system_long_wq);
2061 	taskq_destroy((struct taskq *)system_unbound_wq);
2062 	taskq_destroy((struct taskq *)system_highpri_wq);
2063 	taskq_destroy((struct taskq *)system_wq);
2064 }
2065 
2066 #define PCIE_ECAP_RESIZE_BAR	0x15
2067 #define RBCAP0			0x04
2068 #define RBCTRL0			0x08
2069 #define RBCTRL_BARINDEX_MASK	0x07
2070 #define RBCTRL_BARSIZE_MASK	0x1f00
2071 #define RBCTRL_BARSIZE_SHIFT	8
2072 
2073 /* size in MB is 1 << nsize */
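/*
 * e.g. nsize = 8 requests a 1 << 8 = 256MB BAR; the RBCAP0 read below
 * checks bit (nsize + 4) to make sure the device advertises that size
 * before it is written into RBCTRL0.
 */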
2074 int
2075 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
2076 {
2077 	pcireg_t	reg;
2078 	uint32_t	offset, capid;
2079 
2080 	KASSERT(bar == 0);
2081 
2082 	offset = PCI_PCIE_ECAP;
2083 
2084 	/* search PCI Express Extended Capabilities */
2085 	do {
2086 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
2087 		capid = PCI_PCIE_ECAP_ID(reg);
2088 		if (capid == PCIE_ECAP_RESIZE_BAR)
2089 			break;
2090 		offset = PCI_PCIE_ECAP_NEXT(reg);
2091 	} while (capid != 0);
2092 
2093 	if (capid == 0) {
2094 		printf("%s: could not find resize bar cap!\n", __func__);
2095 		return -ENOTSUP;
2096 	}
2097 
2098 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
2099 
2100 	if ((reg & (1 << (nsize + 4))) == 0) {
2101 		printf("%s: size not supported\n", __func__);
2102 		return -ENOTSUP;
2103 	}
2104 
2105 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2106 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2107 		printf("%s: BAR index not 0\n", __func__);
2108 		return -EINVAL;
2109 	}
2110 
2111 	reg &= ~RBCTRL_BARSIZE_MASK;
2112 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2113 
2114 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2115 
2116 	return 0;
2117 }
2118 
2119 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2120 
2121 int
2122 register_shrinker(struct shrinker *shrinker)
2123 {
2124 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2125 	return 0;
2126 }
2127 
2128 void
2129 unregister_shrinker(struct shrinker *shrinker)
2130 {
2131 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2132 }
2133 
2134 void
2135 drmbackoff(long npages)
2136 {
2137 	struct shrink_control sc;
2138 	struct shrinker *shrinker;
2139 	u_long ret;
2140 
2141 	shrinker = TAILQ_FIRST(&shrinkers);
2142 	while (shrinker && npages > 0) {
2143 		sc.nr_to_scan = npages;
2144 		ret = shrinker->scan_objects(shrinker, &sc);
2145 		npages -= ret;
2146 		shrinker = TAILQ_NEXT(shrinker, next);
2147 	}
2148 }
2149 
2150 void *
2151 bitmap_zalloc(u_int n, gfp_t flags)
2152 {
2153 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2154 }
2155 
2156 void
2157 bitmap_free(void *p)
2158 {
2159 	kfree(p);
2160 }
2161 
2162 int
2163 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2164 {
2165 	if (atomic_add_unless(v, -1, 1))
2166 		return 0;
2167 
2168 	rw_enter_write(lock);
2169 	if (atomic_dec_return(v) == 0)
2170 		return 1;
2171 	rw_exit_write(lock);
2172 	return 0;
2173 }
2174 
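/*
 * Linux log levels are encoded as a "\001<digit>" prefix on the format
 * string (KERN_ERR, KERN_INFO, ...); unless DRMDEBUG is defined,
 * messages whose level digit is KERN_INFO's or numerically higher
 * (i.e. less important) are dropped before reaching vprintf().
 */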
2175 int
2176 printk(const char *fmt, ...)
2177 {
2178 	int ret, level;
2179 	va_list ap;
2180 
2181 	if (fmt != NULL && *fmt == '\001') {
2182 		level = fmt[1];
2183 #ifndef DRMDEBUG
2184 		if (level >= KERN_INFO[1] && level <= '9')
2185 			return 0;
2186 #endif
2187 		fmt += 2;
2188 	}
2189 
2190 	va_start(ap, fmt);
2191 	ret = vprintf(fmt, ap);
2192 	va_end(ap);
2193 
2194 	return ret;
2195 }
2196 
2197 #define START(node) ((node)->start)
2198 #define LAST(node) ((node)->last)
2199 
2200 struct interval_tree_node *
2201 interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
2202     unsigned long last)
2203 {
2204 	struct interval_tree_node *node;
2205 	struct rb_node *rb;
2206 
2207 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
2208 		node = rb_entry(rb, typeof(*node), rb);
2209 		if (LAST(node) >= start && START(node) <= last)
2210 			return node;
2211 	}
2212 	return NULL;
2213 }
2214 
2215 void
2216 interval_tree_remove(struct interval_tree_node *node,
2217     struct rb_root_cached *root)
2218 {
2219 	rb_erase_cached(&node->rb, root);
2220 }
2221 
2222 void
2223 interval_tree_insert(struct interval_tree_node *node,
2224     struct rb_root_cached *root)
2225 {
2226 	struct rb_node **iter = &root->rb_root.rb_node;
2227 	struct rb_node *parent = NULL;
2228 	struct interval_tree_node *iter_node;
2229 
2230 	while (*iter) {
2231 		parent = *iter;
2232 		iter_node = rb_entry(*iter, struct interval_tree_node, rb);
2233 
2234 		if (node->start < iter_node->start)
2235 			iter = &(*iter)->rb_left;
2236 		else
2237 			iter = &(*iter)->rb_right;
2238 	}
2239 
2240 	rb_link_node(&node->rb, parent, iter);
2241 	rb_insert_color_cached(&node->rb, root, false);
2242 }
2243