1 /*	$OpenBSD: drm_linux.c,v 1.60 2020/06/14 15:20:07 jsg Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/param.h>
21 #include <sys/event.h>
22 #include <sys/filedesc.h>
23 #include <sys/kthread.h>
24 #include <sys/stat.h>
25 #include <sys/unistd.h>
26 #include <sys/proc.h>
27 #include <sys/pool.h>
28 #include <sys/fcntl.h>
29 
30 #include <dev/pci/ppbreg.h>
31 
32 #include <linux/dma-buf.h>
33 #include <linux/mod_devicetable.h>
34 #include <linux/acpi.h>
35 #include <linux/pagevec.h>
36 #include <linux/dma-fence-array.h>
37 #include <linux/interrupt.h>
38 #include <linux/err.h>
39 #include <linux/idr.h>
40 #include <linux/scatterlist.h>
41 #include <linux/i2c.h>
42 #include <linux/pci.h>
43 #include <linux/notifier.h>
44 #include <linux/backlight.h>
45 #include <linux/shrinker.h>
46 #include <linux/fb.h>
47 #include <linux/xarray.h>
48 
49 #include <drm/drm_device.h>
50 #include <drm/drm_print.h>
51 
52 #if defined(__amd64__) || defined(__i386__)
53 #include "bios.h"
54 #endif
55 
56 void
57 tasklet_run(void *arg)
58 {
59 	struct tasklet_struct *ts = arg;
60 
61 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
62 	if (tasklet_trylock(ts)) {
63 		if (!atomic_read(&ts->count))
64 			ts->func(ts->data);
65 		tasklet_unlock(ts);
66 	}
67 }
68 
69 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
70 volatile struct proc *sch_proc;
71 volatile void *sch_ident;
72 int sch_priority;
73 
74 void
75 set_current_state(int state)
76 {
77 	if (sch_ident != curproc)
78 		mtx_enter(&sch_mtx);
79 	MUTEX_ASSERT_LOCKED(&sch_mtx);
80 	sch_ident = sch_proc = curproc;
81 	sch_priority = state;
82 }
83 
84 void
85 __set_current_state(int state)
86 {
87 	KASSERT(state == TASK_RUNNING);
88 	if (sch_ident == curproc) {
89 		MUTEX_ASSERT_LOCKED(&sch_mtx);
90 		sch_ident = NULL;
91 		mtx_leave(&sch_mtx);
92 	}
93 }
94 
95 void
96 schedule(void)
97 {
98 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
99 }
100 
101 long
102 schedule_timeout(long timeout)
103 {
104 	struct sleep_state sls;
105 	long deadline;
106 	int wait, spl;
107 
108 	MUTEX_ASSERT_LOCKED(&sch_mtx);
109 	KASSERT(!cold);
110 
111 	sleep_setup(&sls, sch_ident, sch_priority, "schto");
112 	if (timeout != MAX_SCHEDULE_TIMEOUT)
113 		sleep_setup_timeout(&sls, timeout);
114 
115 	wait = (sch_proc == curproc && timeout > 0);
116 
117 	spl = MUTEX_OLDIPL(&sch_mtx);
118 	MUTEX_OLDIPL(&sch_mtx) = splsched();
119 	mtx_leave(&sch_mtx);
120 
121 	sleep_setup_signal(&sls);
122 
123 	if (timeout != MAX_SCHEDULE_TIMEOUT)
124 		deadline = ticks + timeout;
125 	sleep_finish_all(&sls, wait);
126 	if (timeout != MAX_SCHEDULE_TIMEOUT)
127 		timeout = deadline - ticks;
128 
129 	mtx_enter(&sch_mtx);
130 	MUTEX_OLDIPL(&sch_mtx) = spl;
131 	sch_ident = curproc;
132 
133 	return timeout > 0 ? timeout : 0;
134 }
135 
136 long
137 schedule_timeout_uninterruptible(long timeout)
138 {
139 	tsleep(curproc, PWAIT, "schtou", timeout);
140 	return 0;
141 }
142 
143 int
144 wake_up_process(struct proc *p)
145 {
146 	atomic_cas_ptr(&sch_proc, p, NULL);
147 	return wakeup_proc(p, NULL);
148 }
149 
150 void
151 flush_workqueue(struct workqueue_struct *wq)
152 {
153 	if (cold)
154 		return;
155 
156 	taskq_barrier((struct taskq *)wq);
157 }
158 
159 bool
160 flush_work(struct work_struct *work)
161 {
162 	if (cold)
163 		return false;
164 
165 	taskq_barrier(work->tq);
166 	return false;
167 }
168 
169 bool
170 flush_delayed_work(struct delayed_work *dwork)
171 {
172 	bool ret = false;
173 
174 	if (cold)
175 		return false;
176 
177 	while (timeout_pending(&dwork->to)) {
178 		tsleep(dwork, PWAIT, "fldwto", 1);
179 		ret = true;
180 	}
181 
182 	taskq_barrier(dwork->tq ? dwork->tq : (struct taskq *)system_wq);
183 	return ret;
184 }
185 
186 struct kthread {
187 	int (*func)(void *);
188 	void *data;
189 	struct proc *proc;
190 	volatile u_int flags;
191 #define KTHREAD_SHOULDSTOP	0x0000001
192 #define KTHREAD_STOPPED		0x0000002
193 #define KTHREAD_SHOULDPARK	0x0000004
194 #define KTHREAD_PARKED		0x0000008
195 	LIST_ENTRY(kthread) next;
196 };
197 
198 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
199 
200 void
201 kthread_func(void *arg)
202 {
203 	struct kthread *thread = arg;
204 	int ret;
205 
206 	ret = thread->func(thread->data);
207 	thread->flags |= KTHREAD_STOPPED;
208 	kthread_exit(ret);
209 }
210 
211 struct proc *
212 kthread_run(int (*func)(void *), void *data, const char *name)
213 {
214 	struct kthread *thread;
215 
216 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
217 	thread->func = func;
218 	thread->data = data;
219 	thread->flags = 0;
220 
221 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
222 		free(thread, M_DRM, sizeof(*thread));
223 		return ERR_PTR(-ENOMEM);
224 	}
225 
226 	LIST_INSERT_HEAD(&kthread_list, thread, next);
227 	return thread->proc;
228 }
229 
230 struct kthread *
231 kthread_lookup(struct proc *p)
232 {
233 	struct kthread *thread;
234 
235 	LIST_FOREACH(thread, &kthread_list, next) {
236 		if (thread->proc == p)
237 			break;
238 	}
239 	KASSERT(thread);
240 
241 	return thread;
242 }
243 
244 int
245 kthread_should_park(void)
246 {
247 	struct kthread *thread = kthread_lookup(curproc);
248 	return (thread->flags & KTHREAD_SHOULDPARK);
249 }
250 
251 void
252 kthread_parkme(void)
253 {
254 	struct kthread *thread = kthread_lookup(curproc);
255 
256 	while (thread->flags & KTHREAD_SHOULDPARK) {
257 		thread->flags |= KTHREAD_PARKED;
258 		wakeup(thread);
259 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
260 		thread->flags &= ~KTHREAD_PARKED;
261 	}
262 }
263 
264 void
265 kthread_park(struct proc *p)
266 {
267 	struct kthread *thread = kthread_lookup(p);
268 
269 	while ((thread->flags & KTHREAD_PARKED) == 0) {
270 		thread->flags |= KTHREAD_SHOULDPARK;
271 		wake_up_process(thread->proc);
272 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
273 	}
274 }
275 
276 void
277 kthread_unpark(struct proc *p)
278 {
279 	struct kthread *thread = kthread_lookup(p);
280 
281 	thread->flags &= ~KTHREAD_SHOULDPARK;
282 	wakeup(thread);
283 }
284 
285 int
286 kthread_should_stop(void)
287 {
288 	struct kthread *thread = kthread_lookup(curproc);
289 	return (thread->flags & KTHREAD_SHOULDSTOP);
290 }
291 
292 void
293 kthread_stop(struct proc *p)
294 {
295 	struct kthread *thread = kthread_lookup(p);
296 
297 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
298 		thread->flags |= KTHREAD_SHOULDSTOP;
299 		wake_up_process(thread->proc);
300 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
301 	}
302 	LIST_REMOVE(thread, next);
303 	free(thread, M_DRM, sizeof(*thread));
304 }
305 
306 struct timespec
307 ns_to_timespec(const int64_t nsec)
308 {
309 	struct timespec ts;
310 	int32_t rem;
311 
312 	if (nsec == 0) {
313 		ts.tv_sec = 0;
314 		ts.tv_nsec = 0;
315 		return (ts);
316 	}
317 
318 	ts.tv_sec = nsec / NSEC_PER_SEC;
319 	rem = nsec % NSEC_PER_SEC;
320 	if (rem < 0) {
321 		ts.tv_sec--;
322 		rem += NSEC_PER_SEC;
323 	}
324 	ts.tv_nsec = rem;
325 	return (ts);
326 }
327 
328 int64_t
329 timeval_to_ns(const struct timeval *tv)
330 {
331 	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
332 		tv->tv_usec * NSEC_PER_USEC;
333 }
334 
335 struct timeval
336 ns_to_timeval(const int64_t nsec)
337 {
338 	struct timeval tv;
339 	int32_t rem;
340 
341 	if (nsec == 0) {
342 		tv.tv_sec = 0;
343 		tv.tv_usec = 0;
344 		return (tv);
345 	}
346 
347 	tv.tv_sec = nsec / NSEC_PER_SEC;
348 	rem = nsec % NSEC_PER_SEC;
349 	if (rem < 0) {
350 		tv.tv_sec--;
351 		rem += NSEC_PER_SEC;
352 	}
353 	tv.tv_usec = rem / 1000;
354 	return (tv);
355 }
356 
357 int64_t
358 timeval_to_ms(const struct timeval *tv)
359 {
360 	return ((int64_t)tv->tv_sec * 1000) + (tv->tv_usec / 1000);
361 }
362 
363 int64_t
364 timeval_to_us(const struct timeval *tv)
365 {
366 	return ((int64_t)tv->tv_sec * 1000000) + tv->tv_usec;
367 }
368 
369 extern char *hw_vendor, *hw_prod, *hw_ver;
370 
371 #if NBIOS > 0
372 extern char smbios_board_vendor[];
373 extern char smbios_board_prod[];
374 extern char smbios_board_serial[];
375 #endif
376 
377 bool
378 dmi_match(int slot, const char *str)
379 {
380 	switch (slot) {
381 	case DMI_SYS_VENDOR:
382 		if (hw_vendor != NULL &&
383 		    !strcmp(hw_vendor, str))
384 			return true;
385 		break;
386 	case DMI_PRODUCT_NAME:
387 		if (hw_prod != NULL &&
388 		    !strcmp(hw_prod, str))
389 			return true;
390 		break;
391 	case DMI_PRODUCT_VERSION:
392 		if (hw_ver != NULL &&
393 		    !strcmp(hw_ver, str))
394 			return true;
395 		break;
396 #if NBIOS > 0
397 	case DMI_BOARD_VENDOR:
398 		if (strcmp(smbios_board_vendor, str) == 0)
399 			return true;
400 		break;
401 	case DMI_BOARD_NAME:
402 		if (strcmp(smbios_board_prod, str) == 0)
403 			return true;
404 		break;
405 	case DMI_BOARD_SERIAL:
406 		if (strcmp(smbios_board_serial, str) == 0)
407 			return true;
408 		break;
409 #else
410 	case DMI_BOARD_VENDOR:
411 		if (hw_vendor != NULL &&
412 		    !strcmp(hw_vendor, str))
413 			return true;
414 		break;
415 	case DMI_BOARD_NAME:
416 		if (hw_prod != NULL &&
417 		    !strcmp(hw_prod, str))
418 			return true;
419 		break;
420 #endif
421 	case DMI_NONE:
422 	default:
423 		return false;
424 	}
425 
426 	return false;
427 }
428 
429 static bool
430 dmi_found(const struct dmi_system_id *dsi)
431 {
432 	int i, slot;
433 
434 	for (i = 0; i < nitems(dsi->matches); i++) {
435 		slot = dsi->matches[i].slot;
436 		if (slot == DMI_NONE)
437 			break;
438 		if (!dmi_match(slot, dsi->matches[i].substr))
439 			return false;
440 	}
441 
442 	return true;
443 }
444 
445 const struct dmi_system_id *
446 dmi_first_match(const struct dmi_system_id *sysid)
447 {
448 	const struct dmi_system_id *dsi;
449 
450 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
451 		if (dmi_found(dsi))
452 			return dsi;
453 	}
454 
455 	return NULL;
456 }
457 
458 #if NBIOS > 0
459 extern char smbios_bios_date[];
460 #endif
461 
462 const char *
463 dmi_get_system_info(int slot)
464 {
465 	WARN_ON(slot != DMI_BIOS_DATE);
466 #if NBIOS > 0
467 	if (slot == DMI_BIOS_DATE)
468 		return smbios_bios_date;
469 #endif
470 	return NULL;
471 }
472 
473 int
474 dmi_check_system(const struct dmi_system_id *sysid)
475 {
476 	const struct dmi_system_id *dsi;
477 	int num = 0;
478 
479 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
480 		if (dmi_found(dsi)) {
481 			num++;
482 			if (dsi->callback && dsi->callback(dsi))
483 				break;
484 		}
485 	}
486 	return (num);
487 }
488 
489 struct vm_page *
490 alloc_pages(unsigned int gfp_mask, unsigned int order)
491 {
492 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
493 	struct uvm_constraint_range *constraint = &no_constraint;
494 	struct pglist mlist;
495 
496 	if (gfp_mask & M_CANFAIL)
497 		flags |= UVM_PLA_FAILOK;
498 	if (gfp_mask & M_ZERO)
499 		flags |= UVM_PLA_ZERO;
500 	if (gfp_mask & __GFP_DMA32)
501 		constraint = &dma_constraint;
502 
503 	TAILQ_INIT(&mlist);
504 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
505 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
506 		return NULL;
507 	return TAILQ_FIRST(&mlist);
508 }
509 
510 void
511 __free_pages(struct vm_page *page, unsigned int order)
512 {
513 	struct pglist mlist;
514 	int i;
515 
516 	TAILQ_INIT(&mlist);
517 	for (i = 0; i < (1 << order); i++)
518 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
519 	uvm_pglistfree(&mlist);
520 }
521 
522 void
523 __pagevec_release(struct pagevec *pvec)
524 {
525 	struct pglist mlist;
526 	int i;
527 
528 	TAILQ_INIT(&mlist);
529 	for (i = 0; i < pvec->nr; i++)
530 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
531 	uvm_pglistfree(&mlist);
532 	pagevec_reinit(pvec);
533 }
534 
535 void *
536 kmap(struct vm_page *pg)
537 {
538 	vaddr_t va;
539 
540 #if defined (__HAVE_PMAP_DIRECT)
541 	va = pmap_map_direct(pg);
542 #else
543 	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
544 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
545 	pmap_update(pmap_kernel());
546 #endif
547 	return (void *)va;
548 }
549 
550 void
551 kunmap_va(void *addr)
552 {
553 	vaddr_t va = (vaddr_t)addr;
554 
555 #if defined (__HAVE_PMAP_DIRECT)
556 	pmap_unmap_direct(va);
557 #else
558 	pmap_kremove(va, PAGE_SIZE);
559 	pmap_update(pmap_kernel());
560 	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
561 #endif
562 }
563 
564 void *
565 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
566      pgprot_t prot)
567 {
568 	vaddr_t va;
569 	paddr_t pa;
570 	int i;
571 
572 	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
573 	if (va == 0)
574 		return NULL;
575 	for (i = 0; i < npages; i++) {
576 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
577 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
578 		    PROT_READ | PROT_WRITE,
579 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
580 		pmap_update(pmap_kernel());
581 	}
582 
583 	return (void *)va;
584 }
585 
586 void
587 vunmap(void *addr, size_t size)
588 {
589 	vaddr_t va = (vaddr_t)addr;
590 
591 	pmap_remove(pmap_kernel(), va, va + size);
592 	pmap_update(pmap_kernel());
593 	uvm_km_free(kernel_map, va, size);
594 }
595 
596 void
597 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
598     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
599 {
600 	const uint8_t *cbuf = buf;
601 	int i;
602 
603 	for (i = 0; i < len; i++) {
604 		if ((i % rowsize) == 0)
605 			printf("%s", prefix_str);
606 		printf("%02x", cbuf[i]);
607 		if ((i % rowsize) == (rowsize - 1))
608 			printf("\n");
609 		else
610 			printf(" ");
611 	}
612 }
613 
614 void *
615 memchr_inv(const void *s, int c, size_t n)
616 {
617 	if (n != 0) {
618 		const unsigned char *p = s;
619 
620 		do {
621 			if (*p++ != (unsigned char)c)
622 				return ((void *)(p - 1));
623 		} while (--n != 0);
624 	}
625 	return (NULL);
626 }
627 
628 int
629 panic_cmp(struct rb_node *a, struct rb_node *b)
630 {
631 	panic(__func__);
632 }
633 
634 #undef RB_ROOT
635 #define RB_ROOT(head)	(head)->rbh_root
636 
637 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
638 
639 /*
640  * This is a fairly minimal implementation of the Linux "idr" API.  It
641  * probably isn't very efficient, and definitely isn't RCU safe.  The
642  * pre-load buffer is global instead of per-cpu; we rely on the kernel
643  * lock to make this work.  IDs could be randomized to make them harder
644  * to guess, but that randomization is currently disabled (#ifdef notyet).
645  */
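
/*
 * Minimal usage sketch of the API implemented below (illustrative only:
 * "obj", the variable names and the GFP_KERNEL choice are assumptions,
 * not taken from a real caller):
 *
 *	struct idr handles;
 *	int id;
 *
 *	idr_init(&handles);
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(&handles, obj, 1, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	KASSERT(idr_find(&handles, id) == obj);
 *	idr_remove(&handles, id);
 *	idr_destroy(&handles);
 *
 * idr_alloc() returns -ENOMEM or -ENOSPC on failure, and an "end"
 * argument <= 0 means no upper bound (it is clamped to INT_MAX below).
 */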
646 
647 int idr_cmp(struct idr_entry *, struct idr_entry *);
648 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
649 
650 struct pool idr_pool;
651 struct idr_entry *idr_entry_cache;
652 
653 void
654 idr_init(struct idr *idr)
655 {
656 	static int initialized;
657 
658 	if (!initialized) {
659 		pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
660 		    "idrpl", NULL);
661 		initialized = 1;
662 	}
663 	SPLAY_INIT(&idr->tree);
664 }
665 
666 void
667 idr_destroy(struct idr *idr)
668 {
669 	struct idr_entry *id;
670 
671 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
672 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
673 		pool_put(&idr_pool, id);
674 	}
675 }
676 
677 void
678 idr_preload(unsigned int gfp_mask)
679 {
680 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
681 
682 	KERNEL_ASSERT_LOCKED();
683 
684 	if (idr_entry_cache == NULL)
685 		idr_entry_cache = pool_get(&idr_pool, flags);
686 }
687 
688 int
689 idr_alloc(struct idr *idr, void *ptr, int start, int end,
690     unsigned int gfp_mask)
691 {
692 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
693 	struct idr_entry *id;
694 	int begin;
695 
696 	KERNEL_ASSERT_LOCKED();
697 
698 	if (idr_entry_cache) {
699 		id = idr_entry_cache;
700 		idr_entry_cache = NULL;
701 	} else {
702 		id = pool_get(&idr_pool, flags);
703 		if (id == NULL)
704 			return -ENOMEM;
705 	}
706 
707 	if (end <= 0)
708 		end = INT_MAX;
709 
710 #ifdef notyet
711 	id->id = begin = start + arc4random_uniform(end - start);
712 #else
713 	id->id = begin = start;
714 #endif
715 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
716 		if (++id->id == end)
717 			id->id = start;
718 		if (id->id == begin) {
719 			pool_put(&idr_pool, id);
720 			return -ENOSPC;
721 		}
722 	}
723 	id->ptr = ptr;
724 	return id->id;
725 }
726 
727 void *
728 idr_replace(struct idr *idr, void *ptr, int id)
729 {
730 	struct idr_entry find, *res;
731 	void *old;
732 
733 	find.id = id;
734 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
735 	if (res == NULL)
736 		return ERR_PTR(-ENOENT);
737 	old = res->ptr;
738 	res->ptr = ptr;
739 	return old;
740 }
741 
742 void *
743 idr_remove(struct idr *idr, int id)
744 {
745 	struct idr_entry find, *res;
746 	void *ptr = NULL;
747 
748 	find.id = id;
749 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
750 	if (res) {
751 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
752 		ptr = res->ptr;
753 		pool_put(&idr_pool, res);
754 	}
755 	return ptr;
756 }
757 
758 void *
759 idr_find(struct idr *idr, int id)
760 {
761 	struct idr_entry find, *res;
762 
763 	find.id = id;
764 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
765 	if (res == NULL)
766 		return NULL;
767 	return res->ptr;
768 }
769 
770 void *
771 idr_get_next(struct idr *idr, int *id)
772 {
773 	struct idr_entry *res;
774 
775 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
776 		if (res->id >= *id) {
777 			*id = res->id;
778 			return res->ptr;
779 		}
780 	}
781 
782 	return NULL;
783 }
784 
785 int
786 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
787 {
788 	struct idr_entry *id;
789 	int ret;
790 
791 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
792 		ret = func(id->id, id->ptr, data);
793 		if (ret)
794 			return ret;
795 	}
796 
797 	return 0;
798 }
799 
800 int
801 idr_cmp(struct idr_entry *a, struct idr_entry *b)
802 {
803 	return (a->id < b->id ? -1 : a->id > b->id);
804 }
805 
806 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
807 
808 void
809 ida_init(struct ida *ida)
810 {
811 	ida->counter = 0;
812 }
813 
814 void
815 ida_destroy(struct ida *ida)
816 {
817 }
818 
819 void
820 ida_remove(struct ida *ida, int id)
821 {
822 }
823 
824 int
825 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
826     int flags)
827 {
828 	if (end <= 0)
829 		end = INT_MAX;
830 
831 	if (start > ida->counter)
832 		ida->counter = start;
833 
834 	if (ida->counter >= end)
835 		return -ENOSPC;
836 
837 	return ida->counter++;
838 }
839 
840 void
841 ida_simple_remove(struct ida *ida, int id)
842 {
843 }
844 
845 int
846 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
847 {
848 	return (a->id < b->id ? -1 : a->id > b->id);
849 }
850 
851 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
852 struct pool xa_pool;
853 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
854 
855 void
856 xa_init_flags(struct xarray *xa, gfp_t flags)
857 {
858 	static int initialized;
859 
860 	if (!initialized) {
861 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_TTY, 0,
862 		    "xapl", NULL);
863 		initialized = 1;
864 	}
865 	SPLAY_INIT(&xa->xa_tree);
866 }
867 
868 void
869 xa_destroy(struct xarray *xa)
870 {
871 	struct xarray_entry *id;
872 
873 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
874 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
875 		pool_put(&xa_pool, id);
876 	}
877 }
878 
879 int
880 xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
881 {
882 	struct xarray_entry *xid;
883 	int flags = (gfp & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
884 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
885 	int begin;
886 
887 	xid = pool_get(&xa_pool, flags);
888 	if (xid == NULL)
889 		return -ENOMEM;
890 
891 	if (limit <= 0)
892 		limit = INT_MAX;
893 
894 	xid->id = begin = start;
895 
896 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
897 		if (++xid->id == limit)
898 			xid->id = start;
899 		if (xid->id == begin) {
900 			pool_put(&xa_pool, xid);
901 			return -EBUSY;
902 		}
903 	}
904 	xid->ptr = entry;
905 	*id = xid->id;
906 	return 0;
907 }
908 
909 void *
910 xa_erase(struct xarray *xa, unsigned long index)
911 {
912 	struct xarray_entry find, *res;
913 	void *ptr = NULL;
914 
915 	find.id = index;
916 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
917 	if (res) {
918 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
919 		ptr = res->ptr;
920 		pool_put(&xa_pool, res);
921 	}
922 	return ptr;
923 }
924 
925 void *
926 xa_load(struct xarray *xa, unsigned long index)
927 {
928 	struct xarray_entry find, *res;
929 
930 	find.id = index;
931 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
932 	if (res == NULL)
933 		return NULL;
934 	return res->ptr;
935 }
936 
937 void *
938 xa_get_next(struct xarray *xa, unsigned long *index)
939 {
940 	struct xarray_entry *res;
941 
942 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
943 		if (res->id >= *index) {
944 			*index = res->id;
945 			return res->ptr;
946 		}
947 	}
948 
949 	return NULL;
950 }
951 
952 int
953 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
954 {
955 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
956 	    M_DRM, gfp_mask);
957 	if (table->sgl == NULL)
958 		return -ENOMEM;
959 	table->nents = table->orig_nents = nents;
960 	return 0;
961 }
962 
963 void
964 sg_free_table(struct sg_table *table)
965 {
966 	free(table->sgl, M_DRM,
967 	    table->orig_nents * sizeof(struct scatterlist));
968 }
969 
970 size_t
971 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
972     const void *buf, size_t buflen)
973 {
974 	panic("%s", __func__);
975 }
976 
977 int
978 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
979 {
980 	void *cmd = NULL;
981 	int cmdlen = 0;
982 	int err, ret = 0;
983 	int op;
984 
985 	iic_acquire_bus(&adap->ic, 0);
986 
987 	while (num > 2) {
988 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
989 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
990 		    msgs->buf, msgs->len, 0);
991 		if (err) {
992 			ret = -err;
993 			goto fail;
994 		}
995 		msgs++;
996 		num--;
997 		ret++;
998 	}
999 
1000 	if (num > 1) {
1001 		cmd = msgs->buf;
1002 		cmdlen = msgs->len;
1003 		msgs++;
1004 		num--;
1005 		ret++;
1006 	}
1007 
1008 	op = (msgs->flags & I2C_M_RD) ?
1009 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
1010 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
1011 	    msgs->buf, msgs->len, 0);
1012 	if (err) {
1013 		ret = -err;
1014 		goto fail;
1015 	}
1016 	msgs++;
1017 	ret++;
1018 
1019 fail:
1020 	iic_release_bus(&adap->ic, 0);
1021 
1022 	return ret;
1023 }
1024 
1025 int
1026 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1027 {
1028 	int ret;
1029 
1030 	if (adap->lock_ops)
1031 		adap->lock_ops->lock_bus(adap, 0);
1032 
1033 	if (adap->algo)
1034 		ret = adap->algo->master_xfer(adap, msgs, num);
1035 	else
1036 		ret = i2c_master_xfer(adap, msgs, num);
1037 
1038 	if (adap->lock_ops)
1039 		adap->lock_ops->unlock_bus(adap, 0);
1040 
1041 	return ret;
1042 }
1043 
1044 int
1045 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1046 {
1047 	struct i2c_algo_bit_data *algo = adap->algo_data;
1048 	struct i2c_adapter bb;
1049 
1050 	memset(&bb, 0, sizeof(bb));
1051 	bb.ic = algo->ic;
1052 	bb.retries = adap->retries;
1053 	return i2c_master_xfer(&bb, msgs, num);
1054 }
1055 
1056 uint32_t
1057 i2c_bb_functionality(struct i2c_adapter *adap)
1058 {
1059 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1060 }
1061 
1062 struct i2c_algorithm i2c_bit_algo = {
1063 	.master_xfer = i2c_bb_master_xfer,
1064 	.functionality = i2c_bb_functionality
1065 };
1066 
1067 int
1068 i2c_bit_add_bus(struct i2c_adapter *adap)
1069 {
1070 	adap->algo = &i2c_bit_algo;
1071 	adap->retries = 3;
1072 
1073 	return 0;
1074 }
1075 
1076 #if defined(__amd64__) || defined(__i386__)
1077 
1078 /*
1079  * This is a minimal implementation of the Linux vga_get/vga_put
1080  * interface.  In all likelihood, it will only work for inteldrm(4) as
1081  * it assumes that if there is another active VGA device in the
1082  * system, it is sitting behind a PCI bridge.
1083  */
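
/*
 * Expected calling pattern, shown here only as a sketch (the "rsrc"
 * argument is ignored by this implementation, so 0 is passed purely for
 * illustration):
 *
 *	vga_get_uninterruptible(pdev, 0);
 *	... touch legacy VGA resources ...
 *	vga_put(pdev, 0);
 *
 * vga_get_uninterruptible() disables VGA forwarding on the first bridge
 * found with it enabled; vga_put() re-enables forwarding on that bridge.
 */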
1084 
1085 extern int pci_enumerate_bus(struct pci_softc *,
1086     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1087 
1088 pcitag_t vga_bridge_tag;
1089 int vga_bridge_disabled;
1090 
1091 int
1092 vga_disable_bridge(struct pci_attach_args *pa)
1093 {
1094 	pcireg_t bhlc, bc;
1095 
1096 	if (pa->pa_domain != 0)
1097 		return 0;
1098 
1099 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1100 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1101 		return 0;
1102 
1103 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1104 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1105 		return 0;
1106 	bc &= ~PPB_BC_VGA_ENABLE;
1107 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1108 
1109 	vga_bridge_tag = pa->pa_tag;
1110 	vga_bridge_disabled = 1;
1111 
1112 	return 1;
1113 }
1114 
1115 void
1116 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1117 {
1118 	KASSERT(pdev->pci->sc_bridgetag == NULL);
1119 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1120 }
1121 
1122 void
1123 vga_put(struct pci_dev *pdev, int rsrc)
1124 {
1125 	pcireg_t bc;
1126 
1127 	if (!vga_bridge_disabled)
1128 		return;
1129 
1130 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1131 	bc |= PPB_BC_VGA_ENABLE;
1132 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1133 
1134 	vga_bridge_disabled = 0;
1135 }
1136 
1137 #endif
1138 
1139 /*
1140  * ACPI types and interfaces.
1141  */
1142 
1143 #ifdef __HAVE_ACPI
1144 #include "acpi.h"
1145 #endif
1146 
1147 #if NACPI > 0
1148 
1149 #include <dev/acpi/acpireg.h>
1150 #include <dev/acpi/acpivar.h>
1151 #include <dev/acpi/amltypes.h>
1152 #include <dev/acpi/dsdt.h>
1153 
1154 acpi_status
1155 acpi_get_table(const char *sig, int instance,
1156     struct acpi_table_header **hdr)
1157 {
1158 	struct acpi_softc *sc = acpi_softc;
1159 	struct acpi_q *entry;
1160 
1161 	KASSERT(instance == 1);
1162 
1163 	if (sc == NULL)
1164 		return AE_NOT_FOUND;
1165 
1166 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1167 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1168 			*hdr = entry->q_table;
1169 			return 0;
1170 		}
1171 	}
1172 
1173 	return AE_NOT_FOUND;
1174 }
1175 
1176 acpi_status
1177 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1178 {
1179 	node = aml_searchname(node, name);
1180 	if (node == NULL)
1181 		return AE_NOT_FOUND;
1182 
1183 	*rnode = node;
1184 	return 0;
1185 }
1186 
1187 acpi_status
1188 acpi_get_name(acpi_handle node, int type,  struct acpi_buffer *buffer)
1189 {
1190 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1191 	KASSERT(type == ACPI_FULL_PATHNAME);
1192 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1193 	return 0;
1194 }
1195 
1196 acpi_status
1197 acpi_evaluate_object(acpi_handle node, const char *name,
1198     struct acpi_object_list *params, struct acpi_buffer *result)
1199 {
1200 	struct aml_value args[4], res;
1201 	union acpi_object *obj;
1202 	uint8_t *data;
1203 	int i;
1204 
1205 	KASSERT(params->count <= nitems(args));
1206 
1207 	for (i = 0; i < params->count; i++) {
1208 		args[i].type = params->pointer[i].type;
1209 		switch (args[i].type) {
1210 		case AML_OBJTYPE_INTEGER:
1211 			args[i].v_integer = params->pointer[i].integer.value;
1212 			break;
1213 		case AML_OBJTYPE_BUFFER:
1214 			args[i].length = params->pointer[i].buffer.length;
1215 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1216 			break;
1217 		default:
1218 			printf("%s: arg type 0x%02x", __func__, args[i].type);
1219 			return AE_BAD_PARAMETER;
1220 		}
1221 	}
1222 
1223 	if (name) {
1224 		node = aml_searchname(node, name);
1225 		if (node == NULL)
1226 			return AE_NOT_FOUND;
1227 	}
1228 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1229 		aml_freevalue(&res);
1230 		return AE_ERROR;
1231 	}
1232 
1233 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1234 
1235 	result->length = sizeof(union acpi_object);
1236 	switch (res.type) {
1237 	case AML_OBJTYPE_BUFFER:
1238 		result->length += res.length;
1239 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1240 		obj = (union acpi_object *)result->pointer;
1241 		data = (uint8_t *)(obj + 1);
1242 		obj->type = res.type;
1243 		obj->buffer.length = res.length;
1244 		obj->buffer.pointer = data;
1245 		memcpy(data, res.v_buffer, res.length);
1246 		break;
1247 	default:
1248 		printf("%s: return type 0x%02x", __func__, res.type);
1249 		aml_freevalue(&res);
1250 		return AE_ERROR;
1251 	}
1252 
1253 	aml_freevalue(&res);
1254 	return 0;
1255 }
1256 
1257 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1258 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1259 
1260 int
1261 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1262 {
1263 	struct acpi_bus_event event;
1264 	struct notifier_block *nb;
1265 
1266 	event.device_class = ACPI_VIDEO_CLASS;
1267 	event.type = notify;
1268 
1269 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1270 		nb->notifier_call(nb, 0, &event);
1271 	return 0;
1272 }
1273 
1274 int
1275 register_acpi_notifier(struct notifier_block *nb)
1276 {
1277 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1278 	return 0;
1279 }
1280 
1281 int
1282 unregister_acpi_notifier(struct notifier_block *nb)
1283 {
1284 	struct notifier_block *tmp;
1285 
1286 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1287 		if (tmp == nb) {
1288 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1289 			    notifier_block, link);
1290 			return 0;
1291 		}
1292 	}
1293 
1294 	return -ENOENT;
1295 }
1296 
1297 const char *
1298 acpi_format_exception(acpi_status status)
1299 {
1300 	switch (status) {
1301 	case AE_NOT_FOUND:
1302 		return "not found";
1303 	case AE_BAD_PARAMETER:
1304 		return "bad parameter";
1305 	default:
1306 		return "unknown";
1307 	}
1308 }
1309 
1310 #endif
1311 
1312 void
1313 backlight_do_update_status(void *arg)
1314 {
1315 	backlight_update_status(arg);
1316 }
1317 
1318 struct backlight_device *
1319 backlight_device_register(const char *name, void *kdev, void *data,
1320     const struct backlight_ops *ops, struct backlight_properties *props)
1321 {
1322 	struct backlight_device *bd;
1323 
1324 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1325 	bd->ops = ops;
1326 	bd->props = *props;
1327 	bd->data = data;
1328 
1329 	task_set(&bd->task, backlight_do_update_status, bd);
1330 
1331 	return bd;
1332 }
1333 
1334 void
1335 backlight_device_unregister(struct backlight_device *bd)
1336 {
1337 	free(bd, M_DRM, sizeof(*bd));
1338 }
1339 
1340 void
1341 backlight_schedule_update_status(struct backlight_device *bd)
1342 {
1343 	task_add(systq, &bd->task);
1344 }
1345 
1346 inline int
1347 backlight_enable(struct backlight_device *bd)
1348 {
1349 	if (bd == NULL)
1350 		return 0;
1351 
1352 	bd->props.power = FB_BLANK_UNBLANK;
1353 
1354 	return bd->ops->update_status(bd);
1355 }
1356 
1357 inline int
1358 backlight_disable(struct backlight_device *bd)
1359 {
1360 	if (bd == NULL)
1361 		return 0;
1362 
1363 	bd->props.power = FB_BLANK_POWERDOWN;
1364 
1365 	return bd->ops->update_status(bd);
1366 }
1367 
1368 void
1369 drm_sysfs_hotplug_event(struct drm_device *dev)
1370 {
1371 	KNOTE(&dev->note, NOTE_CHANGE);
1372 }
1373 
1374 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1375 
1376 uint64_t
1377 dma_fence_context_alloc(unsigned int num)
1378 {
1379   return atomic64_add_return(num, &drm_fence_context_count) - num;
1380 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1381 
1382 struct default_wait_cb {
1383 	struct dma_fence_cb base;
1384 	struct proc *proc;
1385 };
1386 
1387 static void
1388 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1389 {
1390 	struct default_wait_cb *wait =
1391 	    container_of(cb, struct default_wait_cb, base);
1392 	wake_up_process(wait->proc);
1393 }
1394 
1395 long
1396 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1397 {
1398 	long ret = timeout ? timeout : 1;
1399 	int err;
1400 	struct default_wait_cb cb;
1401 	bool was_set;
1402 
1403 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1404 		return ret;
1405 
1406 	mtx_enter(fence->lock);
1407 
1408 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1409 	    &fence->flags);
1410 
1411 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1412 		goto out;
1413 
1414 	if (!was_set && fence->ops->enable_signaling) {
1415 		if (!fence->ops->enable_signaling(fence)) {
1416 			dma_fence_signal_locked(fence);
1417 			goto out;
1418 		}
1419 	}
1420 
1421 	if (timeout == 0) {
1422 		ret = 0;
1423 		goto out;
1424 	}
1425 
1426 	cb.base.func = dma_fence_default_wait_cb;
1427 	cb.proc = curproc;
1428 	list_add(&cb.base.node, &fence->cb_list);
1429 
1430 	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1431 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0, "dmafence",
1432 		    timeout);
1433 		if (err == EINTR || err == ERESTART) {
1434 			ret = -ERESTARTSYS;
1435 			break;
1436 		} else if (err == EWOULDBLOCK) {
1437 			ret = 0;
1438 			break;
1439 		}
1440 	}
1441 
1442 	if (!list_empty(&cb.base.node))
1443 		list_del(&cb.base.node);
1444 out:
1445 	mtx_leave(fence->lock);
1446 
1447 	return ret;
1448 }
1449 
1450 static bool
1451 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1452     uint32_t *idx)
1453 {
1454 	int i;
1455 
1456 	for (i = 0; i < count; ++i) {
1457 		struct dma_fence *fence = fences[i];
1458 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1459 			if (idx)
1460 				*idx = i;
1461 			return true;
1462 		}
1463 	}
1464 	return false;
1465 }
1466 
1467 long
1468 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
1469     bool intr, long timeout, uint32_t *idx)
1470 {
1471 	struct default_wait_cb *cb;
1472 	int i, err;
1473 	int ret = timeout;
1474 
1475 	if (timeout == 0) {
1476 		for (i = 0; i < count; i++) {
1477 			if (dma_fence_is_signaled(fences[i])) {
1478 				if (idx)
1479 					*idx = i;
1480 				return 1;
1481 			}
1482 		}
1483 		return 0;
1484 	}
1485 
1486 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1487 	if (cb == NULL)
1488 		return -ENOMEM;
1489 
1490 	for (i = 0; i < count; i++) {
1491 		struct dma_fence *fence = fences[i];
1492 		cb[i].proc = curproc;
1493 		if (dma_fence_add_callback(fence, &cb[i].base,
1494 		    dma_fence_default_wait_cb)) {
1495 			if (idx)
1496 				*idx = i;
1497 			goto cb_cleanup;
1498 		}
1499 	}
1500 
1501 	while (ret > 0) {
1502 		if (dma_fence_test_signaled_any(fences, count, idx))
1503 			break;
1504 
1505 		err = tsleep(curproc, intr ? PCATCH : 0,
1506 		    "dfwat", timeout);
1507 		if (err == EINTR || err == ERESTART) {
1508 			ret = -ERESTARTSYS;
1509 			break;
1510 		} else if (err == EWOULDBLOCK) {
1511 			ret = 0;
1512 			break;
1513 		}
1514 	}
1515 
1516 cb_cleanup:
1517 	while (i-- > 0)
1518 		dma_fence_remove_callback(fences[i], &cb[i].base);
1519 	free(cb, M_DRM, count * sizeof(*cb));
1520 	return ret;
1521 }
1522 
1523 static struct dma_fence dma_fence_stub;
1524 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
1525 
1526 static const char *
1527 dma_fence_stub_get_name(struct dma_fence *fence)
1528 {
1529 	return "stub";
1530 }
1531 
1532 static const struct dma_fence_ops dma_fence_stub_ops = {
1533 	.get_driver_name = dma_fence_stub_get_name,
1534 	.get_timeline_name = dma_fence_stub_get_name,
1535 };
1536 
1537 struct dma_fence *
1538 dma_fence_get_stub(void)
1539 {
1540 	mtx_enter(&dma_fence_stub_mtx);
1541 	if (dma_fence_stub.ops == NULL) {
1542 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
1543 		    &dma_fence_stub_mtx, 0, 0);
1544 		dma_fence_signal_locked(&dma_fence_stub);
1545 	}
1546 	mtx_leave(&dma_fence_stub_mtx);
1547 
1548 	return dma_fence_get(&dma_fence_stub);
1549 }
1550 
1551 static const char *
1552 dma_fence_array_get_driver_name(struct dma_fence *fence)
1553 {
1554 	return "dma_fence_array";
1555 }
1556 
1557 static const char *
1558 dma_fence_array_get_timeline_name(struct dma_fence *fence)
1559 {
1560 	return "unbound";
1561 }
1562 
1563 static void
1564 irq_dma_fence_array_work(struct irq_work *wrk)
1565 {
1566 	struct dma_fence_array *dfa = container_of(wrk, typeof(*dfa), work);
1567 
1568 	dma_fence_signal(&dfa->base);
1569 	dma_fence_put(&dfa->base);
1570 }
1571 
1572 static void
1573 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
1574 {
1575 	struct dma_fence_array_cb *array_cb =
1576 	    container_of(cb, struct dma_fence_array_cb, cb);
1577 	struct dma_fence_array *dfa = array_cb->array;
1578 
1579 	if (atomic_dec_and_test(&dfa->num_pending))
1580 		irq_work_queue(&dfa->work);
1581 	else
1582 		dma_fence_put(&dfa->base);
1583 }
1584 
1585 static bool
1586 dma_fence_array_enable_signaling(struct dma_fence *fence)
1587 {
1588 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1589 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
1590 	int i;
1591 
1592 	for (i = 0; i < dfa->num_fences; ++i) {
1593 		cb[i].array = dfa;
1594 		dma_fence_get(&dfa->base);
1595 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
1596 		    dma_fence_array_cb_func)) {
1597 			dma_fence_put(&dfa->base);
1598 			if (atomic_dec_and_test(&dfa->num_pending))
1599 				return false;
1600 		}
1601 	}
1602 
1603 	return true;
1604 }
1605 
1606 static bool dma_fence_array_signaled(struct dma_fence *fence)
1607 {
1608 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1609 
1610 	return atomic_read(&dfa->num_pending) <= 0;
1611 }
1612 
1613 static void dma_fence_array_release(struct dma_fence *fence)
1614 {
1615 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
1616 	int i;
1617 
1618 	for (i = 0; i < dfa->num_fences; ++i)
1619 		dma_fence_put(dfa->fences[i]);
1620 
1621 	free(dfa->fences, M_DRM, 0);
1622 	dma_fence_free(fence);
1623 }
1624 
1625 struct dma_fence_array *
1626 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
1627     unsigned seqno, bool signal_on_any)
1628 {
1629 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
1630 	    (num_fences * sizeof(struct dma_fence_array_cb)),
1631 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
1632 	if (dfa == NULL)
1633 		return NULL;
1634 
1635 	mtx_init(&dfa->lock, IPL_TTY);
1636 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
1637 	    context, seqno);
1638 	init_irq_work(&dfa->work, irq_dma_fence_array_work);
1639 
1640 	dfa->num_fences = num_fences;
1641 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
1642 	dfa->fences = fences;
1643 
1644 	return dfa;
1645 }
1646 
1647 const struct dma_fence_ops dma_fence_array_ops = {
1648 	.get_driver_name = dma_fence_array_get_driver_name,
1649 	.get_timeline_name = dma_fence_array_get_timeline_name,
1650 	.enable_signaling = dma_fence_array_enable_signaling,
1651 	.signaled = dma_fence_array_signaled,
1652 	.release = dma_fence_array_release,
1653 };
1654 
1655 int
1656 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
1657 {
1658 	return (ENXIO);
1659 }
1660 
1661 int
1662 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
1663 {
1664 	return (ENXIO);
1665 }
1666 
1667 int
1668 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
1669 {
1670 	return (ENOTTY);
1671 }
1672 
1673 int
1674 dmabuf_poll(struct file *fp, int events, struct proc *p)
1675 {
1676 	return (0);
1677 }
1678 
1679 int
1680 dmabuf_kqfilter(struct file *fp, struct knote *kn)
1681 {
1682 	return (EINVAL);
1683 }
1684 
1685 int
1686 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
1687 {
1688 	struct dma_buf *dmabuf = fp->f_data;
1689 
1690 	memset(st, 0, sizeof(*st));
1691 	st->st_size = dmabuf->size;
1692 	st->st_mode = S_IFIFO;	/* XXX */
1693 	return (0);
1694 }
1695 
1696 int
1697 dmabuf_close(struct file *fp, struct proc *p)
1698 {
1699 	struct dma_buf *dmabuf = fp->f_data;
1700 
1701 	fp->f_data = NULL;
1702 	KERNEL_LOCK();
1703 	dmabuf->ops->release(dmabuf);
1704 	KERNEL_UNLOCK();
1705 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
1706 	return (0);
1707 }
1708 
1709 int
1710 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
1711 {
1712 	struct dma_buf *dmabuf = fp->f_data;
1713 	off_t newoff;
1714 
1715 	if (*offset != 0)
1716 		return (EINVAL);
1717 
1718 	switch (whence) {
1719 	case SEEK_SET:
1720 		newoff = 0;
1721 		break;
1722 	case SEEK_END:
1723 		newoff = dmabuf->size;
1724 		break;
1725 	default:
1726 		return (EINVAL);
1727 	}
1728 	mtx_enter(&fp->f_mtx);
1729 	fp->f_offset = newoff;
1730 	mtx_leave(&fp->f_mtx);
1731 	*offset = newoff;
1732 	return (0);
1733 }
1734 
1735 const struct fileops dmabufops = {
1736 	.fo_read	= dmabuf_read,
1737 	.fo_write	= dmabuf_write,
1738 	.fo_ioctl	= dmabuf_ioctl,
1739 	.fo_poll	= dmabuf_poll,
1740 	.fo_kqfilter	= dmabuf_kqfilter,
1741 	.fo_stat	= dmabuf_stat,
1742 	.fo_close	= dmabuf_close,
1743 	.fo_seek	= dmabuf_seek,
1744 };
1745 
1746 struct dma_buf *
1747 dma_buf_export(const struct dma_buf_export_info *info)
1748 {
1749 	struct proc *p = curproc;
1750 	struct dma_buf *dmabuf;
1751 	struct file *fp;
1752 
1753 	fp = fnew(p);
1754 	if (fp == NULL)
1755 		return ERR_PTR(-ENFILE);
1756 	fp->f_type = DTYPE_DMABUF;
1757 	fp->f_ops = &dmabufops;
1758 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
1759 	dmabuf->priv = info->priv;
1760 	dmabuf->ops = info->ops;
1761 	dmabuf->size = info->size;
1762 	dmabuf->file = fp;
1763 	fp->f_data = dmabuf;
1764 	INIT_LIST_HEAD(&dmabuf->attachments);
1765 	return dmabuf;
1766 }
1767 
1768 struct dma_buf *
1769 dma_buf_get(int fd)
1770 {
1771 	struct proc *p = curproc;
1772 	struct filedesc *fdp = p->p_fd;
1773 	struct file *fp;
1774 
1775 	if ((fp = fd_getfile(fdp, fd)) == NULL)
1776 		return ERR_PTR(-EBADF);
1777 
1778 	if (fp->f_type != DTYPE_DMABUF) {
1779 		FRELE(fp, p);
1780 		return ERR_PTR(-EINVAL);
1781 	}
1782 
1783 	return fp->f_data;
1784 }
1785 
1786 void
1787 dma_buf_put(struct dma_buf *dmabuf)
1788 {
1789 	KASSERT(dmabuf);
1790 	KASSERT(dmabuf->file);
1791 
1792 	FRELE(dmabuf->file, curproc);
1793 }
1794 
1795 int
1796 dma_buf_fd(struct dma_buf *dmabuf, int flags)
1797 {
1798 	struct proc *p = curproc;
1799 	struct filedesc *fdp = p->p_fd;
1800 	struct file *fp = dmabuf->file;
1801 	int fd, cloexec, error;
1802 
1803 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
1804 
1805 	fdplock(fdp);
1806 restart:
1807 	if ((error = fdalloc(p, 0, &fd)) != 0) {
1808 		if (error == ENOSPC) {
1809 			fdexpand(p);
1810 			goto restart;
1811 		}
1812 		fdpunlock(fdp);
1813 		return -error;
1814 	}
1815 
1816 	fdinsert(fdp, fd, cloexec, fp);
1817 	fdpunlock(fdp);
1818 
1819 	return fd;
1820 }
1821 
1822 void
1823 get_dma_buf(struct dma_buf *dmabuf)
1824 {
1825 	FREF(dmabuf->file);
1826 }
1827 
1828 enum pci_bus_speed
1829 pcie_get_speed_cap(struct pci_dev *pdev)
1830 {
1831 	pci_chipset_tag_t	pc = pdev->pc;
1832 	pcitag_t		tag = pdev->tag;
1833 	int			pos;
1834 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
1835 	pcireg_t		id;
1836 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
1837 	int			bus, device, function;
1838 
1839 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1840 	    &pos, NULL))
1841 		return PCI_SPEED_UNKNOWN;
1842 
1843 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1844 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1845 
1846 	/* we've been informed that VIA and ServerWorks don't make the cut */
1847 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
1848 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
1849 		return PCI_SPEED_UNKNOWN;
1850 
1851 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1852 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
1853 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
1854 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
1855 
1856 	lnkcap &= 0x0f;
1857 	lnkcap2 &= 0xfe;
1858 
1859 	if (lnkcap2) { /* PCIE GEN 3.0 */
1860 		if (lnkcap2 & 0x02)
1861 			cap = PCIE_SPEED_2_5GT;
1862 		if (lnkcap2 & 0x04)
1863 			cap = PCIE_SPEED_5_0GT;
1864 		if (lnkcap2 & 0x08)
1865 			cap = PCIE_SPEED_8_0GT;
1866 		if (lnkcap2 & 0x10)
1867 			cap = PCIE_SPEED_16_0GT;
1868 	} else {
1869 		if (lnkcap & 0x01)
1870 			cap = PCIE_SPEED_2_5GT;
1871 		if (lnkcap & 0x02)
1872 			cap = PCIE_SPEED_5_0GT;
1873 	}
1874 
1875 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
1876 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
1877 	    lnkcap2);
1878 	return cap;
1879 }
1880 
1881 enum pcie_link_width
1882 pcie_get_width_cap(struct pci_dev *pdev)
1883 {
1884 	pci_chipset_tag_t	pc = pdev->pc;
1885 	pcitag_t		tag = pdev->tag;
1886 	int			pos;
1887 	pcireg_t		lnkcap = 0;
1888 	pcireg_t		id;
1889 	int			bus, device, function;
1890 
1891 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1892 	    &pos, NULL))
1893 		return PCIE_LNK_WIDTH_UNKNOWN;
1894 
1895 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1896 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1897 
1898 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1899 
1900 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
1901 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
1902 
1903 	if (lnkcap)
1904 		return (lnkcap & 0x3f0) >> 4;
1905 	return PCIE_LNK_WIDTH_UNKNOWN;
1906 }
1907 
1908 int
1909 default_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1910     int sync, void *key)
1911 {
1912 	wakeup(wqe);
1913 	if (wqe->proc)
1914 		wake_up_process(wqe->proc);
1915 	return 0;
1916 }
1917 
1918 int
1919 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1920     int sync, void *key)
1921 {
1922 	default_wake_function(wqe, mode, sync, key);
1923 	list_del_init(&wqe->entry);
1924 	return 0;
1925 }
1926 
1927 static wait_queue_head_t bit_waitq;
1928 wait_queue_head_t var_waitq;
1929 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
1930 
1931 int
1932 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1933 {
1934 	int err;
1935 
1936 	if (!test_bit(bit, word))
1937 		return 0;
1938 
1939 	mtx_enter(&wait_bit_mtx);
1940 	while (test_bit(bit, word)) {
1941 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
1942 		    INFSLP);
1943 		if (err) {
1944 			mtx_leave(&wait_bit_mtx);
1945 			return 1;
1946 		}
1947 	}
1948 	mtx_leave(&wait_bit_mtx);
1949 	return 0;
1950 }
1951 
1952 int
1953 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
1954 {
1955 	int err;
1956 
1957 	if (!test_bit(bit, word))
1958 		return 0;
1959 
1960 	mtx_enter(&wait_bit_mtx);
1961 	while (test_bit(bit, word)) {
1962 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
1963 		if (err) {
1964 			mtx_leave(&wait_bit_mtx);
1965 			return 1;
1966 		}
1967 	}
1968 	mtx_leave(&wait_bit_mtx);
1969 	return 0;
1970 }
1971 
1972 void
1973 wake_up_bit(void *word, int bit)
1974 {
1975 	mtx_enter(&wait_bit_mtx);
1976 	wakeup(word);
1977 	mtx_leave(&wait_bit_mtx);
1978 }
1979 
1980 void
1981 clear_and_wake_up_bit(int bit, void *word)
1982 {
1983 	clear_bit(bit, word);
1984 	wake_up_bit(word, bit);
1985 }
1986 
1987 wait_queue_head_t *
1988 bit_waitqueue(void *word, int bit)
1989 {
1990 	/* XXX hash table of wait queues? */
1991 	return &bit_waitq;
1992 }
1993 
1994 struct workqueue_struct *system_wq;
1995 struct workqueue_struct *system_highpri_wq;
1996 struct workqueue_struct *system_unbound_wq;
1997 struct workqueue_struct *system_long_wq;
1998 struct taskq *taskletq;
1999 
2000 void
2001 drm_linux_init(void)
2002 {
2003 	if (system_wq == NULL) {
2004 		system_wq = (struct workqueue_struct *)
2005 		    taskq_create("drmwq", 4, IPL_HIGH, 0);
2006 	}
2007 	if (system_highpri_wq == NULL) {
2008 		system_highpri_wq = (struct workqueue_struct *)
2009 		    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
2010 	}
2011 	if (system_unbound_wq == NULL) {
2012 		system_unbound_wq = (struct workqueue_struct *)
2013 		    taskq_create("drmubwq", 4, IPL_HIGH, 0);
2014 	}
2015 	if (system_long_wq == NULL) {
2016 		system_long_wq = (struct workqueue_struct *)
2017 		    taskq_create("drmlwq", 4, IPL_HIGH, 0);
2018 	}
2019 
2020 	if (taskletq == NULL)
2021 		taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
2022 
2023 	init_waitqueue_head(&bit_waitq);
2024 	init_waitqueue_head(&var_waitq);
2025 }
2026 
2027 #define PCIE_ECAP_RESIZE_BAR	0x15
2028 #define RBCAP0			0x04
2029 #define RBCTRL0			0x08
2030 #define RBCTRL_BARINDEX_MASK	0x07
2031 #define RBCTRL_BARSIZE_MASK	0x1f00
2032 #define RBCTRL_BARSIZE_SHIFT	8
2033 
2034 /* size in MB is 1 << nsize */
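/*
 * Worked example of that encoding: nsize = 8 requests a 1 << 8 = 256 MB
 * BAR and nsize = 10 requests 1 GB.  The RBCAP0 check below requires the
 * device to advertise the requested size (bit nsize + 4) before the BAR
 * size control register is rewritten.
 */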
2035 int
2036 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
2037 {
2038 	pcireg_t	reg;
2039 	uint32_t	offset, capid;
2040 
2041 	KASSERT(bar == 0);
2042 
2043 	offset = PCI_PCIE_ECAP;
2044 
2045 	/* search PCI Express Extended Capabilities */
2046 	do {
2047 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
2048 		capid = PCI_PCIE_ECAP_ID(reg);
2049 		if (capid == PCIE_ECAP_RESIZE_BAR)
2050 			break;
2051 		offset = PCI_PCIE_ECAP_NEXT(reg);
2052 	} while (capid != 0);
2053 
2054 	if (capid == 0) {
2055 		printf("%s: could not find resize bar cap!\n", __func__);
2056 		return -ENOTSUP;
2057 	}
2058 
2059 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
2060 
2061 	if ((reg & (1 << (nsize + 4))) == 0) {
2062 		printf("%s size not supported\n", __func__);
2063 		return -ENOTSUP;
2064 	}
2065 
2066 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2067 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2068 		printf("%s BAR index not 0\n", __func__);
2069 		return -EINVAL;
2070 	}
2071 
2072 	reg &= ~RBCTRL_BARSIZE_MASK;
2073 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2074 
2075 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2076 
2077 	return 0;
2078 }
2079 
2080 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2081 
2082 int
2083 register_shrinker(struct shrinker *shrinker)
2084 {
2085 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2086 	return 0;
2087 }
2088 
2089 void
2090 unregister_shrinker(struct shrinker *shrinker)
2091 {
2092 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2093 }
2094 
2095 void
2096 drmbackoff(long npages)
2097 {
2098 	struct shrink_control sc;
2099 	struct shrinker *shrinker;
2100 	u_long ret;
2101 
2102 	shrinker = TAILQ_FIRST(&shrinkers);
2103 	while (shrinker && npages > 0) {
2104 		sc.nr_to_scan = npages;
2105 		ret = shrinker->scan_objects(shrinker, &sc);
2106 		npages -= ret;
2107 		shrinker = TAILQ_NEXT(shrinker, next);
2108 	}
2109 }
2110 
2111 void *
2112 bitmap_zalloc(u_int n, gfp_t flags)
2113 {
2114 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2115 }
2116 
2117 void
2118 bitmap_free(void *p)
2119 {
2120 	kfree(p);
2121 }
2122 
2123 int
2124 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2125 {
2126 	if (atomic_add_unless(v, -1, 1))
2127 		return 0;
2128 
2129 	rw_enter_write(lock);
2130 	if (atomic_dec_return(v) == 0)
2131 		return 1;
2132 	rw_exit_write(lock);
2133 	return 0;
2134 }
2135 
2136 int
2137 printk(const char *fmt, ...)
2138 {
2139 	int ret, level;
2140 	va_list ap;
2141 
2142 	if (fmt != NULL && *fmt == '\001') {
2143 		level = fmt[1];
2144 #ifndef DRMDEBUG
2145 		if (level >= KERN_INFO[1] && level <= '9')
2146 			return 0;
2147 #endif
2148 		fmt += 2;
2149 	}
2150 
2151 	va_start(ap, fmt);
2152 	ret = vprintf(fmt, ap);
2153 	va_end(ap);
2154 
2155 	return ret;
2156 }
2157