/*	$OpenBSD: drm_linux.c,v 1.37 2019/06/04 12:08:22 jsg Exp $	*/
/*
 * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
 * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <drm/drmP.h>
#include <dev/pci/ppbreg.h>
#include <sys/event.h>
#include <sys/filedesc.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <linux/dma-buf.h>
#include <linux/mod_devicetable.h>
#include <linux/acpi.h>
#include <linux/pagevec.h>
#include <linux/dma-fence-array.h>

void
tasklet_run(void *arg)
{
	struct tasklet_struct *ts = arg;

	clear_bit(TASKLET_STATE_SCHED, &ts->state);
	if (tasklet_trylock(ts)) {
		if (!atomic_read(&ts->count))
			ts->func(ts->data);
		tasklet_unlock(ts);
	}
}

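/*
 * Minimal emulation of the Linux set_current_state()/schedule_timeout()/
 * wake_up_process() interface.  A single mutex and a single
 * (proc, ident, priority) triple are shared by all sleepers; that is
 * sufficient for the drm code but is not a general implementation.
 */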
struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
volatile struct proc *sch_proc;
volatile void *sch_ident;
int sch_priority;

void
set_current_state(int state)
{
	if (sch_ident != curproc)
		mtx_enter(&sch_mtx);
	MUTEX_ASSERT_LOCKED(&sch_mtx);
	sch_ident = sch_proc = curproc;
	sch_priority = state;
}

void
__set_current_state(int state)
{
	KASSERT(state == TASK_RUNNING);
	if (sch_ident == curproc) {
		MUTEX_ASSERT_LOCKED(&sch_mtx);
		sch_ident = NULL;
		mtx_leave(&sch_mtx);
	}
}

void
schedule(void)
{
	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

long
schedule_timeout(long timeout)
{
	struct sleep_state sls;
	long deadline;
	int wait, spl;

	MUTEX_ASSERT_LOCKED(&sch_mtx);
	KASSERT(!cold);

	sleep_setup(&sls, sch_ident, sch_priority, "schto");
	if (timeout != MAX_SCHEDULE_TIMEOUT)
		sleep_setup_timeout(&sls, timeout);
	sleep_setup_signal(&sls, sch_priority);

	wait = (sch_proc == curproc && timeout > 0);

	spl = MUTEX_OLDIPL(&sch_mtx);
	MUTEX_OLDIPL(&sch_mtx) = splsched();
	mtx_leave(&sch_mtx);

	if (timeout != MAX_SCHEDULE_TIMEOUT)
		deadline = ticks + timeout;
	sleep_finish_all(&sls, wait);
	if (timeout != MAX_SCHEDULE_TIMEOUT)
		timeout = deadline - ticks;

	mtx_enter(&sch_mtx);
	MUTEX_OLDIPL(&sch_mtx) = spl;
	sch_ident = curproc;

	return timeout > 0 ? timeout : 0;
}

int
wake_up_process(struct proc *p)
{
	int s, r = 0;

	SCHED_LOCK(s);
	atomic_cas_ptr(&sch_proc, p, NULL);
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP) {
			setrunnable(p);
			r = 1;
		} else
			unsleep(p);
	}
	SCHED_UNLOCK(s);

	return r;
}

void
flush_workqueue(struct workqueue_struct *wq)
{
	if (cold)
		return;

	taskq_barrier((struct taskq *)wq);
}

bool
flush_work(struct work_struct *work)
{
	if (cold)
		return false;

	taskq_barrier(work->tq);
	return false;
}

bool
flush_delayed_work(struct delayed_work *dwork)
{
	bool ret = false;

	if (cold)
		return false;

	while (timeout_pending(&dwork->to)) {
		tsleep(dwork, PWAIT, "fldwto", 1);
		ret = true;
	}

	taskq_barrier(dwork->tq ? dwork->tq : (struct taskq *)system_wq);
	return ret;
}

struct timespec
ns_to_timespec(const int64_t nsec)
{
	struct timespec ts;
	int32_t rem;

	if (nsec == 0) {
		ts.tv_sec = 0;
		ts.tv_nsec = 0;
		return (ts);
	}

	ts.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		ts.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	ts.tv_nsec = rem;
	return (ts);
}

int64_t
timeval_to_ns(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
		tv->tv_usec * NSEC_PER_USEC;
}

struct timeval
ns_to_timeval(const int64_t nsec)
{
	struct timeval tv;
	int32_t rem;

	if (nsec == 0) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		return (tv);
	}

	tv.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		tv.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	tv.tv_usec = rem / 1000;
	return (tv);
}

int64_t
timeval_to_ms(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * 1000) + (tv->tv_usec / 1000);
}

int64_t
timeval_to_us(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * 1000000) + tv->tv_usec;
}

extern char *hw_vendor, *hw_prod, *hw_ver;

bool
dmi_match(int slot, const char *str)
{
	switch (slot) {
	case DMI_SYS_VENDOR:
	case DMI_BOARD_VENDOR:
		if (hw_vendor != NULL &&
		    !strcmp(hw_vendor, str))
			return true;
		break;
	case DMI_PRODUCT_NAME:
	case DMI_BOARD_NAME:
		if (hw_prod != NULL &&
		    !strcmp(hw_prod, str))
			return true;
		break;
	case DMI_PRODUCT_VERSION:
	case DMI_BOARD_VERSION:
		if (hw_ver != NULL &&
		    !strcmp(hw_ver, str))
			return true;
		break;
	case DMI_NONE:
	default:
		return false;
	}

	return false;
}

static bool
dmi_found(const struct dmi_system_id *dsi)
{
	int i, slot;

	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		if (slot == DMI_NONE)
			break;
		if (!dmi_match(slot, dsi->matches[i].substr))
			return false;
	}

	return true;
}

int
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	int num = 0;

	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi)) {
			num++;
			if (dsi->callback && dsi->callback(dsi))
				break;
		}
	}
	return (num);
}

struct vm_page *
alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	struct pglist mlist;

	if (gfp_mask & M_CANFAIL)
		flags |= UVM_PLA_FAILOK;
	if (gfp_mask & M_ZERO)
		flags |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(PAGE_SIZE << order, dma_constraint.ucr_low,
	    dma_constraint.ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
		return NULL;
	return TAILQ_FIRST(&mlist);
}

void
__free_pages(struct vm_page *page, unsigned int order)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < (1 << order); i++)
		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
	uvm_pglistfree(&mlist);
}

void
__pagevec_release(struct pagevec *pvec)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < pvec->nr; i++)
		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
	uvm_pglistfree(&mlist);
	pagevec_reinit(pvec);
}

void *
kmap(struct vm_page *pg)
{
	vaddr_t va;

#if defined (__HAVE_PMAP_DIRECT)
	va = pmap_map_direct(pg);
#else
	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
#endif
	return (void *)va;
}

void
kunmap(void *addr)
{
	vaddr_t va = (vaddr_t)addr;

#if defined (__HAVE_PMAP_DIRECT)
	pmap_unmap_direct(va);
#else
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
#endif
}

void *
vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
     pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
	if (va == 0)
		return NULL;
	for (i = 0; i < npages; i++) {
		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

void
vunmap(void *addr, size_t size)
{
	vaddr_t va = (vaddr_t)addr;

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size);
}

void
print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
    int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
{
	const uint8_t *cbuf = buf;
	int i;

	for (i = 0; i < len; i++) {
		if ((i % rowsize) == 0)
			printf("%s", prefix_str);
		printf("%02x", cbuf[i]);
		if ((i % rowsize) == (rowsize - 1))
			printf("\n");
		else
			printf(" ");
	}
}

void *
memchr_inv(const void *s, int c, size_t n)
{
	if (n != 0) {
		const unsigned char *p = s;

		do {
			if (*p++ != (unsigned char)c)
				return ((void *)(p - 1));
		} while (--n != 0);
	}
	return (NULL);
}

int
panic_cmp(struct rb_node *a, struct rb_node *b)
{
	panic(__func__);
}

#undef RB_ROOT
#define RB_ROOT(head)	(head)->rbh_root

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

/*
 * This is a fairly minimal implementation of the Linux "idr" API.  It
 * probably isn't very efficient, and definitely isn't RCU safe.  The
 * pre-load buffer is global instead of per-cpu; we rely on the kernel
 * lock to make this work.  We do randomize our IDs in order to make
 * them harder to guess (the randomization is currently disabled, see
 * the "notyet" block in idr_alloc()).
 */
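
/*
 * Typical usage, sketched for illustration only (error handling elided;
 * "obj" is a placeholder pointer and GFP_KERNEL stands for an allocation
 * that may sleep):
 *
 *	struct idr idr;
 *	int id;
 *
 *	idr_init(&idr);
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(&idr, obj, 1, 0, GFP_KERNEL);
 *	...
 *	obj = idr_find(&idr, id);
 *	idr_remove(&idr, id);
 *	idr_destroy(&idr);
 */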

int idr_cmp(struct idr_entry *, struct idr_entry *);
SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);

struct pool idr_pool;
struct idr_entry *idr_entry_cache;

void
idr_init(struct idr *idr)
{
	static int initialized;

	if (!initialized) {
		pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
		    "idrpl", NULL);
		initialized = 1;
	}
	SPLAY_INIT(&idr->tree);
}

void
idr_destroy(struct idr *idr)
{
	struct idr_entry *id;

	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
		SPLAY_REMOVE(idr_tree, &idr->tree, id);
		pool_put(&idr_pool, id);
	}
}

void
idr_preload(unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache == NULL)
		idr_entry_cache = pool_get(&idr_pool, flags);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end,
    unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	struct idr_entry *id;
	int begin;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache) {
		id = idr_entry_cache;
		idr_entry_cache = NULL;
	} else {
		id = pool_get(&idr_pool, flags);
		if (id == NULL)
			return -ENOMEM;
	}

	if (end <= 0)
		end = INT_MAX;

#ifdef notyet
	id->id = begin = start + arc4random_uniform(end - start);
#else
	id->id = begin = start;
#endif
	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
		if (++id->id == end)
			id->id = start;
		if (id->id == begin) {
			pool_put(&idr_pool, id);
			return -ENOSPC;
		}
	}
	id->ptr = ptr;
	return id->id;
}

void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_entry find, *res;
	void *old;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return ERR_PTR(-ENOENT);
	old = res->ptr;
	res->ptr = ptr;
	return old;
}

void *
idr_remove(struct idr *idr, int id)
{
	struct idr_entry find, *res;
	void *ptr = NULL;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res) {
		SPLAY_REMOVE(idr_tree, &idr->tree, res);
		ptr = res->ptr;
		pool_put(&idr_pool, res);
	}
	return ptr;
}

void *
idr_find(struct idr *idr, int id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return NULL;
	return res->ptr;
}

void *
idr_get_next(struct idr *idr, int *id)
{
	struct idr_entry *res;

	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
		if (res->id >= *id) {
			*id = res->id;
			return res->ptr;
		}
	}

	return NULL;
}

int
idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
{
	struct idr_entry *id;
	int ret;

	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
		ret = func(id->id, id->ptr, data);
		if (ret)
			return ret;
	}

	return 0;
}

int
idr_cmp(struct idr_entry *a, struct idr_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);

void
ida_init(struct ida *ida)
{
	ida->counter = 0;
}

void
ida_destroy(struct ida *ida)
{
}

void
ida_remove(struct ida *ida, int id)
{
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    int flags)
{
	if (end <= 0)
		end = INT_MAX;

	if (start > ida->counter)
		ida->counter = start;

	if (ida->counter >= end)
		return -ENOSPC;

	return ida->counter++;
}

void
ida_simple_remove(struct ida *ida, int id)
{
}

int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
	    M_DRM, gfp_mask);
	if (table->sgl == NULL)
		return -ENOMEM;
	table->nents = table->orig_nents = nents;
	return 0;
}

void
sg_free_table(struct sg_table *table)
{
	free(table->sgl, M_DRM,
	    table->orig_nents * sizeof(struct scatterlist));
}

size_t
sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen)
{
	panic("%s", __func__);
}

int
i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	void *cmd = NULL;
	int cmdlen = 0;
	int err, ret = 0;
	int op;

	iic_acquire_bus(&adap->ic, 0);

	while (num > 2) {
		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
		    msgs->buf, msgs->len, 0);
		if (err) {
			ret = -err;
			goto fail;
		}
		msgs++;
		num--;
		ret++;
	}

	if (num > 1) {
		cmd = msgs->buf;
		cmdlen = msgs->len;
		msgs++;
		num--;
		ret++;
	}

	op = (msgs->flags & I2C_M_RD) ?
	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
	    msgs->buf, msgs->len, 0);
	if (err) {
		ret = -err;
		goto fail;
	}
	msgs++;
	ret++;

fail:
	iic_release_bus(&adap->ic, 0);

	return ret;
}

int
i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	if (adap->algo)
		return adap->algo->master_xfer(adap, msgs, num);

	return i2c_master_xfer(adap, msgs, num);
}

int
i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct i2c_algo_bit_data *algo = adap->algo_data;
	struct i2c_adapter bb;

	memset(&bb, 0, sizeof(bb));
	bb.ic = algo->ic;
	bb.retries = adap->retries;
	return i2c_master_xfer(&bb, msgs, num);
}

uint32_t
i2c_bb_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

struct i2c_algorithm i2c_bit_algo = {
	.master_xfer = i2c_bb_master_xfer,
	.functionality = i2c_bb_functionality
};

int
i2c_bit_add_bus(struct i2c_adapter *adap)
{
	adap->algo = &i2c_bit_algo;
	adap->retries = 3;

	return 0;
}

#if defined(__amd64__) || defined(__i386__)

/*
 * This is a minimal implementation of the Linux vga_get/vga_put
 * interface.  In all likelihood, it will only work for inteldrm(4) as
 * it assumes that if there is another active VGA device in the
 * system, it is sitting behind a PCI bridge.
 */
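
/*
 * Sketch of the intended call pattern (illustrative only; the rsrc
 * argument is ignored by this implementation):
 *
 *	vga_get_uninterruptible(pdev, 0);
 *	... touch legacy VGA resources ...
 *	vga_put(pdev, 0);
 */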

extern int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);

pcitag_t vga_bridge_tag;
int vga_bridge_disabled;

int
vga_disable_bridge(struct pci_attach_args *pa)
{
	pcireg_t bhlc, bc;

	if (pa->pa_domain != 0)
		return 0;

	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return 0;

	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
	if ((bc & PPB_BC_VGA_ENABLE) == 0)
		return 0;
	bc &= ~PPB_BC_VGA_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_tag = pa->pa_tag;
	vga_bridge_disabled = 1;

	return 1;
}

void
vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
{
	KASSERT(pdev->pci->sc_bridgetag == NULL);
	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
}

void
vga_put(struct pci_dev *pdev, int rsrc)
{
	pcireg_t bc;

	if (!vga_bridge_disabled)
		return;

	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
	bc |= PPB_BC_VGA_ENABLE;
	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_disabled = 0;
}

#endif

/*
 * ACPI types and interfaces.
 */

#ifdef __HAVE_ACPI
#include "acpi.h"
#endif

#if NACPI > 0

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>

acpi_status
acpi_get_table(const char *sig, int instance,
    struct acpi_table_header **hdr)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpi_q *entry;

	KASSERT(instance == 1);

	if (sc == NULL)
		return AE_NOT_FOUND;

	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
			*hdr = entry->q_table;
			return 0;
		}
	}

	return AE_NOT_FOUND;
}

#endif

void
backlight_do_update_status(void *arg)
{
	backlight_update_status(arg);
}

struct backlight_device *
backlight_device_register(const char *name, void *kdev, void *data,
    const struct backlight_ops *ops, struct backlight_properties *props)
{
	struct backlight_device *bd;

	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
	bd->ops = ops;
	bd->props = *props;
	bd->data = data;

	task_set(&bd->task, backlight_do_update_status, bd);

	return bd;
}

void
backlight_device_unregister(struct backlight_device *bd)
{
	free(bd, M_DRM, sizeof(*bd));
}

void
backlight_schedule_update_status(struct backlight_device *bd)
{
	task_add(systq, &bd->task);
}

void
drm_sysfs_hotplug_event(struct drm_device *dev)
{
	KNOTE(&dev->note, NOTE_CHANGE);
}

unsigned int drm_fence_count;

unsigned int
dma_fence_context_alloc(unsigned int num)
{
	return __sync_add_and_fetch(&drm_fence_count, num) - num;
}

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	wakeup(fence);
}

long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	long ret = timeout ? timeout : 1;
	int err;
	struct dma_fence_cb cb;
	bool was_set;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return ret;

	mtx_enter(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			goto out;
		}
	}

	if (timeout == 0) {
		ret = 0;
		goto out;
	}

	cb.func = dma_fence_default_wait_cb;
	list_add(&cb.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		err = msleep(fence, fence->lock, intr ? PCATCH : 0, "dmafence",
		    timeout);
		if (err == EINTR || err == ERESTART) {
			ret = -ERESTARTSYS;
			break;
		} else if (err == EWOULDBLOCK) {
			ret = 0;
			break;
		}
	}

	if (!list_empty(&cb.node))
		list_del(&cb.node);
out:
	mtx_leave(fence->lock);

	return ret;
}

static const char *
dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}

static const char *
dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void
irq_dma_fence_array_work(struct irq_work *wrk)
{
	struct dma_fence_array *dfa = container_of(wrk, typeof(*dfa), work);

	dma_fence_signal(&dfa->base);
	dma_fence_put(&dfa->base);
}

static void
dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
	    container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *dfa = array_cb->array;

	if (atomic_dec_and_test(&dfa->num_pending))
		irq_work_queue(&dfa->work);
	else
		dma_fence_put(&dfa->base);
}

static bool
dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);
	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
	int i;

	for (i = 0; i < dfa->num_fences; ++i) {
		cb[i].array = dfa;
		dma_fence_get(&dfa->base);
		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
		    dma_fence_array_cb_func)) {
			dma_fence_put(&dfa->base);
			if (atomic_dec_and_test(&dfa->num_pending))
				return false;
		}
	}

	return true;
}

static bool dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);

	return atomic_read(&dfa->num_pending) <= 0;
}

static void dma_fence_array_release(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);
	int i;

	for (i = 0; i < dfa->num_fences; ++i)
		dma_fence_put(dfa->fences[i]);

	free(dfa->fences, M_DRM, 0);
	dma_fence_free(fence);
}

struct dma_fence_array *
dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
    unsigned seqno, bool signal_on_any)
{
	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
	    (num_fences * sizeof(struct dma_fence_array_cb)),
	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
	if (dfa == NULL)
		return NULL;

	mtx_init(&dfa->lock, IPL_TTY);
	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
	    context, seqno);
	init_irq_work(&dfa->work, irq_dma_fence_array_work);

	dfa->num_fences = num_fences;
	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
	dfa->fences = fences;

	return dfa;
}

const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
};

int
dmabuf_read(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
dmabuf_write(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

int
dmabuf_poll(struct file *fp, int events, struct proc *p)
{
	return (0);
}

int
dmabuf_kqfilter(struct file *fp, struct knote *kn)
{
	return (EINVAL);
}

int
dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;

	memset(st, 0, sizeof(*st));
	st->st_size = dmabuf->size;
	st->st_mode = S_IFIFO;	/* XXX */
	return (0);
}

int
dmabuf_close(struct file *fp, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;

	fp->f_data = NULL;
	KERNEL_LOCK();
	dmabuf->ops->release(dmabuf);
	KERNEL_UNLOCK();
	free(dmabuf, M_DRM, sizeof(struct dma_buf));
	return (0);
}

int
dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;
	off_t newoff;

	if (*offset != 0)
		return (EINVAL);

	switch (whence) {
	case SEEK_SET:
		newoff = 0;
		break;
	case SEEK_END:
		newoff = dmabuf->size;
		break;
	default:
		return (EINVAL);
	}
	fp->f_offset = *offset = newoff;
	return (0);
}

struct fileops dmabufops = {
	.fo_read	= dmabuf_read,
	.fo_write	= dmabuf_write,
	.fo_ioctl	= dmabuf_ioctl,
	.fo_poll	= dmabuf_poll,
	.fo_kqfilter	= dmabuf_kqfilter,
	.fo_stat	= dmabuf_stat,
	.fo_close	= dmabuf_close,
	.fo_seek	= dmabuf_seek,
};

struct dma_buf *
dma_buf_export(const struct dma_buf_export_info *info)
{
	struct proc *p = curproc;
	struct dma_buf *dmabuf;
	struct file *fp;

	fp = fnew(p);
	if (fp == NULL)
		return ERR_PTR(-ENFILE);
	fp->f_type = DTYPE_DMABUF;
	fp->f_ops = &dmabufops;
	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
	dmabuf->priv = info->priv;
	dmabuf->ops = info->ops;
	dmabuf->size = info->size;
	dmabuf->file = fp;
	fp->f_data = dmabuf;
	return dmabuf;
}

struct dma_buf *
dma_buf_get(int fd)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return ERR_PTR(-EBADF);

	if (fp->f_type != DTYPE_DMABUF) {
		FRELE(fp, p);
		return ERR_PTR(-EINVAL);
	}

	return fp->f_data;
}

void
dma_buf_put(struct dma_buf *dmabuf)
{
	KASSERT(dmabuf);
	KASSERT(dmabuf->file);

	FRELE(dmabuf->file, curproc);
}

int
dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp = dmabuf->file;
	int fd, cloexec, error;

	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;

	fdplock(fdp);
restart:
	if ((error = fdalloc(p, 0, &fd)) != 0) {
		if (error == ENOSPC) {
			fdexpand(p);
			goto restart;
		}
		fdpunlock(fdp);
		return -error;
	}

	fdinsert(fdp, fd, cloexec, fp);
	fdpunlock(fdp);

	return fd;
}

void
get_dma_buf(struct dma_buf *dmabuf)
{
	FREF(dmabuf->file);
}

enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *pdev)
{
	pci_chipset_tag_t	pc = pdev->pc;
	pcitag_t		tag = pdev->tag;
	int			pos;
	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
	pcireg_t		id;
	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
	int			bus, device, function;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return PCI_SPEED_UNKNOWN;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* we've been informed that VIA and ServerWorks don't make the cut */
	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
		return PCI_SPEED_UNKNOWN;

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);

	lnkcap &= 0x0f;
	lnkcap2 &= 0xfe;

	if (lnkcap2) { /* PCIE GEN 3.0 */
		if (lnkcap2 & 0x02)
			cap = PCIE_SPEED_2_5GT;
		if (lnkcap2 & 0x04)
			cap = PCIE_SPEED_5_0GT;
		if (lnkcap2 & 0x08)
			cap = PCIE_SPEED_8_0GT;
		if (lnkcap2 & 0x10)
			cap = PCIE_SPEED_16_0GT;
	} else {
		if (lnkcap & 0x01)
			cap = PCIE_SPEED_2_5GT;
		if (lnkcap & 0x02)
			cap = PCIE_SPEED_5_0GT;
	}

	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
	    lnkcap2);
	return cap;
}

enum pcie_link_width
pcie_get_width_cap(struct pci_dev *pdev)
{
	pci_chipset_tag_t	pc = pdev->pc;
	pcitag_t		tag = pdev->tag;
	int			pos;
	pcireg_t		lnkcap = 0;
	pcireg_t		id;
	int			bus, device, function;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return PCIE_LNK_WIDTH_UNKNOWN;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	pci_decompose_tag(pc, tag, &bus, &device, &function);

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);

	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);

	if (lnkcap)
		return (lnkcap & 0x3f0) >> 4;
	return PCIE_LNK_WIDTH_UNKNOWN;
}

int
default_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
    int sync, void *key)
{
	wakeup(wqe);
	if (wqe->proc)
		wake_up_process(wqe->proc);
	return 0;
}

int
autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
    int sync, void *key)
{
	default_wake_function(wqe, mode, sync, key);
	list_del_init(&wqe->entry);
	return 0;
}

struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);

int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
	int err;

	if (!test_bit(bit, word))
		return 0;

	mtx_enter(&wait_bit_mtx);
	while (test_bit(bit, word)) {
		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", 0);
		if (err) {
			mtx_leave(&wait_bit_mtx);
			return 1;
		}
	}
	mtx_leave(&wait_bit_mtx);
	return 0;
}

int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
{
	int err;

	if (!test_bit(bit, word))
		return 0;

	mtx_enter(&wait_bit_mtx);
	while (test_bit(bit, word)) {
		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
		if (err) {
			mtx_leave(&wait_bit_mtx);
			return 1;
		}
	}
	mtx_leave(&wait_bit_mtx);
	return 0;
}

void
wake_up_bit(void *word, int bit)
{
	mtx_enter(&wait_bit_mtx);
	wakeup(word);
	mtx_leave(&wait_bit_mtx);
}

struct workqueue_struct *system_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_long_wq;
struct taskq *taskletq;

void
drm_linux_init(void)
{
	if (system_wq == NULL) {
		system_wq = (struct workqueue_struct *)
		    taskq_create("drmwq", 1, IPL_HIGH, 0);
	}
	if (system_unbound_wq == NULL) {
		system_unbound_wq = (struct workqueue_struct *)
		    taskq_create("drmubwq", 1, IPL_HIGH, 0);
	}
	if (system_long_wq == NULL) {
		system_long_wq = (struct workqueue_struct *)
		    taskq_create("drmlwq", 1, IPL_HIGH, 0);
	}

	if (taskletq == NULL)
		taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
}

#define PCIE_ECAP_RESIZE_BAR	0x15
#define RBCAP0			0x04
#define RBCTRL0			0x08
#define RBCTRL_BARINDEX_MASK	0x07
#define RBCTRL_BARSIZE_MASK	0x1f00
#define RBCTRL_BARSIZE_SHIFT	8

/* size in MB is 1 << nsize */
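/* e.g. nsize = 8 requests a 256 MB BAR (1 << 8 MB) */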
int
pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
{
	pcireg_t	reg;
	uint32_t	offset, capid;

	KASSERT(bar == 0);

	offset = PCI_PCIE_ECAP;

	/* search PCI Express Extended Capabilities */
	do {
		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
		capid = PCI_PCIE_ECAP_ID(reg);
		if (capid == PCIE_ECAP_RESIZE_BAR)
			break;
		offset = PCI_PCIE_ECAP_NEXT(reg);
	} while (capid != 0);

	if (capid == 0) {
		printf("%s: could not find resize bar cap!\n", __func__);
		return -ENOTSUP;
	}

	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);

	if ((reg & (1 << (nsize + 4))) == 0) {
		printf("%s size not supported\n", __func__);
		return -ENOTSUP;
	}

	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
		printf("%s BAR index not 0\n", __func__);
		return -EINVAL;
	}

	reg &= ~RBCTRL_BARSIZE_MASK;
	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;

	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);

	return 0;
}