xref: /openbsd-src/sys/dev/pci/drm/drm_linux.c (revision 4b70baf6e17fc8b27fc1f7fa7929335753fa94c3)
1 /*	$OpenBSD: drm_linux.c,v 1.34 2019/04/23 11:38:55 jsg Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <drm/drmP.h>
20 #include <dev/pci/ppbreg.h>
21 #include <sys/event.h>
22 #include <sys/filedesc.h>
23 #include <sys/stat.h>
24 #include <sys/unistd.h>
25 #include <linux/dma-buf.h>
26 #include <linux/mod_devicetable.h>
27 #include <linux/acpi.h>
28 #include <linux/pagevec.h>
29 
30 void
31 tasklet_run(void *arg)
32 {
33 	struct tasklet_struct *ts = arg;
34 
35 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
36 	if (tasklet_trylock(ts)) {
37 		if (!atomic_read(&ts->count))
38 			ts->func(ts->data);
39 		tasklet_unlock(ts);
40 	}
41 }
42 
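/*
 * Minimal emulation of the Linux set_current_state()/schedule() sleep
 * API.  A single global mutex and wait channel are shared by every
 * caller of this shim; sch_ident doubles as the "current state has
 * been set" marker.
 */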
43 struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
44 volatile struct proc *sch_proc;
45 volatile void *sch_ident;
46 int sch_priority;
47 
48 void
49 set_current_state(int state)
50 {
51 	if (sch_ident != curproc)
52 		mtx_enter(&sch_mtx);
53 	MUTEX_ASSERT_LOCKED(&sch_mtx);
54 	sch_ident = sch_proc = curproc;
55 	sch_priority = state;
56 }
57 
58 void
59 __set_current_state(int state)
60 {
61 	KASSERT(state == TASK_RUNNING);
62 	if (sch_ident == curproc) {
63 		MUTEX_ASSERT_LOCKED(&sch_mtx);
64 		sch_ident = NULL;
65 		mtx_leave(&sch_mtx);
66 	}
67 }
68 
69 void
70 schedule(void)
71 {
72 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
73 }
74 
75 long
76 schedule_timeout(long timeout)
77 {
78 	struct sleep_state sls;
79 	long deadline;
80 	int wait, spl;
81 
82 	MUTEX_ASSERT_LOCKED(&sch_mtx);
83 	KASSERT(!cold);
84 
85 	sleep_setup(&sls, sch_ident, sch_priority, "schto");
86 	if (timeout != MAX_SCHEDULE_TIMEOUT)
87 		sleep_setup_timeout(&sls, timeout);
88 	sleep_setup_signal(&sls, sch_priority);
89 
90 	wait = (sch_proc == curproc && timeout > 0);
91 
92 	spl = MUTEX_OLDIPL(&sch_mtx);
93 	MUTEX_OLDIPL(&sch_mtx) = splsched();
94 	mtx_leave(&sch_mtx);
95 
96 	if (timeout != MAX_SCHEDULE_TIMEOUT)
97 		deadline = ticks + timeout;
98 	sleep_finish_all(&sls, wait);
99 	if (timeout != MAX_SCHEDULE_TIMEOUT)
100 		timeout = deadline - ticks;
101 
102 	mtx_enter(&sch_mtx);
103 	MUTEX_OLDIPL(&sch_mtx) = spl;
104 	sch_ident = curproc;
105 
106 	return timeout > 0 ? timeout : 0;
107 }
108 
109 int
110 wake_up_process(struct proc *p)
111 {
112 	int s, r = 0;
113 
114 	SCHED_LOCK(s);
115 	atomic_cas_ptr(&sch_proc, p, NULL);
116 	if (p->p_wchan) {
117 		if (p->p_stat == SSLEEP) {
118 			setrunnable(p);
119 			r = 1;
120 		} else
121 			unsleep(p);
122 	}
123 	SCHED_UNLOCK(s);
124 
125 	return r;
126 }
127 
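/*
 * Workqueue flushing maps onto taskq_barrier(9): each of the helpers
 * below blocks until work already queued on the corresponding taskq
 * has run.  They do nothing while the kernel is still cold.
 */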
128 void
129 flush_workqueue(struct workqueue_struct *wq)
130 {
131 	if (cold)
132 		return;
133 
134 	taskq_barrier((struct taskq *)wq);
135 }
136 
137 bool
138 flush_work(struct work_struct *work)
139 {
140 	if (cold)
141 		return false;
142 
143 	taskq_barrier(work->tq);
144 	return false;
145 }
146 
147 bool
148 flush_delayed_work(struct delayed_work *dwork)
149 {
150 	bool ret = false;
151 
152 	if (cold)
153 		return false;
154 
155 	while (timeout_pending(&dwork->to)) {
156 		tsleep(dwork, PWAIT, "fldwto", 1);
157 		ret = true;
158 	}
159 
160 	taskq_barrier(dwork->tq ? dwork->tq : (struct taskq *)system_wq);
161 	return ret;
162 }
163 
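/*
 * Time conversion helpers.  Negative nanosecond counts are normalized
 * so the sub-second field is always non-negative; -1 ns, for example,
 * becomes tv_sec = -1, tv_nsec = 999999999.
 */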
164 struct timespec
165 ns_to_timespec(const int64_t nsec)
166 {
167 	struct timespec ts;
168 	int32_t rem;
169 
170 	if (nsec == 0) {
171 		ts.tv_sec = 0;
172 		ts.tv_nsec = 0;
173 		return (ts);
174 	}
175 
176 	ts.tv_sec = nsec / NSEC_PER_SEC;
177 	rem = nsec % NSEC_PER_SEC;
178 	if (rem < 0) {
179 		ts.tv_sec--;
180 		rem += NSEC_PER_SEC;
181 	}
182 	ts.tv_nsec = rem;
183 	return (ts);
184 }
185 
186 int64_t
187 timeval_to_ns(const struct timeval *tv)
188 {
189 	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
190 		tv->tv_usec * NSEC_PER_USEC;
191 }
192 
193 struct timeval
194 ns_to_timeval(const int64_t nsec)
195 {
196 	struct timeval tv;
197 	int32_t rem;
198 
199 	if (nsec == 0) {
200 		tv.tv_sec = 0;
201 		tv.tv_usec = 0;
202 		return (tv);
203 	}
204 
205 	tv.tv_sec = nsec / NSEC_PER_SEC;
206 	rem = nsec % NSEC_PER_SEC;
207 	if (rem < 0) {
208 		tv.tv_sec--;
209 		rem += NSEC_PER_SEC;
210 	}
211 	tv.tv_usec = rem / 1000;
212 	return (tv);
213 }
214 
215 int64_t
216 timeval_to_ms(const struct timeval *tv)
217 {
218 	return ((int64_t)tv->tv_sec * 1000) + (tv->tv_usec / 1000);
219 }
220 
221 int64_t
222 timeval_to_us(const struct timeval *tv)
223 {
224 	return ((int64_t)tv->tv_sec * 1000000) + tv->tv_usec;
225 }
226 
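/*
 * DMI matching is backed by the hw.vendor, hw.product and hw.version
 * strings gathered at boot, so only exact matches against those three
 * fields are possible.
 */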
227 extern char *hw_vendor, *hw_prod, *hw_ver;
228 
229 bool
230 dmi_match(int slot, const char *str)
231 {
232 	switch (slot) {
233 	case DMI_SYS_VENDOR:
234 	case DMI_BOARD_VENDOR:
235 		if (hw_vendor != NULL &&
236 		    !strcmp(hw_vendor, str))
237 			return true;
238 		break;
239 	case DMI_PRODUCT_NAME:
240 	case DMI_BOARD_NAME:
241 		if (hw_prod != NULL &&
242 		    !strcmp(hw_prod, str))
243 			return true;
244 		break;
245 	case DMI_PRODUCT_VERSION:
246 	case DMI_BOARD_VERSION:
247 		if (hw_ver != NULL &&
248 		    !strcmp(hw_ver, str))
249 			return true;
250 		break;
251 	case DMI_NONE:
252 	default:
253 		return false;
254 	}
255 
256 	return false;
257 }
258 
259 static bool
260 dmi_found(const struct dmi_system_id *dsi)
261 {
262 	int i, slot;
263 
264 	for (i = 0; i < nitems(dsi->matches); i++) {
265 		slot = dsi->matches[i].slot;
266 		if (slot == DMI_NONE)
267 			break;
268 		if (!dmi_match(slot, dsi->matches[i].substr))
269 			return false;
270 	}
271 
272 	return true;
273 }
274 
275 int
276 dmi_check_system(const struct dmi_system_id *sysid)
277 {
278 	const struct dmi_system_id *dsi;
279 	int num = 0;
280 
281 	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
282 		if (dmi_found(dsi)) {
283 			num++;
284 			if (dsi->callback && dsi->callback(dsi))
285 				break;
286 		}
287 	}
288 	return (num);
289 }
290 
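/*
 * Page allocation on top of uvm_pglistalloc(9): the Linux gfp flags
 * are mapped to their closest UVM_PLA_* equivalents and allocations
 * are constrained to the DMA-reachable range.
 */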
291 struct vm_page *
292 alloc_pages(unsigned int gfp_mask, unsigned int order)
293 {
294 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
295 	struct pglist mlist;
296 
297 	if (gfp_mask & M_CANFAIL)
298 		flags |= UVM_PLA_FAILOK;
299 	if (gfp_mask & M_ZERO)
300 		flags |= UVM_PLA_ZERO;
301 
302 	TAILQ_INIT(&mlist);
303 	if (uvm_pglistalloc(PAGE_SIZE << order, dma_constraint.ucr_low,
304 	    dma_constraint.ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
305 		return NULL;
306 	return TAILQ_FIRST(&mlist);
307 }
308 
309 void
310 __free_pages(struct vm_page *page, unsigned int order)
311 {
312 	struct pglist mlist;
313 	int i;
314 
315 	TAILQ_INIT(&mlist);
316 	for (i = 0; i < (1 << order); i++)
317 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
318 	uvm_pglistfree(&mlist);
319 }
320 
321 void
322 __pagevec_release(struct pagevec *pvec)
323 {
324 	struct pglist mlist;
325 	int i;
326 
327 	TAILQ_INIT(&mlist);
328 	for (i = 0; i < pvec->nr; i++)
329 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
330 	uvm_pglistfree(&mlist);
331 	pagevec_reinit(pvec);
332 }
333 
334 void *
335 kmap(struct vm_page *pg)
336 {
337 	vaddr_t va;
338 
339 #if defined (__HAVE_PMAP_DIRECT)
340 	va = pmap_map_direct(pg);
341 #else
342 	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
343 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
344 	pmap_update(pmap_kernel());
345 #endif
346 	return (void *)va;
347 }
348 
349 void
350 kunmap(void *addr)
351 {
352 	vaddr_t va = (vaddr_t)addr;
353 
354 #if defined (__HAVE_PMAP_DIRECT)
355 	pmap_unmap_direct(va);
356 #else
357 	pmap_kremove(va, PAGE_SIZE);
358 	pmap_update(pmap_kernel());
359 	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
360 #endif
361 }
362 
363 void *
364 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
365      pgprot_t prot)
366 {
367 	vaddr_t va;
368 	paddr_t pa;
369 	int i;
370 
371 	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
372 	if (va == 0)
373 		return NULL;
374 	for (i = 0; i < npages; i++) {
375 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
376 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
377 		    PROT_READ | PROT_WRITE,
378 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
379 		pmap_update(pmap_kernel());
380 	}
381 
382 	return (void *)va;
383 }
384 
385 void
386 vunmap(void *addr, size_t size)
387 {
388 	vaddr_t va = (vaddr_t)addr;
389 
390 	pmap_remove(pmap_kernel(), va, va + size);
391 	pmap_update(pmap_kernel());
392 	uvm_km_free(kernel_map, va, size);
393 }
394 
395 void
396 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
397     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
398 {
399 	const uint8_t *cbuf = buf;
400 	int i;
401 
402 	for (i = 0; i < len; i++) {
403 		if ((i % rowsize) == 0)
404 			printf("%s", prefix_str);
405 		printf("%02x", cbuf[i]);
406 		if ((i % rowsize) == (rowsize - 1))
407 			printf("\n");
408 		else
409 			printf(" ");
410 	}
411 }
412 
413 void *
414 memchr_inv(const void *s, int c, size_t n)
415 {
416 	if (n != 0) {
417 		const unsigned char *p = s;
418 
419 		do {
420 			if (*p++ != (unsigned char)c)
421 				return ((void *)(p - 1));
422 		} while (--n != 0);
423 	}
424 	return (NULL);
425 }
426 
427 int
428 panic_cmp(struct rb_node *a, struct rb_node *b)
429 {
430 	panic(__func__);
431 }
432 
433 #undef RB_ROOT
434 #define RB_ROOT(head)	(head)->rbh_root
435 
436 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
437 
438 /*
439  * This is a fairly minimal implementation of the Linux "idr" API.  It
440  * probably isn't very efficient, and definitely isn't RCU safe.  The
441  * pre-load buffer is global instead of per-cpu; we rely on the kernel
442  * lock to make this work.  We do randomize our IDs in order to make
443  * them harder to guess.
444  */
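/*
 * As a rough sketch of intended use (not code from this file;
 * example_idr and obj are placeholders, and GFP_KERNEL stands for
 * whatever gfp flags the caller passes), a caller pairs the functions
 * below like so:
 *
 *	struct idr example_idr;
 *	int id;
 *
 *	idr_init(&example_idr);
 *	id = idr_alloc(&example_idr, obj, 1, 0, GFP_KERNEL);
 *	if (id >= 0) {
 *		KASSERT(idr_find(&example_idr, id) == obj);
 *		idr_remove(&example_idr, id);
 *	}
 *	idr_destroy(&example_idr);
 */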
445 
446 int idr_cmp(struct idr_entry *, struct idr_entry *);
447 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
448 
449 struct pool idr_pool;
450 struct idr_entry *idr_entry_cache;
451 
452 void
453 idr_init(struct idr *idr)
454 {
455 	static int initialized;
456 
457 	if (!initialized) {
458 		pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
459 		    "idrpl", NULL);
460 		initialized = 1;
461 	}
462 	SPLAY_INIT(&idr->tree);
463 }
464 
465 void
466 idr_destroy(struct idr *idr)
467 {
468 	struct idr_entry *id;
469 
470 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
471 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
472 		pool_put(&idr_pool, id);
473 	}
474 }
475 
476 void
477 idr_preload(unsigned int gfp_mask)
478 {
479 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
480 
481 	KERNEL_ASSERT_LOCKED();
482 
483 	if (idr_entry_cache == NULL)
484 		idr_entry_cache = pool_get(&idr_pool, flags);
485 }
486 
487 int
488 idr_alloc(struct idr *idr, void *ptr, int start, int end,
489     unsigned int gfp_mask)
490 {
491 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
492 	struct idr_entry *id;
493 	int begin;
494 
495 	KERNEL_ASSERT_LOCKED();
496 
497 	if (idr_entry_cache) {
498 		id = idr_entry_cache;
499 		idr_entry_cache = NULL;
500 	} else {
501 		id = pool_get(&idr_pool, flags);
502 		if (id == NULL)
503 			return -ENOMEM;
504 	}
505 
506 	if (end <= 0)
507 		end = INT_MAX;
508 
509 #ifdef notyet
510 	id->id = begin = start + arc4random_uniform(end - start);
511 #else
512 	id->id = begin = start;
513 #endif
514 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
515 		if (++id->id == end)
516 			id->id = start;
517 		if (id->id == begin) {
518 			pool_put(&idr_pool, id);
519 			return -ENOSPC;
520 		}
521 	}
522 	id->ptr = ptr;
523 	return id->id;
524 }
525 
526 void *
527 idr_replace(struct idr *idr, void *ptr, int id)
528 {
529 	struct idr_entry find, *res;
530 	void *old;
531 
532 	find.id = id;
533 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
534 	if (res == NULL)
535 		return ERR_PTR(-ENOENT);
536 	old = res->ptr;
537 	res->ptr = ptr;
538 	return old;
539 }
540 
541 void *
542 idr_remove(struct idr *idr, int id)
543 {
544 	struct idr_entry find, *res;
545 	void *ptr = NULL;
546 
547 	find.id = id;
548 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
549 	if (res) {
550 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
551 		ptr = res->ptr;
552 		pool_put(&idr_pool, res);
553 	}
554 	return ptr;
555 }
556 
557 void *
558 idr_find(struct idr *idr, int id)
559 {
560 	struct idr_entry find, *res;
561 
562 	find.id = id;
563 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
564 	if (res == NULL)
565 		return NULL;
566 	return res->ptr;
567 }
568 
569 void *
570 idr_get_next(struct idr *idr, int *id)
571 {
572 	struct idr_entry *res;
573 
574 	res = idr_find(idr, *id);
575 	if (res == NULL)
576 		res = SPLAY_MIN(idr_tree, &idr->tree);
577 	else
578 		res = SPLAY_NEXT(idr_tree, &idr->tree, res);
579 	if (res == NULL)
580 		return NULL;
581 	*id = res->id;
582 	return res->ptr;
583 }
584 
585 int
586 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
587 {
588 	struct idr_entry *id;
589 	int ret;
590 
591 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
592 		ret = func(id->id, id->ptr, data);
593 		if (ret)
594 			return ret;
595 	}
596 
597 	return 0;
598 }
599 
600 int
601 idr_cmp(struct idr_entry *a, struct idr_entry *b)
602 {
603 	return (a->id < b->id ? -1 : a->id > b->id);
604 }
605 
606 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
607 
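/*
 * The ida implementation is a bare counter: ida_simple_get() hands out
 * monotonically increasing ids and nothing is ever reused or freed.
 */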
608 void
609 ida_init(struct ida *ida)
610 {
611 	ida->counter = 0;
612 }
613 
614 void
615 ida_destroy(struct ida *ida)
616 {
617 }
618 
619 void
620 ida_remove(struct ida *ida, int id)
621 {
622 }
623 
624 int
625 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
626     int flags)
627 {
628 	if (end <= 0)
629 		end = INT_MAX;
630 
631 	if (start > ida->counter)
632 		ida->counter = start;
633 
634 	if (ida->counter >= end)
635 		return -ENOSPC;
636 
637 	return ida->counter++;
638 }
639 
640 void
641 ida_simple_remove(struct ida *ida, int id)
642 {
643 }
644 
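/*
 * Scatter/gather tables are a flat array of struct scatterlist with no
 * chaining, so orig_nents is also the size used when freeing.
 */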
645 int
646 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
647 {
648 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
649 	    M_DRM, gfp_mask);
650 	if (table->sgl == NULL)
651 		return -ENOMEM;
652 	table->nents = table->orig_nents = nents;
653 	return 0;
654 }
655 
656 void
657 sg_free_table(struct sg_table *table)
658 {
659 	free(table->sgl, M_DRM,
660 	    table->orig_nents * sizeof(struct scatterlist));
661 }
662 
663 size_t
664 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
665     const void *buf, size_t buflen)
666 {
667 	panic("%s", __func__);
668 }
669 
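/*
 * i2c transfers on top of iic(4): all but the last two messages are
 * issued without a stop condition, the second-to-last message (if any)
 * supplies the command phase of the final transfer, and the final
 * message is issued with a stop.
 */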
670 int
671 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
672 {
673 	void *cmd = NULL;
674 	int cmdlen = 0;
675 	int err, ret = 0;
676 	int op;
677 
678 	iic_acquire_bus(&adap->ic, 0);
679 
680 	while (num > 2) {
681 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
682 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
683 		    msgs->buf, msgs->len, 0);
684 		if (err) {
685 			ret = -err;
686 			goto fail;
687 		}
688 		msgs++;
689 		num--;
690 		ret++;
691 	}
692 
693 	if (num > 1) {
694 		cmd = msgs->buf;
695 		cmdlen = msgs->len;
696 		msgs++;
697 		num--;
698 		ret++;
699 	}
700 
701 	op = (msgs->flags & I2C_M_RD) ?
702 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
703 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
704 	    msgs->buf, msgs->len, 0);
705 	if (err) {
706 		ret = -err;
707 		goto fail;
708 	}
709 	msgs++;
710 	ret++;
711 
712 fail:
713 	iic_release_bus(&adap->ic, 0);
714 
715 	return ret;
716 }
717 
718 int
719 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
720 {
721 	if (adap->algo)
722 		return adap->algo->master_xfer(adap, msgs, num);
723 
724 	return i2c_master_xfer(adap, msgs, num);
725 }
726 
727 int
728 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
729 {
730 	struct i2c_algo_bit_data *algo = adap->algo_data;
731 	struct i2c_adapter bb;
732 
733 	memset(&bb, 0, sizeof(bb));
734 	bb.ic = algo->ic;
735 	bb.retries = adap->retries;
736 	return i2c_master_xfer(&bb, msgs, num);
737 }
738 
739 uint32_t
740 i2c_bb_functionality(struct i2c_adapter *adap)
741 {
742 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
743 }
744 
745 struct i2c_algorithm i2c_bit_algo = {
746 	.master_xfer = i2c_bb_master_xfer,
747 	.functionality = i2c_bb_functionality
748 };
749 
750 int
751 i2c_bit_add_bus(struct i2c_adapter *adap)
752 {
753 	adap->algo = &i2c_bit_algo;
754 	adap->retries = 3;
755 
756 	return 0;
757 }
758 
759 #if defined(__amd64__) || defined(__i386__)
760 
761 /*
762  * This is a minimal implementation of the Linux vga_get/vga_put
763  * interface.  In all likelihood, it will only work for inteldrm(4) as
764  * it assumes that if there is another active VGA device in the
765  * system, it is sitting behind a PCI bridge.
766  */
767 
768 extern int pci_enumerate_bus(struct pci_softc *,
769     int (*)(struct pci_attach_args *), struct pci_attach_args *);
770 
771 pcitag_t vga_bridge_tag;
772 int vga_bridge_disabled;
773 
774 int
775 vga_disable_bridge(struct pci_attach_args *pa)
776 {
777 	pcireg_t bhlc, bc;
778 
779 	if (pa->pa_domain != 0)
780 		return 0;
781 
782 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
783 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
784 		return 0;
785 
786 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
787 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
788 		return 0;
789 	bc &= ~PPB_BC_VGA_ENABLE;
790 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
791 
792 	vga_bridge_tag = pa->pa_tag;
793 	vga_bridge_disabled = 1;
794 
795 	return 1;
796 }
797 
798 void
799 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
800 {
801 	KASSERT(pdev->pci->sc_bridgetag == NULL);
802 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
803 }
804 
805 void
806 vga_put(struct pci_dev *pdev, int rsrc)
807 {
808 	pcireg_t bc;
809 
810 	if (!vga_bridge_disabled)
811 		return;
812 
813 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
814 	bc |= PPB_BC_VGA_ENABLE;
815 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
816 
817 	vga_bridge_disabled = 0;
818 }
819 
820 #endif
821 
822 /*
823  * ACPI types and interfaces.
824  */
825 
826 #ifdef __HAVE_ACPI
827 #include "acpi.h"
828 #endif
829 
830 #if NACPI > 0
831 
832 #include <dev/acpi/acpireg.h>
833 #include <dev/acpi/acpivar.h>
834 
835 acpi_status
836 acpi_get_table(const char *sig, int instance,
837     struct acpi_table_header **hdr)
838 {
839 	struct acpi_softc *sc = acpi_softc;
840 	struct acpi_q *entry;
841 
842 	KASSERT(instance == 1);
843 
844 	if (sc == NULL)
845 		return AE_NOT_FOUND;
846 
847 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
848 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
849 			*hdr = entry->q_table;
850 			return 0;
851 		}
852 	}
853 
854 	return AE_NOT_FOUND;
855 }
856 
857 #endif
858 
859 void
860 backlight_do_update_status(void *arg)
861 {
862 	backlight_update_status(arg);
863 }
864 
865 struct backlight_device *
866 backlight_device_register(const char *name, void *kdev, void *data,
867     const struct backlight_ops *ops, struct backlight_properties *props)
868 {
869 	struct backlight_device *bd;
870 
871 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
872 	bd->ops = ops;
873 	bd->props = *props;
874 	bd->data = data;
875 
876 	task_set(&bd->task, backlight_do_update_status, bd);
877 
878 	return bd;
879 }
880 
881 void
882 backlight_device_unregister(struct backlight_device *bd)
883 {
884 	free(bd, M_DRM, sizeof(*bd));
885 }
886 
887 void
888 backlight_schedule_update_status(struct backlight_device *bd)
889 {
890 	task_add(systq, &bd->task);
891 }
892 
893 void
894 drm_sysfs_hotplug_event(struct drm_device *dev)
895 {
896 	KNOTE(&dev->note, NOTE_CHANGE);
897 }
898 
899 unsigned int drm_fence_count;
900 
901 unsigned int
902 dma_fence_context_alloc(unsigned int num)
903 {
904 	return __sync_add_and_fetch(&drm_fence_count, num) - num;
905 }
906 
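/*
 * dma-buf objects are exposed to userland as DTYPE_DMABUF files; only
 * stat, seek and close do real work, the remaining file operations are
 * stubs.
 */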
907 int
908 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
909 {
910 	return (ENXIO);
911 }
912 
913 int
914 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
915 {
916 	return (ENXIO);
917 }
918 
919 int
920 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
921 {
922 	return (ENOTTY);
923 }
924 
925 int
926 dmabuf_poll(struct file *fp, int events, struct proc *p)
927 {
928 	return (0);
929 }
930 
931 int
932 dmabuf_kqfilter(struct file *fp, struct knote *kn)
933 {
934 	return (EINVAL);
935 }
936 
937 int
938 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
939 {
940 	struct dma_buf *dmabuf = fp->f_data;
941 
942 	memset(st, 0, sizeof(*st));
943 	st->st_size = dmabuf->size;
944 	st->st_mode = S_IFIFO;	/* XXX */
945 	return (0);
946 }
947 
948 int
949 dmabuf_close(struct file *fp, struct proc *p)
950 {
951 	struct dma_buf *dmabuf = fp->f_data;
952 
953 	fp->f_data = NULL;
954 	KERNEL_LOCK();
955 	dmabuf->ops->release(dmabuf);
956 	KERNEL_UNLOCK();
957 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
958 	return (0);
959 }
960 
961 int
962 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
963 {
964 	struct dma_buf *dmabuf = fp->f_data;
965 	off_t newoff;
966 
967 	if (*offset != 0)
968 		return (EINVAL);
969 
970 	switch (whence) {
971 	case SEEK_SET:
972 		newoff = 0;
973 		break;
974 	case SEEK_END:
975 		newoff = dmabuf->size;
976 		break;
977 	default:
978 		return (EINVAL);
979 	}
980 	fp->f_offset = *offset = newoff;
981 	return (0);
982 }
983 
984 struct fileops dmabufops = {
985 	.fo_read	= dmabuf_read,
986 	.fo_write	= dmabuf_write,
987 	.fo_ioctl	= dmabuf_ioctl,
988 	.fo_poll	= dmabuf_poll,
989 	.fo_kqfilter	= dmabuf_kqfilter,
990 	.fo_stat	= dmabuf_stat,
991 	.fo_close	= dmabuf_close,
992 	.fo_seek	= dmabuf_seek,
993 };
994 
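/*
 * A rough usage sketch, not code from this file (buf, fd and info are
 * placeholders): an exporter fills in a struct dma_buf_export_info and
 * hands the result to userland as a file descriptor:
 *
 *	struct dma_buf *buf;
 *	int fd;
 *
 *	buf = dma_buf_export(&info);
 *	if (!IS_ERR(buf))
 *		fd = dma_buf_fd(buf, O_CLOEXEC);
 */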
995 struct dma_buf *
996 dma_buf_export(const struct dma_buf_export_info *info)
997 {
998 	struct proc *p = curproc;
999 	struct dma_buf *dmabuf;
1000 	struct file *fp;
1001 
1002 	fp = fnew(p);
1003 	if (fp == NULL)
1004 		return ERR_PTR(-ENFILE);
1005 	fp->f_type = DTYPE_DMABUF;
1006 	fp->f_ops = &dmabufops;
1007 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
1008 	dmabuf->priv = info->priv;
1009 	dmabuf->ops = info->ops;
1010 	dmabuf->size = info->size;
1011 	dmabuf->file = fp;
1012 	fp->f_data = dmabuf;
1013 	return dmabuf;
1014 }
1015 
1016 struct dma_buf *
1017 dma_buf_get(int fd)
1018 {
1019 	struct proc *p = curproc;
1020 	struct filedesc *fdp = p->p_fd;
1021 	struct file *fp;
1022 
1023 	if ((fp = fd_getfile(fdp, fd)) == NULL)
1024 		return ERR_PTR(-EBADF);
1025 
1026 	if (fp->f_type != DTYPE_DMABUF) {
1027 		FRELE(fp, p);
1028 		return ERR_PTR(-EINVAL);
1029 	}
1030 
1031 	return fp->f_data;
1032 }
1033 
1034 void
1035 dma_buf_put(struct dma_buf *dmabuf)
1036 {
1037 	KASSERT(dmabuf);
1038 	KASSERT(dmabuf->file);
1039 
1040 	FRELE(dmabuf->file, curproc);
1041 }
1042 
1043 int
1044 dma_buf_fd(struct dma_buf *dmabuf, int flags)
1045 {
1046 	struct proc *p = curproc;
1047 	struct filedesc *fdp = p->p_fd;
1048 	struct file *fp = dmabuf->file;
1049 	int fd, cloexec, error;
1050 
1051 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
1052 
1053 	fdplock(fdp);
1054 restart:
1055 	if ((error = fdalloc(p, 0, &fd)) != 0) {
1056 		if (error == ENOSPC) {
1057 			fdexpand(p);
1058 			goto restart;
1059 		}
1060 		fdpunlock(fdp);
1061 		return -error;
1062 	}
1063 
1064 	fdinsert(fdp, fd, cloexec, fp);
1065 	fdpunlock(fdp);
1066 
1067 	return fd;
1068 }
1069 
1070 void
1071 get_dma_buf(struct dma_buf *dmabuf)
1072 {
1073 	FREF(dmabuf->file);
1074 }
1075 
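/*
 * Derive the PCIe link speed cap from the Link Capabilities registers:
 * LCAP2 (PCIe 3.0 and later) carries the supported link speeds vector,
 * while older devices only advertise 2.5 or 5.0 GT/s in LCAP.
 */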
1076 enum pci_bus_speed
1077 pcie_get_speed_cap(struct pci_dev *pdev)
1078 {
1079 	pci_chipset_tag_t	pc = pdev->pc;
1080 	pcitag_t		tag = pdev->tag;
1081 	int			pos;
1082 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
1083 	pcireg_t		id;
1084 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
1085 	int			bus, device, function;
1086 
1087 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1088 	    &pos, NULL))
1089 		return PCI_SPEED_UNKNOWN;
1090 
1091 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1092 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1093 
1094 	/* we've been informed that VIA and ServerWorks devices don't make the cut */
1095 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
1096 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
1097 		return PCI_SPEED_UNKNOWN;
1098 
1099 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1100 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
1101 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
1102 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
1103 
1104 	lnkcap &= 0x0f;
1105 	lnkcap2 &= 0xfe;
1106 
1107 	if (lnkcap2) { /* PCIE GEN 3.0 */
1108 		if (lnkcap2 & 0x02)
1109 			cap = PCIE_SPEED_2_5GT;
1110 		if (lnkcap2 & 0x04)
1111 			cap = PCIE_SPEED_5_0GT;
1112 		if (lnkcap2 & 0x08)
1113 			cap = PCIE_SPEED_8_0GT;
1114 		if (lnkcap2 & 0x10)
1115 			cap = PCIE_SPEED_16_0GT;
1116 	} else {
1117 		if (lnkcap & 0x01)
1118 			cap = PCIE_SPEED_2_5GT;
1119 		if (lnkcap & 0x02)
1120 			cap = PCIE_SPEED_5_0GT;
1121 	}
1122 
1123 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
1124 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
1125 	    lnkcap2);
1126 	return cap;
1127 }
1128 
1129 enum pcie_link_width
1130 pcie_get_width_cap(struct pci_dev *pdev)
1131 {
1132 	pci_chipset_tag_t	pc = pdev->pc;
1133 	pcitag_t		tag = pdev->tag;
1134 	int			pos;
1135 	pcireg_t		lnkcap = 0;
1136 	pcireg_t		id;
1137 	int			bus, device, function;
1138 
1139 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
1140 	    &pos, NULL))
1141 		return PCIE_LNK_WIDTH_UNKNOWN;
1142 
1143 	id = pci_conf_read(pc, tag, PCI_ID_REG);
1144 	pci_decompose_tag(pc, tag, &bus, &device, &function);
1145 
1146 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
1147 
1148 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
1149 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
1150 
1151 	if (lnkcap)
1152 		return (lnkcap & 0x3f0) >> 4;
1153 	return PCIE_LNK_WIDTH_UNKNOWN;
1154 }
1155 
1156 int
1157 default_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1158     int sync, void *key)
1159 {
1160 	wakeup(wqe);
1161 	if (wqe->proc)
1162 		wake_up_process(wqe->proc);
1163 	return 0;
1164 }
1165 
1166 int
1167 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
1168     int sync, void *key)
1169 {
1170 	default_wake_function(wqe, mode, sync, key);
1171 	list_del_init(&wqe->entry);
1172 	return 0;
1173 }
1174 
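/*
 * wait_on_bit()/wake_up_bit() use a single global mutex and sleep on
 * the address of the word holding the bit, so waiters on other bits of
 * the same word may be woken early and simply re-check.
 */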
1175 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
1176 
1177 int
1178 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1179 {
1180 	int err;
1181 
1182 	if (!test_bit(bit, word))
1183 		return 0;
1184 
1185 	mtx_enter(&wait_bit_mtx);
1186 	while (test_bit(bit, word)) {
1187 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", 0);
1188 		if (err) {
1189 			mtx_leave(&wait_bit_mtx);
1190 			return 1;
1191 		}
1192 	}
1193 	mtx_leave(&wait_bit_mtx);
1194 	return 0;
1195 }
1196 
1197 int
1198 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
1199 {
1200 	int err;
1201 
1202 	if (!test_bit(bit, word))
1203 		return 0;
1204 
1205 	mtx_enter(&wait_bit_mtx);
1206 	while (test_bit(bit, word)) {
1207 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
1208 		if (err) {
1209 			mtx_leave(&wait_bit_mtx);
1210 			return 1;
1211 		}
1212 	}
1213 	mtx_leave(&wait_bit_mtx);
1214 	return 0;
1215 }
1216 
1217 void
1218 wake_up_bit(void *word, int bit)
1219 {
1220 	mtx_enter(&wait_bit_mtx);
1221 	wakeup(word);
1222 	mtx_leave(&wait_bit_mtx);
1223 }
1224 
1225 struct workqueue_struct *system_wq;
1226 struct workqueue_struct *system_unbound_wq;
1227 struct workqueue_struct *system_long_wq;
1228 struct taskq *taskletq;
1229 
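/*
 * drm_linux_init() creates the shared Linux-style workqueues and the
 * tasklet taskq if they do not exist yet; each is a single-threaded
 * taskq running at IPL_HIGH.
 */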
1230 void
1231 drm_linux_init(void)
1232 {
1233 	if (system_wq == NULL) {
1234 		system_wq = (struct workqueue_struct *)
1235 		    taskq_create("drmwq", 1, IPL_HIGH, 0);
1236 	}
1237 	if (system_unbound_wq == NULL) {
1238 		system_unbound_wq = (struct workqueue_struct *)
1239 		    taskq_create("drmubwq", 1, IPL_HIGH, 0);
1240 	}
1241 	if (system_long_wq == NULL) {
1242 		system_long_wq = (struct workqueue_struct *)
1243 		    taskq_create("drmlwq", 1, IPL_HIGH, 0);
1244 	}
1245 
1246 	if (taskletq == NULL)
1247 		taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
1248 }
1249 
1250 #define PCIE_ECAP_RESIZE_BAR	0x15
1251 #define RBCAP0			0x04
1252 #define RBCTRL0			0x08
1253 #define RBCTRL_BARINDEX_MASK	0x07
1254 #define RBCTRL_BARSIZE_MASK	0x1f00
1255 #define RBCTRL_BARSIZE_SHIFT	8
1256 
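/*
 * Resizable BAR: nsize encodes the requested size as a power of two in
 * MB, so nsize == 8 asks for a 256MB BAR and nsize == 14 for a 16GB
 * one.  Only BAR 0 is supported here.
 */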
1257 /* size in MB is 1 << nsize */
1258 int
1259 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
1260 {
1261 	pcireg_t	reg;
1262 	uint32_t	offset, capid;
1263 
1264 	KASSERT(bar == 0);
1265 
1266 	offset = PCI_PCIE_ECAP;
1267 
1268 	/* search PCI Express Extended Capabilities */
1269 	do {
1270 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
1271 		capid = PCI_PCIE_ECAP_ID(reg);
1272 		if (capid == PCIE_ECAP_RESIZE_BAR)
1273 			break;
1274 		offset = PCI_PCIE_ECAP_NEXT(reg);
1275 	} while (capid != 0);
1276 
1277 	if (capid == 0) {
1278 		printf("%s: could not find resize bar cap!\n", __func__);
1279 		return -ENOTSUP;
1280 	}
1281 
1282 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
1283 
1284 	if ((reg & (1 << (nsize + 4))) == 0) {
1285 		printf("%s: size not supported\n", __func__);
1286 		return -ENOTSUP;
1287 	}
1288 
1289 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
1290 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
1291 		printf("%s: BAR index not 0\n", __func__);
1292 		return -EINVAL;
1293 	}
1294 
1295 	reg &= ~RBCTRL_BARSIZE_MASK;
1296 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
1297 
1298 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
1299 
1300 	return 0;
1301 }
1302