/*	$OpenBSD: drm_linux.c,v 1.15 2017/07/12 20:12:19 kettenis Exp $	*/
/*
 * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
 * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <dev/pci/drm/drmP.h>
#include <dev/pci/ppbreg.h>

void
flush_barrier(void *arg)
{
	int *barrier = arg;

	*barrier = 1;
	wakeup(barrier);
}

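/*
 * The flush_* functions below all rely on the same trick: queue a
 * flush_barrier() task on the relevant taskq and sleep until it has
 * run.  Assuming the taskq executes its tasks in queue order (true for
 * the single-threaded taskqs used here), the barrier having run means
 * all previously queued work has completed.  The pattern, in outline:
 *
 *	task_set(&task, flush_barrier, &barrier);
 *	task_add(tq, &task);
 *	while (!barrier)
 *		sleep until flush_barrier() sets it and wakes us up
 */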
void
flush_workqueue(struct workqueue_struct *wq)
{
	struct sleep_state sls;
	struct task task;
	int barrier = 0;

	if (cold)
		return;

	task_set(&task, flush_barrier, &barrier);
	task_add((struct taskq *)wq, &task);
	while (!barrier) {
		sleep_setup(&sls, &barrier, PWAIT, "flwqbar");
		sleep_finish(&sls, !barrier);
	}
}

void
flush_work(struct work_struct *work)
{
	struct sleep_state sls;
	struct task task;
	int barrier = 0;

	if (cold)
		return;

	task_set(&task, flush_barrier, &barrier);
	task_add(work->tq, &task);
	while (!barrier) {
		sleep_setup(&sls, &barrier, PWAIT, "flwkbar");
		sleep_finish(&sls, !barrier);
	}
}

void
flush_delayed_work(struct delayed_work *dwork)
{
	struct sleep_state sls;
	struct task task;
	int barrier = 0;

	if (cold)
		return;

	while (timeout_pending(&dwork->to))
		tsleep(&barrier, PWAIT, "fldwto", 1);

	task_set(&task, flush_barrier, &barrier);
	task_add(dwork->tq, &task);
	while (!barrier) {
		sleep_setup(&sls, &barrier, PWAIT, "fldwbar");
		sleep_finish(&sls, !barrier);
	}
}

struct timespec
ns_to_timespec(const int64_t nsec)
{
	struct timespec ts;
	int32_t rem;

	if (nsec == 0) {
		ts.tv_sec = 0;
		ts.tv_nsec = 0;
		return (ts);
	}

	ts.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		ts.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	ts.tv_nsec = rem;
	return (ts);
}
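
/*
 * Worked example of the negative-remainder handling above (purely
 * illustrative): ns_to_timespec(-1500000000) first yields tv_sec = -1
 * and rem = -500000000 (C division truncates toward zero), which is
 * then normalized to tv_sec = -2, tv_nsec = 500000000, i.e. -1.5
 * seconds with a non-negative nanosecond field.
 */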

int64_t
timeval_to_ns(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
		tv->tv_usec * NSEC_PER_USEC;
}

struct timeval
ns_to_timeval(const int64_t nsec)
{
	struct timeval tv;
	int32_t rem;

	if (nsec == 0) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		return (tv);
	}

	tv.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		tv.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	tv.tv_usec = rem / 1000;
	return (tv);
}

int64_t
timeval_to_us(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * 1000000) + tv->tv_usec;
}

extern char *hw_vendor, *hw_prod;

static bool
dmi_found(const struct dmi_system_id *dsi)
{
	int i, slot;

	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		switch (slot) {
		case DMI_NONE:
			break;
		case DMI_SYS_VENDOR:
		case DMI_BOARD_VENDOR:
			if (hw_vendor != NULL &&
			    !strcmp(hw_vendor, dsi->matches[i].substr))
				break;
			else
				return false;
		case DMI_PRODUCT_NAME:
		case DMI_BOARD_NAME:
			if (hw_prod != NULL &&
			    !strcmp(hw_prod, dsi->matches[i].substr))
				break;
			else
				return false;
		default:
			return false;
		}
	}

	return true;
}

int
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	int num = 0;

	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi)) {
			num++;
			if (dsi->callback && dsi->callback(dsi))
				break;
		}
	}
	return (num);
}
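
/*
 * Hypothetical usage sketch (not taken from a real driver): callers
 * pass dmi_check_system() a table terminated by an empty entry, filled
 * in with Linux-style DMI_MATCH() initializers where available, e.g.
 *
 *	static const struct dmi_system_id quirks[] = {
 *		{
 *			.callback = some_quirk_callback,
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
 *				DMI_MATCH(DMI_PRODUCT_NAME, "Some Model"),
 *			},
 *		},
 *		{ }
 *	};
 *
 * dmi_found() compares DMI_SYS_VENDOR/DMI_BOARD_VENDOR slots against
 * hw_vendor and DMI_PRODUCT_NAME/DMI_BOARD_NAME slots against hw_prod,
 * requiring exact string matches.
 */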

struct vm_page *
alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	struct pglist mlist;

	if (gfp_mask & M_CANFAIL)
		flags |= UVM_PLA_FAILOK;
	if (gfp_mask & M_ZERO)
		flags |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(PAGE_SIZE << order, 0, -1, PAGE_SIZE, 0,
	    &mlist, 1, flags))
		return NULL;
	return TAILQ_FIRST(&mlist);
}

void
__free_pages(struct vm_page *page, unsigned int order)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < (1 << order); i++)
		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
	uvm_pglistfree(&mlist);
}
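
/*
 * As in Linux, "order" is a power of two: alloc_pages() asks
 * uvm_pglistalloc() for 2^order physically contiguous pages and returns
 * the first struct vm_page of the run; __free_pages() relies on the
 * pages being contiguous in the vm_page array when it rebuilds the
 * pglist to free them.
 */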

void *
kmap(struct vm_page *pg)
{
	vaddr_t va;

#if defined (__HAVE_PMAP_DIRECT)
	va = pmap_map_direct(pg);
#else
	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
#endif
	return (void *)va;
}

void
kunmap(void *addr)
{
	vaddr_t va = (vaddr_t)addr;

#if defined (__HAVE_PMAP_DIRECT)
	pmap_unmap_direct(va);
#else
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
#endif
}
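
/*
 * kmap()/kunmap() map a single page: through the physical direct map
 * where the architecture provides one, otherwise through a temporary
 * wired mapping in phys_map that kunmap() tears down again.
 */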

void *
vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
     pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
	if (va == 0)
		return NULL;
	for (i = 0; i < npages; i++) {
		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

void
vunmap(void *addr, size_t size)
{
	vaddr_t va = (vaddr_t)addr;

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size);
}
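
/*
 * Note that vmap() or's the pgprot_t bits into the physical address
 * passed to pmap_enter(); this is how cacheability hints such as
 * PMAP_NOCACHE are expected to reach the pmap here, though that is an
 * interpretation of the code above rather than a guarantee for every
 * architecture.
 */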

void
print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
    int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
{
	const uint8_t *cbuf = buf;
	int i;

	for (i = 0; i < len; i++) {
		if ((i % rowsize) == 0)
			printf("%s", prefix_str);
		printf("%02x", cbuf[i]);
		if ((i % rowsize) == (rowsize - 1))
			printf("\n");
		else
			printf(" ");
	}
}

void *
memchr_inv(const void *s, int c, size_t n)
{
	if (n != 0) {
		const unsigned char *p = s;

		do {
			if (*p++ != (unsigned char)c)
				return ((void *)(p - 1));
		} while (--n != 0);
	}
	return (NULL);
}
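
/*
 * memchr_inv() is the inverse of memchr(3): it returns a pointer to the
 * first byte that does not equal c, or NULL if all n bytes match.  For
 * example, memchr_inv(buf, 0, len) != NULL means buf is not entirely
 * zero-filled.
 */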

int
panic_cmp(struct rb_node *a, struct rb_node *b)
{
	panic(__func__);
}

#undef RB_ROOT
#define RB_ROOT(head)	(head)->rbh_root

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

/*
 * This is a fairly minimal implementation of the Linux "idr" API.  It
 * probably isn't very efficient, and definitely isn't RCU safe.  The
 * pre-load buffer is global instead of per-cpu; we rely on the kernel
 * lock to make this work.  We'd like to randomize our IDs to make them
 * harder to guess, but that is still disabled (see the "notyet" block
 * in idr_alloc()).
 */
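
/*
 * Sketch of the expected calling pattern (illustrative only):
 *
 *	struct idr idr;
 *	int id;
 *
 *	idr_init(&idr);
 *	id = idr_alloc(&idr, ptr, 1, 0, GFP_KERNEL);
 *	...
 *	ptr = idr_find(&idr, id);
 *	idr_remove(&idr, id);
 *	idr_destroy(&idr);
 *
 * An "end" argument <= 0 means INT_MAX.  idr_alloc() walks forward
 * (wrapping around at "end") from its starting id until SPLAY_INSERT()
 * succeeds, and returns -ENOSPC only after a full cycle through
 * [start, end).
 */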

int idr_cmp(struct idr_entry *, struct idr_entry *);
SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);

struct pool idr_pool;
struct idr_entry *idr_entry_cache;

void
idr_init(struct idr *idr)
{
	static int initialized;

	if (!initialized) {
		pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
		    "idrpl", NULL);
		initialized = 1;
	}
	SPLAY_INIT(&idr->tree);
}

void
idr_destroy(struct idr *idr)
{
	struct idr_entry *id;

	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
		SPLAY_REMOVE(idr_tree, &idr->tree, id);
		pool_put(&idr_pool, id);
	}
}

void
idr_preload(unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache == NULL)
		idr_entry_cache = pool_get(&idr_pool, flags);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end,
    unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	struct idr_entry *id;
	int begin;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache) {
		id = idr_entry_cache;
		idr_entry_cache = NULL;
	} else {
		id = pool_get(&idr_pool, flags);
		if (id == NULL)
			return -ENOMEM;
	}

	if (end <= 0)
		end = INT_MAX;

#ifdef notyet
	id->id = begin = start + arc4random_uniform(end - start);
#else
	id->id = begin = start;
#endif
	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
		if (++id->id == end)
			id->id = start;
		if (id->id == begin) {
			pool_put(&idr_pool, id);
			return -ENOSPC;
		}
	}
	id->ptr = ptr;
	return id->id;
}

void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_entry find, *res;
	void *old;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return ERR_PTR(-ENOENT);
	old = res->ptr;
	res->ptr = ptr;
	return old;
}

void
idr_remove(struct idr *idr, int id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res) {
		SPLAY_REMOVE(idr_tree, &idr->tree, res);
		pool_put(&idr_pool, res);
	}
}

void *
idr_find(struct idr *idr, int id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return NULL;
	return res->ptr;
}

void *
idr_get_next(struct idr *idr, int *id)
{
	struct idr_entry find, *res;

	/*
	 * Look up the tree entry for *id itself; idr_find() returns the
	 * stored pointer, which is not what SPLAY_NEXT() needs.
	 */
	find.id = *id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		res = SPLAY_MIN(idr_tree, &idr->tree);
	else
		res = SPLAY_NEXT(idr_tree, &idr->tree, res);
	if (res == NULL)
		return NULL;
	*id = res->id;
	return res->ptr;
}

int
idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
{
	struct idr_entry *id;
	int ret;

	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
		ret = func(id->id, id->ptr, data);
		if (ret)
			return ret;
	}

	return 0;
}

int
idr_cmp(struct idr_entry *a, struct idr_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);

void
ida_init(struct ida *ida)
{
	ida->counter = 0;
}

void
ida_destroy(struct ida *ida)
{
}

void
ida_remove(struct ida *ida, int id)
{
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    int flags)
{
	if (end <= 0)
		end = INT_MAX;

	if (start > ida->counter)
		ida->counter = start;

	if (ida->counter >= end)
		return -ENOSPC;

	return ida->counter++;
}
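
/*
 * The ida implementation above is deliberately simple: ida_simple_get()
 * just hands out ida->counter and increments it, and ida_remove() is a
 * stub, so released ids are never reused; that appears to be enough for
 * the current drm callers.
 */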

int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
	    M_DRM, gfp_mask);
	if (table->sgl == NULL)
		return -ENOMEM;
	table->nents = table->orig_nents = nents;
	return 0;
}

void
sg_free_table(struct sg_table *table)
{
	free(table->sgl, M_DRM,
	    table->orig_nents * sizeof(struct scatterlist));
}

size_t
sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen)
{
	panic("%s", __func__);
}

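/*
 * i2c_transfer() glue: if the adapter provides a Linux-style algo, the
 * transfer is handed to its master_xfer hook.  Otherwise the messages
 * are replayed via iic_exec(9): every message except the last two is
 * issued as its own transfer without a stop condition; if two messages
 * remain they are combined into a single iic_exec() call (the first
 * supplies the command bytes, typically a register write followed by a
 * read), issued with a stop.  A lone final message simply gets a stop
 * of its own.
 */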
int
i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	void *cmd = NULL;
	int cmdlen = 0;
	int err, ret = 0;
	int op;

	if (adap->algo)
		return adap->algo->master_xfer(adap, msgs, num);

	iic_acquire_bus(&adap->ic, 0);

	while (num > 2) {
		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
		    msgs->buf, msgs->len, 0);
		if (err) {
			ret = -err;
			goto fail;
		}
		msgs++;
		num--;
		ret++;
	}

	if (num > 1) {
		cmd = msgs->buf;
		cmdlen = msgs->len;
		msgs++;
		num--;
		ret++;
	}

	op = (msgs->flags & I2C_M_RD) ?
	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
	    msgs->buf, msgs->len, 0);
	if (err) {
		ret = -err;
		goto fail;
	}
	msgs++;
	ret++;

fail:
	iic_release_bus(&adap->ic, 0);

	return ret;
}

#if defined(__amd64__) || defined(__i386__)

/*
 * This is a minimal implementation of the Linux vga_get/vga_put
 * interface.  In all likelihood, it will only work for inteldrm(4) as
 * it assumes that if there is another active VGA device in the
 * system, it is sitting behind a PCI bridge.
 */

extern int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);

pcitag_t vga_bridge_tag;
int vga_bridge_disabled;

int
vga_disable_bridge(struct pci_attach_args *pa)
{
	pcireg_t bhlc, bc;

	if (pa->pa_domain != 0)
		return 0;

	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return 0;

	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
	if ((bc & PPB_BC_VGA_ENABLE) == 0)
		return 0;
	bc &= ~PPB_BC_VGA_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_tag = pa->pa_tag;
	vga_bridge_disabled = 1;

	return 1;
}

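/*
 * vga_get_uninterruptible() below walks the PCI bus and lets
 * vga_disable_bridge() turn off VGA forwarding (PPB_BC_VGA_ENABLE) on a
 * domain 0 bridge that still has it enabled, recording its tag so that
 * vga_put() can re-enable forwarding later.
 */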
void
vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
{
	KASSERT(pdev->pci->sc_bridgetag == NULL);
	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
}

void
vga_put(struct pci_dev *pdev, int rsrc)
{
	pcireg_t bc;

	if (!vga_bridge_disabled)
		return;

	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
	bc |= PPB_BC_VGA_ENABLE;
	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_disabled = 0;
}

#endif

/*
 * ACPI types and interfaces.
 */

#if defined(__amd64__) || defined(__i386__)
#include "acpi.h"
#endif

#if NACPI > 0

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>

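/*
 * acpi_get_table_with_size() walks the list of tables collected by
 * acpi(4) at attach time and returns the first one whose header
 * signature matches sig; only instance 1 is supported.
 */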
acpi_status
acpi_get_table_with_size(const char *sig, int instance,
    struct acpi_table_header **hdr, acpi_size *size)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpi_q *entry;

	KASSERT(instance == 1);

	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
			*hdr = entry->q_table;
			*size = (*hdr)->length;
			return 0;
		}
	}

	return AE_NOT_FOUND;
}

#endif

void
backlight_do_update_status(void *arg)
{
	backlight_update_status(arg);
}

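/*
 * Minimal backlight shim: backlight_device_register() only records the
 * ops, properties and driver data, and backlight_schedule_update_status()
 * defers the actual backlight_update_status() call to a task on systq,
 * presumably so it may be requested from contexts that cannot sleep.
 */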
struct backlight_device *
backlight_device_register(const char *name, void *kdev, void *data,
    const struct backlight_ops *ops, struct backlight_properties *props)
{
	struct backlight_device *bd;

	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
	bd->ops = ops;
	bd->props = *props;
	bd->data = data;

	task_set(&bd->task, backlight_do_update_status, bd);

	return bd;
}

void
backlight_device_unregister(struct backlight_device *bd)
{
	free(bd, M_DRM, sizeof(*bd));
}

void
backlight_schedule_update_status(struct backlight_device *bd)
{
	task_add(systq, &bd->task);
}