/*	$OpenBSD: drm_linux.c,v 1.20 2018/01/31 03:26:00 jsg Exp $	*/
/*
 * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
 * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <dev/pci/drm/drmP.h>
#include <dev/pci/ppbreg.h>
#include <sys/event.h>

/*
 * flush_barrier() is queued as a task behind the work being flushed;
 * once the taskq runs it, it sets the caller's flag and wakes the
 * caller, which sleeps until that happens.
 */
void
flush_barrier(void *arg)
{
	int *barrier = arg;

	*barrier = 1;
	wakeup(barrier);
}

void
flush_workqueue(struct workqueue_struct *wq)
{
	struct sleep_state sls;
	struct task task;
	int barrier = 0;

	if (cold)
		return;

	task_set(&task, flush_barrier, &barrier);
	task_add((struct taskq *)wq, &task);
	while (!barrier) {
		sleep_setup(&sls, &barrier, PWAIT, "flwqbar");
		sleep_finish(&sls, !barrier);
	}
}

void
flush_work(struct work_struct *work)
{
	struct sleep_state sls;
	struct task task;
	int barrier = 0;

	if (cold)
		return;

	task_set(&task, flush_barrier, &barrier);
	task_add(work->tq, &task);
	while (!barrier) {
		sleep_setup(&sls, &barrier, PWAIT, "flwkbar");
		sleep_finish(&sls, !barrier);
	}
}

void
flush_delayed_work(struct delayed_work *dwork)
{
	struct sleep_state sls;
	struct task task;
	int barrier = 0;

	if (cold)
		return;

	while (timeout_pending(&dwork->to))
		tsleep(&barrier, PWAIT, "fldwto", 1);

	task_set(&task, flush_barrier, &barrier);
	task_add(dwork->tq, &task);
	while (!barrier) {
		sleep_setup(&sls, &barrier, PWAIT, "fldwbar");
		sleep_finish(&sls, !barrier);
	}
}

struct timespec
ns_to_timespec(const int64_t nsec)
{
	struct timespec ts;
	int32_t rem;

	if (nsec == 0) {
		ts.tv_sec = 0;
		ts.tv_nsec = 0;
		return (ts);
	}

	ts.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		ts.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	ts.tv_nsec = rem;
	return (ts);
}

int64_t
timeval_to_ns(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
		tv->tv_usec * NSEC_PER_USEC;
}

struct timeval
ns_to_timeval(const int64_t nsec)
{
	struct timeval tv;
	int32_t rem;

	if (nsec == 0) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		return (tv);
	}

	tv.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		tv.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	tv.tv_usec = rem / 1000;
	return (tv);
}

int64_t
timeval_to_us(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * 1000000) + tv->tv_usec;
}

extern char *hw_vendor, *hw_prod, *hw_ver;

bool
dmi_match(int slot, const char *str)
{
	switch (slot) {
	case DMI_SYS_VENDOR:
	case DMI_BOARD_VENDOR:
		if (hw_vendor != NULL &&
		    !strcmp(hw_vendor, str))
			return true;
		break;
	case DMI_PRODUCT_NAME:
	case DMI_BOARD_NAME:
		if (hw_prod != NULL &&
		    !strcmp(hw_prod, str))
			return true;
		break;
	case DMI_PRODUCT_VERSION:
	case DMI_BOARD_VERSION:
		if (hw_ver != NULL &&
		    !strcmp(hw_ver, str))
			return true;
		break;
	case DMI_NONE:
	default:
		return false;
	}

	return false;
}

static bool
dmi_found(const struct dmi_system_id *dsi)
{
	int i, slot;

	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		if (slot == DMI_NONE)
			break;
		if (!dmi_match(slot, dsi->matches[i].substr))
			return false;
	}

	return true;
}

int
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	int num = 0;

	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
		if (dmi_found(dsi)) {
			num++;
			if (dsi->callback && dsi->callback(dsi))
				break;
		}
	}
	return (num);
}

struct vm_page *
alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	struct pglist mlist;

	if (gfp_mask & M_CANFAIL)
		flags |= UVM_PLA_FAILOK;
	if (gfp_mask & M_ZERO)
		flags |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(PAGE_SIZE << order, dma_constraint.ucr_low,
	    dma_constraint.ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
		return NULL;
	return TAILQ_FIRST(&mlist);
}

void
__free_pages(struct vm_page *page, unsigned int order)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < (1 << order); i++)
		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
	uvm_pglistfree(&mlist);
}

void *
kmap(struct vm_page *pg)
{
	vaddr_t va;

#if defined (__HAVE_PMAP_DIRECT)
	va = pmap_map_direct(pg);
#else
	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
#endif
	return (void *)va;
}

void
kunmap(void *addr)
{
	vaddr_t va = (vaddr_t)addr;

#if defined (__HAVE_PMAP_DIRECT)
	pmap_unmap_direct(va);
#else
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
#endif
}

void *
vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
     pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
	if (va == 0)
		return NULL;
	for (i = 0; i < npages; i++) {
		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

void
vunmap(void *addr, size_t size)
{
	vaddr_t va = (vaddr_t)addr;

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size);
}

void
print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
    int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
{
	const uint8_t *cbuf = buf;
	int i;

	for (i = 0; i < len; i++) {
		if ((i % rowsize) == 0)
			printf("%s", prefix_str);
		printf("%02x", cbuf[i]);
		if ((i % rowsize) == (rowsize - 1))
			printf("\n");
		else
			printf(" ");
	}
}

void *
memchr_inv(const void *s, int c, size_t n)
{
	if (n != 0) {
		const unsigned char *p = s;

		do {
			if (*p++ != (unsigned char)c)
				return ((void *)(p - 1));
		} while (--n != 0);
	}
	return (NULL);
}

int
panic_cmp(struct rb_node *a, struct rb_node *b)
{
	panic(__func__);
}

#undef RB_ROOT
#define RB_ROOT(head)	(head)->rbh_root

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

/*
 * This is a fairly minimal implementation of the Linux "idr" API.  It
 * probably isn't very efficient, and definitely isn't RCU safe.  The
 * pre-load buffer is global instead of per-cpu; we rely on the kernel
 * lock to make this work.  We do randomize our IDs in order to make
 * them harder to guess.
 */
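
/*
 * Illustrative sketch only (not compiled): roughly how drm code is
 * expected to drive this idr emulation.  The "obj" pointer and the
 * chosen id range are made up for the example.
 */
#if 0
	struct idr handles;
	int id;

	idr_init(&handles);
	idr_preload(GFP_KERNEL);
	id = idr_alloc(&handles, obj, 1, 0, GFP_KERNEL);	/* id >= 1, or -errno */
	if (id >= 0) {
		KASSERT(idr_find(&handles, id) == obj);
		idr_remove(&handles, id);
	}
	idr_destroy(&handles);
#endif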

int idr_cmp(struct idr_entry *, struct idr_entry *);
SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);

struct pool idr_pool;
struct idr_entry *idr_entry_cache;

void
idr_init(struct idr *idr)
{
	static int initialized;

	if (!initialized) {
		pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
		    "idrpl", NULL);
		initialized = 1;
	}
	SPLAY_INIT(&idr->tree);
}

void
idr_destroy(struct idr *idr)
{
	struct idr_entry *id;

	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
		SPLAY_REMOVE(idr_tree, &idr->tree, id);
		pool_put(&idr_pool, id);
	}
}

void
idr_preload(unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache == NULL)
		idr_entry_cache = pool_get(&idr_pool, flags);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end,
    unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	struct idr_entry *id;
	int begin;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache) {
		id = idr_entry_cache;
		idr_entry_cache = NULL;
	} else {
		id = pool_get(&idr_pool, flags);
		if (id == NULL)
			return -ENOMEM;
	}

	if (end <= 0)
		end = INT_MAX;

#ifdef notyet
	id->id = begin = start + arc4random_uniform(end - start);
#else
	id->id = begin = start;
#endif
	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
		if (++id->id == end)
			id->id = start;
		if (id->id == begin) {
			pool_put(&idr_pool, id);
			return -ENOSPC;
		}
	}
	id->ptr = ptr;
	return id->id;
}

void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_entry find, *res;
	void *old;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return ERR_PTR(-ENOENT);
	old = res->ptr;
	res->ptr = ptr;
	return old;
}

void
idr_remove(struct idr *idr, int id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res) {
		SPLAY_REMOVE(idr_tree, &idr->tree, res);
		pool_put(&idr_pool, res);
	}
}

void *
idr_find(struct idr *idr, int id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return NULL;
	return res->ptr;
}

void *
idr_get_next(struct idr *idr, int *id)
{
	struct idr_entry find, *res;

	/*
	 * idr_find() returns the stored pointer, not the tree entry,
	 * so look the entry up directly before walking to its successor.
	 */
	find.id = *id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		res = SPLAY_MIN(idr_tree, &idr->tree);
	else
		res = SPLAY_NEXT(idr_tree, &idr->tree, res);
	if (res == NULL)
		return NULL;
	*id = res->id;
	return res->ptr;
}

int
idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
{
	struct idr_entry *id;
	int ret;

	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
		ret = func(id->id, id->ptr, data);
		if (ret)
			return ret;
	}

	return 0;
}

int
idr_cmp(struct idr_entry *a, struct idr_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);

void
ida_init(struct ida *ida)
{
	ida->counter = 0;
}

void
ida_destroy(struct ida *ida)
{
}

void
ida_remove(struct ida *ida, int id)
{
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    int flags)
{
	if (end <= 0)
		end = INT_MAX;

	if (start > ida->counter)
		ida->counter = start;

	if (ida->counter >= end)
		return -ENOSPC;

	return ida->counter++;
}

int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
	    M_DRM, gfp_mask);
	if (table->sgl == NULL)
		return -ENOMEM;
	table->nents = table->orig_nents = nents;
	return 0;
}

void
sg_free_table(struct sg_table *table)
{
	free(table->sgl, M_DRM,
	    table->orig_nents * sizeof(struct scatterlist));
}

size_t
sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen)
{
	panic("%s", __func__);
}

int
i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	void *cmd = NULL;
	int cmdlen = 0;
	int err, ret = 0;
	int op;

	iic_acquire_bus(&adap->ic, 0);

	while (num > 2) {
		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
		    msgs->buf, msgs->len, 0);
		if (err) {
			ret = -err;
			goto fail;
		}
		msgs++;
		num--;
		ret++;
	}

	if (num > 1) {
		cmd = msgs->buf;
		cmdlen = msgs->len;
		msgs++;
		num--;
		ret++;
	}

	op = (msgs->flags & I2C_M_RD) ?
	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
	    msgs->buf, msgs->len, 0);
	if (err) {
		ret = -err;
		goto fail;
	}
	msgs++;
	ret++;

fail:
	iic_release_bus(&adap->ic, 0);

	return ret;
}

int
i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	if (adap->algo)
		return adap->algo->master_xfer(adap, msgs, num);

	return i2c_master_xfer(adap, msgs, num);
}

int
i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct i2c_algo_bit_data *algo = adap->algo_data;
	struct i2c_adapter bb;

	memset(&bb, 0, sizeof(bb));
	bb.ic = algo->ic;
	bb.retries = adap->retries;
	return i2c_master_xfer(&bb, msgs, num);
}

uint32_t
i2c_bb_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

struct i2c_algorithm i2c_bit_algo = {
	.master_xfer = i2c_bb_master_xfer,
	.functionality = i2c_bb_functionality
};

int
i2c_bit_add_bus(struct i2c_adapter *adap)
{
	adap->algo = &i2c_bit_algo;
	adap->retries = 3;

	return 0;
}

#if defined(__amd64__) || defined(__i386__)

/*
 * This is a minimal implementation of the Linux vga_get/vga_put
 * interface.  In all likelihood, it will only work for inteldrm(4) as
 * it assumes that if there is another active VGA device in the
 * system, it is sitting behind a PCI bridge.
 */
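
/*
 * Illustrative sketch only (not compiled): the expected caller
 * pattern.  "pdev" stands in for the drm device's struct pci_dev and
 * VGA_RSRC_LEGACY_IO is the Linux resource name; this shim ignores
 * the rsrc argument entirely.
 */
#if 0
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	/* ... touch legacy VGA resources ... */
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
#endif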

extern int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);

pcitag_t vga_bridge_tag;
int vga_bridge_disabled;

int
vga_disable_bridge(struct pci_attach_args *pa)
{
	pcireg_t bhlc, bc;

	if (pa->pa_domain != 0)
		return 0;

	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return 0;

	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
	if ((bc & PPB_BC_VGA_ENABLE) == 0)
		return 0;
	bc &= ~PPB_BC_VGA_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_tag = pa->pa_tag;
	vga_bridge_disabled = 1;

	return 1;
}

void
vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
{
	KASSERT(pdev->pci->sc_bridgetag == NULL);
	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
}

void
vga_put(struct pci_dev *pdev, int rsrc)
{
	pcireg_t bc;

	if (!vga_bridge_disabled)
		return;

	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
	bc |= PPB_BC_VGA_ENABLE;
	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_disabled = 0;
}

#endif

/*
 * ACPI types and interfaces.
 */

#if defined(__amd64__) || defined(__i386__)
#include "acpi.h"
#endif

#if NACPI > 0

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>

acpi_status
acpi_get_table_with_size(const char *sig, int instance,
    struct acpi_table_header **hdr, acpi_size *size)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpi_q *entry;

	KASSERT(instance == 1);

	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
			*hdr = entry->q_table;
			*size = (*hdr)->length;
			return 0;
		}
	}

	return AE_NOT_FOUND;
}

#endif

void
backlight_do_update_status(void *arg)
{
	backlight_update_status(arg);
}

struct backlight_device *
backlight_device_register(const char *name, void *kdev, void *data,
    const struct backlight_ops *ops, struct backlight_properties *props)
{
	struct backlight_device *bd;

	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
	bd->ops = ops;
	bd->props = *props;
	bd->data = data;

	task_set(&bd->task, backlight_do_update_status, bd);

	return bd;
}

void
backlight_device_unregister(struct backlight_device *bd)
{
	free(bd, M_DRM, sizeof(*bd));
}

void
backlight_schedule_update_status(struct backlight_device *bd)
{
	task_add(systq, &bd->task);
}

void
drm_sysfs_hotplug_event(struct drm_device *dev)
{
	KNOTE(&dev->note, NOTE_CHANGE);
}
794