/*	$OpenBSD: drm_linux.c,v 1.12 2016/09/15 02:00:17 dlg Exp $	*/
/*
 * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
 * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <dev/pci/drm/drmP.h>
#include <dev/pci/ppbreg.h>

struct timespec
ns_to_timespec(const int64_t nsec)
{
	struct timespec ts;
	int32_t rem;

	if (nsec == 0) {
		ts.tv_sec = 0;
		ts.tv_nsec = 0;
		return (ts);
	}

	ts.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		ts.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	ts.tv_nsec = rem;
	return (ts);
}
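
/*
 * Worked example (illustrative, not part of the original file): C
 * integer division truncates towards zero, so without the rem < 0
 * fixup ns_to_timespec(-1) would yield { 0, -1 }.  The fixup floors
 * the result instead, keeping tv_nsec in [0, NSEC_PER_SEC):
 *
 *	struct timespec ts = ns_to_timespec(-1);
 *	KASSERT(ts.tv_sec == -1 && ts.tv_nsec == 999999999);
 */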

int64_t
timeval_to_ns(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
		tv->tv_usec * NSEC_PER_USEC;
}

struct timeval
ns_to_timeval(const int64_t nsec)
{
	struct timeval tv;
	int32_t rem;

	if (nsec == 0) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		return (tv);
	}

	tv.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		tv.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	tv.tv_usec = rem / 1000;
	return (tv);
}
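
/*
 * Illustrative round trip (not part of the original file): tv_usec
 * only keeps rem / 1000, so converting back and forth truncates to
 * microsecond granularity:
 *
 *	struct timeval tv = ns_to_timeval(1500);
 *	KASSERT(tv.tv_sec == 0 && tv.tv_usec == 1);
 *	KASSERT(timeval_to_ns(&tv) == 1000);
 */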

extern char *hw_vendor, *hw_prod;

/*
 * Return true only if every populated match entry in the given table
 * entry agrees with the DMI vendor/product strings the kernel saved
 * at boot.
 */
static bool
dmi_found(const struct dmi_system_id *dsi)
{
	int i, slot;

	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		switch (slot) {
		case DMI_NONE:
			break;
		case DMI_SYS_VENDOR:
		case DMI_BOARD_VENDOR:
			if (hw_vendor != NULL &&
			    !strcmp(hw_vendor, dsi->matches[i].substr))
				break;
			else
				return false;
		case DMI_PRODUCT_NAME:
		case DMI_BOARD_NAME:
			if (hw_prod != NULL &&
			    !strcmp(hw_prod, dsi->matches[i].substr))
				break;
			else
				return false;
		default:
			return false;
		}
	}

	return true;
}

int
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	int num = 0;

	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi)) {
			num++;
			if (dsi->callback && dsi->callback(dsi))
				break;
		}
	}
	return (num);
}
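
/*
 * Usage sketch (hypothetical, not part of the original file): a quirk
 * table pairs DMI match entries with an optional callback; the table
 * is terminated by an entry whose first slot is 0.  Note that this
 * shim compares whole strings with strcmp(), whereas Linux matches
 * substrings.
 *
 *	static const struct dmi_system_id quirks[] = {
 *		{
 *			.callback = some_quirk_cb,
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "Vendor Inc."),
 *				DMI_MATCH(DMI_PRODUCT_NAME, "Model 1"),
 *			},
 *		},
 *		{ }
 *	};
 *
 *	if (dmi_check_system(quirks))
 *		printf("quirk applied\n");
 */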

struct vm_page *
alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	struct pglist mlist;

	if (gfp_mask & M_CANFAIL)
		flags |= UVM_PLA_FAILOK;

	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(PAGE_SIZE << order, 0, -1, PAGE_SIZE, 0,
	    &mlist, 1, flags))
		return NULL;
	return TAILQ_FIRST(&mlist);
}

void
__free_pages(struct vm_page *page, unsigned int order)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < (1 << order); i++)
		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
	uvm_pglistfree(&mlist);
}
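
/*
 * Usage sketch (hypothetical, not part of the original file; assumes
 * the usual GFP_KERNEL definition from the shim headers): the pair
 * above allocates and frees 1 << order physically contiguous pages,
 * here order 2, i.e. four pages; the caller passes the same order to
 * both sides.
 *
 *	struct vm_page *pg = alloc_pages(GFP_KERNEL, 2);
 *	if (pg != NULL)
 *		__free_pages(pg, 2);
 */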

void *
kmap(struct vm_page *pg)
{
	vaddr_t va;

#if defined (__HAVE_PMAP_DIRECT)
	va = pmap_map_direct(pg);
#else
	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
#endif
	return (void *)va;
}

void
kunmap(void *addr)
{
	vaddr_t va = (vaddr_t)addr;

#if defined (__HAVE_PMAP_DIRECT)
	pmap_unmap_direct(va);
#else
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
#endif
}
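
/*
 * Usage sketch (hypothetical, not part of the original file): kmap()
 * returns a kernel virtual address for one page; on __HAVE_PMAP_DIRECT
 * architectures that is just the direct map, elsewhere a transient
 * phys_map entry that kunmap() tears down again.
 *
 *	void *va = kmap(pg);
 *	memset(va, 0, PAGE_SIZE);
 *	kunmap(va);
 */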

void *
vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
     pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
	if (va == 0)
		return NULL;
	for (i = 0; i < npages; i++) {
		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

void
vunmap(void *addr, size_t size)
{
	vaddr_t va = (vaddr_t)addr;

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size);
}
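
/*
 * Usage sketch (hypothetical, not part of the original file; assumes
 * the shim's PAGE_KERNEL pgprot): vmap() builds one linear kernel
 * mapping over a scattered page array.  Unlike Linux, vunmap() here
 * needs the mapping size back from the caller.
 *
 *	void *va = vmap(pages, npages, 0, PAGE_KERNEL);
 *	if (va != NULL) {
 *		...
 *		vunmap(va, npages * PAGE_SIZE);
 *	}
 */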

/*
 * The Linux rbtree API expects callers to do their own comparisons
 * while walking the tree, so the comparison function RB_GENERATE()
 * wants should never actually be called; panic if it ever is.
 */
int
panic_cmp(struct rb_node *a, struct rb_node *b)
{
	panic(__func__);
}

#undef RB_ROOT
#define RB_ROOT(head)	(head)->rbh_root

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

/*
 * This is a fairly minimal implementation of the Linux "idr" API.  It
 * probably isn't very efficient, and definitely isn't RCU safe.  The
 * pre-load buffer is global instead of per-cpu; we rely on the kernel
 * lock to make this work.  We do randomize our IDs in order to make
 * them harder to guess.
 */

int idr_cmp(struct idr_entry *, struct idr_entry *);
SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);

struct pool idr_pool;
struct idr_entry *idr_entry_cache;

void
idr_init(struct idr *idr)
{
	static int initialized;

	if (!initialized) {
		pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
		    "idrpl", NULL);
		initialized = 1;
	}
	SPLAY_INIT(&idr->tree);
}

void
idr_destroy(struct idr *idr)
{
	struct idr_entry *id;

	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
		SPLAY_REMOVE(idr_tree, &idr->tree, id);
		pool_put(&idr_pool, id);
	}
}

void
idr_preload(unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache == NULL)
		idr_entry_cache = pool_get(&idr_pool, flags);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end,
    unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	struct idr_entry *id;
	int begin;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache) {
		id = idr_entry_cache;
		idr_entry_cache = NULL;
	} else {
		id = pool_get(&idr_pool, flags);
		if (id == NULL)
			return -ENOMEM;
	}

	if (end <= 0)
		end = INT_MAX;

	id->id = begin = start + arc4random_uniform(end - start);
	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
		if (++id->id == end)
			id->id = start;
		if (id->id == begin) {
			pool_put(&idr_pool, id);
			return -ENOSPC;
		}
	}
	id->ptr = ptr;
	return id->id;
}
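
/*
 * Behavioral note with an example (not part of the original file):
 * unlike Linux, which hands out the lowest free ID, idr_alloc() above
 * starts from a random point in [start, end) and walks forward,
 * wrapping at end; it only fails with -ENOSPC once it has come back
 * around to where it began.  An end <= 0 means "no upper limit", so
 * the call below draws a random ID from [1, INT_MAX):
 *
 *	id = idr_alloc(&idr, ptr, 1, 0, GFP_KERNEL);
 */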

void
idr_remove(struct idr *idr, int id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res) {
		SPLAY_REMOVE(idr_tree, &idr->tree, res);
		pool_put(&idr_pool, res);
	}
}

void *
idr_find(struct idr *idr, int id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return NULL;
	return res->ptr;
}

int
idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
{
	struct idr_entry *id;
	int ret;

	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
		ret = func(id->id, id->ptr, data);
		if (ret)
			return ret;
	}

	return 0;
}

int
idr_cmp(struct idr_entry *a, struct idr_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
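
/*
 * Usage sketch (hypothetical, not part of the original file):
 * idr_preload() parks one pre-allocated entry in the global cache so
 * that a following GFP_NOWAIT idr_alloc() cannot fail for lack of
 * memory, which is what the kernel-lock-based scheme above relies on.
 *
 *	struct idr ids;
 *	int id;
 *
 *	idr_init(&ids);
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(&ids, ptr, 1, 0, GFP_NOWAIT);
 *	...
 *	idr_remove(&ids, id);
 *	idr_destroy(&ids);
 */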

#if defined(__amd64__) || defined(__i386__)

/*
 * This is a minimal implementation of the Linux vga_get/vga_put
 * interface.  In all likelihood, it will only work for inteldrm(4) as
 * it assumes that if there is another active VGA device in the
 * system, it is sitting behind a PCI bridge.
 */

extern int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);

pcitag_t vga_bridge_tag;
int vga_bridge_disabled;

int
vga_disable_bridge(struct pci_attach_args *pa)
{
	pcireg_t bhlc, bc;

	if (pa->pa_domain != 0)
		return 0;

	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return 0;

	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
	if ((bc & PPB_BC_VGA_ENABLE) == 0)
		return 0;
	bc &= ~PPB_BC_VGA_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_tag = pa->pa_tag;
	vga_bridge_disabled = 1;

	return 1;
}

void
vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
{
	KASSERT(pdev->pci->sc_bridgetag == NULL);
	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
}

void
vga_put(struct pci_dev *pdev, int rsrc)
{
	pcireg_t bc;

	if (!vga_bridge_disabled)
		return;

	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
	bc |= PPB_BC_VGA_ENABLE;
	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_disabled = 0;
}
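
/*
 * Usage sketch (hypothetical, not part of the original file; assumes
 * the Linux-style VGA_RSRC_* constants from the shim headers):
 * inteldrm(4) brackets legacy VGA accesses with this pair.  The get
 * side walks domain 0 and disables VGA forwarding on the first bridge
 * that has it enabled; the put side re-enables it on that bridge.
 *
 *	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 *	... touch legacy VGA registers ...
 *	vga_put(pdev, VGA_RSRC_LEGACY_IO);
 */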

#endif

/*
 * ACPI types and interfaces.
 */

#if defined(__amd64__) || defined(__i386__)
#include "acpi.h"
#endif

#if NACPI > 0

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>

acpi_status
acpi_get_table_with_size(const char *sig, int instance,
    struct acpi_table_header **hdr, acpi_size *size)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpi_q *entry;

	KASSERT(instance == 1);

	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
			*hdr = entry->q_table;
			*size = (*hdr)->length;
			return 0;
		}
	}

	return AE_NOT_FOUND;
}
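
/*
 * Usage sketch (hypothetical, not part of the original file): tables
 * are looked up by their 4-byte signature; only the first instance is
 * supported, as the KASSERT above enforces.
 *
 *	struct acpi_table_header *hdr;
 *	acpi_size size;
 *
 *	if (acpi_get_table_with_size("MCFG", 1, &hdr, &size) == 0)
 *		printf("MCFG table, %lu bytes\n", (unsigned long)size);
 */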

#endif