xref: /openbsd-src/sys/dev/pci/pci.c (revision 7350f337b9e3eb4461d99580e625c7ef148d107c)
1 /*	$OpenBSD: pci.c,v 1.113 2019/06/08 10:27:02 dlg Exp $	*/
2 /*	$NetBSD: pci.c,v 1.31 1997/06/06 23:48:04 thorpej Exp $	*/
3 
4 /*
5  * Copyright (c) 1995, 1996 Christopher G. Demetriou.  All rights reserved.
6  * Copyright (c) 1994 Charles Hannum.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Charles Hannum.
19  * 4. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * PCI bus autoconfiguration.
36  */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 
43 #include <dev/pci/pcireg.h>
44 #include <dev/pci/pcivar.h>
45 #include <dev/pci/pcidevs.h>
46 #include <dev/pci/ppbreg.h>
47 
48 int pcimatch(struct device *, void *, void *);
49 void pciattach(struct device *, struct device *, void *);
50 int pcidetach(struct device *, int);
51 int pciactivate(struct device *, int);
52 void pci_suspend(struct pci_softc *);
53 void pci_powerdown(struct pci_softc *);
54 void pci_resume(struct pci_softc *);
55 
56 #define NMAPREG			((PCI_MAPREG_END - PCI_MAPREG_START) / \
57 				    sizeof(pcireg_t))
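/*
 * Per-device state kept by the bus driver itself: the configuration
 * registers saved across suspend/resume, the BAR size masks probed at
 * attach time, and whether the device currently decodes legacy VGA
 * cycles.
 */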
58 struct pci_dev {
59 	struct device *pd_dev;
60 	LIST_ENTRY(pci_dev) pd_next;
61 	pcitag_t pd_tag;        /* pci register tag */
62 	pcireg_t pd_csr;
63 	pcireg_t pd_bhlc;
64 	pcireg_t pd_int;
65 	pcireg_t pd_map[NMAPREG];
66 	pcireg_t pd_mask[NMAPREG];
67 	pcireg_t pd_msi_mc;
68 	pcireg_t pd_msi_ma;
69 	pcireg_t pd_msi_mau32;
70 	pcireg_t pd_msi_md;
71 	int pd_pmcsr_state;
72 	int pd_vga_decode;
73 };
74 
75 #ifdef APERTURE
76 extern int allowaperture;
77 #endif
78 
79 struct cfattach pci_ca = {
80 	sizeof(struct pci_softc), pcimatch, pciattach, pcidetach, pciactivate
81 };
82 
83 struct cfdriver pci_cd = {
84 	NULL, "pci", DV_DULL
85 };
86 
87 int	pci_ndomains;
88 
89 struct proc *pci_vga_proc;
90 struct pci_softc *pci_vga_pci;
91 pcitag_t pci_vga_tag;
92 
93 int	pci_dopm;
94 
95 int	pciprint(void *, const char *);
96 int	pcisubmatch(struct device *, void *, void *);
97 
98 #ifdef PCI_MACHDEP_ENUMERATE_BUS
99 #define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
100 #else
101 int pci_enumerate_bus(struct pci_softc *,
102     int (*)(struct pci_attach_args *), struct pci_attach_args *);
103 #endif
104 int	pci_reserve_resources(struct pci_attach_args *);
105 int	pci_primary_vga(struct pci_attach_args *);
106 
107 /*
108  * Important note about PCI-ISA bridges:
109  *
110  * Callbacks are used to configure these devices so that ISA/EISA bridges
111  * can attach their child busses after PCI configuration is done.
112  *
113  * This works because:
114  *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
115  *	(2) any ISA/EISA bridges must be attached to primary PCI
116  *	    busses (i.e. bus zero).
117  *
118  * That boils down to: there can only be one of these outstanding
119  * at a time, it is cleared when configuring PCI bus 0 before any
120  * subdevices have been found, and it is run after all subdevices
121  * of PCI bus 0 have been found.
122  *
123  * This is needed because there are some (legacy) PCI devices which
124  * can show up as ISA/EISA devices as well (the prime example of which
125  * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
126  * and the bridge is seen before the video board is, the board can show
127  * up as an ISA device, and that can (bogusly) complicate the PCI device's
128  * attach code, or make the PCI device not be properly attached at all.
129  *
130  * We use the generic config_defer() facility to achieve this.
131  */
132 
133 int
134 pcimatch(struct device *parent, void *match, void *aux)
135 {
136 	struct cfdata *cf = match;
137 	struct pcibus_attach_args *pba = aux;
138 
139 	if (strcmp(pba->pba_busname, cf->cf_driver->cd_name))
140 		return (0);
141 
142 	/* Check the locators */
143 	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
144 	    cf->pcibuscf_bus != pba->pba_bus)
145 		return (0);
146 
147 	/* sanity */
148 	if (pba->pba_bus < 0 || pba->pba_bus > 255)
149 		return (0);
150 
151 	/*
152 	 * XXX check other (hardware?) indicators
153 	 */
154 
155 	return (1);
156 }
157 
158 void
159 pciattach(struct device *parent, struct device *self, void *aux)
160 {
161 	struct pcibus_attach_args *pba = aux;
162 	struct pci_softc *sc = (struct pci_softc *)self;
163 
164 	pci_attach_hook(parent, self, pba);
165 
166 	printf("\n");
167 
168 	LIST_INIT(&sc->sc_devs);
169 
170 	sc->sc_iot = pba->pba_iot;
171 	sc->sc_memt = pba->pba_memt;
172 	sc->sc_dmat = pba->pba_dmat;
173 	sc->sc_pc = pba->pba_pc;
174 	sc->sc_flags = pba->pba_flags;
175 	sc->sc_ioex = pba->pba_ioex;
176 	sc->sc_memex = pba->pba_memex;
177 	sc->sc_pmemex = pba->pba_pmemex;
178 	sc->sc_busex = pba->pba_busex;
179 	sc->sc_domain = pba->pba_domain;
180 	sc->sc_bus = pba->pba_bus;
181 	sc->sc_bridgetag = pba->pba_bridgetag;
182 	sc->sc_bridgeih = pba->pba_bridgeih;
183 	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
184 	sc->sc_intrswiz = pba->pba_intrswiz;
185 	sc->sc_intrtag = pba->pba_intrtag;
186 
187 	/* Reserve our own bus number. */
188 	if (sc->sc_busex)
189 		extent_alloc_region(sc->sc_busex, sc->sc_bus, 1, EX_NOWAIT);
190 
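	/*
	 * The bus is enumerated three times: first to reserve the
	 * resources the firmware already assigned, then to locate the
	 * active VGA device, and finally to attach the children.
	 */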
191 	pci_enumerate_bus(sc, pci_reserve_resources, NULL);
192 
193 	/* Find the VGA device that's currently active. */
194 	if (pci_enumerate_bus(sc, pci_primary_vga, NULL))
195 		pci_vga_pci = sc;
196 
197 	pci_enumerate_bus(sc, NULL, NULL);
198 }
199 
200 int
201 pcidetach(struct device *self, int flags)
202 {
203 	return pci_detach_devices((struct pci_softc *)self, flags);
204 }
205 
206 int
207 pciactivate(struct device *self, int act)
208 {
209 	int rv = 0;
210 
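	/*
	 * Note the ordering below: children are handled before the bus
	 * saves its state on suspend/powerdown, and the bus state is
	 * restored before the children resume.
	 */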
211 	switch (act) {
212 	case DVACT_SUSPEND:
213 		rv = config_activate_children(self, act);
214 		pci_suspend((struct pci_softc *)self);
215 		break;
216 	case DVACT_RESUME:
217 		pci_resume((struct pci_softc *)self);
218 		rv = config_activate_children(self, act);
219 		break;
220 	case DVACT_POWERDOWN:
221 		rv = config_activate_children(self, act);
222 		pci_powerdown((struct pci_softc *)self);
223 		break;
224 	default:
225 		rv = config_activate_children(self, act);
226 		break;
227 	}
228 	return (rv);
229 }
230 
231 void
232 pci_suspend(struct pci_softc *sc)
233 {
234 	struct pci_dev *pd;
235 	pcireg_t bhlc, reg;
236 	int off, i;
237 
238 	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
239 		/*
240 		 * Only handle header type 0 here; PCI-PCI bridges and
241 		 * CardBus bridges need special handling, which will
242 		 * be done in their specific drivers.
243 		 */
244 		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
245 		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
246 			continue;
247 
248 		/* Save registers that may get lost. */
249 		for (i = 0; i < NMAPREG; i++)
250 			pd->pd_map[i] = pci_conf_read(sc->sc_pc, pd->pd_tag,
251 			    PCI_MAPREG_START + (i * 4));
252 		pd->pd_csr = pci_conf_read(sc->sc_pc, pd->pd_tag,
253 		    PCI_COMMAND_STATUS_REG);
254 		pd->pd_bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag,
255 		    PCI_BHLC_REG);
256 		pd->pd_int = pci_conf_read(sc->sc_pc, pd->pd_tag,
257 		    PCI_INTERRUPT_REG);
258 
259 		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
260 		    PCI_CAP_MSI, &off, &reg)) {
261 			pd->pd_msi_ma = pci_conf_read(sc->sc_pc, pd->pd_tag,
262 			    off + PCI_MSI_MA);
263 			if (reg & PCI_MSI_MC_C64) {
264 				pd->pd_msi_mau32 = pci_conf_read(sc->sc_pc,
265 				    pd->pd_tag, off + PCI_MSI_MAU32);
266 				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
267 				    pd->pd_tag, off + PCI_MSI_MD64);
268 			} else {
269 				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
270 				    pd->pd_tag, off + PCI_MSI_MD32);
271 			}
272 			pd->pd_msi_mc = reg;
273 		}
274 	}
275 }
276 
277 void
278 pci_powerdown(struct pci_softc *sc)
279 {
280 	struct pci_dev *pd;
281 	pcireg_t bhlc;
282 
283 	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
284 		/*
285 		 * Only handle header type 0 here; PCI-PCI bridges and
286 		 * CardBus bridges need special handling, which will
287 		 * be done in their specific drivers.
288 		 */
289 		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
290 		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
291 			continue;
292 
293 		if (pci_dopm) {
294 			/*
295 			 * Place the device into the lowest possible
296 			 * power state.
297 			 */
298 			pd->pd_pmcsr_state = pci_get_powerstate(sc->sc_pc,
299 			    pd->pd_tag);
300 			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
301 			    pci_min_powerstate(sc->sc_pc, pd->pd_tag));
302 		}
303 	}
304 }
305 
306 void
307 pci_resume(struct pci_softc *sc)
308 {
309 	struct pci_dev *pd;
310 	pcireg_t bhlc, reg;
311 	int off, i;
312 
313 	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
314 		/*
315 		 * Only handle header type 0 here; PCI-PCI bridges and
316 		 * CardBus bridges need special handling, which will
317 		 * be done in their specific drivers.
318 		 */
319 		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
320 		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
321 			continue;
322 
323 		/* Restore power. */
324 		if (pci_dopm)
325 			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
326 			    pd->pd_pmcsr_state);
327 
328 		/* Restore the registers saved above. */
329 		for (i = 0; i < NMAPREG; i++)
330 			pci_conf_write(sc->sc_pc, pd->pd_tag,
331 			    PCI_MAPREG_START + (i * 4), pd->pd_map[i]);
332 		reg = pci_conf_read(sc->sc_pc, pd->pd_tag,
333 		    PCI_COMMAND_STATUS_REG);
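		/*
		 * Merge the saved command bits with the status bits the
		 * device currently reports; only the command half of
		 * the register is restored.
		 */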
334 		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG,
335 		    (reg & 0xffff0000) | (pd->pd_csr & 0x0000ffff));
336 		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG,
337 		    pd->pd_bhlc);
338 		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_INTERRUPT_REG,
339 		    pd->pd_int);
340 
341 		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
342 		    PCI_CAP_MSI, &off, &reg)) {
343 			pci_conf_write(sc->sc_pc, pd->pd_tag,
344 			    off + PCI_MSI_MA, pd->pd_msi_ma);
345 			if (reg & PCI_MSI_MC_C64) {
346 				pci_conf_write(sc->sc_pc, pd->pd_tag,
347 				    off + PCI_MSI_MAU32, pd->pd_msi_mau32);
348 				pci_conf_write(sc->sc_pc, pd->pd_tag,
349 				    off + PCI_MSI_MD64, pd->pd_msi_md);
350 			} else {
351 				pci_conf_write(sc->sc_pc, pd->pd_tag,
352 				    off + PCI_MSI_MD32, pd->pd_msi_md);
353 			}
354 			pci_conf_write(sc->sc_pc, pd->pd_tag,
355 			    off + PCI_MSI_MC, pd->pd_msi_mc);
356 		}
357 	}
358 }
359 
360 int
361 pciprint(void *aux, const char *pnp)
362 {
363 	struct pci_attach_args *pa = aux;
364 	char devinfo[256];
365 
366 	if (pnp) {
367 		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
368 		    sizeof devinfo);
369 		printf("%s at %s", devinfo, pnp);
370 	}
371 	printf(" dev %d function %d", pa->pa_device, pa->pa_function);
372 	if (!pnp) {
373 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo,
374 		    sizeof devinfo);
375 		printf(" %s", devinfo);
376 	}
377 
378 	return (UNCONF);
379 }
380 
381 int
382 pcisubmatch(struct device *parent, void *match, void *aux)
383 {
384 	struct cfdata *cf = match;
385 	struct pci_attach_args *pa = aux;
386 
387 	if (cf->pcicf_dev != PCI_UNK_DEV &&
388 	    cf->pcicf_dev != pa->pa_device)
389 		return (0);
390 	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
391 	    cf->pcicf_function != pa->pa_function)
392 		return (0);
393 
394 	return ((*cf->cf_attach->ca_match)(parent, match, aux));
395 }
396 
397 int
398 pci_probe_device(struct pci_softc *sc, pcitag_t tag,
399     int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
400 {
401 	pci_chipset_tag_t pc = sc->sc_pc;
402 	struct pci_attach_args pa;
403 	struct pci_dev *pd;
404 	pcireg_t id, class, intr, bhlcr, cap;
405 	int pin, bus, device, function;
406 	int off, ret = 0;
407 	uint64_t addr;
408 
409 	pci_decompose_tag(pc, tag, &bus, &device, &function);
410 
411 	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
412 	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
413 		return (0);
414 
415 	id = pci_conf_read(pc, tag, PCI_ID_REG);
416 	class = pci_conf_read(pc, tag, PCI_CLASS_REG);
417 
418 	/* Invalid vendor ID value? */
419 	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
420 		return (0);
421 	/* XXX Not invalid, but we've done this ~forever. */
422 	if (PCI_VENDOR(id) == 0)
423 		return (0);
424 
425 	pa.pa_iot = sc->sc_iot;
426 	pa.pa_memt = sc->sc_memt;
427 	pa.pa_dmat = sc->sc_dmat;
428 	pa.pa_pc = pc;
429 	pa.pa_ioex = sc->sc_ioex;
430 	pa.pa_memex = sc->sc_memex;
431 	pa.pa_pmemex = sc->sc_pmemex;
432 	pa.pa_busex = sc->sc_busex;
433 	pa.pa_domain = sc->sc_domain;
434 	pa.pa_bus = bus;
435 	pa.pa_device = device;
436 	pa.pa_function = function;
437 	pa.pa_tag = tag;
438 	pa.pa_id = id;
439 	pa.pa_class = class;
440 	pa.pa_bridgetag = sc->sc_bridgetag;
441 	pa.pa_bridgeih = sc->sc_bridgeih;
442 
443 	/* This is a simplification of the NetBSD code.
444 	   We don't support turning off I/O or memory
445 	   on broken hardware. <csapuntz@stanford.edu> */
446 	pa.pa_flags = sc->sc_flags;
447 	pa.pa_flags |= PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;
448 
449 	if (sc->sc_bridgetag == NULL) {
450 		pa.pa_intrswiz = 0;
451 		pa.pa_intrtag = tag;
452 	} else {
453 		pa.pa_intrswiz = sc->sc_intrswiz + device;
454 		pa.pa_intrtag = sc->sc_intrtag;
455 	}
456 
457 	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);
458 
459 	pin = PCI_INTERRUPT_PIN(intr);
460 	pa.pa_rawintrpin = pin;
461 	if (pin == PCI_INTERRUPT_PIN_NONE) {
462 		/* no interrupt */
463 		pa.pa_intrpin = 0;
464 	} else {
465 		/*
466 		 * swizzle it based on the number of busses we're
467 		 * behind and our device number.
468 		 */
469 		pa.pa_intrpin = 	/* XXX */
470 		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
471 	}
472 	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);
473 
474 	if (pci_get_ht_capability(pc, tag, PCI_HT_CAP_MSI, &off, &cap)) {
475 		/*
476 		 * XXX Should we enable MSI mapping ourselves on
477 		 * systems that have it disabled?
478 		 */
479 		if (cap & PCI_HT_MSI_ENABLED) {
480 			if ((cap & PCI_HT_MSI_FIXED) == 0) {
481 				addr = pci_conf_read(pc, tag,
482 				    off + PCI_HT_MSI_ADDR);
483 				addr |= (uint64_t)pci_conf_read(pc, tag,
484 				    off + PCI_HT_MSI_ADDR_HI32) << 32;
485 			} else
486 				addr = PCI_HT_MSI_FIXED_ADDR;
487 
488 			/*
489 			 * XXX This will fail to enable MSI on systems
490 			 * that don't use the canonical address.
491 			 */
492 			if (addr == PCI_HT_MSI_FIXED_ADDR)
493 				pa.pa_flags |= PCI_FLAGS_MSI_ENABLED;
494 		}
495 	}
496 
497 	/*
498 	 * Give the MD code a chance to alter pci_attach_args and/or
499 	 * skip devices.
500 	 */
501 	if (pci_probe_device_hook(pc, &pa) != 0)
502 		return (0);
503 
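	/*
	 * With a match callback this is only a query; without one the
	 * device is recorded on sc_devs and a driver is attached below.
	 */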
504 	if (match != NULL) {
505 		ret = (*match)(&pa);
506 		if (ret != 0 && pap != NULL)
507 			*pap = pa;
508 	} else {
509 		pcireg_t address, csr;
510 		int i, reg, reg_start, reg_end;
511 		int s;
512 
513 		pd = malloc(sizeof *pd, M_DEVBUF, M_ZERO | M_WAITOK);
514 		pd->pd_tag = tag;
515 		LIST_INSERT_HEAD(&sc->sc_devs, pd, pd_next);
516 
517 		switch (PCI_HDRTYPE_TYPE(bhlcr)) {
518 		case 0:
519 			reg_start = PCI_MAPREG_START;
520 			reg_end = PCI_MAPREG_END;
521 			break;
522 		case 1: /* PCI-PCI bridge */
523 			reg_start = PCI_MAPREG_START;
524 			reg_end = PCI_MAPREG_PPB_END;
525 			break;
526 		case 2: /* PCI-CardBus bridge */
527 			reg_start = PCI_MAPREG_START;
528 			reg_end = PCI_MAPREG_PCB_END;
529 			break;
530 		default:
531 			return (0);
532 		}
533 
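		/*
		 * Size the BARs: writing all ones to a base address
		 * register and reading it back yields the size mask.
		 * I/O and memory decoding are switched off while doing
		 * so, so the temporary all-ones address is never
		 * decoded on the bus.
		 */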
534 		s = splhigh();
535 		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
536 		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
537 			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr &
538 			    ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE));
539 
540 		for (reg = reg_start, i = 0; reg < reg_end; reg += 4, i++) {
541 			address = pci_conf_read(pc, tag, reg);
542 			pci_conf_write(pc, tag, reg, 0xffffffff);
543 			pd->pd_mask[i] = pci_conf_read(pc, tag, reg);
544 			pci_conf_write(pc, tag, reg, address);
545 		}
546 
547 		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
548 			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
549 		splx(s);
550 
551 		if ((PCI_CLASS(class) == PCI_CLASS_DISPLAY &&
552 		    PCI_SUBCLASS(class) == PCI_SUBCLASS_DISPLAY_VGA) ||
553 		    (PCI_CLASS(class) == PCI_CLASS_PREHISTORIC &&
554 		    PCI_SUBCLASS(class) == PCI_SUBCLASS_PREHISTORIC_VGA))
555 			pd->pd_vga_decode = 1;
556 
557 		pd->pd_dev = config_found_sm(&sc->sc_dev, &pa, pciprint,
558 		    pcisubmatch);
559 		if (pd->pd_dev)
560 			pci_dev_postattach(pd->pd_dev, &pa);
561 	}
562 
563 	return (ret);
564 }
565 
566 int
567 pci_detach_devices(struct pci_softc *sc, int flags)
568 {
569 	struct pci_dev *pd, *next;
570 	int ret;
571 
572 	ret = config_detach_children(&sc->sc_dev, flags);
573 	if (ret != 0)
574 		return (ret);
575 
576 	for (pd = LIST_FIRST(&sc->sc_devs); pd != NULL; pd = next) {
577 		next = LIST_NEXT(pd, pd_next);
578 		free(pd, M_DEVBUF, sizeof *pd);
579 	}
580 	LIST_INIT(&sc->sc_devs);
581 
582 	return (0);
583 }
584 
585 int
586 pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
587     int *offset, pcireg_t *value)
588 {
589 	pcireg_t reg;
590 	unsigned int ofs;
591 
592 	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
593 	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
594 		return (0);
595 
596 	/* Determine the Capability List Pointer register to start with. */
597 	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
598 	switch (PCI_HDRTYPE_TYPE(reg)) {
599 	case 0:	/* standard device header */
600 	case 1: /* PCI-PCI bridge header */
601 		ofs = PCI_CAPLISTPTR_REG;
602 		break;
603 	case 2:	/* PCI-CardBus bridge header */
604 		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
605 		break;
606 	default:
607 		return (0);
608 	}
609 
610 	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
611 	while (ofs != 0) {
612 		/*
613 		 * Some devices, like parts of the NVIDIA C51 chipset,
614 		 * have a broken Capabilities List.  So we need to do
615 		 * a sanity check here.
616 		 */
617 		if ((ofs & 3) || (ofs < 0x40))
618 			return (0);
619 		reg = pci_conf_read(pc, tag, ofs);
620 		if (PCI_CAPLIST_CAP(reg) == capid) {
621 			if (offset)
622 				*offset = ofs;
623 			if (value)
624 				*value = reg;
625 			return (1);
626 		}
627 		ofs = PCI_CAPLIST_NEXT(reg);
628 	}
629 
630 	return (0);
631 }
632 
633 int
634 pci_get_ht_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
635     int *offset, pcireg_t *value)
636 {
637 	pcireg_t reg;
638 	unsigned int ofs;
639 
640 	if (pci_get_capability(pc, tag, PCI_CAP_HT, &ofs, NULL) == 0)
641 		return (0);
642 
643 	while (ofs != 0) {
644 #ifdef DIAGNOSTIC
645 		if ((ofs & 3) || (ofs < 0x40))
646 			panic("pci_get_ht_capability");
647 #endif
648 		reg = pci_conf_read(pc, tag, ofs);
649 		if (PCI_HT_CAP(reg) == capid) {
650 			if (offset)
651 				*offset = ofs;
652 			if (value)
653 				*value = reg;
654 			return (1);
655 		}
656 		ofs = PCI_CAPLIST_NEXT(reg);
657 	}
658 
659 	return (0);
660 }
661 
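/*
 * The requester ID packs the bus, device and function numbers into
 * 16 bits: bus in bits 15-8, device in bits 7-3, function in bits 2-0.
 */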
662 uint16_t
663 pci_requester_id(pci_chipset_tag_t pc, pcitag_t tag)
664 {
665 	int bus, dev, func;
666 
667 	pci_decompose_tag(pc, tag, &bus, &dev, &func);
668 	return ((bus << 8) | (dev << 3) | func);
669 }
670 
671 int
672 pci_find_device(struct pci_attach_args *pa,
673     int (*match)(struct pci_attach_args *))
674 {
675 	extern struct cfdriver pci_cd;
676 	struct device *pcidev;
677 	int i;
678 
679 	for (i = 0; i < pci_cd.cd_ndevs; i++) {
680 		pcidev = pci_cd.cd_devs[i];
681 		if (pcidev != NULL &&
682 		    pci_enumerate_bus((struct pci_softc *)pcidev,
683 		    		      match, pa) != 0)
684 			return (1);
685 	}
686 	return (0);
687 }
688 
689 int
690 pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag)
691 {
692 	pcireg_t reg;
693 	int offset;
694 
695 	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
696 		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
697 		return (reg & PCI_PMCSR_STATE_MASK);
698 	}
699 	return (PCI_PMCSR_STATE_D0);
700 }
701 
702 int
703 pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int state)
704 {
705 	pcireg_t reg;
706 	int offset, ostate = state;
707 
708 	/*
709 	 * Warn the firmware that we are going to put the device
710 	 * into the given state.
711 	 */
712 	pci_set_powerstate_md(pc, tag, state, 1);
713 
714 	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
715 		if (state == PCI_PMCSR_STATE_D3) {
716 			/*
717 			 * The PCI Power Management spec says we
718 			 * should disable I/O and memory space as well
719 			 * as bus mastering before we place the device
720 			 * into D3.
721 			 */
722 			reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
723 			reg &= ~PCI_COMMAND_IO_ENABLE;
724 			reg &= ~PCI_COMMAND_MEM_ENABLE;
725 			reg &= ~PCI_COMMAND_MASTER_ENABLE;
726 			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, reg);
727 		}
728 		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
729 		if ((reg & PCI_PMCSR_STATE_MASK) != state) {
730 			ostate = reg & PCI_PMCSR_STATE_MASK;
731 
732 			pci_conf_write(pc, tag, offset + PCI_PMCSR,
733 			    (reg & ~PCI_PMCSR_STATE_MASK) | state);
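			/*
			 * The PCI power management spec calls for a
			 * 10ms settle time when a function enters or
			 * leaves D3hot.
			 */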
734 			if (state == PCI_PMCSR_STATE_D3 ||
735 			    ostate == PCI_PMCSR_STATE_D3)
736 				delay(10 * 1000);
737 		}
738 	}
739 
740 	/*
741 	 * Warn the firmware that the device is now in the given
742 	 * state.
743 	 */
744 	pci_set_powerstate_md(pc, tag, state, 0);
745 
746 	return (ostate);
747 }
748 
749 #ifndef PCI_MACHDEP_ENUMERATE_BUS
750 /*
751  * Generic PCI bus enumeration routine.  Used unless machine-dependent
752  * code needs to provide something else.
753  */
754 int
755 pci_enumerate_bus(struct pci_softc *sc,
756     int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
757 {
758 	pci_chipset_tag_t pc = sc->sc_pc;
759 	int device, function, nfunctions, ret;
760 	const struct pci_quirkdata *qd;
761 	pcireg_t id, bhlcr;
762 	pcitag_t tag;
763 
764 	for (device = 0; device < sc->sc_maxndevs; device++) {
765 		tag = pci_make_tag(pc, sc->sc_bus, device, 0);
766 
767 		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
768 		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
769 			continue;
770 
771 		id = pci_conf_read(pc, tag, PCI_ID_REG);
772 
773 		/* Invalid vendor ID value? */
774 		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
775 			continue;
776 		/* XXX Not invalid, but we've done this ~forever. */
777 		if (PCI_VENDOR(id) == 0)
778 			continue;
779 
780 		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));
781 
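		/*
		 * The multifunction bit in function 0's header decides
		 * whether functions 1-7 are probed; quirk entries can
		 * override it for devices that get it wrong.
		 */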
782 		if (qd != NULL &&
783 		      (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
784 			nfunctions = 8;
785 		else if (qd != NULL &&
786 		      (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
787 			nfunctions = 1;
788 		else
789 			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;
790 
791 		for (function = 0; function < nfunctions; function++) {
792 			tag = pci_make_tag(pc, sc->sc_bus, device, function);
793 			ret = pci_probe_device(sc, tag, match, pap);
794 			if (match != NULL && ret != 0)
795 				return (ret);
796 		}
797  	}
798 
799 	return (0);
800 }
801 #endif /* PCI_MACHDEP_ENUMERATE_BUS */
802 
803 int
804 pci_reserve_resources(struct pci_attach_args *pa)
805 {
806 	pci_chipset_tag_t pc = pa->pa_pc;
807 	pcitag_t tag = pa->pa_tag;
808 	pcireg_t bhlc, blr, type, bir;
809 	pcireg_t addr, mask;
810 	bus_addr_t base, limit;
811 	bus_size_t size;
812 	int reg, reg_start, reg_end, reg_rom;
813 	int bus, dev, func;
814 	int sec, sub;
815 	int flags;
816 	int s;
817 
818 	pci_decompose_tag(pc, tag, &bus, &dev, &func);
819 
820 	bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
821 	switch (PCI_HDRTYPE_TYPE(bhlc)) {
822 	case 0:
823 		reg_start = PCI_MAPREG_START;
824 		reg_end = PCI_MAPREG_END;
825 		reg_rom = PCI_ROM_REG;
826 		break;
827 	case 1: /* PCI-PCI bridge */
828 		reg_start = PCI_MAPREG_START;
829 		reg_end = PCI_MAPREG_PPB_END;
830 		reg_rom = 0;	/* 0x38 */
831 		break;
832 	case 2: /* PCI-CardBus bridge */
833 		reg_start = PCI_MAPREG_START;
834 		reg_end = PCI_MAPREG_PCB_END;
835 		reg_rom = 0;
836 		break;
837 	default:
838 		return (0);
839 	}
840 
841 	for (reg = reg_start; reg < reg_end; reg += 4) {
842 		if (!pci_mapreg_probe(pc, tag, reg, &type))
843 			continue;
844 
845 		if (pci_mapreg_info(pc, tag, reg, type, &base, &size, &flags))
846 			continue;
847 
848 		if (base == 0)
849 			continue;
850 
851 		switch (type) {
852 		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
853 		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
854 			if (ISSET(flags, BUS_SPACE_MAP_PREFETCHABLE) &&
855 			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
856 			    base, size, EX_NOWAIT) == 0) {
857 				break;
858 			}
859 #ifdef __sparc64__
860 			/*
861 			 * Certain SPARC T5 systems assign the
862 			 * non-prefetchable 64-bit BARs of their onboard
863 			 * mpii(4) controllers addresses in the
864 			 * prefetchable memory range.  This is
865 			 * (probably) safe, as reads from the device
866 			 * registers mapped by these BARs are
867 			 * side-effect free.  So assume the firmware
868 			 * knows what it is doing.
869 			 */
870 			if (base >= 0x100000000 &&
871 			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
872 			    base, size, EX_NOWAIT) == 0) {
873 				break;
874 			}
875 #endif
876 			if (pa->pa_memex && extent_alloc_region(pa->pa_memex,
877 			    base, size, EX_NOWAIT)) {
878 				printf("%d:%d:%d: mem address conflict 0x%lx/0x%lx\n",
879 				    bus, dev, func, base, size);
880 				pci_conf_write(pc, tag, reg, 0);
881 				if (type & PCI_MAPREG_MEM_TYPE_64BIT)
882 					pci_conf_write(pc, tag, reg + 4, 0);
883 			}
884 			break;
885 		case PCI_MAPREG_TYPE_IO:
886 			if (pa->pa_ioex && extent_alloc_region(pa->pa_ioex,
887 			    base, size, EX_NOWAIT)) {
888 				printf("%d:%d:%d: io address conflict 0x%lx/0x%lx\n",
889 				    bus, dev, func, base, size);
890 				pci_conf_write(pc, tag, reg, 0);
891 			}
892 			break;
893 		}
894 
895 		if (type & PCI_MAPREG_MEM_TYPE_64BIT)
896 			reg += 4;
897 	}
898 
899 	if (reg_rom != 0) {
900 		s = splhigh();
901 		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
902 		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
903 		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
904 		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
905 		splx(s);
906 
907 		base = PCI_ROM_ADDR(addr);
908 		size = PCI_ROM_SIZE(mask);
909 		if (base != 0 && size != 0) {
910 			if (pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
911 			    base, size, EX_NOWAIT) &&
912 			    pa->pa_memex && extent_alloc_region(pa->pa_memex,
913 			    base, size, EX_NOWAIT)) {
914 				printf("%d:%d:%d: rom address conflict 0x%lx/0x%lx\n",
915 				    bus, dev, func, base, size);
916 				pci_conf_write(pc, tag, PCI_ROM_REG, 0);
917 			}
918 		}
919 	}
920 
921 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
922 		return (0);
923 
924 	/* Figure out the I/O address range of the bridge. */
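	/*
	 * The upper nibble of each of the bridge's I/O base and limit
	 * bytes supplies address bits 15:12 (windows are 4KB-granular);
	 * the upper 16 address bits, if implemented, come from
	 * PPB_REG_IO_HI.
	 */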
925 	blr = pci_conf_read(pc, tag, PPB_REG_IOSTATUS);
926 	base = (blr & 0x000000f0) << 8;
927 	limit = (blr & 0x0000f000) | 0x00000fff;
928 	blr = pci_conf_read(pc, tag, PPB_REG_IO_HI);
929 	base |= (blr & 0x0000ffff) << 16;
930 	limit |= (blr & 0xffff0000);
931 	if (limit > base)
932 		size = (limit - base + 1);
933 	else
934 		size = 0;
935 	if (pa->pa_ioex && base > 0 && size > 0) {
936 		if (extent_alloc_region(pa->pa_ioex, base, size, EX_NOWAIT)) {
937 			printf("%d:%d:%d: bridge io address conflict 0x%lx/0x%lx\n",
938 			    bus, dev, func, base, size);
939 			blr &= 0xffff0000;
940 			blr |= 0x000000f0;
941 			pci_conf_write(pc, tag, PPB_REG_IOSTATUS, blr);
942 		}
943 	}
944 
945 	/* Figure out the memory mapped I/O address range of the bridge. */
946 	blr = pci_conf_read(pc, tag, PPB_REG_MEM);
947 	base = (blr & 0x0000fff0) << 16;
948 	limit = (blr & 0xfff00000) | 0x000fffff;
949 	if (limit > base)
950 		size = (limit - base + 1);
951 	else
952 		size = 0;
953 	if (pa->pa_memex && base > 0 && size > 0) {
954 		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
955 			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
956 			    bus, dev, func, base, size);
957 			pci_conf_write(pc, tag, PPB_REG_MEM, 0x0000fff0);
958 		}
959 	}
960 
961 	/* Figure out the prefetchable memory address range of the bridge. */
962 	blr = pci_conf_read(pc, tag, PPB_REG_PREFMEM);
963 	base = (blr & 0x0000fff0) << 16;
964 	limit = (blr & 0xfff00000) | 0x000fffff;
965 #ifdef __LP64__
966 	blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_PREFBASE_HI32);
967 	base |= ((uint64_t)blr) << 32;
968 	blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_PREFLIM_HI32);
969 	limit |= ((uint64_t)blr) << 32;
970 #endif
971 	if (limit > base)
972 		size = (limit - base + 1);
973 	else
974 		size = 0;
975 	if (pa->pa_pmemex && base > 0 && size > 0) {
976 		if (extent_alloc_region(pa->pa_pmemex, base, size, EX_NOWAIT)) {
977 			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
978 			    bus, dev, func, base, size);
979 			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
980 		}
981 	} else if (pa->pa_memex && base > 0 && size > 0) {
982 		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
983 			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
984 			    bus, dev, func, base, size);
985 			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
986 		}
987 	}
988 
989 	/* Figure out the bus range handled by the bridge. */
990 	bir = pci_conf_read(pc, tag, PPB_REG_BUSINFO);
991 	sec = PPB_BUSINFO_SECONDARY(bir);
992 	sub = PPB_BUSINFO_SUBORDINATE(bir);
993 	if (pa->pa_busex && sub >= sec && sub > 0) {
994 		if (extent_alloc_region(pa->pa_busex, sec, sub - sec + 1,
995 		    EX_NOWAIT)) {
996 			printf("%d:%d:%d: bridge bus conflict %d-%d\n",
997 			    bus, dev, func, sec, sub);
998 		}
999 	}
1000 
1001 	return (0);
1002 }
1003 
1004 /*
1005  * Vital Product Data (PCI 2.2)
1006  */
1007 
1008 int
1009 pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
1010     pcireg_t *data)
1011 {
1012 	uint32_t reg;
1013 	int ofs, i, j;
1014 
1015 	KASSERT(data != NULL);
1016 	KASSERT((offset + count) < 0x7fff);
1017 
1018 	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
1019 		return (1);
1020 
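	/*
	 * Each word is read by writing its VPD address with the flag
	 * bit clear and polling until the hardware sets the flag to
	 * indicate that the data register holds valid data.
	 */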
1021 	for (i = 0; i < count; offset += sizeof(*data), i++) {
1022 		reg &= 0x0000ffff;
1023 		reg &= ~PCI_VPD_OPFLAG;
1024 		reg |= PCI_VPD_ADDRESS(offset);
1025 		pci_conf_write(pc, tag, ofs, reg);
1026 
1027 		/*
1028 		 * PCI 2.2 does not specify how long we should poll
1029 		 * for completion nor whether the operation can fail.
1030 		 */
1031 		j = 0;
1032 		do {
1033 			if (j++ == 20)
1034 				return (1);
1035 			delay(4);
1036 			reg = pci_conf_read(pc, tag, ofs);
1037 		} while ((reg & PCI_VPD_OPFLAG) == 0);
1038 		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
1039 	}
1040 
1041 	return (0);
1042 }
1043 
1044 int
1045 pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
1046     pcireg_t *data)
1047 {
1048 	pcireg_t reg;
1049 	int ofs, i, j;
1050 
1051 	KASSERT(data != NULL);
1052 	KASSERT((offset + count) < 0x7fff);
1053 
1054 	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
1055 		return (1);
1056 
1057 	for (i = 0; i < count; offset += sizeof(*data), i++) {
1058 		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);
1059 
1060 		reg &= 0x0000ffff;
1061 		reg |= PCI_VPD_OPFLAG;
1062 		reg |= PCI_VPD_ADDRESS(offset);
1063 		pci_conf_write(pc, tag, ofs, reg);
1064 
1065 		/*
1066 		 * PCI 2.2 does not specify how long we should poll
1067 		 * for completion nor whether the operation can fail.
1068 		 */
1069 		j = 0;
1070 		do {
1071 			if (j++ == 20)
1072 				return (1);
1073 			delay(1);
1074 			reg = pci_conf_read(pc, tag, ofs);
1075 		} while (reg & PCI_VPD_OPFLAG);
1076 	}
1077 
1078 	return (0);
1079 }
1080 
1081 int
1082 pci_matchbyid(struct pci_attach_args *pa, const struct pci_matchid *ids,
1083     int nent)
1084 {
1085 	const struct pci_matchid *pm;
1086 	int i;
1087 
1088 	for (i = 0, pm = ids; i < nent; i++, pm++)
1089 		if (PCI_VENDOR(pa->pa_id) == pm->pm_vid &&
1090 		    PCI_PRODUCT(pa->pa_id) == pm->pm_pid)
1091 			return (1);
1092 	return (0);
1093 }
1094 
1095 void
1096 pci_disable_legacy_vga(struct device *dev)
1097 {
1098 	struct pci_softc *pci;
1099 	struct pci_dev *pd;
1100 
1101 	/* XXX Until we attach the drm drivers directly to pci. */
1102 	while (dev->dv_parent->dv_cfdata->cf_driver != &pci_cd)
1103 		dev = dev->dv_parent;
1104 
1105 	pci = (struct pci_softc *)dev->dv_parent;
1106 	LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
1107 		if (pd->pd_dev == dev) {
1108 			pd->pd_vga_decode = 0;
1109 			break;
1110 		}
1111 	}
1112 }
1113 
1114 #ifdef USER_PCICONF
1115 /*
1116  * This is the user interface to PCI configuration space.
1117  */
1118 
1119 #include <sys/pciio.h>
1120 #include <sys/fcntl.h>
1121 
1122 #ifdef DEBUG
1123 #define PCIDEBUG(x) printf x
1124 #else
1125 #define PCIDEBUG(x)
1126 #endif
1127 
1128 void pci_disable_vga(pci_chipset_tag_t, pcitag_t);
1129 void pci_enable_vga(pci_chipset_tag_t, pcitag_t);
1130 void pci_route_vga(struct pci_softc *);
1131 void pci_unroute_vga(struct pci_softc *);
1132 
1133 int pciopen(dev_t dev, int oflags, int devtype, struct proc *p);
1134 int pciclose(dev_t dev, int flag, int devtype, struct proc *p);
1135 int pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p);
1136 
1137 int
1138 pciopen(dev_t dev, int oflags, int devtype, struct proc *p)
1139 {
1140 	PCIDEBUG(("pciopen ndevs: %d\n" , pci_cd.cd_ndevs));
1141 
1142 	if (minor(dev) >= pci_ndomains) {
1143 		return ENXIO;
1144 	}
1145 
1146 #ifndef APERTURE
1147 	if ((oflags & FWRITE) && securelevel > 0) {
1148 		return EPERM;
1149 	}
1150 #else
1151 	if ((oflags & FWRITE) && securelevel > 0 && allowaperture == 0) {
1152 		return EPERM;
1153 	}
1154 #endif
1155 	return (0);
1156 }
1157 
1158 int
1159 pciclose(dev_t dev, int flag, int devtype, struct proc *p)
1160 {
1161 	PCIDEBUG(("pciclose\n"));
1162 
1163 	pci_vga_proc = NULL;
1164 	return (0);
1165 }
1166 
1167 int
1168 pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
1169 {
1170 	struct pcisel *sel = (struct pcisel *)data;
1171 	struct pci_io *io;
1172 	struct pci_rom *rom;
1173 	int i, error;
1174 	pcitag_t tag;
1175 	struct pci_softc *pci;
1176 	pci_chipset_tag_t pc;
1177 
1178 	switch (cmd) {
1179 	case PCIOCREAD:
1180 	case PCIOCREADMASK:
1181 		break;
1182 	case PCIOCWRITE:
1183 		if (!(flag & FWRITE))
1184 			return EPERM;
1185 		break;
1186 	case PCIOCGETROMLEN:
1187 	case PCIOCGETROM:
1188 		break;
1189 	case PCIOCGETVGA:
1190 	case PCIOCSETVGA:
1191 		if (pci_vga_pci == NULL)
1192 			return EINVAL;
1193 		break;
1194 	default:
1195 		return ENOTTY;
1196 	}
1197 
1198 	for (i = 0; i < pci_cd.cd_ndevs; i++) {
1199 		pci = pci_cd.cd_devs[i];
1200 		if (pci != NULL && pci->sc_domain == minor(dev) &&
1201 		    pci->sc_bus == sel->pc_bus)
1202 			break;
1203 	}
1204 	if (i >= pci_cd.cd_ndevs)
1205 		return ENXIO;
1206 
1207 	/* Check bounds */
1208 	if (pci->sc_bus >= 256 ||
1209 	    sel->pc_dev >= pci_bus_maxdevs(pci->sc_pc, pci->sc_bus) ||
1210 	    sel->pc_func >= 8)
1211 		return EINVAL;
1212 
1213 	pc = pci->sc_pc;
1214 	tag = pci_make_tag(pc, sel->pc_bus, sel->pc_dev, sel->pc_func);
1215 
1216 	switch (cmd) {
1217 	case PCIOCREAD:
1218 		io = (struct pci_io *)data;
1219 		switch (io->pi_width) {
1220 		case 4:
1221 			/* Configuration space bounds check */
1222 			if (io->pi_reg < 0 ||
1223 			    io->pi_reg >= pci_conf_size(pc, tag))
1224 				return EINVAL;
1225 			/* Make sure the register is properly aligned */
1226 			if (io->pi_reg & 0x3)
1227 				return EINVAL;
1228 			io->pi_data = pci_conf_read(pc, tag, io->pi_reg);
1229 			error = 0;
1230 			break;
1231 		default:
1232 			error = EINVAL;
1233 			break;
1234 		}
1235 		break;
1236 
1237 	case PCIOCWRITE:
1238 		io = (struct pci_io *)data;
1239 		switch (io->pi_width) {
1240 		case 4:
1241 			/* Configuration space bounds check */
1242 			if (io->pi_reg < 0 ||
1243 			    io->pi_reg >= pci_conf_size(pc, tag))
1244 				return EINVAL;
1245 			/* Make sure the register is properly aligned */
1246 			if (io->pi_reg & 0x3)
1247 				return EINVAL;
1248 			pci_conf_write(pc, tag, io->pi_reg, io->pi_data);
1249 			error = 0;
1250 			break;
1251 		default:
1252 			error = EINVAL;
1253 			break;
1254 		}
1255 		break;
1256 
1257 	case PCIOCREADMASK:
1258 	{
1259 		io = (struct pci_io *)data;
1260 		struct pci_dev *pd;
1261 		int dev, func, i;
1262 
1263 		if (io->pi_width != 4 || io->pi_reg & 0x3 ||
1264 		    io->pi_reg < PCI_MAPREG_START ||
1265 		    io->pi_reg >= PCI_MAPREG_END)
1266 			return (EINVAL);
1267 
1268 		error = ENODEV;
1269 		LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
1270 			pci_decompose_tag(pc, pd->pd_tag, NULL, &dev, &func);
1271 			if (dev == sel->pc_dev && func == sel->pc_func) {
1272 				i = (io->pi_reg - PCI_MAPREG_START) / 4;
1273 				io->pi_data = pd->pd_mask[i];
1274 				error = 0;
1275 				break;
1276 			}
1277 		}
1278 		break;
1279 	}
1280 
1281 	case PCIOCGETROMLEN:
1282 	case PCIOCGETROM:
1283 	{
1284 		pcireg_t addr, mask, bhlc;
1285 		bus_space_handle_t h;
1286 		bus_size_t len, off;
1287 		char buf[256];
1288 		int s;
1289 
1290 		rom = (struct pci_rom *)data;
1291 
1292 		bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
1293 		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
1294 			return (ENODEV);
1295 
1296 		s = splhigh();
1297 		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
1298 		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
1299 		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
1300 		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
1301 		splx(s);
1302 
1303 		/*
1304 		 * Section 6.2.5.2 `Expansion ROM Base Address Register',
1305 		 *
1306 		 * tells us that only the upper 21 bits are writable.
1307 		 * This means that the size of a ROM must be a
1308 		 * multiple of 2 KB.  So reading the ROM in chunks of
1309 		 * 256 bytes should work just fine.
1310 		 */
1311 		if (PCI_ROM_ADDR(addr) == 0 ||
1312 		    PCI_ROM_SIZE(mask) % sizeof(buf) != 0)
1313 			return (ENODEV);
1314 
1315 		/* If we're just after the size, skip reading the ROM. */
1316 		if (cmd == PCIOCGETROMLEN) {
1317 			error = 0;
1318 			goto fail;
1319 		}
1320 
1321 		if (rom->pr_romlen < PCI_ROM_SIZE(mask)) {
1322 			error = ENOMEM;
1323 			goto fail;
1324 		}
1325 
1326 		error = bus_space_map(pci->sc_memt, PCI_ROM_ADDR(addr),
1327 		    PCI_ROM_SIZE(mask), 0, &h);
1328 		if (error)
1329 			goto fail;
1330 
1331 		off = 0;
1332 		len = PCI_ROM_SIZE(mask);
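		/*
		 * PCI_ROM_ENABLE is set only while each chunk is read
		 * and the original register value is put back before
		 * splhigh is dropped.
		 */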
1333 		while (len > 0 && error == 0) {
1334 			s = splhigh();
1335 			pci_conf_write(pc, tag, PCI_ROM_REG,
1336 			    addr | PCI_ROM_ENABLE);
1337 			bus_space_read_region_1(pci->sc_memt, h, off,
1338 			    buf, sizeof(buf));
1339 			pci_conf_write(pc, tag, PCI_ROM_REG, addr);
1340 			splx(s);
1341 
1342 			error = copyout(buf, rom->pr_rom + off, sizeof(buf));
1343 			off += sizeof(buf);
1344 			len -= sizeof(buf);
1345 		}
1346 
1347 		bus_space_unmap(pci->sc_memt, h, PCI_ROM_SIZE(mask));
1348 
1349 	fail:
1350 		rom->pr_romlen = PCI_ROM_SIZE(mask);
1351 		break;
1352 	}
1353 
1354 	case PCIOCGETVGA:
1355 	{
1356 		struct pci_vga *vga = (struct pci_vga *)data;
1357 		struct pci_dev *pd;
1358 		int bus, dev, func;
1359 
1360 		vga->pv_decode = 0;
1361 		LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
1362 			pci_decompose_tag(pc, pd->pd_tag, NULL, &dev, &func);
1363 			if (dev == sel->pc_dev && func == sel->pc_func) {
1364 				if (pd->pd_vga_decode)
1365 					vga->pv_decode = PCI_VGA_IO_ENABLE |
1366 					    PCI_VGA_MEM_ENABLE;
1367 				break;
1368 			}
1369 		}
1370 
1371 		pci_decompose_tag(pci_vga_pci->sc_pc,
1372 		    pci_vga_tag, &bus, &dev, &func);
1373 		vga->pv_sel.pc_bus = bus;
1374 		vga->pv_sel.pc_dev = dev;
1375 		vga->pv_sel.pc_func = func;
1376 		error = 0;
1377 		break;
1378 	}
1379 	case PCIOCSETVGA:
1380 	{
1381 		struct pci_vga *vga = (struct pci_vga *)data;
1382 		int bus, dev, func;
1383 
1384 		switch (vga->pv_lock) {
1385 		case PCI_VGA_UNLOCK:
1386 		case PCI_VGA_LOCK:
1387 		case PCI_VGA_TRYLOCK:
1388 			break;
1389 		default:
1390 			return (EINVAL);
1391 		}
1392 
1393 		if (vga->pv_lock == PCI_VGA_UNLOCK) {
1394 			if (pci_vga_proc != p)
1395 				return (EINVAL);
1396 			pci_vga_proc = NULL;
1397 			wakeup(&pci_vga_proc);
1398 			return (0);
1399 		}
1400 
1401 		while (pci_vga_proc != p && pci_vga_proc != NULL) {
1402 			if (vga->pv_lock == PCI_VGA_TRYLOCK)
1403 				return (EBUSY);
1404 			error = tsleep(&pci_vga_proc, PLOCK | PCATCH,
1405 			    "vgalk", 0);
1406 			if (error)
1407 				return (error);
1408 		}
1409 		pci_vga_proc = p;
1410 
1411 		pci_decompose_tag(pci_vga_pci->sc_pc,
1412 		    pci_vga_tag, &bus, &dev, &func);
1413 		if (bus != vga->pv_sel.pc_bus || dev != vga->pv_sel.pc_dev ||
1414 		    func != vga->pv_sel.pc_func) {
1415 			pci_disable_vga(pci_vga_pci->sc_pc, pci_vga_tag);
1416 			if (pci != pci_vga_pci) {
1417 				pci_unroute_vga(pci_vga_pci);
1418 				pci_route_vga(pci);
1419 				pci_vga_pci = pci;
1420 			}
1421 			pci_enable_vga(pc, tag);
1422 			pci_vga_tag = tag;
1423 		}
1424 
1425 		error = 0;
1426 		break;
1427 	}
1428 
1429 	default:
1430 		error = ENOTTY;
1431 		break;
1432 	}
1433 
1434 	return (error);
1435 }
1436 
1437 void
1438 pci_disable_vga(pci_chipset_tag_t pc, pcitag_t tag)
1439 {
1440 	pcireg_t csr;
1441 
1442 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1443 	csr &= ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE);
1444 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1445 }
1446 
1447 void
1448 pci_enable_vga(pci_chipset_tag_t pc, pcitag_t tag)
1449 {
1450 	pcireg_t csr;
1451 
1452 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1453 	csr |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE;
1454 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1455 }
1456 
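/*
 * Legacy VGA cycles only reach a bus if every PCI-PCI bridge on the
 * path from the host bridge has PPB_BC_VGA_ENABLE set, so routing is
 * adjusted recursively along the bridge chain.
 */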
1457 void
1458 pci_route_vga(struct pci_softc *sc)
1459 {
1460 	pci_chipset_tag_t pc = sc->sc_pc;
1461 	pcireg_t bc;
1462 
1463 	if (sc->sc_bridgetag == NULL)
1464 		return;
1465 
1466 	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
1467 	bc |= PPB_BC_VGA_ENABLE;
1468 	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
1469 
1470 	pci_route_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
1471 }
1472 
1473 void
1474 pci_unroute_vga(struct pci_softc *sc)
1475 {
1476 	pci_chipset_tag_t pc = sc->sc_pc;
1477 	pcireg_t bc;
1478 
1479 	if (sc->sc_bridgetag == NULL)
1480 		return;
1481 
1482 	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
1483 	bc &= ~PPB_BC_VGA_ENABLE;
1484 	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
1485 
1486 	pci_unroute_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
1487 }
1488 #endif /* USER_PCICONF */
1489 
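/*
 * A display device counts as the primary VGA device if the firmware
 * left both I/O and memory decoding enabled in its command register.
 */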
1490 int
1491 pci_primary_vga(struct pci_attach_args *pa)
1492 {
1493 	/* XXX For now, only handle the first PCI domain. */
1494 	if (pa->pa_domain != 0)
1495 		return (0);
1496 
1497 	if ((PCI_CLASS(pa->pa_class) != PCI_CLASS_DISPLAY ||
1498 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_DISPLAY_VGA) &&
1499 	    (PCI_CLASS(pa->pa_class) != PCI_CLASS_PREHISTORIC ||
1500 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_PREHISTORIC_VGA))
1501 		return (0);
1502 
1503 	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
1504 	    & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
1505 	    != (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
1506 		return (0);
1507 
1508 	pci_vga_tag = pa->pa_tag;
1509 
1510 	return (1);
1511 }
1512