xref: /openbsd-src/sys/dev/pci/pci.c (revision 03ad23dddf5ad92d57aa2ca7ae5fe8e429f806de)
1 /*	$OpenBSD: pci.c,v 1.106 2014/10/26 16:18:42 kettenis Exp $	*/
2 /*	$NetBSD: pci.c,v 1.31 1997/06/06 23:48:04 thorpej Exp $	*/
3 
4 /*
5  * Copyright (c) 1995, 1996 Christopher G. Demetriou.  All rights reserved.
6  * Copyright (c) 1994 Charles Hannum.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Charles Hannum.
19  * 4. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * PCI bus autoconfiguration.
36  */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 
43 #include <dev/pci/pcireg.h>
44 #include <dev/pci/pcivar.h>
45 #include <dev/pci/pcidevs.h>
46 #include <dev/pci/ppbreg.h>
47 
/* Autoconf glue: match/attach/detach/activate entry points for pci(4). */
int pcimatch(struct device *, void *, void *);
void pciattach(struct device *, struct device *, void *);
int pcidetach(struct device *, int);
int pciactivate(struct device *, int);
/* Suspend/resume helpers that save and restore per-device config space. */
void pci_suspend(struct pci_softc *);
void pci_powerdown(struct pci_softc *);
void pci_resume(struct pci_softc *);

/* Number of 32-bit base address registers in a type 0 header. */
#define NMAPREG			((PCI_MAPREG_END - PCI_MAPREG_START) / \
				    sizeof(pcireg_t))
/*
 * Per-device bookkeeping kept by the bus driver: the attached child
 * device (if any) and a shadow copy of the configuration registers
 * that may be lost across a suspend/resume cycle.
 */
struct pci_dev {
	struct device *pd_dev;		/* attached driver, or NULL */
	LIST_ENTRY(pci_dev) pd_next;
	pcitag_t pd_tag;        /* pci register tag */
	pcireg_t pd_csr;		/* saved command/status register */
	pcireg_t pd_bhlc;		/* saved BIST/header/latency/cacheline */
	pcireg_t pd_int;		/* saved interrupt line/pin register */
	pcireg_t pd_map[NMAPREG];	/* saved BARs */
	pcireg_t pd_mask[NMAPREG];	/* BAR size-probe results */
	pcireg_t pd_msi_mc;		/* saved MSI message control */
	pcireg_t pd_msi_ma;		/* saved MSI message address */
	pcireg_t pd_msi_mau32;		/* saved MSI message address, high 32 bits */
	pcireg_t pd_msi_md;		/* saved MSI message data */
	int pd_pmcsr_state;		/* power state saved before powerdown */
	int pd_vga_decode;		/* device decodes legacy VGA ranges */
};
74 
#ifdef APERTURE
extern int allowaperture;	/* machdep.allowaperture sysctl knob */
#endif

/* Autoconf attachment and driver definitions for pci(4). */
struct cfattach pci_ca = {
	sizeof(struct pci_softc), pcimatch, pciattach, pcidetach, pciactivate
};

struct cfdriver pci_cd = {
	NULL, "pci", DV_DULL
};
86 
int	pci_ndomains;		/* number of PCI domains in the system */

/* Legacy VGA arbitration state (current owner process/bus/device). */
struct proc *pci_vga_proc;
struct pci_softc *pci_vga_pci;
pcitag_t pci_vga_tag;

/* Nonzero: power devices down at DVACT_POWERDOWN (see pci_powerdown()). */
int	pci_dopm;

int	pciprint(void *, const char *);
int	pcisubmatch(struct device *, void *, void *);

#ifdef PCI_MACHDEP_ENUMERATE_BUS
#define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
#else
int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);
#endif
int	pci_reserve_resources(struct pci_attach_args *);
int	pci_primary_vga(struct pci_attach_args *);
106 
107 /*
108  * Important note about PCI-ISA bridges:
109  *
110  * Callbacks are used to configure these devices so that ISA/EISA bridges
111  * can attach their child busses after PCI configuration is done.
112  *
113  * This works because:
114  *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
115  *	(2) any ISA/EISA bridges must be attached to primary PCI
116  *	    busses (i.e. bus zero).
117  *
118  * That boils down to: there can only be one of these outstanding
119  * at a time, it is cleared when configuring PCI bus 0 before any
120  * subdevices have been found, and it is run after all subdevices
121  * of PCI bus 0 have been found.
122  *
123  * This is needed because there are some (legacy) PCI devices which
124  * can show up as ISA/EISA devices as well (the prime example of which
125  * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
126  * and the bridge is seen before the video board is, the board can show
127  * up as an ISA device, and that can (bogusly) complicate the PCI device's
128  * attach code, or make the PCI device not be properly attached at all.
129  *
130  * We use the generic config_defer() facility to achieve this.
131  */
132 
133 int
134 pcimatch(struct device *parent, void *match, void *aux)
135 {
136 	struct cfdata *cf = match;
137 	struct pcibus_attach_args *pba = aux;
138 
139 	if (strcmp(pba->pba_busname, cf->cf_driver->cd_name))
140 		return (0);
141 
142 	/* Check the locators */
143 	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
144 	    cf->pcibuscf_bus != pba->pba_bus)
145 		return (0);
146 
147 	/* sanity */
148 	if (pba->pba_bus < 0 || pba->pba_bus > 255)
149 		return (0);
150 
151 	/*
152 	 * XXX check other (hardware?) indicators
153 	 */
154 
155 	return (1);
156 }
157 
/*
 * Autoconf attach: record the access tags, extents and topology info
 * handed down by the parent bridge/chipset, then enumerate the bus
 * three times: once to reserve address space already programmed into
 * devices, once to find the active VGA device, and once to attach
 * child drivers.
 */
void
pciattach(struct device *parent, struct device *self, void *aux)
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;

	/* Let the MD code set up interrupt routing etc. first. */
	pci_attach_hook(parent, self, pba);

	printf("\n");

	LIST_INIT(&sc->sc_devs);

	/* Inherit bus space/DMA tags and resource extents from our parent. */
	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_pc = pba->pba_pc;
	sc->sc_flags = pba->pba_flags;
	sc->sc_ioex = pba->pba_ioex;
	sc->sc_memex = pba->pba_memex;
	sc->sc_pmemex = pba->pba_pmemex;
	sc->sc_busex = pba->pba_busex;
	sc->sc_domain = pba->pba_domain;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_bridgeih = pba->pba_bridgeih;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;

	/* Reserve our own bus number. */
	if (sc->sc_busex)
		extent_alloc_region(sc->sc_busex, sc->sc_bus, 1, EX_NOWAIT);

	/* Pass 1: reserve resources devices already decode. */
	pci_enumerate_bus(sc, pci_reserve_resources, NULL);

	/* Find the VGA device that's currently active. */
	if (pci_enumerate_bus(sc, pci_primary_vga, NULL))
		pci_vga_pci = sc;

	/* Pass 3: attach child device drivers. */
	pci_enumerate_bus(sc, NULL, NULL);
}
199 
/*
 * Autoconf detach: tear down every child device on this bus and
 * release the per-device bookkeeping.
 */
int
pcidetach(struct device *self, int flags)
{
	struct pci_softc *sc = (struct pci_softc *)self;

	return pci_detach_devices(sc, flags);
}
205 
206 int
207 pciactivate(struct device *self, int act)
208 {
209 	int rv = 0;
210 
211 	switch (act) {
212 	case DVACT_SUSPEND:
213 		rv = config_activate_children(self, act);
214 		pci_suspend((struct pci_softc *)self);
215 		break;
216 	case DVACT_RESUME:
217 		pci_resume((struct pci_softc *)self);
218 		rv = config_activate_children(self, act);
219 		break;
220 	case DVACT_POWERDOWN:
221 		rv = config_activate_children(self, act);
222 		pci_powerdown((struct pci_softc *)self);
223 		break;
224 	default:
225 		rv = config_activate_children(self, act);
226 		break;
227 	}
228 	return (rv);
229 }
230 
/*
 * DVACT_SUSPEND handler: for every plain (header type 0) device on
 * the bus, save the configuration registers that may be lost while
 * the machine sleeps: BARs, command/status, BHLC, interrupt register
 * and the MSI setup.  Bridges save their own state in their drivers.
 */
void
pci_suspend(struct pci_softc *sc)
{
	struct pci_dev *pd;
	pcireg_t bhlc, reg;
	int off, i;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		/*
		 * Only handle header type 0 here; PCI-PCI bridges and
		 * CardBus bridges need special handling, which will
		 * be done in their specific drivers.
		 */
		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			continue;

		/* Save registers that may get lost. */
		for (i = 0; i < NMAPREG; i++)
			pd->pd_map[i] = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    PCI_MAPREG_START + (i * 4));
		pd->pd_csr = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG);
		pd->pd_bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_BHLC_REG);
		pd->pd_int = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_INTERRUPT_REG);

		/* Save the MSI registers if the capability is present. */
		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
		    PCI_CAP_MSI, &off, &reg)) {
			pd->pd_msi_ma = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MA);
			/* 64-bit-capable functions keep the data word higher. */
			if (reg & PCI_MSI_MC_C64) {
				pd->pd_msi_mau32 = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MAU32);
				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MD64);
			} else {
				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MD32);
			}
			pd->pd_msi_mc = reg;
		}
	}
}
276 
277 void
278 pci_powerdown(struct pci_softc *sc)
279 {
280 	struct pci_dev *pd;
281 	pcireg_t bhlc;
282 
283 	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
284 		/*
285 		 * Only handle header type 0 here; PCI-PCI bridges and
286 		 * CardBus bridges need special handling, which will
287 		 * be done in their specific drivers.
288 		 */
289 		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
290 		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
291 			continue;
292 
293 		if (pci_dopm) {
294 			/*
295 			 * Place the device into the lowest possible
296 			 * power state.
297 			 */
298 			pd->pd_pmcsr_state = pci_get_powerstate(sc->sc_pc,
299 			    pd->pd_tag);
300 			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
301 			    pci_min_powerstate(sc->sc_pc, pd->pd_tag));
302 		}
303 	}
304 }
305 
/*
 * DVACT_RESUME handler: restore power and the configuration registers
 * saved by pci_suspend() for every plain (header type 0) device.
 * Power is restored first so the device will accept config writes;
 * the command/status register is restored last of the basic set so
 * decoding is re-enabled only after the BARs are back.
 */
void
pci_resume(struct pci_softc *sc)
{
	struct pci_dev *pd;
	pcireg_t bhlc, reg;
	int off, i;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		/*
		 * Only handle header type 0 here; PCI-PCI bridges and
		 * CardBus bridges need special handling, which will
		 * be done in their specific drivers.
		 */
		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			continue;

		/* Restore power. */
		if (pci_dopm)
			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
			    pd->pd_pmcsr_state);

		/* Restore the registers saved above. */
		for (i = 0; i < NMAPREG; i++)
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    PCI_MAPREG_START + (i * 4), pd->pd_map[i]);
		/* Keep the live status bits, restore only the command half. */
		reg = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG);
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG,
		    (reg & 0xffff0000) | (pd->pd_csr & 0x0000ffff));
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG,
		    pd->pd_bhlc);
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_INTERRUPT_REG,
		    pd->pd_int);

		/* Reprogram MSI address/data, control word last. */
		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
		    PCI_CAP_MSI, &off, &reg)) {
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MA, pd->pd_msi_ma);
			if (reg & PCI_MSI_MC_C64) {
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MAU32, pd->pd_msi_mau32);
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MD64, pd->pd_msi_md);
			} else {
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MD32, pd->pd_msi_md);
			}
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MC, pd->pd_msi_mc);
		}
	}
}
359 
360 int
361 pciprint(void *aux, const char *pnp)
362 {
363 	struct pci_attach_args *pa = aux;
364 	char devinfo[256];
365 
366 	if (pnp) {
367 		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
368 		    sizeof devinfo);
369 		printf("%s at %s", devinfo, pnp);
370 	}
371 	printf(" dev %d function %d", pa->pa_device, pa->pa_function);
372 	if (!pnp) {
373 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo,
374 		    sizeof devinfo);
375 		printf(" %s", devinfo);
376 	}
377 
378 	return (UNCONF);
379 }
380 
381 int
382 pcisubmatch(struct device *parent, void *match,  void *aux)
383 {
384 	struct cfdata *cf = match;
385 	struct pci_attach_args *pa = aux;
386 
387 	if (cf->pcicf_dev != PCI_UNK_DEV &&
388 	    cf->pcicf_dev != pa->pa_device)
389 		return (0);
390 	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
391 	    cf->pcicf_function != pa->pa_function)
392 		return (0);
393 
394 	return ((*cf->cf_attach->ca_match)(parent, match, aux));
395 }
396 
/*
 * Probe a single device/function.  If `match' is non-NULL, just build
 * the attach args, invoke the callback, and return its result (copying
 * the args to *pap on a hit).  Otherwise record the device in sc_devs,
 * size its BARs, and attach a driver via config_found_sm().
 * Returns 0 when no usable device is present or `match' rejected it.
 */
int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	struct pci_dev *pd;
	pcireg_t id, class, intr, bhlcr, cap;
	int pin, bus, device, function;
	int off, ret = 0;
	uint64_t addr;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* Unknown header types mean there is no usable device here. */
	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	/* Build the attach args handed to match callbacks and drivers. */
	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_pc = pc;
	pa.pa_ioex = sc->sc_ioex;
	pa.pa_memex = sc->sc_memex;
	pa.pa_pmemex = sc->sc_pmemex;
	pa.pa_busex = sc->sc_busex;
	pa.pa_domain = sc->sc_domain;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;
	pa.pa_bridgetag = sc->sc_bridgetag;
	pa.pa_bridgeih = sc->sc_bridgeih;

	/* This is a simplification of the NetBSD code.
	   We don't support turning off I/O or memory
	   on broken hardware. <csapuntz@stanford.edu> */
	pa.pa_flags = sc->sc_flags;
	pa.pa_flags |= PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;

	if (sc->sc_bridgetag == NULL) {
		/* Primary bus: interrupt pins are not swizzled. */
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		/* Behind a bridge: accumulate the swizzle per PCI spec. */
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin = 	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	/* HyperTransport MSI mapping: note whether MSI can be used. */
	if (pci_get_ht_capability(pc, tag, PCI_HT_CAP_MSI, &off, &cap)) {
		/*
		 * XXX Should we enable MSI mapping ourselves on
		 * systems that have it disabled?
		 */
		if (cap & PCI_HT_MSI_ENABLED) {
			if ((cap & PCI_HT_MSI_FIXED) == 0) {
				addr = pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR);
				addr |= (uint64_t)pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR_HI32) << 32;
			} else
				addr = PCI_HT_MSI_FIXED_ADDR;

			/*
			 * XXX This will fail to enable MSI on systems
			 * that don't use the canonical address.
			 */
			if (addr == PCI_HT_MSI_FIXED_ADDR)
				pa.pa_flags |= PCI_FLAGS_MSI_ENABLED;
		}
	}

	/*
	 * Give the MD code a chance to alter pci_attach_args and/or
	 * skip devices.
	 */
	if (pci_probe_device_hook(pc, &pa) != 0)
		return (0);

	if (match != NULL) {
		/* Enumeration callback mode: no attach, just report. */
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		pcireg_t address, csr;
		int i, reg, reg_start, reg_end;
		int s;

		/* Record the device for suspend/resume and VGA handling. */
		pd = malloc(sizeof *pd, M_DEVBUF, M_ZERO | M_WAITOK);
		pd->pd_tag = tag;
		LIST_INSERT_HEAD(&sc->sc_devs, pd, pd_next);

		/* The BAR window depends on the header type. */
		switch (PCI_HDRTYPE_TYPE(bhlcr)) {
		case 0:
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_END;
			break;
		case 1: /* PCI-PCI bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PPB_END;
			break;
		case 2: /* PCI-CardBus bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PCB_END;
			break;
		default:
			return (0);
		}

		/*
		 * Size the BARs by writing all-ones and reading back the
		 * mask, with decoding disabled and interrupts blocked so
		 * nothing sees the temporarily bogus addresses.
		 */
		s = splhigh();
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr &
			    ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE));

		for (reg = reg_start, i = 0; reg < reg_end; reg += 4, i++) {
			address = pci_conf_read(pc, tag, reg);
			pci_conf_write(pc, tag, reg, 0xffffffff);
			pd->pd_mask[i] = pci_conf_read(pc, tag, reg);
			pci_conf_write(pc, tag, reg, address);
		}

		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
		splx(s);

		/* VGA-class devices take part in legacy VGA arbitration. */
		if ((PCI_CLASS(class) == PCI_CLASS_DISPLAY &&
		    PCI_SUBCLASS(class) == PCI_SUBCLASS_DISPLAY_VGA) ||
		    (PCI_CLASS(class) == PCI_CLASS_PREHISTORIC &&
		    PCI_SUBCLASS(class) == PCI_SUBCLASS_PREHISTORIC_VGA))
			pd->pd_vga_decode = 1;

		pd->pd_dev = config_found_sm(&sc->sc_dev, &pa, pciprint,
		    pcisubmatch);
		if (pd->pd_dev)
			pci_dev_postattach(pd->pd_dev, &pa);
	}

	return (ret);
}
565 
566 int
567 pci_detach_devices(struct pci_softc *sc, int flags)
568 {
569 	struct pci_dev *pd, *next;
570 	int ret;
571 
572 	ret = config_detach_children(&sc->sc_dev, flags);
573 	if (ret != 0)
574 		return (ret);
575 
576 	for (pd = LIST_FIRST(&sc->sc_devs); pd != NULL; pd = next) {
577 		next = LIST_NEXT(pd, pd_next);
578 		free(pd, M_DEVBUF, 0);
579 	}
580 	LIST_INIT(&sc->sc_devs);
581 
582 	return (0);
583 }
584 
585 int
586 pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
587     int *offset, pcireg_t *value)
588 {
589 	pcireg_t reg;
590 	unsigned int ofs;
591 
592 	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
593 	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
594 		return (0);
595 
596 	/* Determine the Capability List Pointer register to start with. */
597 	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
598 	switch (PCI_HDRTYPE_TYPE(reg)) {
599 	case 0:	/* standard device header */
600 	case 1: /* PCI-PCI bridge header */
601 		ofs = PCI_CAPLISTPTR_REG;
602 		break;
603 	case 2:	/* PCI-CardBus bridge header */
604 		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
605 		break;
606 	default:
607 		return (0);
608 	}
609 
610 	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
611 	while (ofs != 0) {
612 		/*
613 		 * Some devices, like parts of the NVIDIA C51 chipset,
614 		 * have a broken Capabilities List.  So we need to do
615 		 * a sanity check here.
616 		 */
617 		if ((ofs & 3) || (ofs < 0x40))
618 			return (0);
619 		reg = pci_conf_read(pc, tag, ofs);
620 		if (PCI_CAPLIST_CAP(reg) == capid) {
621 			if (offset)
622 				*offset = ofs;
623 			if (value)
624 				*value = reg;
625 			return (1);
626 		}
627 		ofs = PCI_CAPLIST_NEXT(reg);
628 	}
629 
630 	return (0);
631 }
632 
633 int
634 pci_get_ht_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
635     int *offset, pcireg_t *value)
636 {
637 	pcireg_t reg;
638 	unsigned int ofs;
639 
640 	if (pci_get_capability(pc, tag, PCI_CAP_HT, &ofs, NULL) == 0)
641 		return (0);
642 
643 	while (ofs != 0) {
644 #ifdef DIAGNOSTIC
645 		if ((ofs & 3) || (ofs < 0x40))
646 			panic("pci_get_ht_capability");
647 #endif
648 		reg = pci_conf_read(pc, tag, ofs);
649 		if (PCI_HT_CAP(reg) == capid) {
650 			if (offset)
651 				*offset = ofs;
652 			if (value)
653 				*value = reg;
654 			return (1);
655 		}
656 		ofs = PCI_CAPLIST_NEXT(reg);
657 	}
658 
659 	return (0);
660 }
661 
662 int
663 pci_find_device(struct pci_attach_args *pa,
664     int (*match)(struct pci_attach_args *))
665 {
666 	extern struct cfdriver pci_cd;
667 	struct device *pcidev;
668 	int i;
669 
670 	for (i = 0; i < pci_cd.cd_ndevs; i++) {
671 		pcidev = pci_cd.cd_devs[i];
672 		if (pcidev != NULL &&
673 		    pci_enumerate_bus((struct pci_softc *)pcidev,
674 		    		      match, pa) != 0)
675 			return (1);
676 	}
677 	return (0);
678 }
679 
680 int
681 pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag)
682 {
683 	pcireg_t reg;
684 	int offset;
685 
686 	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
687 		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
688 		return (reg & PCI_PMCSR_STATE_MASK);
689 	}
690 	return (PCI_PMCSR_STATE_D0);
691 }
692 
693 int
694 pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int state)
695 {
696 	pcireg_t reg;
697 	int offset, ostate = state;
698 
699 	/*
700 	 * Warn the firmware that we are going to put the device
701 	 * into the given state.
702 	 */
703 	pci_set_powerstate_md(pc, tag, state, 1);
704 
705 	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
706 		if (state == PCI_PMCSR_STATE_D3) {
707 			/*
708 			 * The PCI Power Management spec says we
709 			 * should disable I/O and memory space as well
710 			 * as bus mastering before we place the device
711 			 * into D3.
712 			 */
713 			reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
714 			reg &= ~PCI_COMMAND_IO_ENABLE;
715 			reg &= ~PCI_COMMAND_MEM_ENABLE;
716 			reg &= ~PCI_COMMAND_MASTER_ENABLE;
717 			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, reg);
718 		}
719 		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
720 		if ((reg & PCI_PMCSR_STATE_MASK) != state) {
721 			ostate = reg & PCI_PMCSR_STATE_MASK;
722 
723 			pci_conf_write(pc, tag, offset + PCI_PMCSR,
724 			    (reg & ~PCI_PMCSR_STATE_MASK) | state);
725 			if (state == PCI_PMCSR_STATE_D3 ||
726 			    ostate == PCI_PMCSR_STATE_D3)
727 				delay(10 * 1000);
728 		}
729 	}
730 
731 	/*
732 	 * Warn the firmware that the device is now in the given
733 	 * state.
734 	 */
735 	pci_set_powerstate_md(pc, tag, state, 0);
736 
737 	return (ostate);
738 }
739 
740 #ifndef PCI_MACHDEP_ENUMERATE_BUS
741 /*
742  * Generic PCI bus enumeration routine.  Used unless machine-dependent
743  * code needs to provide something else.
744  */
/*
 * Scan every device/function on this bus.  If `match' is non-NULL it
 * is called for each function found and enumeration stops at the
 * first nonzero return (with the attach args copied to *pap by
 * pci_probe_device()); otherwise every function is configured and
 * attached.
 */
int
pci_enumerate_bus(struct pci_softc *sc,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;

	for (device = 0; device < sc->sc_maxndevs; device++) {
		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		/* Skip slots with bogus header types (nothing present). */
		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		/*
		 * Number of functions to probe: device quirks override
		 * the multifunction bit in the header.
		 */
		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}

	return (0);
}
792 #endif /* PCI_MACHDEP_ENUMERATE_BUS */
793 
/*
 * Enumeration callback run before any drivers attach: reserve, in the
 * parent's extent maps, every address range a device has already been
 * programmed to decode (BARs, expansion ROM, and for PCI-PCI bridges
 * the I/O, memory and prefetchable-memory windows plus the secondary
 * bus range).  Conflicting or unreservable ranges are disabled by
 * clearing the corresponding register.  Always returns 0 so that
 * enumeration continues.
 */
int
pci_reserve_resources(struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t bhlc, blr, type, bir;
	pcireg_t addr, mask;
	bus_addr_t base, limit;
	bus_size_t size;
	int reg, reg_start, reg_end, reg_rom;
	int bus, dev, func;
	int sec, sub;
	int flags;
	int s;

	pci_decompose_tag(pc, tag, &bus, &dev, &func);

	/* The BAR window and ROM register depend on the header type. */
	bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(bhlc)) {
	case 0:
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_END;
		reg_rom = PCI_ROM_REG;
		break;
	case 1: /* PCI-PCI bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PPB_END;
		reg_rom = 0;	/* 0x38 */
		break;
	case 2: /* PCI-CardBus bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PCB_END;
		reg_rom = 0;
		break;
	default:
		return (0);
	}

	/* Reserve the ranges already programmed into the BARs. */
	for (reg = reg_start; reg < reg_end; reg += 4) {
		if (!pci_mapreg_probe(pc, tag, reg, &type))
			continue;

		if (pci_mapreg_info(pc, tag, reg, type, &base, &size, &flags))
			continue;

		/* An unprogrammed BAR decodes nothing yet. */
		if (base == 0)
			continue;

		switch (type) {
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
			/* Prefer the prefetchable extent when applicable. */
			if (ISSET(flags, BUS_SPACE_MAP_PREFETCHABLE) &&
			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
			    base, size, EX_NOWAIT) == 0) {
				break;
			}
			/* On conflict, disable the BAR entirely. */
			if (pa->pa_memex && extent_alloc_region(pa->pa_memex,
			    base, size, EX_NOWAIT)) {
				printf("%d:%d:%d: mem address conflict 0x%lx/0x%lx\n",
				    bus, dev, func, base, size);
				pci_conf_write(pc, tag, reg, 0);
				if (type & PCI_MAPREG_MEM_TYPE_64BIT)
					pci_conf_write(pc, tag, reg + 4, 0);
			}
			break;
		case PCI_MAPREG_TYPE_IO:
			if (pa->pa_ioex && extent_alloc_region(pa->pa_ioex,
			    base, size, EX_NOWAIT)) {
				printf("%d:%d:%d: io address conflict 0x%lx/0x%lx\n",
				    bus, dev, func, base, size);
				pci_conf_write(pc, tag, reg, 0);
			}
			break;
		}

		/* 64-bit BARs consume two registers. */
		if (type & PCI_MAPREG_MEM_TYPE_64BIT)
			reg += 4;
	}

	if (reg_rom != 0) {
		/*
		 * Size the expansion ROM with a write-ones probe, with
		 * interrupts blocked while the register is bogus.
		 */
		s = splhigh();
		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
		splx(s);

		base = PCI_ROM_ADDR(addr);
		size = PCI_ROM_SIZE(mask);
		if (base != 0 && size != 0) {
			/* Try prefetchable first, then plain memory. */
			if (pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
			    base, size, EX_NOWAIT) &&
			    pa->pa_memex && extent_alloc_region(pa->pa_memex,
			    base, size, EX_NOWAIT)) {
				printf("%d:%d:%d: mem address conflict 0x%lx/0x%lx\n",
				    bus, dev, func, base, size);
				pci_conf_write(pc, tag, PCI_ROM_REG, 0);
			}
		}
	}

	/* Only PCI-PCI bridges have forwarding windows to reserve. */
	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return (0);

	/* Figure out the I/O address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_IOSTATUS);
	base = (blr & 0x000000f0) << 8;
	/* NB: 0x000f000 == 0x0000f000 (limit bits), just oddly spelled. */
	limit = (blr & 0x000f000) | 0x00000fff;
	blr = pci_conf_read(pc, tag, PPB_REG_IO_HI);
	base |= (blr & 0x0000ffff) << 16;
	limit |= (blr & 0xffff0000);
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_ioex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_ioex, base, size, EX_NOWAIT)) {
			printf("%d:%d:%d: bridge io address conflict 0x%lx/0x%lx\n",
			    bus, dev, func, base, size);
			/* Close the window: base above limit. */
			blr &= 0xffff0000;
			blr |= 0x000000f0;
			pci_conf_write(pc, tag, PPB_REG_IOSTATUS, blr);
		}
	}

	/* Figure out the memory mapped I/O address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_MEM);
	base = (blr & 0x0000fff0) << 16;
	limit = (blr & 0xfff00000) | 0x000fffff;
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_memex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
			    bus, dev, func, base, size);
			/* Close the window: base above limit. */
			pci_conf_write(pc, tag, PPB_REG_MEM, 0x0000fff0);
		}
	}

	/* Figure out the prefetchable memory address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_PREFMEM);
	base = (blr & 0x0000fff0) << 16;
	limit = (blr & 0xfff00000) | 0x000fffff;
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_pmemex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_pmemex, base, size, EX_NOWAIT)) {
			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
			    bus, dev, func, base, size);
			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
		}
	} else if (pa->pa_memex && base > 0 && size > 0) {
		/* No separate prefetchable extent: charge plain memory. */
		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
			    bus, dev, func, base, size);
			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
		}
	}

	/* Figure out the bus range handled by the bridge. */
	bir = pci_conf_read(pc, tag, PPB_REG_BUSINFO);
	sec = PPB_BUSINFO_SECONDARY(bir);
	sub = PPB_BUSINFO_SUBORDINATE(bir);
	if (pa->pa_busex && sub >= sec) {
		if (extent_alloc_region(pa->pa_busex, sec, sub - sec + 1,
		    EX_NOWAIT)) {
			printf("%d:%d:%d: bridge bus conflict %d-%d\n",
			    bus, dev, func, sec, sub);
		}
	}

	return (0);
}
971 
972 /*
973  * Vital Product Data (PCI 2.2)
974  */
975 
/*
 * Read `count' 32-bit words of Vital Product Data starting at byte
 * offset `offset' into `data'.  Returns 0 on success, 1 if the device
 * has no VPD capability or a read times out.
 */
int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		/* Write the address with F=0 to start a read cycle. */
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);	/* F=1: data ready */
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}
1011 
/*
 * Write `count' 32-bit words from `data' to Vital Product Data
 * starting at byte offset `offset'.  Returns 0 on success, 1 if the
 * device has no VPD capability or a write times out.
 */
int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		/* Load the data register, then the address with F=1. */
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);	/* F=0: write complete */
	}

	return (0);
}
1048 
1049 int
1050 pci_matchbyid(struct pci_attach_args *pa, const struct pci_matchid *ids,
1051     int nent)
1052 {
1053 	const struct pci_matchid *pm;
1054 	int i;
1055 
1056 	for (i = 0, pm = ids; i < nent; i++, pm++)
1057 		if (PCI_VENDOR(pa->pa_id) == pm->pm_vid &&
1058 		    PCI_PRODUCT(pa->pa_id) == pm->pm_pid)
1059 			return (1);
1060 	return (0);
1061 }
1062 
1063 void
1064 pci_disable_legacy_vga(struct device *dev)
1065 {
1066 	struct pci_softc *pci;
1067 	struct pci_dev *pd;
1068 
1069 	/* XXX Until we attach the drm drivers directly to pci. */
1070 	while (dev->dv_parent->dv_cfdata->cf_driver != &pci_cd)
1071 		dev = dev->dv_parent;
1072 
1073 	pci = (struct pci_softc *)dev->dv_parent;
1074 	LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
1075 		if (pd->pd_dev == dev) {
1076 			pd->pd_vga_decode = 0;
1077 			break;
1078 		}
1079 	}
1080 }
1081 
1082 #ifdef USER_PCICONF
1083 /*
1084  * This is the user interface to PCI configuration space.
1085  */
1086 
1087 #include <sys/pciio.h>
1088 #include <sys/fcntl.h>
1089 
1090 #ifdef DEBUG
1091 #define PCIDEBUG(x) printf x
1092 #else
1093 #define PCIDEBUG(x)
1094 #endif
1095 
1096 void pci_disable_vga(pci_chipset_tag_t, pcitag_t);
1097 void pci_enable_vga(pci_chipset_tag_t, pcitag_t);
1098 void pci_route_vga(struct pci_softc *);
1099 void pci_unroute_vga(struct pci_softc *);
1100 
1101 int pciopen(dev_t dev, int oflags, int devtype, struct proc *p);
1102 int pciclose(dev_t dev, int flag, int devtype, struct proc *p);
1103 int pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p);
1104 
1105 int
1106 pciopen(dev_t dev, int oflags, int devtype, struct proc *p)
1107 {
1108 	PCIDEBUG(("pciopen ndevs: %d\n" , pci_cd.cd_ndevs));
1109 
1110 	if (minor(dev) >= pci_ndomains) {
1111 		return ENXIO;
1112 	}
1113 
1114 #ifndef APERTURE
1115 	if ((oflags & FWRITE) && securelevel > 0) {
1116 		return EPERM;
1117 	}
1118 #else
1119 	if ((oflags & FWRITE) && securelevel > 0 && allowaperture == 0) {
1120 		return EPERM;
1121 	}
1122 #endif
1123 	return (0);
1124 }
1125 
/*
 * Close /dev/pci.  Any VGA arbitration lock is dropped
 * unconditionally so a crashed holder cannot wedge PCIOCSETVGA.
 */
int
pciclose(dev_t dev, int flag, int devtype, struct proc *p)
{
	PCIDEBUG(("pciclose\n"));

	pci_vga_proc = NULL;
	return (0);
}
1134 
/*
 * Handle ioctl(2) requests on /dev/pci: raw configuration-space
 * access (PCIOCREAD/PCIOCWRITE), saved BAR size masks
 * (PCIOCREADMASK), expansion ROM retrieval
 * (PCIOCGETROMLEN/PCIOCGETROM), and legacy VGA arbitration
 * (PCIOCGETVGA/PCIOCSETVGA).  The minor number selects the PCI
 * domain; `sel' (the start of every argument struct) selects
 * bus/device/function.
 */
int
pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct pcisel *sel = (struct pcisel *)data;
	struct pci_io *io;
	struct pci_rom *rom;
	int i, error;
	pcitag_t tag;
	struct pci_softc *pci;
	pci_chipset_tag_t pc;

	/* Validate the command and its permission requirements up front. */
	switch (cmd) {
	case PCIOCREAD:
	case PCIOCREADMASK:
		break;
	case PCIOCWRITE:
		if (!(flag & FWRITE))
			return EPERM;
		break;
	case PCIOCGETROMLEN:
	case PCIOCGETROM:
		break;
	case PCIOCGETVGA:
	case PCIOCSETVGA:
		if (pci_vga_pci == NULL)
			return EINVAL;
		break;
	default:
		return ENOTTY;
	}

	/* Look up the pci(4) instance for this domain (minor) and bus. */
	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pci = pci_cd.cd_devs[i];
		if (pci != NULL && pci->sc_domain == minor(dev) &&
		    pci->sc_bus == sel->pc_bus)
			break;
	}
	if (i >= pci_cd.cd_ndevs)
		return ENXIO;

	/* Check bounds */
	if (pci->sc_bus >= 256 ||
	    sel->pc_dev >= pci_bus_maxdevs(pci->sc_pc, pci->sc_bus) ||
	    sel->pc_func >= 8)
		return EINVAL;

	pc = pci->sc_pc;
	tag = pci_make_tag(pc, sel->pc_bus, sel->pc_dev, sel->pc_func);

	switch (cmd) {
	case PCIOCREAD:
		io = (struct pci_io *)data;
		switch (io->pi_width) {
		case 4:
			/* Configuration space bounds check */
			if (io->pi_reg < 0 ||
			    io->pi_reg >= pci_conf_size(pc, tag))
				return EINVAL;
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3)
				return EINVAL;
			io->pi_data = pci_conf_read(pc, tag, io->pi_reg);
			error = 0;
			break;
		default:
			/* Only 32-bit wide accesses are supported. */
			error = EINVAL;
			break;
		}
		break;

	case PCIOCWRITE:
		io = (struct pci_io *)data;
		switch (io->pi_width) {
		case 4:
			/* Configuration space bounds check */
			if (io->pi_reg < 0 ||
			    io->pi_reg >= pci_conf_size(pc, tag))
				return EINVAL;
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3)
				return EINVAL;
			pci_conf_write(pc, tag, io->pi_reg, io->pi_data);
			error = 0;
			break;
		default:
			/* Only 32-bit wide accesses are supported. */
			error = EINVAL;
			break;
		}
		break;

	case PCIOCREADMASK:
	{
		io = (struct pci_io *)data;
		struct pci_dev *pd;
		int dev, func, i;

		/* Only aligned, whole BAR registers have stored masks. */
		if (io->pi_width != 4 || io->pi_reg & 0x3 ||
		    io->pi_reg < PCI_MAPREG_START ||
		    io->pi_reg >= PCI_MAPREG_END)
			return (EINVAL);

		/*
		 * Return the BAR size mask recorded at attach time
		 * rather than probing the live register.
		 */
		error = ENODEV;
		LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
			pci_decompose_tag(pc, pd->pd_tag, NULL, &dev, &func);
			if (dev == sel->pc_dev && func == sel->pc_func) {
				i = (io->pi_reg - PCI_MAPREG_START) / 4;
				io->pi_data = pd->pd_mask[i];
				error = 0;
				break;
			}
		}
		break;
	}

	case PCIOCGETROMLEN:
	case PCIOCGETROM:
	{
		pcireg_t addr, mask, bhlc;
		bus_space_handle_t h;
		bus_size_t len, off;
		char buf[256];
		int s;

		rom = (struct pci_rom *)data;

		/* Expansion ROM BARs only exist on header type 0 devices. */
		bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			return (ENODEV);

		/*
		 * Size the ROM BAR with interrupts blocked, restoring
		 * the original address afterwards.
		 */
		s = splhigh();
		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
		splx(s);

		/*
		 * Section 6.2.5.2 `Expansion ROM Base Address Register',
		 *
		 * tells us that only the upper 21 bits are writable.
		 * This means that the size of a ROM must be a
		 * multiple of 2 KB.  So reading the ROM in chunks of
		 * 256 bytes should work just fine.
		 */
		/*
		 * NOTE(review): the parentheses below look misplaced
		 * (`(a == 0 || b) != 0' rather than
		 * `a == 0 || (b != 0)'), but both forms evaluate
		 * identically, so behavior is unaffected.
		 */
		if ((PCI_ROM_ADDR(addr) == 0 ||
		     PCI_ROM_SIZE(mask) % sizeof(buf)) != 0)
			return (ENODEV);

		/* If we're just after the size, skip reading the ROM. */
		if (cmd == PCIOCGETROMLEN) {
			error = 0;
			goto fail;
		}

		if (rom->pr_romlen < PCI_ROM_SIZE(mask)) {
			error = ENOMEM;
			goto fail;
		}

		error = bus_space_map(pci->sc_memt, PCI_ROM_ADDR(addr),
		    PCI_ROM_SIZE(mask), 0, &h);
		if (error)
			goto fail;

		/*
		 * Copy the ROM out in 256-byte chunks, enabling ROM
		 * decoding only for the duration of each bus_space
		 * read.
		 */
		off = 0;
		len = PCI_ROM_SIZE(mask);
		while (len > 0 && error == 0) {
			s = splhigh();
			pci_conf_write(pc, tag, PCI_ROM_REG,
			    addr | PCI_ROM_ENABLE);
			bus_space_read_region_1(pci->sc_memt, h, off,
			    buf, sizeof(buf));
			pci_conf_write(pc, tag, PCI_ROM_REG, addr);
			splx(s);

			error = copyout(buf, rom->pr_rom + off, sizeof(buf));
			off += sizeof(buf);
			len -= sizeof(buf);
		}

		bus_space_unmap(pci->sc_memt, h, PCI_ROM_SIZE(mask));

	fail:
		/* Always report the ROM size, even on failure. */
		rom->pr_romlen = PCI_ROM_SIZE(mask);
		break;
	}

	case PCIOCGETVGA:
	{
		struct pci_vga *vga = (struct pci_vga *)data;
		struct pci_dev *pd;
		int bus, dev, func;

		/* Does the selected device decode legacy VGA cycles? */
		vga->pv_decode = 0;
		LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
			pci_decompose_tag(pc, pd->pd_tag, NULL, &dev, &func);
			if (dev == sel->pc_dev && func == sel->pc_func) {
				if (pd->pd_vga_decode)
					vga->pv_decode = PCI_VGA_IO_ENABLE |
					    PCI_VGA_MEM_ENABLE;
				break;
			}
		}

		/* Report which device currently owns legacy VGA. */
		pci_decompose_tag(pci_vga_pci->sc_pc,
		    pci_vga_tag, &bus, &dev, &func);
		vga->pv_sel.pc_bus = bus;
		vga->pv_sel.pc_dev = dev;
		vga->pv_sel.pc_func = func;
		error = 0;
		break;
	}
	case PCIOCSETVGA:
	{
		struct pci_vga *vga = (struct pci_vga *)data;
		int bus, dev, func;

		switch (vga->pv_lock) {
		case PCI_VGA_UNLOCK:
		case PCI_VGA_LOCK:
		case PCI_VGA_TRYLOCK:
			break;
		default:
			return (EINVAL);
		}

		/* Release the arbitration lock and wake any waiters. */
		if (vga->pv_lock == PCI_VGA_UNLOCK) {
			if (pci_vga_proc != p)
				return (EINVAL);
			pci_vga_proc = NULL;
			wakeup(&pci_vga_proc);
			return (0);
		}

		/*
		 * Acquire the lock, sleeping for it unless the caller
		 * asked for trylock semantics.
		 */
		while (pci_vga_proc != p && pci_vga_proc != NULL) {
			if (vga->pv_lock == PCI_VGA_TRYLOCK)
				return (EBUSY);
			error = tsleep(&pci_vga_proc, PLOCK | PCATCH,
			    "vgalk", 0);
			if (error)
				return (error);
		}
		pci_vga_proc = p;

		/*
		 * If ownership moves to a different device: disable
		 * decoding on the old owner, reroute bridge VGA
		 * forwarding to the new owner's bus if it differs,
		 * then enable the new owner.
		 */
		pci_decompose_tag(pci_vga_pci->sc_pc,
		    pci_vga_tag, &bus, &dev, &func);
		if (bus != vga->pv_sel.pc_bus || dev != vga->pv_sel.pc_dev ||
		    func != vga->pv_sel.pc_func) {
			pci_disable_vga(pci_vga_pci->sc_pc, pci_vga_tag);
			if (pci != pci_vga_pci) {
				pci_unroute_vga(pci_vga_pci);
				pci_route_vga(pci);
				pci_vga_pci = pci;
			}
			pci_enable_vga(pc, tag);
			pci_vga_tag = tag;
		}

		error = 0;
		break;
	}

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
1404 
1405 void
1406 pci_disable_vga(pci_chipset_tag_t pc, pcitag_t tag)
1407 {
1408 	pcireg_t csr;
1409 
1410 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1411 	csr &= ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE);
1412 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1413 }
1414 
1415 void
1416 pci_enable_vga(pci_chipset_tag_t pc, pcitag_t tag)
1417 {
1418 	pcireg_t csr;
1419 
1420 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1421 	csr |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE;
1422 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1423 }
1424 
1425 void
1426 pci_route_vga(struct pci_softc *sc)
1427 {
1428 	pci_chipset_tag_t pc = sc->sc_pc;
1429 	pcireg_t bc;
1430 
1431 	if (sc->sc_bridgetag == NULL)
1432 		return;
1433 
1434 	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
1435 	bc |= PPB_BC_VGA_ENABLE;
1436 	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
1437 
1438 	pci_route_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
1439 }
1440 
1441 void
1442 pci_unroute_vga(struct pci_softc *sc)
1443 {
1444 	pci_chipset_tag_t pc = sc->sc_pc;
1445 	pcireg_t bc;
1446 
1447 	if (sc->sc_bridgetag == NULL)
1448 		return;
1449 
1450 	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
1451 	bc &= ~PPB_BC_VGA_ENABLE;
1452 	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
1453 
1454 	pci_unroute_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
1455 }
1456 #endif /* USER_PCICONF */
1457 
1458 int
1459 pci_primary_vga(struct pci_attach_args *pa)
1460 {
1461 	/* XXX For now, only handle the first PCI domain. */
1462 	if (pa->pa_domain != 0)
1463 		return (0);
1464 
1465 	if ((PCI_CLASS(pa->pa_class) != PCI_CLASS_DISPLAY ||
1466 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_DISPLAY_VGA) &&
1467 	    (PCI_CLASS(pa->pa_class) != PCI_CLASS_PREHISTORIC ||
1468 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_PREHISTORIC_VGA))
1469 		return (0);
1470 
1471 	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
1472 	    & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
1473 	    != (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
1474 		return (0);
1475 
1476 	pci_vga_tag = pa->pa_tag;
1477 
1478 	return (1);
1479 }
1480