xref: /openbsd-src/sys/dev/pci/pci.c (revision ac9b4aacc1da35008afea06a5d23c2f2dea9b93e)
1 /*	$OpenBSD: pci.c,v 1.94 2011/10/10 19:42:37 miod Exp $	*/
2 /*	$NetBSD: pci.c,v 1.31 1997/06/06 23:48:04 thorpej Exp $	*/
3 
4 /*
5  * Copyright (c) 1995, 1996 Christopher G. Demetriou.  All rights reserved.
6  * Copyright (c) 1994 Charles Hannum.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Charles Hannum.
19  * 4. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * PCI bus autoconfiguration.
36  */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/proc.h>
43 
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pcidevs.h>
47 #include <dev/pci/ppbreg.h>
48 
49 int pcimatch(struct device *, void *, void *);
50 void pciattach(struct device *, struct device *, void *);
51 int pcidetach(struct device *, int);
52 int pciactivate(struct device *, int);
53 void pci_suspend(struct pci_softc *);
54 void pci_resume(struct pci_softc *);
55 
/* Number of 32-bit base address registers in a type 0 header. */
#define NMAPREG			((PCI_MAPREG_END - PCI_MAPREG_START) / \
				    sizeof(pcireg_t))
/*
 * Per-function bookkeeping record kept on the softc's sc_devs list.
 * Holds the configuration registers saved by pci_suspend() (restored
 * by pci_resume()) and the BAR size masks probed at attach time,
 * which userland can query via PCIOCREADMASK.
 */
struct pci_dev {
	LIST_ENTRY(pci_dev) pd_next;
	pcitag_t pd_tag;        /* pci register tag */
	pcireg_t pd_csr;	/* saved command/status register */
	pcireg_t pd_bhlc;	/* saved BIST/header/latency/cacheline */
	pcireg_t pd_int;	/* saved interrupt register */
	pcireg_t pd_map[NMAPREG];	/* saved base address registers */
	pcireg_t pd_mask[NMAPREG];	/* BAR size masks probed at attach */
	pcireg_t pd_msi_mc;	/* saved MSI message control */
	pcireg_t pd_msi_ma;	/* saved MSI message address */
	pcireg_t pd_msi_mau32;	/* saved MSI message address, high 32 bits */
	pcireg_t pd_msi_md;	/* saved MSI message data */
	int pd_pmcsr_state;	/* power state recorded before entering D3 */
};
72 
#ifdef APERTURE
extern int allowaperture;
#endif

/* Autoconfiguration glue. */
struct cfattach pci_ca = {
	sizeof(struct pci_softc), pcimatch, pciattach, pcidetach, pciactivate
};

struct cfdriver pci_cd = {
	NULL, "pci", DV_DULL
};

/* Number of PCI domains; one minor device each (see pciopen()). */
int	pci_ndomains;

/* VGA arbitration state; pci_vga_pci is set for the primary VGA bus. */
struct proc *pci_vga_proc;
struct pci_softc *pci_vga_pci;
pcitag_t pci_vga_tag;
int	pci_vga_count;

/* Non-zero: power devices down to D3 in pci_suspend(). */
int	pci_dopm;

int	pciprint(void *, const char *);
int	pcisubmatch(struct device *, void *, void *);

#ifdef PCI_MACHDEP_ENUMERATE_BUS
#define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
#else
int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);
#endif
/* Enumeration-pass callbacks used by pciattach(). */
int	pci_reserve_resources(struct pci_attach_args *);
int	pci_count_vga(struct pci_attach_args *);
int	pci_primary_vga(struct pci_attach_args *);
106 
107 /*
108  * Important note about PCI-ISA bridges:
109  *
110  * Callbacks are used to configure these devices so that ISA/EISA bridges
111  * can attach their child busses after PCI configuration is done.
112  *
113  * This works because:
114  *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
115  *	(2) any ISA/EISA bridges must be attached to primary PCI
116  *	    busses (i.e. bus zero).
117  *
118  * That boils down to: there can only be one of these outstanding
119  * at a time, it is cleared when configuring PCI bus 0 before any
120  * subdevices have been found, and it is run after all subdevices
121  * of PCI bus 0 have been found.
122  *
123  * This is needed because there are some (legacy) PCI devices which
124  * can show up as ISA/EISA devices as well (the prime example of which
125  * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
126  * and the bridge is seen before the video board is, the board can show
127  * up as an ISA device, and that can (bogusly) complicate the PCI device's
128  * attach code, or make the PCI device not be properly attached at all.
129  *
130  * We use the generic config_defer() facility to achieve this.
131  */
132 
133 int
134 pcimatch(struct device *parent, void *match, void *aux)
135 {
136 	struct cfdata *cf = match;
137 	struct pcibus_attach_args *pba = aux;
138 
139 	if (strcmp(pba->pba_busname, cf->cf_driver->cd_name))
140 		return (0);
141 
142 	/* Check the locators */
143 	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
144 	    cf->pcibuscf_bus != pba->pba_bus)
145 		return (0);
146 
147 	/* sanity */
148 	if (pba->pba_bus < 0 || pba->pba_bus > 255)
149 		return (0);
150 
151 	/*
152 	 * XXX check other (hardware?) indicators
153 	 */
154 
155 	return (1);
156 }
157 
void
pciattach(struct device *parent, struct device *self, void *aux)
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;

	/* Let machine-dependent code fix up the attach args first. */
	pci_attach_hook(parent, self, pba);

	printf("\n");

	LIST_INIT(&sc->sc_devs);

	/* Record the bus properties handed down by our parent. */
	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_pc = pba->pba_pc;
	sc->sc_flags = pba->pba_flags;
	sc->sc_ioex = pba->pba_ioex;
	sc->sc_memex = pba->pba_memex;
	sc->sc_pmemex = pba->pba_pmemex;
	sc->sc_domain = pba->pba_domain;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_bridgeih = pba->pba_bridgeih;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	/*
	 * Four enumeration passes: reserve the address space already
	 * programmed into the devices' BARs, count VGA devices, find
	 * the primary VGA device, and finally attach child drivers.
	 */
	pci_enumerate_bus(sc, pci_reserve_resources, NULL);
	pci_enumerate_bus(sc, pci_count_vga, NULL);
	if (pci_enumerate_bus(sc, pci_primary_vga, NULL))
		pci_vga_pci = sc;
	pci_enumerate_bus(sc, NULL, NULL);
}
191 
/*
 * Autoconf detach: tear down all children and free the per-device
 * records kept on the softc.
 */
int
pcidetach(struct device *self, int flags)
{
	struct pci_softc *sc = (struct pci_softc *)self;

	return (pci_detach_devices(sc, flags));
}
197 
198 int
199 pciactivate(struct device *self, int act)
200 {
201 	int rv = 0;
202 
203 	switch (act) {
204 	case DVACT_QUIESCE:
205 		rv = config_activate_children(self, act);
206 		break;
207 	case DVACT_SUSPEND:
208 		rv = config_activate_children(self, act);
209 		pci_suspend((struct pci_softc *)self);
210 		break;
211 	case DVACT_RESUME:
212 		pci_resume((struct pci_softc *)self);
213 		rv = config_activate_children(self, act);
214 		break;
215 	}
216 	return (rv);
217 }
218 
/*
 * Save the configuration space state of every type 0 function on
 * this bus so pci_resume() can re-establish it; if pci_dopm is set,
 * also power the devices down to D3.
 */
void
pci_suspend(struct pci_softc *sc)
{
	struct pci_dev *pd;
	pcireg_t bhlc, reg;
	int off, i;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		/*
		 * Only handle header type 0 here; PCI-PCI bridges and
		 * CardBus bridges need special handling, which will
		 * be done in their specific drivers.
		 */
		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			continue;

		/* Save registers that may get lost. */
		for (i = 0; i < NMAPREG; i++)
			pd->pd_map[i] = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    PCI_MAPREG_START + (i * 4));
		pd->pd_csr = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG);
		pd->pd_bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_BHLC_REG);
		pd->pd_int = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_INTERRUPT_REG);

		/* Save the MSI control/address/data registers, if present. */
		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
		    PCI_CAP_MSI, &off, &reg)) {
			pd->pd_msi_ma = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MA);
			if (reg & PCI_MSI_MC_C64) {
				/* 64-bit layout: data follows the high
				 * half of the address. */
				pd->pd_msi_mau32 = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MAU32);
				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MD64);
			} else {
				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MD32);
			}
			pd->pd_msi_mc = reg;
		}

		if (pci_dopm) {
			/* Place the device into D3. */
			pd->pd_pmcsr_state = pci_get_powerstate(sc->sc_pc,
			    pd->pd_tag);
			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
			    PCI_PMCSR_STATE_D3);
		}
	}
}
272 
/*
 * Counterpart to pci_suspend(): restore power (if pci_dopm is set)
 * and then the saved configuration registers of every type 0
 * function on this bus.
 */
void
pci_resume(struct pci_softc *sc)
{
	struct pci_dev *pd;
	pcireg_t bhlc, reg;
	int off, i;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		/*
		 * Only handle header type 0 here; PCI-PCI bridges and
		 * CardBus bridges need special handling, which will
		 * be done in their specific drivers.
		 */
		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			continue;

		if (pci_dopm) {
			/* Restore power. */
			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
			    pd->pd_pmcsr_state);
		}

		/* Restore the registers saved above. */
		for (i = 0; i < NMAPREG; i++)
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    PCI_MAPREG_START + (i * 4), pd->pd_map[i]);
		/* Keep the live status half, restore only the command half. */
		reg = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG);
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG,
		    (reg & 0xffff0000) | (pd->pd_csr & 0x0000ffff));
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG,
		    pd->pd_bhlc);
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_INTERRUPT_REG,
		    pd->pd_int);

		/* Restore MSI state; message control is written last. */
		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
		    PCI_CAP_MSI, &off, &reg)) {
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MA, pd->pd_msi_ma);
			if (reg & PCI_MSI_MC_C64) {
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MAU32, pd->pd_msi_mau32);
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MD64, pd->pd_msi_md);
			} else {
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MD32, pd->pd_msi_md);
			}
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MC, pd->pd_msi_mc);
		}
	}
}
327 
328 int
329 pciprint(void *aux, const char *pnp)
330 {
331 	struct pci_attach_args *pa = aux;
332 	char devinfo[256];
333 
334 	if (pnp) {
335 		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
336 		    sizeof devinfo);
337 		printf("%s at %s", devinfo, pnp);
338 	}
339 	printf(" dev %d function %d", pa->pa_device, pa->pa_function);
340 	if (!pnp) {
341 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo,
342 		    sizeof devinfo);
343 		printf(" %s", devinfo);
344 	}
345 
346 	return (UNCONF);
347 }
348 
349 int
350 pcisubmatch(struct device *parent, void *match,  void *aux)
351 {
352 	struct cfdata *cf = match;
353 	struct pci_attach_args *pa = aux;
354 
355 	if (cf->pcicf_dev != PCI_UNK_DEV &&
356 	    cf->pcicf_dev != pa->pa_device)
357 		return (0);
358 	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
359 	    cf->pcicf_function != pa->pa_function)
360 		return (0);
361 
362 	return ((*cf->cf_attach->ca_match)(parent, match, aux));
363 }
364 
/*
 * Probe a single bus/device/function.  When `match' is non-NULL this
 * only evaluates the callback (optionally copying the attach args to
 * *pap) and returns its result; when `match' is NULL the function is
 * recorded on sc_devs (probing its BAR size masks along the way) and
 * a child driver is attached via config_found_sm().  Returns 0 for
 * an absent or skipped function.
 */
int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	struct pci_dev *pd;
	struct device *dev;
	pcireg_t id, class, intr, bhlcr, cap;
	int pin, bus, device, function;
	int off, ret = 0;
	uint64_t addr;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* Ignore functions with an unknown header type. */
	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	/* Assemble the attach arguments for the child driver. */
	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_pc = pc;
	pa.pa_ioex = sc->sc_ioex;
	pa.pa_memex = sc->sc_memex;
	pa.pa_pmemex = sc->sc_pmemex;
	pa.pa_domain = sc->sc_domain;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;
	pa.pa_bridgetag = sc->sc_bridgetag;
	pa.pa_bridgeih = sc->sc_bridgeih;

	/* This is a simplification of the NetBSD code.
	   We don't support turning off I/O or memory
	   on broken hardware. <csapuntz@stanford.edu> */
	pa.pa_flags = sc->sc_flags;
	pa.pa_flags |= PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;

	/* Children of a host bridge start a fresh interrupt swizzle. */
	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin = 	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	/* Check for a HyperTransport MSI mapping capability. */
	if (pci_get_ht_capability(pc, tag, PCI_HT_CAP_MSI, &off, &cap)) {
		/*
		 * XXX Should we enable MSI mapping ourselves on
		 * systems that have it disabled?
		 */
		if (cap & PCI_HT_MSI_ENABLED) {
			if ((cap & PCI_HT_MSI_FIXED) == 0) {
				addr = pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR);
				addr |= (uint64_t)pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR_HI32) << 32;
			} else
				addr = PCI_HT_MSI_FIXED_ADDR;

			/*
			 * XXX This will fail to enable MSI on systems
			 * that don't use the canonical address.
			 */
			if (addr == PCI_HT_MSI_FIXED_ADDR)
				pa.pa_flags |= PCI_FLAGS_MSI_ENABLED;
		}
	}

	/*
	 * Give the MD code a chance to alter pci_attach_args and/or
	 * skip devices.
	 */
	if (pci_probe_device_hook(pc, &pa) != 0)
		return (0);

	if (match != NULL) {
		/* Enumeration pass: just run the callback. */
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		pcireg_t address, csr;
		int i, reg, reg_start, reg_end;
		int s;

		pd = malloc(sizeof *pd, M_DEVBUF, M_ZERO | M_WAITOK);
		pd->pd_tag = tag;
		LIST_INSERT_HEAD(&sc->sc_devs, pd, pd_next);

		/* The number of BARs depends on the header type. */
		switch (PCI_HDRTYPE_TYPE(bhlcr)) {
		case 0:
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_END;
			break;
		case 1: /* PCI-PCI bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PPB_END;
			break;
		case 2: /* PCI-CardBus bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PCB_END;
			break;
		default:
			/* Not reached: header types > 2 were rejected above. */
			return (0);
		}

		/*
		 * Probe each BAR's size mask by writing all-ones and
		 * reading back, with I/O and memory decoding disabled
		 * and interrupts blocked so nothing can touch the
		 * device while its BARs hold invalid values.
		 */
		s = splhigh();
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr &
			    ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE));

		for (reg = reg_start, i = 0; reg < reg_end; reg += 4, i++) {
			address = pci_conf_read(pc, tag, reg);
			pci_conf_write(pc, tag, reg, 0xffffffff);
			pd->pd_mask[i] = pci_conf_read(pc, tag, reg);
			pci_conf_write(pc, tag, reg, address);
		}

		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
		splx(s);

		if ((dev = config_found_sm(&sc->sc_dev, &pa, pciprint,
		    pcisubmatch)))
			pci_dev_postattach(dev, &pa);
	}

	return (ret);
}
526 
527 int
528 pci_detach_devices(struct pci_softc *sc, int flags)
529 {
530 	struct pci_dev *pd, *next;
531 	int ret;
532 
533 	ret = config_detach_children(&sc->sc_dev, flags);
534 	if (ret != 0)
535 		return (ret);
536 
537 	for (pd = LIST_FIRST(&sc->sc_devs);
538 	     pd != LIST_END(&sc->sc_devs); pd = next) {
539 		next = LIST_NEXT(pd, pd_next);
540 		free(pd, M_DEVBUF);
541 	}
542 	LIST_INIT(&sc->sc_devs);
543 
544 	return (0);
545 }
546 
/*
 * Look up capability `capid' in the device's capability list.
 * Returns 1 when found, storing the capability's config space offset
 * in *offset and its header register in *value (either pointer may
 * be NULL).  Returns 0 when the device has no capability list, the
 * list is corrupt, or the capability is absent.
 */
int
pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	/* Bail out early if the device advertises no capability list. */
	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
	case 1: /* PCI-PCI bridge header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
		/*
		 * Some devices, like parts of the NVIDIA C51 chipset,
		 * have a broken Capabilities List.  So we need to do
		 * a sanity check here.
		 */
		if ((ofs & 3) || (ofs < 0x40))
			return (0);
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}
594 
595 int
596 pci_get_ht_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
597     int *offset, pcireg_t *value)
598 {
599 	pcireg_t reg;
600 	unsigned int ofs;
601 
602 	if (pci_get_capability(pc, tag, PCI_CAP_HT, &ofs, NULL) == 0)
603 		return (0);
604 
605 	while (ofs != 0) {
606 #ifdef DIAGNOSTIC
607 		if ((ofs & 3) || (ofs < 0x40))
608 			panic("pci_get_ht_capability");
609 #endif
610 		reg = pci_conf_read(pc, tag, ofs);
611 		if (PCI_HT_CAP(reg) == capid) {
612 			if (offset)
613 				*offset = ofs;
614 			if (value)
615 				*value = reg;
616 			return (1);
617 		}
618 		ofs = PCI_CAPLIST_NEXT(reg);
619 	}
620 
621 	return (0);
622 }
623 
624 int
625 pci_find_device(struct pci_attach_args *pa,
626     int (*match)(struct pci_attach_args *))
627 {
628 	extern struct cfdriver pci_cd;
629 	struct device *pcidev;
630 	int i;
631 
632 	for (i = 0; i < pci_cd.cd_ndevs; i++) {
633 		pcidev = pci_cd.cd_devs[i];
634 		if (pcidev != NULL &&
635 		    pci_enumerate_bus((struct pci_softc *)pcidev,
636 		    		      match, pa) != 0)
637 			return (1);
638 	}
639 	return (0);
640 }
641 
642 int
643 pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag)
644 {
645 	pcireg_t reg;
646 	int offset;
647 
648 	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
649 		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
650 		return (reg & PCI_PMCSR_STATE_MASK);
651 	}
652 	return (PCI_PMCSR_STATE_D0);
653 }
654 
/*
 * Set the device's PCI power management state.  Returns the previous
 * state when a change was made, or `state' when the device was
 * already there or lacks the power management capability.
 */
int
pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int state)
{
	pcireg_t reg;
	int offset;

	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
		if (state == PCI_PMCSR_STATE_D3) {
			/*
			 * The PCI Power Management spec says we
			 * should disable I/O and memory space as well
			 * as bus mastering before we place the device
			 * into D3.
			 */
			reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
			reg &= ~PCI_COMMAND_IO_ENABLE;
			reg &= ~PCI_COMMAND_MEM_ENABLE;
			reg &= ~PCI_COMMAND_MASTER_ENABLE;
			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, reg);
		}
		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
		if ((reg & PCI_PMCSR_STATE_MASK) != state) {
			int ostate = reg & PCI_PMCSR_STATE_MASK;

			pci_conf_write(pc, tag, offset + PCI_PMCSR,
			    (reg & ~PCI_PMCSR_STATE_MASK) | state);
			/*
			 * Wait 10ms when entering or leaving D3 --
			 * presumably the settle time required by the
			 * PM spec for D3 transitions; confirm.
			 */
			if (state == PCI_PMCSR_STATE_D3 ||
			    ostate == PCI_PMCSR_STATE_D3)
				delay(10 * 1000);
			return (ostate);
		}
	}
	return (state);
}
689 
690 #ifndef PCI_MACHDEP_ENUMERATE_BUS
691 /*
692  * Generic PCI bus enumeration routine.  Used unless machine-dependent
693  * code needs to provide something else.
694  */
/*
 * Walk every device/function on this bus, calling pci_probe_device()
 * on each.  With a non-NULL `match' callback, stop and return the
 * first non-zero result; otherwise probe and attach everything and
 * return 0.
 */
int
pci_enumerate_bus(struct pci_softc *sc,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;

	for (device = 0; device < sc->sc_maxndevs; device++) {
		/* Look at function 0 first to see if the slot is occupied. */
		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		/* Ignore unknown header types. */
		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		/*
		 * Decide how many functions to probe: a per-device
		 * quirk overrides the multifunction header bit.
		 */
		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			/* In match mode, the first hit wins. */
			if (match != NULL && ret != 0)
				return (ret);
		}
	}

	return (0);
}
742 #endif /* PCI_MACHDEP_ENUMERATE_BUS */
743 
/*
 * Enumeration callback run by pciattach() before children attach:
 * reserve in the bus extents the address space already programmed
 * into each device's BARs and, for PCI-PCI bridges, the bridge's
 * forwarding windows.  A register that conflicts with an existing
 * reservation is cleared/disabled.  Always returns 0 so enumeration
 * continues.
 */
int
pci_reserve_resources(struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t bhlc, blr, type;
	bus_addr_t base, limit;
	bus_size_t size;
	int reg, reg_start, reg_end;
	int flags;

	/* The number of BARs depends on the header type. */
	bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(bhlc)) {
	case 0:
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_END;
		break;
	case 1: /* PCI-PCI bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PPB_END;
		break;
	case 2: /* PCI-CardBus bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PCB_END;
		break;
	default:
		return (0);
	}

	for (reg = reg_start; reg < reg_end; reg += 4) {
		if (!pci_mapreg_probe(pc, tag, reg, &type))
			continue;

		if (pci_mapreg_info(pc, tag, reg, type, &base, &size, &flags))
			continue;

		/* An unprogrammed BAR needs no reservation. */
		if (base == 0)
			continue;

		switch (type) {
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
#ifdef BUS_SPACE_MAP_PREFETCHABLE
			/* Try the prefetchable memory extent first. */
			if (ISSET(flags, BUS_SPACE_MAP_PREFETCHABLE) &&
			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
			    base, size, EX_NOWAIT) == 0) {
				break;
			}
#endif
			/* On conflict, clear the BAR (both halves if 64-bit). */
			if (pa->pa_memex && extent_alloc_region(pa->pa_memex,
			    base, size, EX_NOWAIT)) {
				printf("mem address conflict 0x%x/0x%x\n",
				    base, size);
				pci_conf_write(pc, tag, reg, 0);
				if (type & PCI_MAPREG_MEM_TYPE_64BIT)
					pci_conf_write(pc, tag, reg + 4, 0);
			}
			break;
		case PCI_MAPREG_TYPE_IO:
			if (pa->pa_ioex && extent_alloc_region(pa->pa_ioex,
			    base, size, EX_NOWAIT)) {
				printf("io address conflict 0x%x/0x%x\n",
				    base, size);
				pci_conf_write(pc, tag, reg, 0);
			}
			break;
		}

		/* A 64-bit BAR occupies two consecutive registers. */
		if (type & PCI_MAPREG_MEM_TYPE_64BIT)
			reg += 4;
	}

	/* Only PCI-PCI bridges have forwarding windows to reserve. */
	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return (0);

	/* Figure out the I/O address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_IOSTATUS);
	base = (blr & 0x000000f0) << 8;
	/* NB: 0x000f000 == 0x0000f000 (limit bits 15:12); odd spelling only. */
	limit = (blr & 0x000f000) | 0x00000fff;
	blr = pci_conf_read(pc, tag, PPB_REG_IO_HI);
	base |= (blr & 0x0000ffff) << 16;
	limit |= (blr & 0xffff0000);
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_ioex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_ioex, base, size, EX_NOWAIT)) {
			printf("bridge io address conflict 0x%x/0x%x\n",
			       base, size);
			/*
			 * Rewrite base=0xf0, limit=0 -- presumably
			 * closes the window (base above limit); confirm.
			 */
			blr &= 0xffff0000;
			blr |= 0x000000f0;
			pci_conf_write(pc, tag, PPB_REG_IOSTATUS, blr);
		}
	}

	/* Figure out the memory mapped I/O address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_MEM);
	base = (blr & 0x0000fff0) << 16;
	limit = (blr & 0xfff00000) | 0x000fffff;
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_memex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
			printf("bridge mem address conflict 0x%x/0x%x\n",
			       base, size);
			pci_conf_write(pc, tag, PPB_REG_MEM, 0x0000fff0);
		}
	}

	/* Figure out the prefetchable memory address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_PREFMEM);
	base = (blr & 0x0000fff0) << 16;
	limit = (blr & 0xfff00000) | 0x000fffff;
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_pmemex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_pmemex, base, size, EX_NOWAIT)) {
			printf("bridge mem address conflict 0x%x/0x%x\n",
			       base, size);
			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
		}
	} else if (pa->pa_memex && base > 0 && size > 0) {
		/* No prefetchable extent: reserve from plain memory. */
		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
			printf("bridge mem address conflict 0x%x/0x%x\n",
			       base, size);
			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
		}
	}

	return (0);
}
880 
881 /*
882  * Vital Product Data (PCI 2.2)
883  */
884 
/*
 * Read `count' 32-bit words of Vital Product Data starting at VPD
 * address `offset' into `data'.  Returns 0 on success, 1 if the
 * device lacks the VPD capability or a read times out.
 */
int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		/*
		 * Write the address with the operation flag cleared;
		 * the flag coming back set signals that the data
		 * register holds the requested word.
		 */
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}
920 
/*
 * Write `count' 32-bit words from `data' to Vital Product Data
 * starting at VPD address `offset'.  Returns 0 on success, 1 if the
 * device lacks the VPD capability or a write times out.
 */
int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		/* Load the data register first, then start the write. */
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		/* Setting the operation flag starts a write cycle. */
		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 * The flag reading back clear means the write is done.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return (0);
}
957 
958 int
959 pci_matchbyid(struct pci_attach_args *pa, const struct pci_matchid *ids,
960     int nent)
961 {
962 	const struct pci_matchid *pm;
963 	int i;
964 
965 	for (i = 0, pm = ids; i < nent; i++, pm++)
966 		if (PCI_VENDOR(pa->pa_id) == pm->pm_vid &&
967 		    PCI_PRODUCT(pa->pa_id) == pm->pm_pid)
968 			return (1);
969 	return (0);
970 }
971 
972 #ifdef USER_PCICONF
973 /*
974  * This is the user interface to PCI configuration space.
975  */
976 
977 #include <sys/pciio.h>
978 #include <sys/fcntl.h>
979 
980 #ifdef DEBUG
981 #define PCIDEBUG(x) printf x
982 #else
983 #define PCIDEBUG(x)
984 #endif
985 
986 void pci_disable_vga(pci_chipset_tag_t, pcitag_t);
987 void pci_enable_vga(pci_chipset_tag_t, pcitag_t);
988 void pci_route_vga(struct pci_softc *);
989 void pci_unroute_vga(struct pci_softc *);
990 
991 int pciopen(dev_t dev, int oflags, int devtype, struct proc *p);
992 int pciclose(dev_t dev, int flag, int devtype, struct proc *p);
993 int pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p);
994 
995 int
996 pciopen(dev_t dev, int oflags, int devtype, struct proc *p)
997 {
998 	PCIDEBUG(("pciopen ndevs: %d\n" , pci_cd.cd_ndevs));
999 
1000 	if (minor(dev) >= pci_ndomains) {
1001 		return ENXIO;
1002 	}
1003 
1004 #ifndef APERTURE
1005 	if ((oflags & FWRITE) && securelevel > 0) {
1006 		return EPERM;
1007 	}
1008 #else
1009 	if ((oflags & FWRITE) && securelevel > 0 && allowaperture == 0) {
1010 		return EPERM;
1011 	}
1012 #endif
1013 	return (0);
1014 }
1015 
/*
 * Close the user PCI configuration device.
 */
int
pciclose(dev_t dev, int flag, int devtype, struct proc *p)
{
	PCIDEBUG(("pciclose\n"));

	/*
	 * Clear the recorded VGA owner process -- presumably the
	 * holder set via PCIOCSETVGA (handled outside this chunk);
	 * confirm against the ioctl implementation.
	 */
	pci_vga_proc = NULL;
	return (0);
}
1024 
1025 int
1026 pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
1027 {
1028 	struct pcisel *sel = (struct pcisel *)data;
1029 	struct pci_io *io;
1030 	struct pci_rom *rom;
1031 	int i, error;
1032 	pcitag_t tag;
1033 	struct pci_softc *pci;
1034 	pci_chipset_tag_t pc;
1035 
1036 	switch (cmd) {
1037 	case PCIOCREAD:
1038 	case PCIOCREADMASK:
1039 		break;
1040 	case PCIOCWRITE:
1041 		if (!(flag & FWRITE))
1042 			return EPERM;
1043 		break;
1044 	case PCIOCGETROMLEN:
1045 	case PCIOCGETROM:
1046 		break;
1047 	case PCIOCGETVGA:
1048 	case PCIOCSETVGA:
1049 		if (pci_vga_pci == NULL)
1050 			return EINVAL;
1051 		break;
1052 	default:
1053 		return ENOTTY;
1054 	}
1055 
1056 	for (i = 0; i < pci_cd.cd_ndevs; i++) {
1057 		pci = pci_cd.cd_devs[i];
1058 		if (pci != NULL && pci->sc_domain == minor(dev) &&
1059 		    pci->sc_bus == sel->pc_bus)
1060 			break;
1061 	}
1062 	if (i >= pci_cd.cd_ndevs)
1063 		return ENXIO;
1064 
1065 	/* Check bounds */
1066 	if (pci->sc_bus >= 256 ||
1067 	    sel->pc_dev >= pci_bus_maxdevs(pci->sc_pc, pci->sc_bus) ||
1068 	    sel->pc_func >= 8)
1069 		return EINVAL;
1070 
1071 	pc = pci->sc_pc;
1072 	tag = pci_make_tag(pc, sel->pc_bus, sel->pc_dev, sel->pc_func);
1073 
1074 	switch (cmd) {
1075 	case PCIOCREAD:
1076 		io = (struct pci_io *)data;
1077 		switch (io->pi_width) {
1078 		case 4:
1079 			/* Configuration space bounds check */
1080 			if (io->pi_reg < 0 ||
1081 			    io->pi_reg >= pci_conf_size(pc, tag))
1082 				return EINVAL;
1083 			/* Make sure the register is properly aligned */
1084 			if (io->pi_reg & 0x3)
1085 				return EINVAL;
1086 			io->pi_data = pci_conf_read(pc, tag, io->pi_reg);
1087 			error = 0;
1088 			break;
1089 		default:
1090 			error = EINVAL;
1091 			break;
1092 		}
1093 		break;
1094 
1095 	case PCIOCWRITE:
1096 		io = (struct pci_io *)data;
1097 		switch (io->pi_width) {
1098 		case 4:
1099 			/* Configuration space bounds check */
1100 			if (io->pi_reg < 0 ||
1101 			    io->pi_reg >= pci_conf_size(pc, tag))
1102 				return EINVAL;
1103 			/* Make sure the register is properly aligned */
1104 			if (io->pi_reg & 0x3)
1105 				return EINVAL;
1106 			pci_conf_write(pc, tag, io->pi_reg, io->pi_data);
1107 			error = 0;
1108 			break;
1109 		default:
1110 			error = EINVAL;
1111 			break;
1112 		}
1113 		break;
1114 
1115 	case PCIOCREADMASK:
1116 	{
1117 		io = (struct pci_io *)data;
1118 		struct pci_dev *pd;
1119 		int dev, func, i;
1120 
1121 		if (io->pi_width != 4 || io->pi_reg & 0x3 ||
1122 		    io->pi_reg < PCI_MAPREG_START ||
1123 		    io->pi_reg >= PCI_MAPREG_END)
1124 			return (EINVAL);
1125 
1126 		error = ENODEV;
1127 		LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
1128 			pci_decompose_tag(pc, pd->pd_tag, NULL, &dev, &func);
1129 			if (dev == sel->pc_dev && func == sel->pc_func) {
1130 				i = (io->pi_reg - PCI_MAPREG_START) / 4;
1131 				io->pi_data = pd->pd_mask[i];
1132 				error = 0;
1133 				break;
1134 			}
1135 		}
1136 		break;
1137 	}
1138 
1139 	case PCIOCGETROMLEN:
1140 	case PCIOCGETROM:
1141 	{
1142 		pcireg_t addr, mask, bhlc;
1143 		bus_space_handle_t h;
1144 		bus_size_t len, off;
1145 		char buf[256];
1146 		int s;
1147 
1148 		rom = (struct pci_rom *)data;
1149 
1150 		bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
1151 		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
1152 			return (ENODEV);
1153 
1154 		s = splhigh();
1155 		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
1156 		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
1157 		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
1158 		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
1159 		splx(s);
1160 
1161 		/*
1162 		 * Section 6.2.5.2 `Expansion ROM Base Addres Register',
1163 		 *
1164 		 * tells us that only the upper 21 bits are writable.
1165 		 * This means that the size of a ROM must be a
1166 		 * multiple of 2 KB.  So reading the ROM in chunks of
1167 		 * 256 bytes should work just fine.
1168 		 */
1169 		if ((PCI_ROM_ADDR(addr) == 0 ||
1170 		     PCI_ROM_SIZE(mask) % sizeof(buf)) != 0)
1171 			return (ENODEV);
1172 
1173 		/* If we're just after the size, skip reading the ROM. */
1174 		if (cmd == PCIOCGETROMLEN) {
1175 			error = 0;
1176 			goto fail;
1177 		}
1178 
1179 		if (rom->pr_romlen < PCI_ROM_SIZE(mask)) {
1180 			error = ENOMEM;
1181 			goto fail;
1182 		}
1183 
1184 		error = bus_space_map(pci->sc_memt, PCI_ROM_ADDR(addr),
1185 		    PCI_ROM_SIZE(mask), 0, &h);
1186 		if (error)
1187 			goto fail;
1188 
1189 		off = 0;
1190 		len = PCI_ROM_SIZE(mask);
1191 		while (len > 0 && error == 0) {
1192 			s = splhigh();
1193 			pci_conf_write(pc, tag, PCI_ROM_REG,
1194 			    addr | PCI_ROM_ENABLE);
1195 			bus_space_read_region_1(pci->sc_memt, h, off,
1196 			    buf, sizeof(buf));
1197 			pci_conf_write(pc, tag, PCI_ROM_REG, addr);
1198 			splx(s);
1199 
1200 			error = copyout(buf, rom->pr_rom + off, sizeof(buf));
1201 			off += sizeof(buf);
1202 			len -= sizeof(buf);
1203 		}
1204 
1205 		bus_space_unmap(pci->sc_memt, h, PCI_ROM_SIZE(mask));
1206 
1207 	fail:
1208 		rom->pr_romlen = PCI_ROM_SIZE(mask);
1209 		break;
1210 	}
1211 
1212 	case PCIOCGETVGA:
1213 	{
1214 		struct pci_vga *vga = (struct pci_vga *)data;
1215 		int bus, device, function;
1216 
1217 		pci_decompose_tag(pci_vga_pci->sc_pc, pci_vga_tag,
1218 		    &bus, &device, &function);
1219 		vga->pv_sel.pc_bus = bus;
1220 		vga->pv_sel.pc_dev = device;
1221 		vga->pv_sel.pc_func = function;
1222 		error = 0;
1223 		break;
1224 	}
1225 	case PCIOCSETVGA:
1226 	{
1227 		struct pci_vga *vga = (struct pci_vga *)data;
1228 		int bus, device, function;
1229 
1230 		switch (vga->pv_lock) {
1231 		case PCI_VGA_UNLOCK:
1232 		case PCI_VGA_LOCK:
1233 		case PCI_VGA_TRYLOCK:
1234 			break;
1235 		default:
1236 			return (EINVAL);
1237 		}
1238 
1239 		if (vga->pv_lock == PCI_VGA_UNLOCK) {
1240 			if (pci_vga_proc != p)
1241 				return (EINVAL);
1242 			pci_vga_proc = NULL;
1243 			wakeup(&pci_vga_proc);
1244 			return (0);
1245 		}
1246 
1247 		while (pci_vga_proc != p && pci_vga_proc != NULL) {
1248 			if (vga->pv_lock == PCI_VGA_TRYLOCK)
1249 				return (EBUSY);
1250 			error = tsleep(&pci_vga_proc, PLOCK | PCATCH,
1251 			    "vgalk", 0);
1252 			if (error)
1253 				return (error);
1254 		}
1255 		pci_vga_proc = p;
1256 
1257 		pci_decompose_tag(pci_vga_pci->sc_pc, pci_vga_tag,
1258 		    &bus, &device, &function);
1259 		if (bus != vga->pv_sel.pc_bus ||
1260 		    device != vga->pv_sel.pc_dev ||
1261 		    function != vga->pv_sel.pc_func) {
1262 			pci_disable_vga(pci_vga_pci->sc_pc, pci_vga_tag);
1263 			if (pci != pci_vga_pci) {
1264 				pci_unroute_vga(pci_vga_pci);
1265 				pci_route_vga(pci);
1266 				pci_vga_pci = pci;
1267 			}
1268 			pci_enable_vga(pc, tag);
1269 			pci_vga_tag = tag;
1270 		}
1271 
1272 		error = 0;
1273 		break;
1274 	}
1275 
1276 	default:
1277 		error = ENOTTY;
1278 		break;
1279 	}
1280 
1281 	return (error);
1282 }
1283 
1284 void
1285 pci_disable_vga(pci_chipset_tag_t pc, pcitag_t tag)
1286 {
1287 	pcireg_t csr;
1288 
1289 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1290 	csr &= ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE);
1291 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1292 }
1293 
1294 void
1295 pci_enable_vga(pci_chipset_tag_t pc, pcitag_t tag)
1296 {
1297 	pcireg_t csr;
1298 
1299 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1300 	csr |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE;
1301 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1302 }
1303 
1304 void
1305 pci_route_vga(struct pci_softc *sc)
1306 {
1307 	pci_chipset_tag_t pc = sc->sc_pc;
1308 	pcireg_t bc;
1309 
1310 	if (sc->sc_bridgetag == NULL)
1311 		return;
1312 
1313 	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
1314 	bc |= PPB_BC_VGA_ENABLE;
1315 	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
1316 
1317 	pci_route_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
1318 }
1319 
1320 void
1321 pci_unroute_vga(struct pci_softc *sc)
1322 {
1323 	pci_chipset_tag_t pc = sc->sc_pc;
1324 	pcireg_t bc;
1325 
1326 	if (sc->sc_bridgetag == NULL)
1327 		return;
1328 
1329 	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
1330 	bc &= ~PPB_BC_VGA_ENABLE;
1331 	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
1332 
1333 	pci_unroute_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
1334 }
1335 #endif /* USER_PCICONF */
1336 
1337 int
1338 pci_count_vga(struct pci_attach_args *pa)
1339 {
1340 	/* XXX For now, only handle the first PCI domain. */
1341 	if (pa->pa_domain != 0)
1342 		return (0);
1343 
1344 	if ((PCI_CLASS(pa->pa_class) != PCI_CLASS_DISPLAY ||
1345 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_DISPLAY_VGA) &&
1346 	    (PCI_CLASS(pa->pa_class) != PCI_CLASS_PREHISTORIC ||
1347 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_PREHISTORIC_VGA))
1348 		return (0);
1349 
1350 	pci_vga_count++;
1351 
1352 	return (0);
1353 }
1354 
1355 int
1356 pci_primary_vga(struct pci_attach_args *pa)
1357 {
1358 	/* XXX For now, only handle the first PCI domain. */
1359 	if (pa->pa_domain != 0)
1360 		return (0);
1361 
1362 	if ((PCI_CLASS(pa->pa_class) != PCI_CLASS_DISPLAY ||
1363 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_DISPLAY_VGA) &&
1364 	    (PCI_CLASS(pa->pa_class) != PCI_CLASS_PREHISTORIC ||
1365 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_PREHISTORIC_VGA))
1366 		return (0);
1367 
1368 	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
1369 	    & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
1370 	    != (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
1371 		return (0);
1372 
1373 	pci_vga_tag = pa->pa_tag;
1374 
1375 	return (1);
1376 }
1377