xref: /openbsd-src/sys/dev/pci/pci.c (revision fcde59b201a29a2b4570b00b71e7aa25d61cb5c1)
1 /*	$OpenBSD: pci.c,v 1.119 2020/09/08 20:13:52 kettenis Exp $	*/
2 /*	$NetBSD: pci.c,v 1.31 1997/06/06 23:48:04 thorpej Exp $	*/
3 
4 /*
5  * Copyright (c) 1995, 1996 Christopher G. Demetriou.  All rights reserved.
6  * Copyright (c) 1994 Charles Hannum.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Charles Hannum.
19  * 4. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * PCI bus autoconfiguration.
36  */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 
43 #include <dev/pci/pcireg.h>
44 #include <dev/pci/pcivar.h>
45 #include <dev/pci/pcidevs.h>
46 #include <dev/pci/ppbreg.h>
47 
48 int pcimatch(struct device *, void *, void *);
49 void pciattach(struct device *, struct device *, void *);
50 int pcidetach(struct device *, int);
51 int pciactivate(struct device *, int);
52 void pci_suspend(struct pci_softc *);
53 void pci_powerdown(struct pci_softc *);
54 void pci_resume(struct pci_softc *);
55 
/*
 * Saved copy of one MSI-X table entry, recorded across suspend/resume.
 * Entries are filled in by pci_suspend_msix() and written back by
 * pci_resume_msix() (see pci_suspend()/pci_resume() below); the field
 * names presumably mirror the MSI-X message address/data/vector-control
 * layout — confirm against pci_suspend_msix().
 */
struct msix_vector {
	uint32_t mv_ma;		/* message address, low 32 bits */
	uint32_t mv_mau32;	/* message address, upper 32 bits */
	uint32_t mv_md;		/* message data */
	uint32_t mv_vc;		/* vector control */
};
62 
/* Number of 32-bit base address registers in a type 0 header. */
#define NMAPREG			((PCI_MAPREG_END - PCI_MAPREG_START) / \
				    sizeof(pcireg_t))

/*
 * Per-device state kept by the bus driver: the attached child device,
 * configuration registers saved across suspend/resume, the BAR size
 * masks probed at attach time, and power/VGA bookkeeping.
 */
struct pci_dev {
	struct device *pd_dev;	/* attached child driver, if any */
	LIST_ENTRY(pci_dev) pd_next;
	pcitag_t pd_tag;        /* pci register tag */
	pcireg_t pd_csr;	/* saved command/status register */
	pcireg_t pd_bhlc;	/* saved BIST/header/latency/cacheline */
	pcireg_t pd_int;	/* saved interrupt pin/line register */
	pcireg_t pd_map[NMAPREG];	/* saved BAR contents */
	pcireg_t pd_mask[NMAPREG];	/* BAR size masks from probe */
	pcireg_t pd_msi_mc;	/* saved MSI message control */
	pcireg_t pd_msi_ma;	/* saved MSI message address */
	pcireg_t pd_msi_mau32;	/* saved MSI message address, high bits */
	pcireg_t pd_msi_md;	/* saved MSI message data */
	pcireg_t pd_msix_mc;	/* saved MSI-X message control */
	struct msix_vector *pd_msix_table;	/* saved MSI-X table copy */
	int pd_pmcsr_state;	/* power state before powerdown */
	int pd_vga_decode;	/* set if device is a VGA-class device */
};
83 
#ifdef APERTURE
/* machdep.allowaperture sysctl knob — defined in machine-dependent code. */
extern int allowaperture;
#endif

/* Autoconf attachment glue for pci(4). */
struct cfattach pci_ca = {
	sizeof(struct pci_softc), pcimatch, pciattach, pcidetach, pciactivate
};

struct cfdriver pci_cd = {
	NULL, "pci", DV_DULL
};

/* NOTE(review): not referenced in this file chunk; presumably the
 * number of PCI domains, maintained by machine-dependent code. */
int	pci_ndomains;

/* Ownership and location of the currently active VGA device. */
struct proc *pci_vga_proc;
struct pci_softc *pci_vga_pci;
pcitag_t pci_vga_tag;

/* Nonzero enables PCI power management in powerdown/resume paths. */
int	pci_dopm;
103 
104 int	pciprint(void *, const char *);
105 int	pcisubmatch(struct device *, void *, void *);
106 
107 #ifdef PCI_MACHDEP_ENUMERATE_BUS
108 #define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
109 #else
110 int pci_enumerate_bus(struct pci_softc *,
111     int (*)(struct pci_attach_args *), struct pci_attach_args *);
112 #endif
113 int	pci_reserve_resources(struct pci_attach_args *);
114 int	pci_primary_vga(struct pci_attach_args *);
115 
116 /*
117  * Important note about PCI-ISA bridges:
118  *
119  * Callbacks are used to configure these devices so that ISA/EISA bridges
120  * can attach their child busses after PCI configuration is done.
121  *
122  * This works because:
123  *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
124  *	(2) any ISA/EISA bridges must be attached to primary PCI
125  *	    busses (i.e. bus zero).
126  *
127  * That boils down to: there can only be one of these outstanding
128  * at a time, it is cleared when configuring PCI bus 0 before any
129  * subdevices have been found, and it is run after all subdevices
130  * of PCI bus 0 have been found.
131  *
132  * This is needed because there are some (legacy) PCI devices which
133  * can show up as ISA/EISA devices as well (the prime example of which
134  * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
135  * and the bridge is seen before the video board is, the board can show
136  * up as an ISA device, and that can (bogusly) complicate the PCI device's
137  * attach code, or make the PCI device not be properly attached at all.
138  *
139  * We use the generic config_defer() facility to achieve this.
140  */
141 
142 int
143 pcimatch(struct device *parent, void *match, void *aux)
144 {
145 	struct cfdata *cf = match;
146 	struct pcibus_attach_args *pba = aux;
147 
148 	if (strcmp(pba->pba_busname, cf->cf_driver->cd_name))
149 		return (0);
150 
151 	/* Check the locators */
152 	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
153 	    cf->pcibuscf_bus != pba->pba_bus)
154 		return (0);
155 
156 	/* sanity */
157 	if (pba->pba_bus < 0 || pba->pba_bus > 255)
158 		return (0);
159 
160 	/*
161 	 * XXX check other (hardware?) indicators
162 	 */
163 
164 	return (1);
165 }
166 
/*
 * Attach a PCI bus: copy the bus attachment arguments into the softc,
 * reserve the resources the firmware already assigned, locate the
 * active VGA device, and finally attach drivers to all devices found.
 */
void
pciattach(struct device *parent, struct device *self, void *aux)
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;

	pci_attach_hook(parent, self, pba);

	printf("\n");

	LIST_INIT(&sc->sc_devs);

	/* Inherit tags, extent maps and topology info from the parent. */
	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_pc = pba->pba_pc;
	sc->sc_flags = pba->pba_flags;
	sc->sc_ioex = pba->pba_ioex;
	sc->sc_memex = pba->pba_memex;
	sc->sc_pmemex = pba->pba_pmemex;
	sc->sc_busex = pba->pba_busex;
	sc->sc_domain = pba->pba_domain;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_bridgeih = pba->pba_bridgeih;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;

	/* Reserve our own bus number. */
	if (sc->sc_busex)
		extent_alloc_region(sc->sc_busex, sc->sc_bus, 1, EX_NOWAIT);

	/* First pass: reserve firmware-assigned BARs/windows. */
	pci_enumerate_bus(sc, pci_reserve_resources, NULL);

	/* Find the VGA device that's currently active. */
	if (pci_enumerate_bus(sc, pci_primary_vga, NULL))
		pci_vga_pci = sc;

	/* Final pass: attach a driver to every device. */
	pci_enumerate_bus(sc, NULL, NULL);
}
208 
/*
 * Autoconf detach: tear down all children and per-device state.
 */
int
pcidetach(struct device *self, int flags)
{
	struct pci_softc *sc = (struct pci_softc *)self;

	return (pci_detach_devices(sc, flags));
}
214 
215 int
216 pciactivate(struct device *self, int act)
217 {
218 	int rv = 0;
219 
220 	switch (act) {
221 	case DVACT_SUSPEND:
222 		rv = config_activate_children(self, act);
223 		pci_suspend((struct pci_softc *)self);
224 		break;
225 	case DVACT_RESUME:
226 		pci_resume((struct pci_softc *)self);
227 		rv = config_activate_children(self, act);
228 		break;
229 	case DVACT_POWERDOWN:
230 		rv = config_activate_children(self, act);
231 		pci_powerdown((struct pci_softc *)self);
232 		break;
233 	default:
234 		rv = config_activate_children(self, act);
235 		break;
236 	}
237 	return (rv);
238 }
239 
/*
 * Save the configuration space of every type 0 device on this bus so
 * pci_resume() can restore it after a suspend cycle.
 */
void
pci_suspend(struct pci_softc *sc)
{
	struct pci_dev *pd;
	pcireg_t bhlc, reg;
	int off, i;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		/*
		 * Only handle header type 0 here; PCI-PCI bridges and
		 * CardBus bridges need special handling, which will
		 * be done in their specific drivers.
		 */
		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			continue;

		/* Save registers that may get lost. */
		for (i = 0; i < NMAPREG; i++)
			pd->pd_map[i] = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    PCI_MAPREG_START + (i * 4));
		pd->pd_csr = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG);
		pd->pd_bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_BHLC_REG);
		pd->pd_int = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_INTERRUPT_REG);

		/* Save the MSI state; the data register location
		 * depends on whether 64-bit addressing is supported. */
		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
		    PCI_CAP_MSI, &off, &reg)) {
			pd->pd_msi_ma = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MA);
			if (reg & PCI_MSI_MC_C64) {
				pd->pd_msi_mau32 = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MAU32);
				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MD64);
			} else {
				pd->pd_msi_md = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, off + PCI_MSI_MD32);
			}
			pd->pd_msi_mc = reg;
		}

		/* Save the MSI-X control word and vector table. */
		pci_suspend_msix(sc->sc_pc, pd->pd_tag, sc->sc_memt,
		    &pd->pd_msix_mc, pd->pd_msix_table);
	}
}
288 
/*
 * Powerdown pass: if PCI power management is enabled (pci_dopm), drop
 * every type 0 device on the bus into its lowest supported power
 * state, remembering the previous state for pci_resume().
 */
void
pci_powerdown(struct pci_softc *sc)
{
	struct pci_dev *pd;
	pcireg_t bhlc;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		/*
		 * Only handle header type 0 here; PCI-PCI bridges and
		 * CardBus bridges need special handling, which will
		 * be done in their specific drivers.
		 */
		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			continue;

		if (pci_dopm) {
			/*
			 * Place the device into the lowest possible
			 * power state.
			 */
			pd->pd_pmcsr_state = pci_get_powerstate(sc->sc_pc,
			    pd->pd_tag);
			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
			    pci_min_powerstate(sc->sc_pc, pd->pd_tag));
		}
	}
}
317 
/*
 * Restore power and the configuration registers saved by
 * pci_suspend() for every type 0 device on the bus.
 */
void
pci_resume(struct pci_softc *sc)
{
	struct pci_dev *pd;
	pcireg_t bhlc, reg;
	int off, i;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		/*
		 * Only handle header type 0 here; PCI-PCI bridges and
		 * CardBus bridges need special handling, which will
		 * be done in their specific drivers.
		 */
		bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			continue;

		/* Restore power. */
		if (pci_dopm)
			pci_set_powerstate(sc->sc_pc, pd->pd_tag,
			    pd->pd_pmcsr_state);

		/* Restore the registers saved above. */
		for (i = 0; i < NMAPREG; i++)
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    PCI_MAPREG_START + (i * 4), pd->pd_map[i]);
		/* Keep the live status bits; restore only the command
		 * half of the command/status register. */
		reg = pci_conf_read(sc->sc_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG);
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG,
		    (reg & 0xffff0000) | (pd->pd_csr & 0x0000ffff));
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG,
		    pd->pd_bhlc);
		pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_INTERRUPT_REG,
		    pd->pd_int);

		/* Restore MSI state, mirroring the save in pci_suspend(). */
		if (pci_get_capability(sc->sc_pc, pd->pd_tag,
		    PCI_CAP_MSI, &off, &reg)) {
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MA, pd->pd_msi_ma);
			if (reg & PCI_MSI_MC_C64) {
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MAU32, pd->pd_msi_mau32);
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MD64, pd->pd_msi_md);
			} else {
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    off + PCI_MSI_MD32, pd->pd_msi_md);
			}
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    off + PCI_MSI_MC, pd->pd_msi_mc);
		}

		/* Restore the MSI-X control word and vector table. */
		pci_resume_msix(sc->sc_pc, pd->pd_tag, sc->sc_memt,
		    pd->pd_msix_mc, pd->pd_msix_table);
	}
}
374 
375 int
376 pciprint(void *aux, const char *pnp)
377 {
378 	struct pci_attach_args *pa = aux;
379 	char devinfo[256];
380 
381 	if (pnp) {
382 		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
383 		    sizeof devinfo);
384 		printf("%s at %s", devinfo, pnp);
385 	}
386 	printf(" dev %d function %d", pa->pa_device, pa->pa_function);
387 	if (!pnp) {
388 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo,
389 		    sizeof devinfo);
390 		printf(" %s", devinfo);
391 	}
392 
393 	return (UNCONF);
394 }
395 
396 int
397 pcisubmatch(struct device *parent, void *match,  void *aux)
398 {
399 	struct cfdata *cf = match;
400 	struct pci_attach_args *pa = aux;
401 
402 	if (cf->pcicf_dev != PCI_UNK_DEV &&
403 	    cf->pcicf_dev != pa->pa_device)
404 		return (0);
405 	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
406 	    cf->pcicf_function != pa->pa_function)
407 		return (0);
408 
409 	return ((*cf->cf_attach->ca_match)(parent, match, aux));
410 }
411 
/*
 * Probe one bus/device/function.  When `match' is non-NULL it is
 * called with the constructed pci_attach_args and its result is
 * returned (copying the args into `pap' on a hit).  Otherwise the
 * device's BAR size masks are probed, per-device state is recorded,
 * and a child driver is attached via config_found_sm().
 */
int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	struct pci_dev *pd;
	pcireg_t id, class, intr, bhlcr, cap;
	int pin, bus, device, function;
	int off, ret = 0;
	uint64_t addr;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* Ignore unknown header types (> 2). */
	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	/* Build the attach arguments handed to match/attach routines. */
	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_pc = pc;
	pa.pa_ioex = sc->sc_ioex;
	pa.pa_memex = sc->sc_memex;
	pa.pa_pmemex = sc->sc_pmemex;
	pa.pa_busex = sc->sc_busex;
	pa.pa_domain = sc->sc_domain;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;
	pa.pa_bridgetag = sc->sc_bridgetag;
	pa.pa_bridgeih = sc->sc_bridgeih;

	/* This is a simplification of the NetBSD code.
	   We don't support turning off I/O or memory
	   on broken hardware. <csapuntz@stanford.edu> */
	pa.pa_flags = sc->sc_flags;
	pa.pa_flags |= PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;

	/* On the host bus no swizzling applies; behind a bridge the
	 * accumulated swizzle includes our device number. */
	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin = 	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	/* HyperTransport MSI mapping capability handling. */
	if (pci_get_ht_capability(pc, tag, PCI_HT_CAP_MSI, &off, &cap)) {
		/*
		 * XXX Should we enable MSI mapping ourselves on
		 * systems that have it disabled?
		 */
		if (cap & PCI_HT_MSI_ENABLED) {
			if ((cap & PCI_HT_MSI_FIXED) == 0) {
				addr = pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR);
				addr |= (uint64_t)pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR_HI32) << 32;
			} else
				addr = PCI_HT_MSI_FIXED_ADDR;

			/*
			 * XXX This will fail to enable MSI on systems
			 * that don't use the canonical address.
			 */
			if (addr == PCI_HT_MSI_FIXED_ADDR)
				pa.pa_flags |= PCI_FLAGS_MSI_ENABLED;
		}
	}

	/*
	 * Give the MD code a chance to alter pci_attach_args and/or
	 * skip devices.
	 */
	if (pci_probe_device_hook(pc, &pa) != 0)
		return (0);

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		pcireg_t address, csr;
		int i, reg, reg_start, reg_end;
		int s;

		pd = malloc(sizeof *pd, M_DEVBUF, M_ZERO | M_WAITOK);
		pd->pd_tag = tag;
		LIST_INSERT_HEAD(&sc->sc_devs, pd, pd_next);

		/* BAR register range depends on the header type. */
		switch (PCI_HDRTYPE_TYPE(bhlcr)) {
		case 0:
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_END;
			break;
		case 1: /* PCI-PCI bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PPB_END;
			break;
		case 2: /* PCI-CardBus bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PCB_END;
			break;
		default:
			/* Not reached: types > 2 were rejected above.
			 * NOTE(review): if it were reachable, pd would
			 * be left on the list without a probed mask. */
			return (0);
		}

		pd->pd_msix_table = pci_alloc_msix_table(sc->sc_pc, pd->pd_tag);

		/* Size the BARs: disable decoding, write all-ones to
		 * each BAR, read back the size mask, then restore. */
		s = splhigh();
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr &
			    ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE));

		for (reg = reg_start, i = 0; reg < reg_end; reg += 4, i++) {
			address = pci_conf_read(pc, tag, reg);
			pci_conf_write(pc, tag, reg, 0xffffffff);
			pd->pd_mask[i] = pci_conf_read(pc, tag, reg);
			pci_conf_write(pc, tag, reg, address);
		}

		if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
		splx(s);

		/* Remember VGA-class devices for legacy VGA routing. */
		if ((PCI_CLASS(class) == PCI_CLASS_DISPLAY &&
		    PCI_SUBCLASS(class) == PCI_SUBCLASS_DISPLAY_VGA) ||
		    (PCI_CLASS(class) == PCI_CLASS_PREHISTORIC &&
		    PCI_SUBCLASS(class) == PCI_SUBCLASS_PREHISTORIC_VGA))
			pd->pd_vga_decode = 1;

		pd->pd_dev = config_found_sm(&sc->sc_dev, &pa, pciprint,
		    pcisubmatch);
		if (pd->pd_dev)
			pci_dev_postattach(pd->pd_dev, &pa);
	}

	return (ret);
}
582 
/*
 * Detach all child devices and release the per-device state recorded
 * by pci_probe_device().  Returns non-zero (and leaves the list
 * intact) if any child refuses to detach.
 */
int
pci_detach_devices(struct pci_softc *sc, int flags)
{
	struct pci_dev *pd, *next;
	int ret;

	ret = config_detach_children(&sc->sc_dev, flags);
	if (ret != 0)
		return (ret);

	/* Walk with a saved successor since each element is freed. */
	for (pd = LIST_FIRST(&sc->sc_devs); pd != NULL; pd = next) {
		pci_free_msix_table(sc->sc_pc, pd->pd_tag, pd->pd_msix_table);
		next = LIST_NEXT(pd, pd_next);
		free(pd, M_DEVBUF, sizeof *pd);
	}
	LIST_INIT(&sc->sc_devs);

	return (0);
}
602 
/*
 * Walk the device's capability list looking for capability `capid'.
 * On success returns 1 and, if the pointers are non-NULL, stores the
 * config-space offset in `*offset' and the first capability register
 * in `*value'.  Returns 0 if the capability (or the list) is absent
 * or the list is malformed.
 */
int
pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	/* No capability list at all? */
	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
	case 1: /* PCI-PCI bridge header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
		/*
		 * Some devices, like parts of the NVIDIA C51 chipset,
		 * have a broken Capabilities List.  So we need to do
		 * a sanity check here.
		 */
		if ((ofs & 3) || (ofs < 0x40))
			return (0);
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}
650 
651 int
652 pci_get_ht_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
653     int *offset, pcireg_t *value)
654 {
655 	pcireg_t reg;
656 	unsigned int ofs;
657 
658 	if (pci_get_capability(pc, tag, PCI_CAP_HT, &ofs, NULL) == 0)
659 		return (0);
660 
661 	while (ofs != 0) {
662 #ifdef DIAGNOSTIC
663 		if ((ofs & 3) || (ofs < 0x40))
664 			panic("pci_get_ht_capability");
665 #endif
666 		reg = pci_conf_read(pc, tag, ofs);
667 		if (PCI_HT_CAP(reg) == capid) {
668 			if (offset)
669 				*offset = ofs;
670 			if (value)
671 				*value = reg;
672 			return (1);
673 		}
674 		ofs = PCI_CAPLIST_NEXT(reg);
675 	}
676 
677 	return (0);
678 }
679 
/*
 * Walk the PCI Express extended capability list (config space at and
 * above PCI_PCIE_ECAP) looking for extended capability `capid'.  On
 * success returns 1 and optionally stores the offset and the first
 * capability register.  Returns 0 for non-PCIe devices or when the
 * capability is absent.
 */
int
pci_get_ext_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	/* Make sure this is a PCI Express device. */
	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, NULL, NULL) == 0)
		return (0);

	/* Scan PCI Express extended capabilities. */
	ofs = PCI_PCIE_ECAP;
	while (ofs != 0) {
#ifdef DIAGNOSTIC
		/* Offsets must be dword-aligned and inside ECAP space. */
		if ((ofs & 3) || (ofs < PCI_PCIE_ECAP))
			panic("pci_get_ext_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_PCIE_ECAP_ID(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_PCIE_ECAP_NEXT(reg);
	}

	return (0);
}
711 
712 uint16_t
713 pci_requester_id(pci_chipset_tag_t pc, pcitag_t tag)
714 {
715 	int bus, dev, func;
716 
717 	pci_decompose_tag(pc, tag, &bus, &dev, &func);
718 	return ((bus << 8) | (dev << 3) | func);
719 }
720 
721 int
722 pci_find_device(struct pci_attach_args *pa,
723     int (*match)(struct pci_attach_args *))
724 {
725 	extern struct cfdriver pci_cd;
726 	struct device *pcidev;
727 	int i;
728 
729 	for (i = 0; i < pci_cd.cd_ndevs; i++) {
730 		pcidev = pci_cd.cd_devs[i];
731 		if (pcidev != NULL &&
732 		    pci_enumerate_bus((struct pci_softc *)pcidev,
733 		    		      match, pa) != 0)
734 			return (1);
735 	}
736 	return (0);
737 }
738 
739 int
740 pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag)
741 {
742 	pcireg_t reg;
743 	int offset;
744 
745 	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
746 		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
747 		return (reg & PCI_PMCSR_STATE_MASK);
748 	}
749 	return (PCI_PMCSR_STATE_D0);
750 }
751 
752 int
753 pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int state)
754 {
755 	pcireg_t reg;
756 	int offset, ostate = state;
757 
758 	/*
759 	 * Warn the firmware that we are going to put the device
760 	 * into the given state.
761 	 */
762 	pci_set_powerstate_md(pc, tag, state, 1);
763 
764 	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
765 		if (state == PCI_PMCSR_STATE_D3) {
766 			/*
767 			 * The PCI Power Management spec says we
768 			 * should disable I/O and memory space as well
769 			 * as bus mastering before we place the device
770 			 * into D3.
771 			 */
772 			reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
773 			reg &= ~PCI_COMMAND_IO_ENABLE;
774 			reg &= ~PCI_COMMAND_MEM_ENABLE;
775 			reg &= ~PCI_COMMAND_MASTER_ENABLE;
776 			pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, reg);
777 		}
778 		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
779 		if ((reg & PCI_PMCSR_STATE_MASK) != state) {
780 			ostate = reg & PCI_PMCSR_STATE_MASK;
781 
782 			pci_conf_write(pc, tag, offset + PCI_PMCSR,
783 			    (reg & ~PCI_PMCSR_STATE_MASK) | state);
784 			if (state == PCI_PMCSR_STATE_D3 ||
785 			    ostate == PCI_PMCSR_STATE_D3)
786 				delay(10 * 1000);
787 		}
788 	}
789 
790 	/*
791 	 * Warn the firmware that the device is now in the given
792 	 * state.
793 	 */
794 	pci_set_powerstate_md(pc, tag, state, 0);
795 
796 	return (ostate);
797 }
798 
799 #ifndef PCI_MACHDEP_ENUMERATE_BUS
800 /*
801  * Generic PCI bus enumeration routine.  Used unless machine-dependent
802  * code needs to provide something else.
803  */
int
pci_enumerate_bus(struct pci_softc *sc,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;

	for (device = 0; device < sc->sc_maxndevs; device++) {
		/* Probe function 0 first to decide if the slot exists. */
		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		/* Ignore unknown header types (> 2). */
		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		/*
		 * Decide how many functions to scan: device quirks
		 * override the multifunction bit in the header.
		 */
		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		/* A non-NULL `match' hit stops the scan immediately. */
		for (function = 0; function < nfunctions; function++) {
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}

	return (0);
}
851 #endif /* PCI_MACHDEP_ENUMERATE_BUS */
852 
853 int
854 pci_reserve_resources(struct pci_attach_args *pa)
855 {
856 	pci_chipset_tag_t pc = pa->pa_pc;
857 	pcitag_t tag = pa->pa_tag;
858 	pcireg_t bhlc, blr, type, bir;
859 	pcireg_t addr, mask;
860 	bus_addr_t base, limit;
861 	bus_size_t size;
862 	int reg, reg_start, reg_end, reg_rom;
863 	int bus, dev, func;
864 	int sec, sub;
865 	int flags;
866 	int s;
867 
868 	pci_decompose_tag(pc, tag, &bus, &dev, &func);
869 
870 	bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
871 	switch (PCI_HDRTYPE_TYPE(bhlc)) {
872 	case 0:
873 		reg_start = PCI_MAPREG_START;
874 		reg_end = PCI_MAPREG_END;
875 		reg_rom = PCI_ROM_REG;
876 		break;
877 	case 1: /* PCI-PCI bridge */
878 		reg_start = PCI_MAPREG_START;
879 		reg_end = PCI_MAPREG_PPB_END;
880 		reg_rom = 0;	/* 0x38 */
881 		break;
882 	case 2: /* PCI-CardBus bridge */
883 		reg_start = PCI_MAPREG_START;
884 		reg_end = PCI_MAPREG_PCB_END;
885 		reg_rom = 0;
886 		break;
887 	default:
888 		return (0);
889 	}
890 
891 	for (reg = reg_start; reg < reg_end; reg += 4) {
892 		if (!pci_mapreg_probe(pc, tag, reg, &type))
893 			continue;
894 
895 		if (pci_mapreg_info(pc, tag, reg, type, &base, &size, &flags))
896 			continue;
897 
898 		if (base == 0)
899 			continue;
900 
901 		switch (type) {
902 		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
903 		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
904 			if (ISSET(flags, BUS_SPACE_MAP_PREFETCHABLE) &&
905 			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
906 			    base, size, EX_NOWAIT) == 0) {
907 				break;
908 			}
909 #ifdef __sparc64__
910 			/*
911 			 * Certain SPARC T5 systems assign
912 			 * non-prefetchable 64-bit BARs of its onboard
913 			 * mpii(4) controllers addresses in the
914 			 * prefetchable memory range.  This is
915 			 * (probably) safe, as reads from the device
916 			 * registers mapped by these BARs are
917 			 * side-effect free.  So assume the firmware
918 			 * knows what it is doing.
919 			 */
920 			if (base >= 0x100000000 &&
921 			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
922 			    base, size, EX_NOWAIT) == 0) {
923 				break;
924 			}
925 #endif
926 			if (pa->pa_memex && extent_alloc_region(pa->pa_memex,
927 			    base, size, EX_NOWAIT)) {
928 				printf("%d:%d:%d: mem address conflict 0x%lx/0x%lx\n",
929 				    bus, dev, func, base, size);
930 				pci_conf_write(pc, tag, reg, 0);
931 				if (type & PCI_MAPREG_MEM_TYPE_64BIT)
932 					pci_conf_write(pc, tag, reg + 4, 0);
933 			}
934 			break;
935 		case PCI_MAPREG_TYPE_IO:
936 			if (pa->pa_ioex && extent_alloc_region(pa->pa_ioex,
937 			    base, size, EX_NOWAIT)) {
938 				printf("%d:%d:%d: io address conflict 0x%lx/0x%lx\n",
939 				    bus, dev, func, base, size);
940 				pci_conf_write(pc, tag, reg, 0);
941 			}
942 			break;
943 		}
944 
945 		if (type & PCI_MAPREG_MEM_TYPE_64BIT)
946 			reg += 4;
947 	}
948 
949 	if (reg_rom != 0) {
950 		s = splhigh();
951 		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
952 		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
953 		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
954 		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
955 		splx(s);
956 
957 		base = PCI_ROM_ADDR(addr);
958 		size = PCI_ROM_SIZE(mask);
959 		if (base != 0 && size != 0) {
960 			if (pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
961 			    base, size, EX_NOWAIT) &&
962 			    pa->pa_memex && extent_alloc_region(pa->pa_memex,
963 			    base, size, EX_NOWAIT)) {
964 				printf("%d:%d:%d: rom address conflict 0x%lx/0x%lx\n",
965 				    bus, dev, func, base, size);
966 				pci_conf_write(pc, tag, PCI_ROM_REG, 0);
967 			}
968 		}
969 	}
970 
971 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
972 		return (0);
973 
974 	/* Figure out the I/O address range of the bridge. */
975 	blr = pci_conf_read(pc, tag, PPB_REG_IOSTATUS);
976 	base = (blr & 0x000000f0) << 8;
977 	limit = (blr & 0x000f000) | 0x00000fff;
978 	blr = pci_conf_read(pc, tag, PPB_REG_IO_HI);
979 	base |= (blr & 0x0000ffff) << 16;
980 	limit |= (blr & 0xffff0000);
981 	if (limit > base)
982 		size = (limit - base + 1);
983 	else
984 		size = 0;
985 	if (pa->pa_ioex && base > 0 && size > 0) {
986 		if (extent_alloc_region(pa->pa_ioex, base, size, EX_NOWAIT)) {
987 			printf("%d:%d:%d: bridge io address conflict 0x%lx/0x%lx\n",
988 			    bus, dev, func, base, size);
989 			blr &= 0xffff0000;
990 			blr |= 0x000000f0;
991 			pci_conf_write(pc, tag, PPB_REG_IOSTATUS, blr);
992 		}
993 	}
994 
995 	/* Figure out the memory mapped I/O address range of the bridge. */
996 	blr = pci_conf_read(pc, tag, PPB_REG_MEM);
997 	base = (blr & 0x0000fff0) << 16;
998 	limit = (blr & 0xfff00000) | 0x000fffff;
999 	if (limit > base)
1000 		size = (limit - base + 1);
1001 	else
1002 		size = 0;
1003 	if (pa->pa_memex && base > 0 && size > 0) {
1004 		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
1005 			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
1006 			    bus, dev, func, base, size);
1007 			pci_conf_write(pc, tag, PPB_REG_MEM, 0x0000fff0);
1008 		}
1009 	}
1010 
1011 	/* Figure out the prefetchable memory address range of the bridge. */
1012 	blr = pci_conf_read(pc, tag, PPB_REG_PREFMEM);
1013 	base = (blr & 0x0000fff0) << 16;
1014 	limit = (blr & 0xfff00000) | 0x000fffff;
1015 #ifdef __LP64__
1016 	blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_PREFBASE_HI32);
1017 	base |= ((uint64_t)blr) << 32;
1018 	blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_PREFLIM_HI32);
1019 	limit |= ((uint64_t)blr) << 32;
1020 #endif
1021 	if (limit > base)
1022 		size = (limit - base + 1);
1023 	else
1024 		size = 0;
1025 	if (pa->pa_pmemex && base > 0 && size > 0) {
1026 		if (extent_alloc_region(pa->pa_pmemex, base, size, EX_NOWAIT)) {
1027 			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
1028 			    bus, dev, func, base, size);
1029 			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
1030 		}
1031 	} else if (pa->pa_memex && base > 0 && size > 0) {
1032 		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
1033 			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
1034 			    bus, dev, func, base, size);
1035 			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
1036 		}
1037 	}
1038 
1039 	/* Figure out the bus range handled by the bridge. */
1040 	bir = pci_conf_read(pc, tag, PPB_REG_BUSINFO);
1041 	sec = PPB_BUSINFO_SECONDARY(bir);
1042 	sub = PPB_BUSINFO_SUBORDINATE(bir);
1043 	if (pa->pa_busex && sub >= sec && sub > 0) {
1044 		if (extent_alloc_region(pa->pa_busex, sec, sub - sec + 1,
1045 		    EX_NOWAIT)) {
1046 			printf("%d:%d:%d: bridge bus conflict %d-%d\n",
1047 			    bus, dev, func, sec, sub);
1048 		}
1049 	}
1050 
1051 	return (0);
1052 }
1053 
1054 /*
1055  * Vital Product Data (PCI 2.2)
1056  */
1057 
int
/*
 * Read `count' 32-bit words of Vital Product Data, starting at byte
 * offset `offset', into `data'.  Returns 0 on success, EINVAL for a
 * bad range, ENXIO if the device lacks the VPD capability, or EIO if
 * the device never signals completion.
 *
 * NOTE(review): `offset' is in bytes but `count' is in 32-bit words,
 * so `offset + count' underestimates the last address actually used
 * (offset + 4 * (count - 1)) — confirm the intended bound against
 * PCI_VPD_ADDRESS_MASK.
 */
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	if ((offset + count) >= PCI_VPD_ADDRESS_MASK)
		return (EINVAL);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (ENXIO);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		/*
		 * Keep only the capability header in the low half,
		 * then latch the target address with the operation
		 * flag clear to start a read cycle.  (Clearing
		 * PCI_VPD_OPFLAG again after the mask is presumably
		 * redundant but harmless.)
		 */
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (EIO);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		/* Flag raised: the data register now holds this word. */
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}
1094 
int
/*
 * Write `count' 32-bit words from `data' to Vital Product Data
 * starting at byte offset `offset'.  Returns 0 on success, 1 on
 * failure.
 *
 * NOTE(review): unlike pci_vpd_read() this returns a bare 1 rather
 * than an errno; callers should only test for non-zero.
 */
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		/* Stage the word to be written in the data register. */
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		/*
		 * Latch the target address with the operation flag
		 * set to start the write cycle.
		 */
		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
		/* Flag cleared: the write of this word has completed. */
	}

	return (0);
}
1131 
1132 int
1133 pci_matchbyid(struct pci_attach_args *pa, const struct pci_matchid *ids,
1134     int nent)
1135 {
1136 	const struct pci_matchid *pm;
1137 	int i;
1138 
1139 	for (i = 0, pm = ids; i < nent; i++, pm++)
1140 		if (PCI_VENDOR(pa->pa_id) == pm->pm_vid &&
1141 		    PCI_PRODUCT(pa->pa_id) == pm->pm_pid)
1142 			return (1);
1143 	return (0);
1144 }
1145 
void
/*
 * Mark the PCI device backing `dev' as no longer decoding legacy VGA
 * cycles.  `dev' may be a descendant of the actual pci(4) child.
 */
pci_disable_legacy_vga(struct device *dev)
{
	struct pci_softc *pci;
	struct pci_dev *pd;

	/* XXX Until we attach the drm drivers directly to pci. */
	/* Climb the autoconf tree until the parent is a pci(4) bus. */
	while (dev->dv_parent->dv_cfdata->cf_driver != &pci_cd)
		dev = dev->dv_parent;

	/* Find this device on the bus's list and clear its decode flag. */
	pci = (struct pci_softc *)dev->dv_parent;
	LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
		if (pd->pd_dev == dev) {
			pd->pd_vga_decode = 0;
			break;
		}
	}
}
1164 
1165 #ifdef USER_PCICONF
1166 /*
1167  * This is the user interface to PCI configuration space.
1168  */
1169 
1170 #include <sys/pciio.h>
1171 #include <sys/fcntl.h>
1172 
1173 #ifdef DEBUG
1174 #define PCIDEBUG(x) printf x
1175 #else
1176 #define PCIDEBUG(x)
1177 #endif
1178 
1179 void pci_disable_vga(pci_chipset_tag_t, pcitag_t);
1180 void pci_enable_vga(pci_chipset_tag_t, pcitag_t);
1181 void pci_route_vga(struct pci_softc *);
1182 void pci_unroute_vga(struct pci_softc *);
1183 
1184 int pciopen(dev_t dev, int oflags, int devtype, struct proc *p);
1185 int pciclose(dev_t dev, int flag, int devtype, struct proc *p);
1186 int pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p);
1187 
1188 int
1189 pciopen(dev_t dev, int oflags, int devtype, struct proc *p)
1190 {
1191 	PCIDEBUG(("pciopen ndevs: %d\n" , pci_cd.cd_ndevs));
1192 
1193 	if (minor(dev) >= pci_ndomains) {
1194 		return ENXIO;
1195 	}
1196 
1197 #ifndef APERTURE
1198 	if ((oflags & FWRITE) && securelevel > 0) {
1199 		return EPERM;
1200 	}
1201 #else
1202 	if ((oflags & FWRITE) && securelevel > 0 && allowaperture == 0) {
1203 		return EPERM;
1204 	}
1205 #endif
1206 	return (0);
1207 }
1208 
1209 int
1210 pciclose(dev_t dev, int flag, int devtype, struct proc *p)
1211 {
1212 	PCIDEBUG(("pciclose\n"));
1213 
1214 	pci_vga_proc = NULL;
1215 	return (0);
1216 }
1217 
int
/*
 * Handle ioctl(2) requests on a /dev/pci* node: raw configuration
 * space read/write, BAR size-mask queries, expansion ROM extraction,
 * VPD reads, and legacy VGA arbitration.  `data' aliases the
 * per-command argument structure; each begins with a struct pcisel
 * selecting the target bus/device/function.
 */
pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct pcisel *sel = (struct pcisel *)data;
	struct pci_io *io;
	struct pci_rom *rom;
	int i, error;
	pcitag_t tag;
	struct pci_softc *pci;
	pci_chipset_tag_t pc;

	/* Per-command permission and precondition checks. */
	switch (cmd) {
	case PCIOCREAD:
	case PCIOCREADMASK:
		break;
	case PCIOCWRITE:
		/* Config-space writes require the node opened FWRITE. */
		if (!(flag & FWRITE))
			return EPERM;
		break;
	case PCIOCGETROMLEN:
	case PCIOCGETROM:
	case PCIOCGETVPD:
		break;
	case PCIOCGETVGA:
	case PCIOCSETVGA:
		if (pci_vga_pci == NULL)
			return EINVAL;
		break;
	default:
		return ENOTTY;
	}

	/* Find the pci(4) instance for this domain (minor) and bus. */
	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pci = pci_cd.cd_devs[i];
		if (pci != NULL && pci->sc_domain == minor(dev) &&
		    pci->sc_bus == sel->pc_bus)
			break;
	}
	if (i >= pci_cd.cd_ndevs)
		return ENXIO;

	/* Check bounds */
	if (pci->sc_bus >= 256 ||
	    sel->pc_dev >= pci_bus_maxdevs(pci->sc_pc, pci->sc_bus) ||
	    sel->pc_func >= 8)
		return EINVAL;

	pc = pci->sc_pc;
	tag = pci_make_tag(pc, sel->pc_bus, sel->pc_dev, sel->pc_func);

	switch (cmd) {
	case PCIOCREAD:
		io = (struct pci_io *)data;
		switch (io->pi_width) {
		case 4:
			/* Configuration space bounds check */
			if (io->pi_reg < 0 ||
			    io->pi_reg >= pci_conf_size(pc, tag))
				return EINVAL;
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3)
				return EINVAL;
			io->pi_data = pci_conf_read(pc, tag, io->pi_reg);
			error = 0;
			break;
		default:
			/* Only 32-bit accesses are supported. */
			error = EINVAL;
			break;
		}
		break;

	case PCIOCWRITE:
		io = (struct pci_io *)data;
		switch (io->pi_width) {
		case 4:
			/* Configuration space bounds check */
			if (io->pi_reg < 0 ||
			    io->pi_reg >= pci_conf_size(pc, tag))
				return EINVAL;
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3)
				return EINVAL;
			pci_conf_write(pc, tag, io->pi_reg, io->pi_data);
			error = 0;
			break;
		default:
			/* Only 32-bit accesses are supported. */
			error = EINVAL;
			break;
		}
		break;

	case PCIOCREADMASK:
	{
		io = (struct pci_io *)data;
		struct pci_dev *pd;
		int dev, func, i;	/* note: shadows the outer `i' */

		/* Only aligned 32-bit reads within the BAR range. */
		if (io->pi_width != 4 || io->pi_reg & 0x3 ||
		    io->pi_reg < PCI_MAPREG_START ||
		    io->pi_reg >= PCI_MAPREG_END)
			return (EINVAL);

		/*
		 * Return the BAR size mask recorded at attach time
		 * rather than re-probing the live register.
		 */
		error = ENODEV;
		LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
			pci_decompose_tag(pc, pd->pd_tag, NULL, &dev, &func);
			if (dev == sel->pc_dev && func == sel->pc_func) {
				i = (io->pi_reg - PCI_MAPREG_START) / 4;
				io->pi_data = pd->pd_mask[i];
				error = 0;
				break;
			}
		}
		break;
	}

	case PCIOCGETROMLEN:
	case PCIOCGETROM:
	{
		pcireg_t addr, mask, bhlc;
		bus_space_handle_t h;
		bus_size_t len, off;
		char buf[256];
		int s;

		rom = (struct pci_rom *)data;

		/* Expansion ROM BARs only exist in type 0 headers. */
		bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			return (ENODEV);

		/*
		 * Size the ROM BAR: write all-ones with the enable
		 * bit clear, read back the mask, then restore the
		 * original value.  splhigh() keeps the sequence
		 * atomic with respect to interrupts.
		 */
		s = splhigh();
		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
		splx(s);

		/*
		 * Section 6.2.5.2 `Expansion ROM Base Address Register',
		 *
		 * tells us that only the upper 21 bits are writable.
		 * This means that the size of a ROM must be a
		 * multiple of 2 KB.  So reading the ROM in chunks of
		 * 256 bytes should work just fine.
		 *
		 * NOTE(review): the `!= 0' below binds to the whole
		 * parenthesized || expression, not just the modulo.
		 * The truth value is the same either way, but the
		 * intent was probably
		 * (addr == 0 || size % sizeof(buf) != 0).
		 */
		if ((PCI_ROM_ADDR(addr) == 0 ||
		     PCI_ROM_SIZE(mask) % sizeof(buf)) != 0)
			return (ENODEV);

		/* If we're just after the size, skip reading the ROM. */
		if (cmd == PCIOCGETROMLEN) {
			error = 0;
			goto fail;
		}

		if (rom->pr_romlen < PCI_ROM_SIZE(mask)) {
			error = ENOMEM;
			goto fail;
		}

		error = bus_space_map(pci->sc_memt, PCI_ROM_ADDR(addr),
		    PCI_ROM_SIZE(mask), 0, &h);
		if (error)
			goto fail;

		/*
		 * Copy the ROM out in 256-byte chunks, enabling ROM
		 * decode only while actually reading each chunk.
		 */
		off = 0;
		len = PCI_ROM_SIZE(mask);
		while (len > 0 && error == 0) {
			s = splhigh();
			pci_conf_write(pc, tag, PCI_ROM_REG,
			    addr | PCI_ROM_ENABLE);
			bus_space_read_region_1(pci->sc_memt, h, off,
			    buf, sizeof(buf));
			pci_conf_write(pc, tag, PCI_ROM_REG, addr);
			splx(s);

			error = copyout(buf, rom->pr_rom + off, sizeof(buf));
			off += sizeof(buf);
			len -= sizeof(buf);
		}

		bus_space_unmap(pci->sc_memt, h, PCI_ROM_SIZE(mask));

	fail:
		/* Always report the ROM size back to the caller. */
		rom->pr_romlen = PCI_ROM_SIZE(mask);
		break;
	}

	case PCIOCGETVPD: {
		struct pci_vpd_req *pv = (struct pci_vpd_req *)data;
		pcireg_t *data;	/* note: shadows the ioctl argument */
		size_t len;
		unsigned int i;
		int s;

		CTASSERT(sizeof(*data) == sizeof(*pv->pv_data));

		data = mallocarray(pv->pv_count, sizeof(*data), M_TEMP,
		    M_WAITOK|M_CANFAIL);
		if (data == NULL) {
			error = ENOMEM;
			break;
		}

		/* VPD access polls config space; block interrupts. */
		s = splhigh();
		error = pci_vpd_read(pc, tag, pv->pv_offset, pv->pv_count,
		    data);
		splx(s);

		len = pv->pv_count * sizeof(*pv->pv_data);

		if (error == 0) {
			/* VPD words are little-endian on the wire. */
			for (i = 0; i < pv->pv_count; i++)
				data[i] = letoh32(data[i]);

			error = copyout(data, pv->pv_data, len);
		}

		free(data, M_TEMP, len);
		break;
	}

	case PCIOCGETVGA:
	{
		struct pci_vga *vga = (struct pci_vga *)data;
		struct pci_dev *pd;
		int bus, dev, func;

		/* Does the selected device decode legacy VGA cycles? */
		vga->pv_decode = 0;
		LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
			pci_decompose_tag(pc, pd->pd_tag, NULL, &dev, &func);
			if (dev == sel->pc_dev && func == sel->pc_func) {
				if (pd->pd_vga_decode)
					vga->pv_decode = PCI_VGA_IO_ENABLE |
					    PCI_VGA_MEM_ENABLE;
				break;
			}
		}

		/* Report which device currently owns the VGA routing. */
		pci_decompose_tag(pci_vga_pci->sc_pc,
		    pci_vga_tag, &bus, &dev, &func);
		vga->pv_sel.pc_bus = bus;
		vga->pv_sel.pc_dev = dev;
		vga->pv_sel.pc_func = func;
		error = 0;
		break;
	}
	case PCIOCSETVGA:
	{
		struct pci_vga *vga = (struct pci_vga *)data;
		int bus, dev, func;

		switch (vga->pv_lock) {
		case PCI_VGA_UNLOCK:
		case PCI_VGA_LOCK:
		case PCI_VGA_TRYLOCK:
			break;
		default:
			return (EINVAL);
		}

		/* Unlock: only the current lock holder may release. */
		if (vga->pv_lock == PCI_VGA_UNLOCK) {
			if (pci_vga_proc != p)
				return (EINVAL);
			pci_vga_proc = NULL;
			wakeup(&pci_vga_proc);
			return (0);
		}

		/* Lock: sleep until free, or fail at once for trylock. */
		while (pci_vga_proc != p && pci_vga_proc != NULL) {
			if (vga->pv_lock == PCI_VGA_TRYLOCK)
				return (EBUSY);
			error = tsleep_nsec(&pci_vga_proc, PLOCK | PCATCH,
			    "vgalk", INFSLP);
			if (error)
				return (error);
		}
		pci_vga_proc = p;

		/* Re-route legacy VGA to the selected device if needed. */
		pci_decompose_tag(pci_vga_pci->sc_pc,
		    pci_vga_tag, &bus, &dev, &func);
		if (bus != vga->pv_sel.pc_bus || dev != vga->pv_sel.pc_dev ||
		    func != vga->pv_sel.pc_func) {
			pci_disable_vga(pci_vga_pci->sc_pc, pci_vga_tag);
			if (pci != pci_vga_pci) {
				pci_unroute_vga(pci_vga_pci);
				pci_route_vga(pci);
				pci_vga_pci = pci;
			}
			pci_enable_vga(pc, tag);
			pci_vga_tag = tag;
		}

		error = 0;
		break;
	}

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
1522 
1523 void
1524 pci_disable_vga(pci_chipset_tag_t pc, pcitag_t tag)
1525 {
1526 	pcireg_t csr;
1527 
1528 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1529 	csr &= ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE);
1530 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1531 }
1532 
1533 void
1534 pci_enable_vga(pci_chipset_tag_t pc, pcitag_t tag)
1535 {
1536 	pcireg_t csr;
1537 
1538 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1539 	csr |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE;
1540 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1541 }
1542 
1543 void
1544 pci_route_vga(struct pci_softc *sc)
1545 {
1546 	pci_chipset_tag_t pc = sc->sc_pc;
1547 	pcireg_t bc;
1548 
1549 	if (sc->sc_bridgetag == NULL)
1550 		return;
1551 
1552 	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
1553 	bc |= PPB_BC_VGA_ENABLE;
1554 	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
1555 
1556 	pci_route_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
1557 }
1558 
1559 void
1560 pci_unroute_vga(struct pci_softc *sc)
1561 {
1562 	pci_chipset_tag_t pc = sc->sc_pc;
1563 	pcireg_t bc;
1564 
1565 	if (sc->sc_bridgetag == NULL)
1566 		return;
1567 
1568 	bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
1569 	bc &= ~PPB_BC_VGA_ENABLE;
1570 	pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
1571 
1572 	pci_unroute_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
1573 }
1574 #endif /* USER_PCICONF */
1575 
1576 int
1577 pci_primary_vga(struct pci_attach_args *pa)
1578 {
1579 	/* XXX For now, only handle the first PCI domain. */
1580 	if (pa->pa_domain != 0)
1581 		return (0);
1582 
1583 	if ((PCI_CLASS(pa->pa_class) != PCI_CLASS_DISPLAY ||
1584 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_DISPLAY_VGA) &&
1585 	    (PCI_CLASS(pa->pa_class) != PCI_CLASS_PREHISTORIC ||
1586 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_PREHISTORIC_VGA))
1587 		return (0);
1588 
1589 	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
1590 	    & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
1591 	    != (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
1592 		return (0);
1593 
1594 	pci_vga_tag = pa->pa_tag;
1595 
1596 	return (1);
1597 }
1598 
1599 #ifdef __HAVE_PCI_MSIX
1600 
1601 struct msix_vector *
1602 pci_alloc_msix_table(pci_chipset_tag_t pc, pcitag_t tag)
1603 {
1604 	struct msix_vector *table;
1605 	pcireg_t reg;
1606 	int tblsz;
1607 
1608 	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, NULL, &reg) == 0)
1609 		return NULL;
1610 
1611 	tblsz = PCI_MSIX_MC_TBLSZ(reg) + 1;
1612 	table = mallocarray(tblsz, sizeof(*table), M_DEVBUF, M_WAITOK);
1613 
1614 	return table;
1615 }
1616 
1617 void
1618 pci_free_msix_table(pci_chipset_tag_t pc, pcitag_t tag,
1619     struct msix_vector *table)
1620 {
1621 	pcireg_t reg;
1622 	int tblsz;
1623 
1624 	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, NULL, &reg) == 0)
1625 		return;
1626 
1627 	tblsz = PCI_MSIX_MC_TBLSZ(reg) + 1;
1628 	free(table, M_DEVBUF, tblsz * sizeof(*table));
1629 }
1630 
void
/*
 * Save the device's MSI-X state for suspend: each table entry's
 * message address (low/high), message data and vector control words
 * go into `table', and the message control register into `*mc'.
 * Restored by pci_resume_msix().
 *
 * NOTE(review): if the table cannot be mapped, this returns without
 * touching `*mc' — confirm callers do not resume from a stale value.
 */
pci_suspend_msix(pci_chipset_tag_t pc, pcitag_t tag,
    bus_space_tag_t memt, pcireg_t *mc, struct msix_vector *table)
{
	bus_space_handle_t memh;
	pcireg_t reg;
	int tblsz, i;

	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, NULL, &reg) == 0)
		return;

	KASSERT(table != NULL);

	if (pci_msix_table_map(pc, tag, memt, &memh))
		return;

	/* Table size field in message control is N-1 encoded. */
	tblsz = PCI_MSIX_MC_TBLSZ(reg) + 1;
	for (i = 0; i < tblsz; i++) {
		table[i].mv_ma = bus_space_read_4(memt, memh, PCI_MSIX_MA(i));
		table[i].mv_mau32 = bus_space_read_4(memt, memh,
		    PCI_MSIX_MAU32(i));
		table[i].mv_md = bus_space_read_4(memt, memh, PCI_MSIX_MD(i));
		table[i].mv_vc = bus_space_read_4(memt, memh, PCI_MSIX_VC(i));
	}

	pci_msix_table_unmap(pc, tag, memt, memh);

	*mc = reg;
}
1660 
void
/*
 * Restore the MSI-X state saved by pci_suspend_msix(): rewrite every
 * vector table entry, then write the saved message control word back
 * into the capability register at `off'.
 */
pci_resume_msix(pci_chipset_tag_t pc, pcitag_t tag,
    bus_space_tag_t memt, pcireg_t mc, struct msix_vector *table)
{
	bus_space_handle_t memh;
	pcireg_t reg;
	int tblsz, i;
	int off;

	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, &reg) == 0)
		return;

	KASSERT(table != NULL);

	if (pci_msix_table_map(pc, tag, memt, &memh))
		return;

	/* Table size field in message control is N-1 encoded. */
	tblsz = PCI_MSIX_MC_TBLSZ(reg) + 1;
	for (i = 0; i < tblsz; i++) {
		/*
		 * Write address and data words first, with a barrier
		 * before the vector control word, so the entry is
		 * fully populated before its control word (which
		 * presumably carries the per-vector mask) takes
		 * effect.
		 */
		bus_space_write_4(memt, memh, PCI_MSIX_MA(i), table[i].mv_ma);
		bus_space_write_4(memt, memh, PCI_MSIX_MAU32(i),
		    table[i].mv_mau32);
		bus_space_write_4(memt, memh, PCI_MSIX_MD(i), table[i].mv_md);
		bus_space_barrier(memt, memh, PCI_MSIX_MA(i), 16,
		    BUS_SPACE_BARRIER_WRITE);
		bus_space_write_4(memt, memh, PCI_MSIX_VC(i), table[i].mv_vc);
		bus_space_barrier(memt, memh, PCI_MSIX_VC(i), 4,
		    BUS_SPACE_BARRIER_WRITE);
	}

	pci_msix_table_unmap(pc, tag, memt, memh);

	/* Finally restore the saved message control register. */
	pci_conf_write(pc, tag, off, mc);
}
1695 
1696 int
1697 pci_intr_msix_count(pci_chipset_tag_t pc, pcitag_t tag)
1698 {
1699 	pcireg_t reg;
1700 
1701 	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, NULL, &reg) == 0)
1702 		return (0);
1703 
1704 	return (PCI_MSIX_MC_TBLSZ(reg) + 1);
1705 }
1706 
1707 #else /* __HAVE_PCI_MSIX */
1708 
struct msix_vector *
pci_alloc_msix_table(pci_chipset_tag_t pc, pcitag_t tag)
{
	/* MSI-X unsupported on this platform: nothing to allocate. */
	return NULL;
}
1714 
void
pci_free_msix_table(pci_chipset_tag_t pc, pcitag_t tag,
    struct msix_vector *table)
{
	/* MSI-X unsupported on this platform: nothing to free. */
}
1720 
void
pci_suspend_msix(pci_chipset_tag_t pc, pcitag_t tag,
    bus_space_tag_t memt, pcireg_t *mc, struct msix_vector *table)
{
	/* MSI-X unsupported on this platform: nothing to save. */
}
1726 
void
pci_resume_msix(pci_chipset_tag_t pc, pcitag_t tag,
    bus_space_tag_t memt, pcireg_t mc, struct msix_vector *table)
{
	/* MSI-X unsupported on this platform: nothing to restore. */
}
1732 
int
pci_intr_msix_count(pci_chipset_tag_t pc, pcitag_t tag)
{
	/* MSI-X unsupported on this platform: no vectors available. */
	return (0);
}
1738 
1739 #endif /* __HAVE_PCI_MSIX */
1740