/*	$OpenBSD: pci.c,v 1.60 2009/04/06 20:51:48 kettenis Exp $	*/
/*	$NetBSD: pci.c,v 1.31 1997/06/06 23:48:04 thorpej Exp $	*/

/*
 * Copyright (c) 1995, 1996 Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI bus autoconfiguration.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/ppbreg.h>

int pcimatch(struct device *, void *, void *);
void pciattach(struct device *, struct device *, void *);
int pcidetach(struct device *, int);
void pcipower(int, void *);

#define NMAPREG			((PCI_MAPREG_END - PCI_MAPREG_START) / \
				    sizeof(pcireg_t))
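/*
 * Per-device record of saved PCI configuration space, kept on the
 * softc's sc_devs list so pcipower() can restore it at resume time.
 */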
struct pci_dev {
	LIST_ENTRY(pci_dev) pd_next;
	struct device *pd_dev;
	pcitag_t pd_tag;        /* pci register tag */
	pcireg_t pd_csr;
	pcireg_t pd_bhlc;
	pcireg_t pd_int;
	pcireg_t pd_map[NMAPREG];
};

#ifdef APERTURE
extern int allowaperture;
#endif

struct cfattach pci_ca = {
	sizeof(struct pci_softc), pcimatch, pciattach, pcidetach
};

struct cfdriver pci_cd = {
	NULL, "pci", DV_DULL
};

int	pci_ndomains;

int	pciprint(void *, const char *);
int	pcisubmatch(struct device *, void *, void *);

#ifdef PCI_MACHDEP_ENUMERATE_BUS
#define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
#else
int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);
#endif
int	pci_reserve_resources(struct pci_attach_args *);

/*
 * Important note about PCI-ISA bridges:
 *
 * Callbacks are used to configure these devices so that ISA/EISA bridges
 * can attach their child busses after PCI configuration is done.
 *
 * This works because:
 *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
 *	(2) any ISA/EISA bridges must be attached to primary PCI
 *	    busses (i.e. bus zero).
 *
 * That boils down to: there can only be one of these outstanding
 * at a time, it is cleared when configuring PCI bus 0 before any
 * subdevices have been found, and it is run after all subdevices
 * of PCI bus 0 have been found.
 *
 * This is needed because there are some (legacy) PCI devices which
 * can show up as ISA/EISA devices as well (the prime example of which
 * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
 * and the bridge is seen before the video board is, the board can show
 * up as an ISA device, and that can (bogusly) complicate the PCI device's
 * attach code, or make the PCI device not be properly attached at all.
 *
 * We use the generic config_defer() facility to achieve this.
 */

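/*
 * Match a "pci" bus attachment: check the bus name, the "bus" locator
 * from the kernel config, and that the bus number is sane.
 */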
int
pcimatch(struct device *parent, void *match, void *aux)
{
	struct cfdata *cf = match;
	struct pcibus_attach_args *pba = aux;

	if (strcmp(pba->pba_busname, cf->cf_driver->cd_name))
		return (0);

	/* Check the locators */
	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
	    cf->pcibuscf_bus != pba->pba_bus)
		return (0);

	/* sanity */
	if (pba->pba_bus < 0 || pba->pba_bus > 255)
		return (0);

	/*
	 * XXX check other (hardware?) indicators
	 */

	return (1);
}

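/*
 * Attach a PCI bus: copy the bus resources handed down by the parent
 * into the softc, then enumerate the bus twice, first to reserve the
 * address ranges already decoded by the devices, then to attach them.
 */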
void
pciattach(struct device *parent, struct device *self, void *aux)
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;

	pci_attach_hook(parent, self, pba);

	printf("\n");

	LIST_INIT(&sc->sc_devs);
	sc->sc_powerhook = powerhook_establish(pcipower, sc);

	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_pc = pba->pba_pc;
	sc->sc_ioex = pba->pba_ioex;
	sc->sc_memex = pba->pba_memex;
	sc->sc_domain = pba->pba_domain;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_bridgeih = pba->pba_bridgeih;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	pci_enumerate_bus(sc, pci_reserve_resources, NULL);
	pci_enumerate_bus(sc, NULL, NULL);
}

int
pcidetach(struct device *self, int flags)
{
	return pci_detach_devices((struct pci_softc *)self, flags);
}

/*
 * Save the PCI config space of tracked devices at suspend time and
 * restore it at resume.
 */
void
pcipower(int why, void *arg)
{
	struct pci_softc *sc = (struct pci_softc *)arg;
	struct pci_dev *pd;
	pcireg_t reg;
	int i;

	LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
		if (why != PWR_RESUME) {
			for (i = 0; i < NMAPREG; i++)
				pd->pd_map[i] = pci_conf_read(sc->sc_pc,
				    pd->pd_tag, PCI_MAPREG_START + (i * 4));
			pd->pd_csr = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    PCI_COMMAND_STATUS_REG);
			pd->pd_bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    PCI_BHLC_REG);
			pd->pd_int = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    PCI_INTERRUPT_REG);
		} else {
			for (i = 0; i < NMAPREG; i++)
				pci_conf_write(sc->sc_pc, pd->pd_tag,
				    PCI_MAPREG_START + (i * 4),
				    pd->pd_map[i]);
			reg = pci_conf_read(sc->sc_pc, pd->pd_tag,
			    PCI_COMMAND_STATUS_REG);
			pci_conf_write(sc->sc_pc, pd->pd_tag,
			    PCI_COMMAND_STATUS_REG,
			    (reg & 0xffff0000) | (pd->pd_csr & 0x0000ffff));
			pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG,
			    pd->pd_bhlc);
			pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_INTERRUPT_REG,
			    pd->pd_int);
		}
	}
}

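/*
 * Autoconf print function: report the device/function numbers and a
 * human-readable description of the device being attached.
 */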
int
pciprint(void *aux, const char *pnp)
{
	struct pci_attach_args *pa = aux;
	char devinfo[256];

	if (pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
		    sizeof devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" dev %d function %d", pa->pa_device, pa->pa_function);
	if (!pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo,
		    sizeof devinfo);
		printf(" %s", devinfo);
	}

	return (UNCONF);
}

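/*
 * Submatch function: honour the "dev" and "function" locators before
 * handing the decision to the candidate driver's match routine.
 */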
int
pcisubmatch(struct device *parent, void *match, void *aux)
{
	struct cfdata *cf = match;
	struct pci_attach_args *pa = aux;

	if (cf->pcicf_dev != PCI_UNK_DEV &&
	    cf->pcicf_dev != pa->pa_device)
		return (0);
	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
	    cf->pcicf_function != pa->pa_function)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, match, aux));
}

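/*
 * Probe a single bus/device/function.  Build a pci_attach_args from the
 * configuration header, swizzle the interrupt pin if we are behind a
 * bridge, and either run the caller's match callback or attach the
 * device.  Devices with a power management capability are remembered so
 * pcipower() can save and restore their config space.
 */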
int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	struct pci_dev *pd;
	struct device *dev;
	pcireg_t id, csr, class, intr, bhlcr;
	int ret = 0, pin, bus, device, function;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_pc = pc;
	pa.pa_ioex = sc->sc_ioex;
	pa.pa_memex = sc->sc_memex;
	pa.pa_domain = sc->sc_domain;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;
	pa.pa_bridgetag = sc->sc_bridgetag;
	pa.pa_bridgeih = sc->sc_bridgeih;

	/*
	 * This is a simplification of the NetBSD code.
	 * We don't support turning off I/O or memory
	 * on broken hardware. <csapuntz@stanford.edu>
	 */
	pa.pa_flags = PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;

	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin =	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		if ((dev = config_found_sm(&sc->sc_dev, &pa, pciprint,
		    pcisubmatch))) {
			pcireg_t reg;

			/* skip header type != 0 */
			reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
			if (PCI_HDRTYPE_TYPE(reg) != 0)
				return (0);
			if (pci_get_capability(pc, tag,
			    PCI_CAP_PWRMGMT, NULL, NULL) == 0)
				return (0);
			if (!(pd = malloc(sizeof *pd, M_DEVBUF,
			    M_NOWAIT)))
				return (0);
			pd->pd_tag = tag;
			pd->pd_dev = dev;
			LIST_INSERT_HEAD(&sc->sc_devs, pd, pd_next);
		}
	}

	return (ret);
}

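/*
 * Detach all children of a PCI bus and release the saved config-space
 * records.
 */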
int
pci_detach_devices(struct pci_softc *sc, int flags)
{
	struct pci_dev *pd, *next;
	int ret;

	ret = config_detach_children(&sc->sc_dev, flags);
	if (ret != 0)
		return (ret);

	for (pd = LIST_FIRST(&sc->sc_devs);
	     pd != LIST_END(&sc->sc_devs); pd = next) {
		next = LIST_NEXT(pd, pd_next);
		free(pd, M_DEVBUF);
	}
	LIST_INIT(&sc->sc_devs);

	return (0);
}

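/*
 * Walk the capability list of a device looking for the capability with
 * identifier "capid".  If found, optionally return its offset in config
 * space and the first dword of the capability.
 */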
int
pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
	case 1: /* PCI-PCI bridge header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < 0x40))
			panic("pci_get_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}

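/*
 * Enumerate every known PCI bus and return the attach arguments of the
 * first device for which the caller's match function returns non-zero.
 */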
int
pci_find_device(struct pci_attach_args *pa,
		int (*match)(struct pci_attach_args *))
{
	extern struct cfdriver pci_cd;
	struct device *pcidev;
	int i;

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pcidev = pci_cd.cd_devs[i];
		if (pcidev != NULL &&
		    pci_enumerate_bus((struct pci_softc *)pcidev,
		    match, pa) != 0)
			return (1);
	}
	return (0);
}

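/*
 * Set the power state (D0-D3) of a device through its power management
 * capability.  Returns the previous state if a change was made, or the
 * requested state otherwise.
 */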
int
pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int state)
{
	pcireg_t reg;
	int offset;

	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
		reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
		if ((reg & PCI_PMCSR_STATE_MASK) != state) {
			pci_conf_write(pc, tag, offset + PCI_PMCSR,
			    (reg & ~PCI_PMCSR_STATE_MASK) | state);
			return (reg & PCI_PMCSR_STATE_MASK);
		}
	}
	return (state);
}

#ifndef PCI_MACHDEP_ENUMERATE_BUS
/*
 * Generic PCI bus enumeration routine.  Used unless machine-dependent
 * code needs to provide something else.
 */
int
pci_enumerate_bus(struct pci_softc *sc,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;

	for (device = 0; device < sc->sc_maxndevs; device++) {
		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}

	return (0);
}
#endif /* PCI_MACHDEP_ENUMERATE_BUS */

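/*
 * Called for every device during the first enumeration pass: reserve
 * the address ranges already decoded by its BARs, and for PCI-PCI
 * bridges the I/O, memory and prefetchable memory windows, in the bus
 * extent maps so that later allocations will not collide with them.
 */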
int
pci_reserve_resources(struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t bhlc, blr, type;
	bus_addr_t base, limit;
	bus_size_t size;
	int reg, reg_start, reg_end;

	bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(bhlc)) {
	case 0:
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_END;
		break;
	case 1: /* PCI-PCI bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PPB_END;
		break;
	case 2: /* PCI-CardBus bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PCB_END;
		break;
	default:
		return (0);
	}
	for (reg = reg_start; reg < reg_end; reg += 4) {
		if (!pci_mapreg_probe(pc, tag, reg, &type))
			continue;

		if (pci_mapreg_info(pc, tag, reg, type, &base, &size, NULL))
			continue;

		if (base == 0)
			continue;

		switch (type) {
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
			if (pa->pa_memex && extent_alloc_region(pa->pa_memex,
			    base, size, EX_NOWAIT))
				printf("mem address conflict 0x%lx/0x%lx\n",
				    (u_long)base, (u_long)size);
			break;
		case PCI_MAPREG_TYPE_IO:
			if (pa->pa_ioex && extent_alloc_region(pa->pa_ioex,
			    base, size, EX_NOWAIT))
				printf("io address conflict 0x%lx/0x%lx\n",
				    (u_long)base, (u_long)size);
			break;
		}

		if (type & PCI_MAPREG_MEM_TYPE_64BIT)
			reg += 4;
	}

	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return (0);

	/* Figure out the I/O address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_IOSTATUS);
	base = (blr & 0x000000f0) << 8;
	limit = (blr & 0x0000f000) | 0x00000fff;
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_ioex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_ioex, base, size, EX_NOWAIT))
			printf("bridge io address conflict 0x%lx/0x%lx\n",
			    (u_long)base, (u_long)size);
	}

	/* Figure out the memory mapped I/O address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_MEM);
	base = (blr & 0x0000fff0) << 16;
	limit = (blr & 0xfff00000) | 0x000fffff;
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_memex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT))
			printf("bridge mem address conflict 0x%lx/0x%lx\n",
			    (u_long)base, (u_long)size);
	}

	/* Figure out the prefetchable memory address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_PREFMEM);
	base = (blr & 0x0000fff0) << 16;
	limit = (blr & 0xfff00000) | 0x000fffff;
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_memex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT))
			printf("bridge mem address conflict 0x%lx/0x%lx\n",
			    (u_long)base, (u_long)size);
	}

	return (0);
}

/*
 * Vital Product Data (PCI 2.2)
 */

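/*
 * Read "count" 32-bit words of Vital Product Data starting at "offset",
 * polling the VPD capability's flag bit for completion of each word.
 */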
int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}

int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return (0);
}

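/*
 * Convenience helper for drivers: match the attach arguments against a
 * table of vendor/product ID pairs.
 */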
int
pci_matchbyid(struct pci_attach_args *pa, const struct pci_matchid *ids,
    int nent)
{
	const struct pci_matchid *pm;
	int i;

	for (i = 0, pm = ids; i < nent; i++, pm++)
		if (PCI_VENDOR(pa->pa_id) == pm->pm_vid &&
		    PCI_PRODUCT(pa->pa_id) == pm->pm_pid)
			return (1);
	return (0);
}

#ifdef USER_PCICONF
/*
 * This is the user interface to PCI configuration space.
 */

#include <sys/pciio.h>
#include <sys/fcntl.h>

#ifdef DEBUG
#define PCIDEBUG(x) printf x
#else
#define PCIDEBUG(x)
#endif

int pciopen(dev_t dev, int oflags, int devtype, struct proc *p);
int pciclose(dev_t dev, int flag, int devtype, struct proc *p);
int pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p);

int
pciopen(dev_t dev, int oflags, int devtype, struct proc *p)
{
	PCIDEBUG(("pciopen ndevs: %d\n", pci_cd.cd_ndevs));

	if (minor(dev) >= pci_ndomains) {
		return ENXIO;
	}

#ifndef APERTURE
	if ((oflags & FWRITE) && securelevel > 0) {
		return EPERM;
	}
#else
	if ((oflags & FWRITE) && securelevel > 0 && allowaperture == 0) {
		return EPERM;
	}
#endif
	return (0);
}

int
pciclose(dev_t dev, int flag, int devtype, struct proc *p)
{
	PCIDEBUG(("pciclose\n"));
	return (0);
}

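/*
 * ioctl handler for the user PCI configuration interface: supports
 * 32-bit wide, 4-byte aligned reads and writes of PCI configuration
 * space.  Writes additionally require the device to have been opened
 * for writing.
 */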
int
pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct pci_io *io;
	int i, error;
	pcitag_t tag;
	struct pci_softc *pci = NULL;
	pci_chipset_tag_t pc;

	io = (struct pci_io *)data;

	PCIDEBUG(("pciioctl cmd %s", cmd == PCIOCREAD ? "pciocread"
	    : cmd == PCIOCWRITE ? "pciocwrite" : "unknown"));
	PCIDEBUG(("  bus %d dev %d func %d reg %x\n", io->pi_sel.pc_bus,
	    io->pi_sel.pc_dev, io->pi_sel.pc_func, io->pi_reg));

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pci = pci_cd.cd_devs[i];
		if (pci != NULL && pci->sc_domain == minor(dev) &&
		    pci->sc_bus == io->pi_sel.pc_bus)
			break;
	}
	if (i >= pci_cd.cd_ndevs)
		return ENXIO;

	/* Check bounds */
	if (pci->sc_bus >= 256 ||
	    io->pi_sel.pc_dev >= pci_bus_maxdevs(pci->sc_pc, pci->sc_bus) ||
	    io->pi_sel.pc_func >= 8)
		return EINVAL;

	pc = pci->sc_pc;
	tag = pci_make_tag(pc, io->pi_sel.pc_bus, io->pi_sel.pc_dev,
	    io->pi_sel.pc_func);

	switch (cmd) {
	case PCIOCGETCONF:
		error = ENODEV;
		break;

	case PCIOCREAD:
		switch (io->pi_width) {
		case 4:
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3)
				return EINVAL;
			io->pi_data = pci_conf_read(pc, tag, io->pi_reg);
			error = 0;
			break;
		default:
			error = ENODEV;
			break;
		}
		break;

	case PCIOCWRITE:
		if (!(flag & FWRITE))
			return EPERM;

		switch (io->pi_width) {
		case 4:
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3)
				return EINVAL;
			pci_conf_write(pc, tag, io->pi_reg, io->pi_data);
			error = 0;
			break;
		default:
			error = ENODEV;
			break;
		}
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

#endif