1 /*	$NetBSD: pciconf.c,v 1.46 2020/02/02 14:45:14 jmcneill Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Allen Briggs for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed for the NetBSD Project by
20  *      Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 /*
38  * Derived in part from code from PMON/2000 (http://pmon.groupbsd.org/).
39  */
40 
41 /*
42  * To do:
43  *    - Perform all data structure allocation dynamically, don't have
44  *	statically-sized arrays ("oops, you lose because you have too
45  *	many slots filled!")
46  *    - Do this in 2 passes, with an MD hook to control the behavior:
47  *		(1) Configure the bus (possibly including expansion
48 		    ROMs).
49  *		(2) Another pass to disable expansion ROMs if they're
50  *		    mapped (since you're not supposed to leave them
51  *		    mapped when you're not using them).
52  *	This would facilitate MD code executing the expansion ROMs
53  *	if necessary (possibly with an x86 emulator) to configure
54  *	devices (e.g. VGA cards).
55  *    - Deal with "anything can be hot-plugged" -- i.e., carry configuration
56  *	information around & be able to reconfigure on the fly
57  *    - Deal with segments (See IA64 System Abstraction Layer)
58  *    - Deal with subtractive bridges (& non-spec positive/subtractive decode)
59  *    - Deal with ISA/VGA/VGA palette snooping
60  *    - Deal with device capabilities on bridges
61  *    - Worry about changing a bridge to/from transparency
62  * From thorpej (05/25/01)
63  *    - Try to handle devices that are already configured (perhaps using that
64  *      as a hint to where we put other devices)
65  */
66 
67 #include <sys/cdefs.h>
68 __KERNEL_RCSID(0, "$NetBSD: pciconf.c,v 1.46 2020/02/02 14:45:14 jmcneill Exp $");
69 
70 #include "opt_pci.h"
71 
72 #include <sys/param.h>
73 #include <sys/extent.h>
74 #include <sys/queue.h>
75 #include <sys/systm.h>
76 #include <sys/malloc.h>
77 #include <sys/kmem.h>
78 
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pciconf.h>
81 #include <dev/pci/pcidevs.h>
82 #include <dev/pci/pccbbreg.h>
83 
84 int pci_conf_debug = 0;
85 
86 #if !defined(MIN)
87 #define	MIN(a,b) (((a)<(b))?(a):(b))
88 #define	MAX(a,b) (((a)>(b))?(a):(b))
89 #endif
90 
91 /* per-bus constants. */
92 #define MAX_CONF_DEV	32			/* Arbitrary */
93 #define MAX_CONF_MEM	(3 * MAX_CONF_DEV)	/* Avg. 3 per device -- Arb. */
94 #define MAX_CONF_IO	(3 * MAX_CONF_DEV)	/* Avg. 1 per device -- Arb. */
95 
96 struct _s_pciconf_bus_t;			/* Forward declaration */
97 
98 typedef struct _s_pciconf_dev_t {
99 	int		ipin;
100 	int		iline;
101 	int		min_gnt;
102 	int		max_lat;
103 	int		enable;
104 	pcitag_t	tag;
105 	pci_chipset_tag_t	pc;
106 	struct _s_pciconf_bus_t	*ppb;		/* I am really a bridge */
107 } pciconf_dev_t;
108 
109 typedef struct _s_pciconf_win_t {
110 	pciconf_dev_t	*dev;
111 	int		reg;			/* 0 for busses */
112 	int		align;
113 	int		prefetch;
114 	uint64_t	size;
115 	uint64_t	address;
116 } pciconf_win_t;
117 
118 typedef struct _s_pciconf_bus_t {
119 	int		busno;
120 	int		next_busno;
121 	int		last_busno;
122 	int		max_mingnt;
123 	int		min_maxlat;
124 	int		cacheline_size;
125 	int		prefetch;
126 	int		fast_b2b;
127 	int		freq_66;
128 	int		def_ltim;
129 	int		max_ltim;
130 	int		bandwidth_used;
131 	int		swiz;
132 	int		io_32bit;
133 	int		pmem_64bit;
134 	int		mem_64bit;
135 	int		io_align;
136 	int		mem_align;
137 	int		pmem_align;
138 
139 	int		ndevs;
140 	pciconf_dev_t	device[MAX_CONF_DEV];
141 
142 	/* These should be sorted in order of decreasing size */
143 	int		nmemwin;
144 	pciconf_win_t	pcimemwin[MAX_CONF_MEM];
145 	int		niowin;
146 	pciconf_win_t	pciiowin[MAX_CONF_IO];
147 
148 	bus_size_t	io_total;
149 	bus_size_t	mem_total;
150 	bus_size_t	pmem_total;
151 
152 	struct extent	*ioext;
153 	struct extent	*memext;
154 	struct extent	*pmemext;
155 
156 	pci_chipset_tag_t	pc;
157 	struct _s_pciconf_bus_t *parent_bus;
158 } pciconf_bus_t;
159 
160 static int	probe_bus(pciconf_bus_t *);
161 static void	alloc_busno(pciconf_bus_t *, pciconf_bus_t *);
162 static void	set_busreg(pci_chipset_tag_t, pcitag_t, int, int, int);
163 static int	pci_do_device_query(pciconf_bus_t *, pcitag_t, int, int, int);
164 static int	setup_iowins(pciconf_bus_t *);
165 static int	setup_memwins(pciconf_bus_t *);
166 static int	configure_bridge(pciconf_dev_t *);
167 static int	configure_bus(pciconf_bus_t *);
168 static uint64_t	pci_allocate_range(struct extent *, uint64_t, int, bool);
169 static pciconf_win_t	*get_io_desc(pciconf_bus_t *, bus_size_t);
170 static pciconf_win_t	*get_mem_desc(pciconf_bus_t *, bus_size_t);
171 static pciconf_bus_t	*query_bus(pciconf_bus_t *, pciconf_dev_t *, int);
172 
173 static void	print_tag(pci_chipset_tag_t, pcitag_t);
174 
175 static void
176 print_tag(pci_chipset_tag_t pc, pcitag_t tag)
177 {
178 	int	bus, dev, func;
179 
180 	pci_decompose_tag(pc, tag, &bus, &dev, &func);
181 	printf("PCI: bus %d, device %d, function %d: ", bus, dev, func);
182 }
183 
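/*
 * Parameters tagged __used_only_lp64 are referenced only inside
 * #ifdef _LP64 blocks (extent maps use u_long internally), so they
 * need __unused on 32-bit builds to keep the compiler quiet.
 */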
184 #ifdef _LP64
185 #define	__used_only_lp64	/* nothing */
186 #else
187 #define	__used_only_lp64	__unused
188 #endif /* _LP64 */
189 
190 /************************************************************************/
191 /************************************************************************/
192 /***********************   Bus probing routines   ***********************/
193 /************************************************************************/
194 /************************************************************************/
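/*
 * get_io_desc() and get_mem_desc() hand out a window descriptor for a
 * request of the given size while keeping pciiowin[]/pcimemwin[] sorted
 * by decreasing size: entries smaller than the new request are shifted
 * down one slot and the vacated slot is returned.  The caller fills in
 * the descriptor and bumps niowin/nmemwin.
 */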
195 static pciconf_win_t *
196 get_io_desc(pciconf_bus_t *pb, bus_size_t size)
197 {
198 	int	i, n;
199 
200 	n = pb->niowin;
201 	for (i = n; i > 0 && size > pb->pciiowin[i-1].size; i--)
202 		pb->pciiowin[i] = pb->pciiowin[i-1]; /* struct copy */
203 	return &pb->pciiowin[i];
204 }
205 
206 static pciconf_win_t *
207 get_mem_desc(pciconf_bus_t *pb, bus_size_t size)
208 {
209 	int	i, n;
210 
211 	n = pb->nmemwin;
212 	for (i = n; i > 0 && size > pb->pcimemwin[i-1].size; i--)
213 		pb->pcimemwin[i] = pb->pcimemwin[i-1]; /* struct copy */
214 	return &pb->pcimemwin[i];
215 }
216 
217 /*
218  * Set up common bus parameters, then loop over devices & functions.
219  * If we find something, call pci_do_device_query().
220  */
221 static int
222 probe_bus(pciconf_bus_t *pb)
223 {
224 	int device;
225 	uint8_t devs[32];
226 	int i, n;
227 
228 	pb->ndevs = 0;
229 	pb->niowin = 0;
230 	pb->nmemwin = 0;
231 	pb->freq_66 = 1;
232 #ifdef PCICONF_NO_FAST_B2B
233 	pb->fast_b2b = 0;
234 #else
235 	pb->fast_b2b = 1;
236 #endif
237 	pb->prefetch = 1;
238 	pb->max_mingnt = 0;	/* we are looking for the maximum */
239 	pb->min_maxlat = 0x100;	/* we are looking for the minimum */
240 	pb->bandwidth_used = 0;
241 
242 	n = pci_bus_devorder(pb->pc, pb->busno, devs, __arraycount(devs));
243 	for (i = 0; i < n; i++) {
244 		pcitag_t tag;
245 		pcireg_t id, bhlcr;
246 		int function, nfunction;
247 		int confmode;
248 
249 		device = devs[i];
250 
251 		tag = pci_make_tag(pb->pc, pb->busno, device, 0);
252 		if (pci_conf_debug) {
253 			print_tag(pb->pc, tag);
254 		}
255 		id = pci_conf_read(pb->pc, tag, PCI_ID_REG);
256 
257 		if (pci_conf_debug) {
258 			printf("id=%x: Vendor=%x, Product=%x\n",
259 			    id, PCI_VENDOR(id), PCI_PRODUCT(id));
260 		}
261 		/* Invalid vendor ID value? */
262 		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
263 			continue;
264 
265 		bhlcr = pci_conf_read(pb->pc, tag, PCI_BHLC_REG);
266 		nfunction = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;
267 		for (function = 0; function < nfunction; function++) {
268 			tag = pci_make_tag(pb->pc, pb->busno, device, function);
269 			id = pci_conf_read(pb->pc, tag, PCI_ID_REG);
270 			if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
271 				continue;
272 			if (pb->ndevs + 1 < MAX_CONF_DEV) {
273 				if (pci_conf_debug) {
274 					print_tag(pb->pc, tag);
275 					printf("Found dev 0x%04x 0x%04x -- "
276 					    "really probing.\n",
277 					PCI_VENDOR(id), PCI_PRODUCT(id));
278 				}
279 #ifdef __HAVE_PCI_CONF_HOOK
280 				confmode = pci_conf_hook(pb->pc, pb->busno,
281 				    device, function, id);
282 				if (confmode == 0)
283 					continue;
284 #else
285 				/*
286 				 * Don't enable expansion ROMS -- some cards
287 				 * share address decoders between the EXPROM
288 				 * and PCI memory space, and enabling the ROM
289 				 * when not needed will cause all sorts of
290 				 * lossage.
291 				 */
292 				confmode = PCI_CONF_DEFAULT;
293 #endif
294 				if (pci_do_device_query(pb, tag, device,
295 				    function, confmode))
296 					return -1;
297 				pb->ndevs++;
298 			}
299 		}
300 	}
301 	return 0;
302 }
303 
304 static void
305 alloc_busno(pciconf_bus_t *parent, pciconf_bus_t *pb)
306 {
307 	pb->busno = parent->next_busno;
308 	pb->next_busno = pb->busno + 1;
309 }
310 
311 static void
312 set_busreg(pci_chipset_tag_t pc, pcitag_t tag, int prim, int sec, int sub)
313 {
314 	pcireg_t	busreg;
315 
316 	busreg  =  __SHIFTIN(prim, PCI_BRIDGE_BUS_PRIMARY);
317 	busreg |=  __SHIFTIN(sec,  PCI_BRIDGE_BUS_SECONDARY);
318 	busreg |=  __SHIFTIN(sub,  PCI_BRIDGE_BUS_SUBORDINATE);
319 	pci_conf_write(pc, tag, PCI_BRIDGE_BUS_REG, busreg);
320 }
321 
322 static pciconf_bus_t *
323 query_bus(pciconf_bus_t *parent, pciconf_dev_t *pd, int dev)
324 {
325 	pciconf_bus_t	*pb;
326 	pcireg_t	io, pmem;
327 	pciconf_win_t	*pi, *pm;
328 
329 	pb = kmem_zalloc(sizeof (pciconf_bus_t), KM_SLEEP);
330 	pb->cacheline_size = parent->cacheline_size;
331 	pb->parent_bus = parent;
332 	alloc_busno(parent, pb);
333 
334 	pb->mem_align = 0x100000;	/* 1M alignment */
335 	pb->pmem_align = 0x100000;	/* 1M alignment */
336 	pb->io_align = 0x1000;		/* 4K alignment */
337 
338 	set_busreg(parent->pc, pd->tag, parent->busno, pb->busno, 0xff);
339 
340 	pb->swiz = parent->swiz + dev;
341 
342 	pb->ioext = NULL;
343 	pb->memext = NULL;
344 	pb->pmemext = NULL;
345 	pb->pc = parent->pc;
346 	pb->io_total = pb->mem_total = pb->pmem_total = 0;
347 
348 	pb->io_32bit = 0;
349 	if (parent->io_32bit) {
350 		io = pci_conf_read(parent->pc, pd->tag, PCI_BRIDGE_STATIO_REG);
351 		if (PCI_BRIDGE_IO_32BITS(io))
352 			pb->io_32bit = 1;
353 	}
354 
355 	pb->pmem_64bit = 0;
356 	if (parent->pmem_64bit) {
357 		pmem = pci_conf_read(parent->pc, pd->tag,
358 		    PCI_BRIDGE_PREFETCHMEM_REG);
359 		if (PCI_BRIDGE_PREFETCHMEM_64BITS(pmem))
360 			pb->pmem_64bit = 1;
361 	}
362 
363 	/* Bridges only forward a 32-bit range of non-prefetchable memory. */
364 	pb->mem_64bit = 0;
365 
366 	if (probe_bus(pb)) {
367 		printf("Failed to probe bus %d\n", pb->busno);
368 		goto err;
369 	}
370 
371 	/* We have found all subordinate busses now; reprogram busreg. */
372 	pb->last_busno = pb->next_busno - 1;
373 	parent->next_busno = pb->next_busno;
374 	set_busreg(parent->pc, pd->tag, parent->busno, pb->busno,
375 		   pb->last_busno);
376 	if (pci_conf_debug)
377 		printf("PCI bus bridge (parent %d) covers busses %d-%d\n",
378 			parent->busno, pb->busno, pb->last_busno);
379 
380 	if (pb->io_total > 0) {
381 		if (parent->niowin >= MAX_CONF_IO) {
382 			printf("pciconf: too many (%d) I/O windows\n",
383 			    parent->niowin);
384 			goto err;
385 		}
386 		pb->io_total |= pb->io_align - 1; /* Round up */
387 		pi = get_io_desc(parent, pb->io_total);
388 		pi->dev = pd;
389 		pi->reg = 0;
390 		pi->size = pb->io_total;
391 		pi->align = pb->io_align;	/* 4K min alignment */
392 		if (parent->io_align < pb->io_align)
393 			parent->io_align = pb->io_align;
394 		pi->prefetch = 0;
395 		parent->niowin++;
396 		parent->io_total += pb->io_total;
397 	}
398 
399 	if (pb->mem_total > 0) {
400 		if (parent->nmemwin >= MAX_CONF_MEM) {
401 			printf("pciconf: too many (%d) MEM windows\n",
402 			     parent->nmemwin);
403 			goto err;
404 		}
405 		pb->mem_total |= pb->mem_align - 1; /* Round up */
406 		pm = get_mem_desc(parent, pb->mem_total);
407 		pm->dev = pd;
408 		pm->reg = 0;
409 		pm->size = pb->mem_total;
410 		pm->align = pb->mem_align;	/* 1M min alignment */
411 		if (parent->mem_align < pb->mem_align)
412 			parent->mem_align = pb->mem_align;
413 		pm->prefetch = 0;
414 		parent->nmemwin++;
415 		parent->mem_total += pb->mem_total;
416 	}
417 
418 	if (pb->pmem_total > 0) {
419 		if (parent->nmemwin >= MAX_CONF_MEM) {
420 			printf("pciconf: too many MEM windows\n");
421 			goto err;
422 		}
423 		pb->pmem_total |= pb->pmem_align - 1; /* Round up */
424 		pm = get_mem_desc(parent, pb->pmem_total);
425 		pm->dev = pd;
426 		pm->reg = 0;
427 		pm->size = pb->pmem_total;
428 		pm->align = pb->pmem_align;	/* 1M alignment */
429 		if (parent->pmem_align < pb->pmem_align)
430 			parent->pmem_align = pb->pmem_align;
431 		pm->prefetch = 1;
432 		parent->nmemwin++;
433 		parent->pmem_total += pb->pmem_total;
434 	}
435 
436 	return pb;
437 err:
438 	kmem_free(pb, sizeof(*pb));
439 	return NULL;
440 }
441 
442 static int
443 pci_do_device_query(pciconf_bus_t *pb, pcitag_t tag, int dev, int func,
444     int mode)
445 {
446 	pciconf_dev_t	*pd;
447 	pciconf_win_t	*pi, *pm;
448 	pcireg_t	classreg, cmd, icr, bhlc, bar, mask, bar64, mask64,
449 	    busreg;
450 	uint64_t	size;
451 	int		br, width, reg_start, reg_end;
452 
453 	pd = &pb->device[pb->ndevs];
454 	pd->pc = pb->pc;
455 	pd->tag = tag;
456 	pd->ppb = NULL;
457 	pd->enable = mode;
458 
459 	classreg = pci_conf_read(pb->pc, tag, PCI_CLASS_REG);
460 
461 	cmd = pci_conf_read(pb->pc, tag, PCI_COMMAND_STATUS_REG);
462 	bhlc = pci_conf_read(pb->pc, tag, PCI_BHLC_REG);
463 
464 	if (PCI_CLASS(classreg) != PCI_CLASS_BRIDGE
465 	    && PCI_HDRTYPE_TYPE(bhlc) != PCI_HDRTYPE_PPB) {
466 		cmd &= ~(PCI_COMMAND_MASTER_ENABLE |
467 		    PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE);
468 		pci_conf_write(pb->pc, tag, PCI_COMMAND_STATUS_REG, cmd);
469 	} else if (pci_conf_debug) {
470 		print_tag(pb->pc, tag);
471 		printf("device is a bridge; not clearing enables\n");
472 	}
473 
474 	if ((cmd & PCI_STATUS_BACKTOBACK_SUPPORT) == 0)
475 		pb->fast_b2b = 0;
476 
477 	if ((cmd & PCI_STATUS_66MHZ_SUPPORT) == 0)
478 		pb->freq_66 = 0;
479 
480 	switch (PCI_HDRTYPE_TYPE(bhlc)) {
481 	case PCI_HDRTYPE_DEVICE:
482 		reg_start = PCI_MAPREG_START;
483 		reg_end = PCI_MAPREG_END;
484 		break;
485 	case PCI_HDRTYPE_PPB:
486 		pd->ppb = query_bus(pb, pd, dev);
487 		if (pd->ppb == NULL)
488 			return -1;
489 		return 0;
490 	case PCI_HDRTYPE_PCB:
491 		reg_start = PCI_MAPREG_START;
492 		reg_end = PCI_MAPREG_PCB_END;
493 
494 		busreg = pci_conf_read(pb->pc, tag, PCI_BUSNUM);
495 		busreg  =  (busreg & 0xff000000) |
496 		    __SHIFTIN(pb->busno, PCI_BRIDGE_BUS_PRIMARY) |
497 		    __SHIFTIN(pb->next_busno, PCI_BRIDGE_BUS_SECONDARY) |
498 		    __SHIFTIN(pb->next_busno, PCI_BRIDGE_BUS_SUBORDINATE);
499 		pci_conf_write(pb->pc, tag, PCI_BUSNUM, busreg);
500 
501 		pb->next_busno++;
502 		break;
503 	default:
504 		return -1;
505 	}
506 
507 	icr = pci_conf_read(pb->pc, tag, PCI_INTERRUPT_REG);
508 	pd->ipin = PCI_INTERRUPT_PIN(icr);
509 	pd->iline = PCI_INTERRUPT_LINE(icr);
510 	pd->min_gnt = PCI_MIN_GNT(icr);
511 	pd->max_lat = PCI_MAX_LAT(icr);
512 	if (pd->iline || pd->ipin) {
513 		pci_conf_interrupt(pb->pc, pb->busno, dev, pd->ipin, pb->swiz,
514 		    &pd->iline);
515 		icr &= ~(PCI_INTERRUPT_LINE_MASK << PCI_INTERRUPT_LINE_SHIFT);
516 		icr |= (pd->iline << PCI_INTERRUPT_LINE_SHIFT);
517 		pci_conf_write(pb->pc, tag, PCI_INTERRUPT_REG, icr);
518 	}
519 
520 	if (pd->min_gnt != 0 || pd->max_lat != 0) {
521 		if (pd->min_gnt != 0 && pd->min_gnt > pb->max_mingnt)
522 			pb->max_mingnt = pd->min_gnt;
523 
524 		if (pd->max_lat != 0 && pd->max_lat < pb->min_maxlat)
525 			pb->min_maxlat = pd->max_lat;
526 
527 		pb->bandwidth_used += pd->min_gnt * 4000000 /
528 				(pd->min_gnt + pd->max_lat);
529 	}
530 
531 	width = 4;
532 	for (br = reg_start; br < reg_end; br += width) {
533 #if 0
534 /* XXX Should only ignore if IDE not in legacy mode? */
535 		if (PCI_CLASS(classreg) == PCI_CLASS_MASS_STORAGE &&
536 		    PCI_SUBCLASS(classreg) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
537 			break;
538 		}
539 #endif
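		/*
		 * Standard BAR sizing sequence: save the BAR, write all
		 * ones, read back the mask of implemented address bits,
		 * and restore the original value.  For example, a
		 * 256-byte I/O BAR reads back as 0xffffff01 (assuming
		 * the upper 16 bits are implemented), which
		 * PCI_MAPREG_IO_SIZE() turns into 0x100; a 1MB memory
		 * BAR reads back as 0xfff00000 plus its type bits,
		 * giving a PCI_MAPREG_MEM_SIZE() of 0x100000.
		 */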
540 		bar = pci_conf_read(pb->pc, tag, br);
541 		pci_conf_write(pb->pc, tag, br, 0xffffffff);
542 		mask = pci_conf_read(pb->pc, tag, br);
543 		pci_conf_write(pb->pc, tag, br, bar);
544 		width = 4;
545 
546 		if (   (mode & PCI_CONF_MAP_IO)
547 		    && (PCI_MAPREG_TYPE(mask) == PCI_MAPREG_TYPE_IO)) {
548 			/*
549 			 * Upper 16 bits must be one.  Devices may hardwire
550 			 * them to zero, though, per PCI 2.2, 6.2.5.1, p 203.
551 			 */
552 			mask |= 0xffff0000;
553 
554 			size = PCI_MAPREG_IO_SIZE(mask);
555 			if (size == 0) {
556 				if (pci_conf_debug) {
557 					print_tag(pb->pc, tag);
558 					printf("I/O BAR 0x%x is void\n", br);
559 				}
560 				continue;
561 			}
562 
563 			if (pb->niowin >= MAX_CONF_IO) {
564 				printf("pciconf: too many I/O windows\n");
565 				return -1;
566 			}
567 
568 			pi = get_io_desc(pb, size);
569 			pi->dev = pd;
570 			pi->reg = br;
571 			pi->size = (uint64_t)size;
572 			pi->align = 4;
573 			if (pb->io_align < pi->size)
574 				pb->io_align = pi->size;
575 			pi->prefetch = 0;
576 			if (pci_conf_debug) {
577 				print_tag(pb->pc, tag);
578 				printf("Register 0x%x, I/O size %" PRIu64 "\n",
579 				    br, pi->size);
580 			}
581 			pb->niowin++;
582 			pb->io_total += size;
583 		} else if ((mode & PCI_CONF_MAP_MEM)
584 			   && (PCI_MAPREG_TYPE(mask) == PCI_MAPREG_TYPE_MEM)) {
585 			switch (PCI_MAPREG_MEM_TYPE(mask)) {
586 			case PCI_MAPREG_MEM_TYPE_32BIT:
587 			case PCI_MAPREG_MEM_TYPE_32BIT_1M:
588 				size = (uint64_t)PCI_MAPREG_MEM_SIZE(mask);
589 				break;
590 			case PCI_MAPREG_MEM_TYPE_64BIT:
591 				bar64 = pci_conf_read(pb->pc, tag, br + 4);
592 				pci_conf_write(pb->pc, tag, br + 4, 0xffffffff);
593 				mask64 = pci_conf_read(pb->pc, tag, br + 4);
594 				pci_conf_write(pb->pc, tag, br + 4, bar64);
595 				size = (uint64_t)PCI_MAPREG_MEM64_SIZE(
596 				      (((uint64_t)mask64) << 32) | mask);
597 				width = 8;
598 				break;
599 			default:
600 				print_tag(pb->pc, tag);
601 				printf("reserved mapping type 0x%x\n",
602 					PCI_MAPREG_MEM_TYPE(mask));
603 				continue;
604 			}
605 
606 			if (size == 0) {
607 				if (pci_conf_debug) {
608 					print_tag(pb->pc, tag);
609 					printf("MEM%d BAR 0x%x is void\n",
610 					    PCI_MAPREG_MEM_TYPE(mask) ==
611 						PCI_MAPREG_MEM_TYPE_64BIT ?
612 						64 : 32, br);
613 				}
614 				continue;
615 			} else {
616 				if (pci_conf_debug) {
617 					print_tag(pb->pc, tag);
618 					printf("MEM%d BAR 0x%x has size %#lx\n",
619 					    PCI_MAPREG_MEM_TYPE(mask) ==
620 						PCI_MAPREG_MEM_TYPE_64BIT ?
621 						64 : 32,
622 					    br, (unsigned long)size);
623 				}
624 			}
625 
626 			if (pb->nmemwin >= MAX_CONF_MEM) {
627 				printf("pciconf: too many memory windows\n");
628 				return -1;
629 			}
630 
631 			pm = get_mem_desc(pb, size);
632 			pm->dev = pd;
633 			pm->reg = br;
634 			pm->size = size;
635 			pm->align = 4;
636 			pm->prefetch = PCI_MAPREG_MEM_PREFETCHABLE(mask);
637 			if (pci_conf_debug) {
638 				print_tag(pb->pc, tag);
639 				printf("Register 0x%x, memory size %"
640 				    PRIu64 "\n", br, pm->size);
641 			}
642 			pb->nmemwin++;
643 			if (pm->prefetch) {
644 				pb->pmem_total += size;
645 				if (pb->pmem_align < pm->size)
646 					pb->pmem_align = pm->size;
647 			} else {
648 				pb->mem_total += size;
649 				if (pb->mem_align < pm->size)
650 					pb->mem_align = pm->size;
651 			}
652 		}
653 	}
654 
655 	if (mode & PCI_CONF_MAP_ROM) {
656 		bar = pci_conf_read(pb->pc, tag, PCI_MAPREG_ROM);
657 		pci_conf_write(pb->pc, tag, PCI_MAPREG_ROM, 0xfffffffe);
658 		mask = pci_conf_read(pb->pc, tag, PCI_MAPREG_ROM);
659 		pci_conf_write(pb->pc, tag, PCI_MAPREG_ROM, bar);
660 
661 		if (mask != 0 && mask != 0xffffffff) {
662 			if (pb->nmemwin >= MAX_CONF_MEM) {
663 				printf("pciconf: too many memory windows\n");
664 				return -1;
665 			}
666 			size = (uint64_t)PCI_MAPREG_MEM_SIZE(mask);
667 
668 			pm = get_mem_desc(pb, size);
669 			pm->dev = pd;
670 			pm->reg = PCI_MAPREG_ROM;
671 			pm->size = size;
672 			pm->align = 4;
673 			pm->prefetch = 0;
674 			if (pci_conf_debug) {
675 				print_tag(pb->pc, tag);
676 				printf("Expansion ROM memory size %"
677 				    PRIu64 "\n", pm->size);
678 			}
679 			pb->nmemwin++;
680 			if (pm->prefetch) {
681 				pb->pmem_total += size;
682 				if (pb->pmem_align < pm->size)
683 					pb->pmem_align = pm->size;
684 			} else {
685 				pb->mem_total += size;
686 				if (pb->mem_align < pm->size)
687 					pb->mem_align = pm->size;
688 			}
689 		}
690 	} else {
691 		/* Don't enable ROMs if we aren't going to map them. */
692 		mode &= ~PCI_CONF_ENABLE_ROM;
693 		pd->enable &= ~PCI_CONF_ENABLE_ROM;
694 	}
695 
696 	if (!(mode & PCI_CONF_ENABLE_ROM)) {
697 		/* Ensure ROM is disabled */
698 		bar = pci_conf_read(pb->pc, tag, PCI_MAPREG_ROM);
699 		pci_conf_write(pb->pc, tag, PCI_MAPREG_ROM,
700 		    bar & ~PCI_MAPREG_ROM_ENABLE);
701 	}
702 
703 	return 0;
704 }
705 
706 /************************************************************************/
707 /************************************************************************/
708 /********************   Bus configuration routines   ********************/
709 /************************************************************************/
710 /************************************************************************/
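/*
 * pci_allocate_range() carves a chunk of the given size and alignment
 * out of the extent map and returns its start address, or ~0ULL on
 * failure.  Callers therefore test "~address == 0" rather than
 * comparing against zero, since 0 can be a valid bus address.
 */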
711 static uint64_t
712 pci_allocate_range(struct extent * const ex, const uint64_t amt,
713 		   const int align, const bool ok64 __used_only_lp64)
714 {
715 	int	r;
716 	u_long	addr;
717 
718 	u_long end = ex->ex_end;
719 
720 #ifdef _LP64
721 	/*
722 	 * If a 64-bit range is not OK:
723 	 * ==> If the start of the range is > 4GB, allocation not possible.
724 	 * ==> If the end of the range is > (4GB-1), constrain the end.
725 	 *
726 	 * If a 64-bit range IS OK, then we prefer allocating above 4GB.
727 	 *
728 	 * XXX We guard this with _LP64 because extent maps use u_long
729 	 * internally.
730 	 */
731 	if (!ok64) {
732 		if (ex->ex_start >= (1UL << 32)) {
733 			printf("PCI: 32-BIT RESTRICTION, RANGE BEGINS AT %#lx\n",
734 			    ex->ex_start);
735 			return ~0ULL;
736 		}
737 		if (end > 0xffffffffUL) {
738 			end = 0xffffffffUL;
739 		}
740 	} else if (end > (1UL << 32)) {
741 		u_long start4g = ex->ex_start;
742 		if (start4g < (1UL << 32)) {
743 			start4g = (1UL << 32);
744 		}
745 		r = extent_alloc_subregion(ex, start4g, end, amt, align, 0,
746 					   EX_NOWAIT, &addr);
747 		if (r == 0) {
748 			return addr;
749 		}
750 	}
751 #endif /* _LP64 */
752 
753 	r = extent_alloc_subregion(ex, ex->ex_start, end, amt, align, 0,
754 				   EX_NOWAIT, &addr);
755 	if (r) {
756 		printf("extent_alloc_subregion(%p, %#lx, %#lx, %#" PRIx64 ", %#x) returned %d\n",
757 		    ex, ex->ex_start, end, amt, align, r);
758 		extent_print(ex);
759 		return ~0ULL;
760 	}
761 	return addr;
762 }
763 
764 static int
765 setup_iowins(pciconf_bus_t *pb)
766 {
767 	pciconf_win_t	*pi;
768 	pciconf_dev_t	*pd;
769 
770 	for (pi = pb->pciiowin; pi < &pb->pciiowin[pb->niowin]; pi++) {
771 		if (pi->size == 0)
772 			continue;
773 
774 		pd = pi->dev;
775 		if (pb->ioext == NULL) {
776 			/* Bus has no IO ranges, disable IO BAR */
777 			pi->address = 0;
778 			pd->enable &= ~PCI_CONF_ENABLE_IO;
779 			goto write_ioaddr;
780 		}
781 		pi->address = pci_allocate_range(pb->ioext, pi->size,
782 		    pi->align, false);
783 		if (~pi->address == 0) {
784 			print_tag(pd->pc, pd->tag);
785 			printf("Failed to allocate PCI I/O space (%"
786 			    PRIu64 " req)\n", pi->size);
787 			return -1;
788 		}
789 		if (pd->ppb && pi->reg == 0) {
790 			pd->ppb->ioext = extent_create("pciconf", pi->address,
791 			    pi->address + pi->size, NULL, 0,
792 			    EX_NOWAIT);
793 			if (pd->ppb->ioext == NULL) {
794 				print_tag(pd->pc, pd->tag);
795 				printf("Failed to alloc I/O ext. for bus %d\n",
796 				    pd->ppb->busno);
797 				return -1;
798 			}
799 			continue;
800 		}
801 		if (!pb->io_32bit && pi->address > 0xFFFF) {
802 			pi->address = 0;
803 			pd->enable &= ~PCI_CONF_ENABLE_IO;
804 		} else {
805 			pd->enable |= PCI_CONF_ENABLE_IO;
806 		}
807 write_ioaddr:
808 		if (pci_conf_debug) {
809 			print_tag(pd->pc, pd->tag);
810 			printf("Putting %" PRIu64 " I/O bytes @ %#" PRIx64
811 			    " (reg %x)\n", pi->size, pi->address, pi->reg);
812 		}
813 		pci_conf_write(pd->pc, pd->tag, pi->reg,
814 		    PCI_MAPREG_IO_ADDR(pi->address) | PCI_MAPREG_TYPE_IO);
815 	}
816 	return 0;
817 }
818 
819 static int
820 setup_memwins(pciconf_bus_t *pb)
821 {
822 	pciconf_win_t	*pm;
823 	pciconf_dev_t	*pd;
824 	pcireg_t	base;
825 	struct extent	*ex;
826 	bool		ok64;
827 
828 	for (pm = pb->pcimemwin; pm < &pb->pcimemwin[pb->nmemwin]; pm++) {
829 		if (pm->size == 0)
830 			continue;
831 
832 		ok64 = false;
833 		pd = pm->dev;
834 		if (pm->prefetch) {
835 			ex = pb->pmemext;
836 			ok64 = pb->pmem_64bit;
837 		} else {
838 			ex = pb->memext;
839 			ok64 = pb->mem_64bit && pd->ppb == NULL;
840 		}
841 
842 		/*
843 		 * We need to figure out if the memory BAR is 64-bit
844 		 * capable or not.  If it's not, then we need to constrain
845 		 * the address allocation.
846 		 */
847 		if (pm->reg == PCI_MAPREG_ROM) {
848 			ok64 = false;
849 		} else if (ok64) {
850 			base = pci_conf_read(pd->pc, pd->tag, pm->reg);
851 			ok64 = PCI_MAPREG_MEM_TYPE(base) ==
852 			    PCI_MAPREG_MEM_TYPE_64BIT;
853 		}
854 
855 		pm->address = pci_allocate_range(ex, pm->size, pm->align,
856 						 ok64);
857 		if (~pm->address == 0) {
858 			print_tag(pd->pc, pd->tag);
859 			printf(
860 			   "Failed to allocate PCI memory space (%" PRIu64
861 			   " req, prefetch=%d ok64=%d)\n", pm->size,
862 			   pm->prefetch, (int)ok64);
863 			return -1;
864 		}
865 		if (pd->ppb && pm->reg == 0) {
866 			ex = extent_create("pciconf", pm->address,
867 			    pm->address + pm->size, NULL, 0, EX_NOWAIT);
868 			if (ex == NULL) {
869 				print_tag(pd->pc, pd->tag);
870 				printf("Failed to alloc MEM ext. for bus %d\n",
871 				    pd->ppb->busno);
872 				return -1;
873 			}
874 			if (pm->prefetch)
875 				pd->ppb->pmemext = ex;
876 			else
877 				pd->ppb->memext = ex;
878 
879 			continue;
880 		}
881 		if (!ok64 && pm->address > 0xFFFFFFFFULL) {
882 			pm->address = 0;
883 			pd->enable &= ~PCI_CONF_ENABLE_MEM;
884 		} else
885 			pd->enable |= PCI_CONF_ENABLE_MEM;
886 
887 		if (pm->reg != PCI_MAPREG_ROM) {
888 			if (pci_conf_debug) {
889 				print_tag(pd->pc, pd->tag);
890 				printf(
891 				    "Putting %" PRIu64 " MEM bytes @ %#"
892 				    PRIx64 " (reg %x)\n", pm->size,
893 				    pm->address, pm->reg);
894 			}
895 			base = pci_conf_read(pd->pc, pd->tag, pm->reg);
896 			base = PCI_MAPREG_MEM_ADDR(pm->address) |
897 			    PCI_MAPREG_MEM_TYPE(base);
898 			pci_conf_write(pd->pc, pd->tag, pm->reg, base);
899 			if (PCI_MAPREG_MEM_TYPE(base) ==
900 			    PCI_MAPREG_MEM_TYPE_64BIT) {
901 				base = (pcireg_t)
902 				    (PCI_MAPREG_MEM64_ADDR(pm->address) >> 32);
903 				pci_conf_write(pd->pc, pd->tag, pm->reg + 4,
904 				    base);
905 			}
906 		}
907 	}
908 	for (pm = pb->pcimemwin; pm < &pb->pcimemwin[pb->nmemwin]; pm++) {
909 		if (pm->reg == PCI_MAPREG_ROM && pm->address != -1) {
910 			pd = pm->dev;
911 			if (!(pd->enable & PCI_CONF_MAP_ROM))
912 				continue;
913 			if (pci_conf_debug) {
914 				print_tag(pd->pc, pd->tag);
915 				printf(
916 				    "Putting %" PRIu64 " ROM bytes @ %#"
917 				    PRIx64 " (reg %x)\n", pm->size,
918 				    pm->address, pm->reg);
919 			}
920 			base = (pcireg_t) pm->address;
921 			if (pd->enable & PCI_CONF_ENABLE_ROM)
922 				base |= PCI_MAPREG_ROM_ENABLE;
923 
924 			pci_conf_write(pd->pc, pd->tag, pm->reg, base);
925 		}
926 	}
927 	return 0;
928 }
929 
930 static bool
931 constrain_bridge_mem_range(struct extent * const ex,
932 			   u_long * const base,
933 			   u_long * const limit,
934 			   const bool ok64 __used_only_lp64)
935 {
936 
937 	*base = ex->ex_start;
938 	*limit = ex->ex_end;
939 
940 #ifdef _LP64
941 	if (!ok64) {
942 		if (ex->ex_start >= (1UL << 32)) {
943 			return true;
944 		}
945 		if (ex->ex_end > 0xffffffffUL) {
946 			*limit = 0xffffffffUL;
947 		}
948 	}
949 #endif /* _LP64 */
950 
951 	return false;
952 }
953 
954 /*
955  * Configure I/O, memory, and prefetchable memory spaces, then call
956  * configure_bus().
957  */
958 static int
959 configure_bridge(pciconf_dev_t *pd)
960 {
961 	unsigned long	io_base, io_limit, mem_base, mem_limit;
962 	pciconf_bus_t	*pb;
963 	pcireg_t	io, iohigh, mem, cmd;
964 	int		rv;
965 	bool		isprefetchmem64;
966 	bool		bad_range;
967 
968 	pb = pd->ppb;
969 	/* Configure I/O base & limit */
970 	if (pb->ioext) {
971 		io_base = pb->ioext->ex_start;
972 		io_limit = pb->ioext->ex_end;
973 	} else {
974 		io_base  = 0x1000;	/* 4K */
975 		io_limit = 0x0000;
976 	}
977 	if (pb->io_32bit) {
978 		iohigh = __SHIFTIN(io_base >> 16, PCI_BRIDGE_IOHIGH_BASE) |
979 		    __SHIFTIN(io_limit >> 16, PCI_BRIDGE_IOHIGH_LIMIT);
980 	} else {
981 		if (io_limit > 0xFFFF) {
982 			printf("Bus %d bridge does not support 32-bit I/O.  ",
983 			    pb->busno);
984 			printf("Disabling I/O accesses\n");
985 			io_base  = 0x1000;	/* 4K */
986 			io_limit = 0x0000;
987 		}
988 		iohigh = 0;
989 	}
990 	io = pci_conf_read(pb->pc, pd->tag, PCI_BRIDGE_STATIO_REG) &
991 	    PCI_BRIDGE_STATIO_STATUS;
992 	io |= __SHIFTIN((io_base >> 8) & PCI_BRIDGE_STATIO_IOADDR,
993 	    PCI_BRIDGE_STATIO_IOBASE);
994 	io |= __SHIFTIN((io_limit >> 8) & PCI_BRIDGE_STATIO_IOADDR,
995 	    PCI_BRIDGE_STATIO_IOLIMIT);
996 	pci_conf_write(pb->pc, pd->tag, PCI_BRIDGE_STATIO_REG, io);
997 	pci_conf_write(pb->pc, pd->tag, PCI_BRIDGE_IOHIGH_REG, iohigh);
998 
999 	/* Configure mem base & limit */
1000 	bad_range = false;
1001 	if (pb->memext) {
1002 		bad_range = constrain_bridge_mem_range(pb->memext,
1003 						       &mem_base,
1004 						       &mem_limit,
1005 						       false);
1006 	} else {
1007 		mem_base  = 0x100000;	/* 1M */
1008 		mem_limit = 0x000000;
1009 	}
1010 	if (bad_range) {
1011 		printf("Bus %d bridge MEM range out of range.  ", pb->busno);
1012 		printf("Disabling MEM accesses\n");
1013 		mem_base  = 0x100000;	/* 1M */
1014 		mem_limit = 0x000000;
1015 	}
1016 	mem = __SHIFTIN((mem_base >> 16) & PCI_BRIDGE_MEMORY_ADDR,
1017 	    PCI_BRIDGE_MEMORY_BASE);
1018 	mem |= __SHIFTIN((mem_limit >> 16) & PCI_BRIDGE_MEMORY_ADDR,
1019 	    PCI_BRIDGE_MEMORY_LIMIT);
1020 	pci_conf_write(pb->pc, pd->tag, PCI_BRIDGE_MEMORY_REG, mem);
1021 
1022 	/* Configure prefetchable mem base & limit */
1023 	mem = pci_conf_read(pb->pc, pd->tag, PCI_BRIDGE_PREFETCHMEM_REG);
1024 	isprefetchmem64 = PCI_BRIDGE_PREFETCHMEM_64BITS(mem);
1025 	bad_range = false;
1026 	if (pb->pmemext) {
1027 		bad_range = constrain_bridge_mem_range(pb->pmemext,
1028 						       &mem_base,
1029 						       &mem_limit,
1030 						       isprefetchmem64);
1031 	} else {
1032 		mem_base  = 0x100000;	/* 1M */
1033 		mem_limit = 0x000000;
1034 	}
1035 	if (bad_range) {
1036 		printf("Bus %d bridge does not support 64-bit PMEM.  ",
1037 		    pb->busno);
1038 		printf("Disabling prefetchable-MEM accesses\n");
1039 		mem_base  = 0x100000;	/* 1M */
1040 		mem_limit = 0x000000;
1041 	}
1042 	mem = __SHIFTIN((mem_base >> 16) & PCI_BRIDGE_PREFETCHMEM_ADDR,
1043 	    PCI_BRIDGE_PREFETCHMEM_BASE);
1044 	mem |= __SHIFTIN((mem_limit >> 16) & PCI_BRIDGE_PREFETCHMEM_ADDR,
1045 	    PCI_BRIDGE_PREFETCHMEM_LIMIT);
1046 	pci_conf_write(pb->pc, pd->tag, PCI_BRIDGE_PREFETCHMEM_REG, mem);
1047 	/*
1048 	 * XXX -- 64-bit systems need a lot more than just this...
1049 	 */
1050 	if (isprefetchmem64) {
1051 		mem_base  = (uint64_t)mem_base  >> 32;
1052 		mem_limit = (uint64_t)mem_limit >> 32;
1053 		pci_conf_write(pb->pc, pd->tag,
1054 		    PCI_BRIDGE_PREFETCHBASEUP32_REG, mem_base & 0xffffffff);
1055 		pci_conf_write(pb->pc, pd->tag,
1056 		    PCI_BRIDGE_PREFETCHLIMITUP32_REG, mem_limit & 0xffffffff);
1057 	}
1058 
1059 	rv = configure_bus(pb);
1060 
1061 	if (pb->ioext)
1062 		extent_destroy(pb->ioext);
1063 	if (pb->memext)
1064 		extent_destroy(pb->memext);
1065 	if (pb->pmemext)
1066 		extent_destroy(pb->pmemext);
1067 	if (rv == 0) {
1068 		cmd = pci_conf_read(pd->pc, pd->tag, PCI_BRIDGE_CONTROL_REG);
1069 		cmd &= ~PCI_BRIDGE_CONTROL; /* Clear control bit first */
1070 		cmd |= PCI_BRIDGE_CONTROL_PERE | PCI_BRIDGE_CONTROL_SERR;
1071 		if (pb->fast_b2b)
1072 			cmd |= PCI_BRIDGE_CONTROL_SECFASTB2B;
1073 
1074 		pci_conf_write(pd->pc, pd->tag, PCI_BRIDGE_CONTROL_REG, cmd);
1075 		cmd = pci_conf_read(pd->pc, pd->tag, PCI_COMMAND_STATUS_REG);
1076 		cmd |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE;
1077 		pci_conf_write(pd->pc, pd->tag, PCI_COMMAND_STATUS_REG, cmd);
1078 	}
1079 
1080 	return rv;
1081 }
1082 
1083 /*
1084  * Calculate latency values, allocate I/O and MEM segments, then set them
1085  * up.  If a PCI-PCI bridge is found, configure the bridge separately,
1086  * which will cause a recursive call back here.
1087  */
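/*
 * MIN_GNT and MAX_LAT are expressed in units of 0.25us, so one bus-second
 * holds 4,000,000 such units ("band" below).  A device's share of the bus
 * is roughly min_gnt / (min_gnt + max_lat); e.g. min_gnt=10, max_lat=64
 * consumes 10 * 4000000 / 74, about 540,000 units.  Latency timers are
 * programmed in bus clocks, and a 0.25us unit corresponds to bus_mhz / 4
 * clocks (roughly 8 at 33MHz), hence the "* bus_mhz / 4" conversions.
 */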
1088 static int
1089 configure_bus(pciconf_bus_t *pb)
1090 {
1091 	pciconf_dev_t	*pd;
1092 	int		def_ltim, max_ltim, band, bus_mhz;
1093 
1094 	if (pb->ndevs == 0) {
1095 		if (pci_conf_debug)
1096 			printf("PCI bus %d - no devices\n", pb->busno);
1097 		return 1;
1098 	}
1099 	bus_mhz = pb->freq_66 ? 66 : 33;
1100 	max_ltim = pb->max_mingnt * bus_mhz / 4;	/* cvt to cycle count */
1101 	band = 4000000;					/* 0.25us units per second */
1102 	if (band < pb->bandwidth_used) {
1103 		printf("PCI bus %d: Warning: Total bandwidth exceeded!? (%d)\n",
1104 		    pb->busno, pb->bandwidth_used);
1105 		def_ltim = -1;
1106 	} else {
1107 		def_ltim = (band - pb->bandwidth_used) / pb->ndevs;
1108 		if (def_ltim > pb->min_maxlat)
1109 			def_ltim = pb->min_maxlat;
1110 		def_ltim = def_ltim * bus_mhz / 4;
1111 	}
1112 	def_ltim = (def_ltim + 7) & ~7;
1113 	max_ltim = (max_ltim + 7) & ~7;
1114 
1115 	pb->def_ltim = MIN(def_ltim, 255);
1116 	pb->max_ltim = MIN(MAX(max_ltim, def_ltim), 255);
1117 
1118 	/*
1119 	 * Now we have what we need to initialize the devices.
1120 	 * It would probably be better if we could allocate all of these
1121 	 * for all busses at once, but "not right now".  First, get a list
1122 	 * of free memory ranges from the m.d. system.
1123 	 */
1124 	if (setup_iowins(pb) || setup_memwins(pb)) {
1125 		printf("PCI bus configuration failed: "
1126 		"unable to assign all I/O and memory ranges.\n");
1127 		return -1;
1128 	}
1129 
1130 	/*
1131 	 * Configure the latency for the devices, and enable them.
1132 	 */
1133 	for (pd = pb->device; pd < &pb->device[pb->ndevs]; pd++) {
1134 		pcireg_t cmd, classreg, misc;
1135 		int	ltim;
1136 
1137 		if (pci_conf_debug) {
1138 			print_tag(pd->pc, pd->tag);
1139 			printf("Configuring device.\n");
1140 		}
1141 		classreg = pci_conf_read(pd->pc, pd->tag, PCI_CLASS_REG);
1142 		misc = pci_conf_read(pd->pc, pd->tag, PCI_BHLC_REG);
1143 		cmd = pci_conf_read(pd->pc, pd->tag, PCI_COMMAND_STATUS_REG);
1144 		if (pd->enable & PCI_CONF_ENABLE_PARITY)
1145 			cmd |= PCI_COMMAND_PARITY_ENABLE;
1146 		if (pd->enable & PCI_CONF_ENABLE_SERR)
1147 			cmd |= PCI_COMMAND_SERR_ENABLE;
1148 		if (pb->fast_b2b)
1149 			cmd |= PCI_COMMAND_BACKTOBACK_ENABLE;
1150 		if (PCI_CLASS(classreg) != PCI_CLASS_BRIDGE ||
1151 		    PCI_SUBCLASS(classreg) != PCI_SUBCLASS_BRIDGE_PCI) {
1152 			if (pd->enable & PCI_CONF_ENABLE_IO)
1153 				cmd |= PCI_COMMAND_IO_ENABLE;
1154 			if (pd->enable & PCI_CONF_ENABLE_MEM)
1155 				cmd |= PCI_COMMAND_MEM_ENABLE;
1156 			if (pd->enable & PCI_CONF_ENABLE_BM)
1157 				cmd |= PCI_COMMAND_MASTER_ENABLE;
1158 			ltim = pd->min_gnt * bus_mhz / 4;
1159 			ltim = MIN (MAX (pb->def_ltim, ltim), pb->max_ltim);
1160 		} else {
1161 			cmd |= PCI_COMMAND_MASTER_ENABLE;
1162 			ltim = MIN (pb->def_ltim, pb->max_ltim);
1163 		}
1164 		if ((pd->enable &
1165 		    (PCI_CONF_ENABLE_MEM | PCI_CONF_ENABLE_IO)) == 0) {
1166 			print_tag(pd->pc, pd->tag);
1167 			printf("Disabled due to lack of resources.\n");
1168 			cmd &= ~(PCI_COMMAND_MASTER_ENABLE |
1169 			    PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE);
1170 		}
1171 		pci_conf_write(pd->pc, pd->tag, PCI_COMMAND_STATUS_REG, cmd);
1172 
1173 		misc &= ~((PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT) |
1174 		    (PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT));
1175 		misc |= (ltim & PCI_LATTIMER_MASK) << PCI_LATTIMER_SHIFT;
1176 		misc |= ((pb->cacheline_size >> 2) & PCI_CACHELINE_MASK) <<
1177 		    PCI_CACHELINE_SHIFT;
1178 		pci_conf_write(pd->pc, pd->tag, PCI_BHLC_REG, misc);
1179 
1180 		if (pd->ppb) {
1181 			if (configure_bridge(pd) < 0)
1182 				return -1;
1183 			continue;
1184 		}
1185 	}
1186 
1187 	if (pci_conf_debug)
1188 		printf("PCI bus %d configured\n", pb->busno);
1189 
1190 	return 0;
1191 }
1192 
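/*
 * Decide whether an extent map can satisfy allocations above 4GB by doing
 * a trial allocation: grab (and immediately free) a single byte in the
 * region above 4GB.  If that succeeds, 64-bit BAR assignments are possible
 * from this map.
 */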
1193 static bool
1194 mem_region_ok64(struct extent * const ex __used_only_lp64)
1195 {
1196 	bool rv = false;
1197 
1198 #ifdef _LP64
1199 	/*
1200 	 * XXX We need to guard this with _LP64 because
1201 	 * extent maps use u_long internally.
1202 	 */
1203 	u_long addr64;
1204 	if (ex->ex_end > (1UL << 32) &&
1205 	    extent_alloc_subregion(ex, MAX((1UL << 32), ex->ex_start),
1206 				   ex->ex_end,
1207 				   1 /* size */,
1208 				   1 /* alignment */,
1209 				   0 /* boundary */,
1210 				   EX_NOWAIT,
1211 				   &addr64) == 0) {
1212 		(void) extent_free(ex, addr64,
1213 				   1 /* size */,
1214 				   EX_NOWAIT);
1215 		rv = true;
1216 	}
1217 #endif /* _LP64 */
1218 
1219 	return rv;
1220 }
1221 
1222 /*
1223  * Let's configure the PCI bus.
1224  * This consists of basically scanning for all existing devices,
1225  * identifying their needs, and then making another pass over them
1226  * to set:
1227  *	1. I/O addresses
1228  *	2. Memory addresses (Prefetchable and not)
1229  *	3. PCI command register
1230  *	4. The latency part of the PCI BHLC (BIST (Built-In Self Test),
1231  *	    Header type, Latency timer, Cache line size) register
1232  *
1233  * The command register is set to enable fast back-to-back transactions
1234  * if the host bridge says it can handle it.  We also configure
1235  * Master Enable, SERR enable, parity enable, and (if this is not a
1236  * PCI-PCI bridge) the I/O and Memory spaces.  Apparently some devices
1237  * will not report some I/O space.
1238  *
1239  * The latency is computed to be a "fair share" of the bus bandwidth.
1240  * The bus bandwidth variable is initialized to the number of PCI cycles
1241  * in one second.  The number of cycles taken for one transaction by each
1242  * device (MAX_LAT + MIN_GNT) is then subtracted from the bandwidth.
1243  * Care is taken to ensure that the latency timer won't be set such that
1244  * it would exceed the critical time for any device.
1245  *
1246  * This is complicated somewhat due to the presence of bridges.  PCI-PCI
1247  * bridges are probed and configured recursively.
1248  */
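/*
 * Hypothetical example of how machine-dependent attachment code might call
 * this function (the address ranges and cacheline size below are made up
 * for illustration; a real port supplies the windows its host bridge
 * actually decodes, and passing NULL for pmemext makes prefetchable
 * allocations share memext):
 *
 *	struct extent *ioext, *memext;
 *
 *	ioext  = extent_create("pciconfio", 0x1000, 0xffff,
 *	    NULL, 0, EX_NOWAIT);
 *	memext = extent_create("pciconfmem", 0x80000000, 0xbfffffff,
 *	    NULL, 0, EX_NOWAIT);
 *	if (pci_configure_bus(pc, ioext, memext, NULL, 0, 64) != 0)
 *		printf("PCI bus configuration failed\n");
 *	extent_destroy(ioext);
 *	extent_destroy(memext);
 */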
1249 int
1250 pci_configure_bus(pci_chipset_tag_t pc, struct extent *ioext,
1251     struct extent *memext, struct extent *pmemext, int firstbus,
1252     int cacheline_size)
1253 {
1254 	pciconf_bus_t	*pb;
1255 	int		rv;
1256 
1257 	pb = kmem_zalloc(sizeof (pciconf_bus_t), KM_SLEEP);
1258 	pb->busno = firstbus;
1259 	pb->next_busno = pb->busno + 1;
1260 	pb->last_busno = 255;
1261 	pb->cacheline_size = cacheline_size;
1262 	pb->parent_bus = NULL;
1263 	pb->swiz = 0;
1264 	pb->io_32bit = 1;
1265 	pb->ioext = ioext;
1266 	pb->memext = memext;
1267 	if (pmemext == NULL)
1268 		pb->pmemext = memext;
1269 	else
1270 		pb->pmemext = pmemext;
1271 
1272 	/*
1273 	 * Probe the memory region extent maps to see
1274 	 * if allocation of 64-bit addresses is possible.
1275 	 */
1276 	pb->mem_64bit = mem_region_ok64(pb->memext);
1277 	pb->pmem_64bit = mem_region_ok64(pb->pmemext);
1278 
1279 	pb->pc = pc;
1280 	pb->io_total = pb->mem_total = pb->pmem_total = 0;
1281 
1282 	rv = probe_bus(pb);
1283 	pb->last_busno = pb->next_busno - 1;
1284 	if (rv == 0)
1285 		rv = configure_bus(pb);
1286 
1287 	/*
1288 	 * All done!
1289 	 */
1290 	kmem_free(pb, sizeof(*pb));
1291 	return rv;
1292 }
1293