/*	$NetBSD: pci_ranges.c,v 1.2 2011/09/13 18:09:52 dyoung Exp $	*/

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by David Young <dyoung@NetBSD.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci_ranges.c,v 1.2 2011/09/13 18:09:52 dyoung Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/bus.h>
#include <sys/kmem.h>

#include <prop/proplib.h>
#include <ppath/ppath.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pccbbreg.h>

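/*
 * A pci_alloc_t describes one address-space reservation that the
 * firmware has already programmed into a device: an I/O or memory BAR,
 * a PCI-PCI bridge window, a CardBus bridge window, or the legacy
 * ranges implied by a bridge's VGA-enable bit.  pal_reg[] records the
 * configuration registers (offset, value, mask) that establish the
 * reservation.
 */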
typedef enum pci_alloc_regtype {
	  PCI_ALLOC_REGTYPE_NONE = 0
	, PCI_ALLOC_REGTYPE_BAR = 1
	, PCI_ALLOC_REGTYPE_WIN = 2
	, PCI_ALLOC_REGTYPE_CBWIN = 3
	, PCI_ALLOC_REGTYPE_VGA_EN = 4
} pci_alloc_regtype_t;

typedef enum pci_alloc_space {
	  PCI_ALLOC_SPACE_IO = 0
	, PCI_ALLOC_SPACE_MEM = 1
} pci_alloc_space_t;

typedef enum pci_alloc_flags {
	  PCI_ALLOC_F_PREFETCHABLE = 0x1
} pci_alloc_flags_t;

typedef struct pci_alloc {
	TAILQ_ENTRY(pci_alloc)		pal_link;
	pcitag_t			pal_tag;
	uint64_t			pal_addr;
	uint64_t			pal_size;
	pci_alloc_regtype_t		pal_type;
	struct pci_alloc_reg {
		int			r_ofs;
		pcireg_t		r_val;
		pcireg_t		r_mask;
	}				pal_reg[3];
	pci_alloc_space_t		pal_space;
	pci_alloc_flags_t		pal_flags;
} pci_alloc_t;

typedef struct pci_alloc_reg pci_alloc_reg_t;

TAILQ_HEAD(pci_alloc_list, pci_alloc);

typedef struct pci_alloc_list pci_alloc_list_t;

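/*
 * Copy an allocation record into freshly allocated storage
 * (pci_alloc_dup), or append such a copy to a list (pci_alloc_linkdup).
 */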
static pci_alloc_t *
pci_alloc_dup(const pci_alloc_t *pal)
{
	pci_alloc_t *npal;

	if ((npal = kmem_alloc(sizeof(*npal), KM_SLEEP)) == NULL)
		return NULL;

	*npal = *pal;

	return npal;
}

static bool
pci_alloc_linkdup(pci_alloc_list_t *pals, const pci_alloc_t *pal)
{
	pci_alloc_t *npal;

	if ((npal = pci_alloc_dup(pal)) == NULL)
		return false;

	TAILQ_INSERT_TAIL(pals, npal, pal_link);

	return true;
}

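/*
 * Context shared by the inference callbacks: the chipset tag, the list
 * of allocation records found so far, and the running lower/upper
 * bounds of the inferred I/O and MMIO ranges.
 */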
struct range_infer_ctx {
	pci_chipset_tag_t	ric_pc;
	pci_alloc_list_t	ric_pals;
	bus_addr_t		ric_mmio_bottom;
	bus_addr_t		ric_mmio_top;
	bus_addr_t		ric_io_bottom;
	bus_addr_t		ric_io_top;
};

static bool
io_range_extend(struct range_infer_ctx *ric, const pci_alloc_t *pal)
{
	if (ric->ric_io_bottom > pal->pal_addr)
		ric->ric_io_bottom = pal->pal_addr;
	if (ric->ric_io_top < pal->pal_addr + pal->pal_size)
		ric->ric_io_top = pal->pal_addr + pal->pal_size;

	return pci_alloc_linkdup(&ric->ric_pals, pal);
}

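/*
 * Record an I/O BAR.  curbar is the programmed value; sizebar is the
 * value read back after writing all-ones, from which the decoded size
 * follows.
 */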
static bool
io_range_extend_by_bar(struct range_infer_ctx *ric, int bus, int dev, int fun,
    int ofs, pcireg_t curbar, pcireg_t sizebar)
{
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_IO
		, .pal_type = PCI_ALLOC_REGTYPE_BAR
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r->r_ofs = ofs;
	r->r_val = curbar;

	pal.pal_addr = PCI_MAPREG_IO_ADDR(curbar);
	pal.pal_size = PCI_MAPREG_IO_SIZE(sizebar);

	aprint_debug("%s: %d.%d.%d base at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return (pal.pal_size == 0) || io_range_extend(ric, &pal);
}

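/*
 * If a bridge forwards VGA cycles (I/O decoding enabled and the
 * VGA-enable bit set in its bridge control register), record the
 * legacy VGA I/O ranges 0x3b0-0x3bb and 0x3c0-0x3df behind it.
 */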
static bool
io_range_extend_by_vga_enable(struct range_infer_ctx *ric,
    int bus, int dev, int fun, pcireg_t csr, pcireg_t bcr)
{
	pci_alloc_reg_t *r;
	pci_alloc_t tpal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_IO
		, .pal_type = PCI_ALLOC_REGTYPE_VGA_EN
		, .pal_reg = {{
			  .r_ofs = PCI_COMMAND_STATUS_REG
			, .r_mask = PCI_COMMAND_IO_ENABLE
		  }, {
			  .r_ofs = PCI_BRIDGE_CONTROL_REG
			, .r_mask =
			    PCI_BRIDGE_CONTROL_VGA << PCI_BRIDGE_CONTROL_SHIFT
		  }}
	}, pal[2];

	aprint_debug("%s: %d.%d.%d enter\n", __func__, bus, dev, fun);

	if ((csr & PCI_COMMAND_IO_ENABLE) == 0 ||
	    (bcr & (PCI_BRIDGE_CONTROL_VGA << PCI_BRIDGE_CONTROL_SHIFT)) == 0) {
		aprint_debug("%s: %d.%d.%d I/O or VGA disabled\n",
		    __func__, bus, dev, fun);
		return true;
	}

	r = &tpal.pal_reg[0];
	tpal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_val = csr;
	r[1].r_val = bcr;

	pal[0] = pal[1] = tpal;

	pal[0].pal_addr = 0x3b0;
	pal[0].pal_size = 0x3bb - 0x3b0 + 1;

	pal[1].pal_addr = 0x3c0;
	pal[1].pal_size = 0x3df - 0x3c0 + 1;

	/* XXX add aliases for pal[0..1] */

	return io_range_extend(ric, &pal[0]) && io_range_extend(ric, &pal[1]);
}

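/*
 * Record a PCI-PCI bridge I/O window.  Base and limit are kept in 4KB
 * units; if the bridge decodes 32-bit I/O, the upper address bits come
 * from the I/O-high register.
 */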
static bool
io_range_extend_by_win(struct range_infer_ctx *ric,
    int bus, int dev, int fun, int ofs, int ofshigh,
    pcireg_t io, pcireg_t iohigh)
{
	const int fourkb = 4 * 1024;
	pcireg_t baser, limitr;
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_IO
		, .pal_type = PCI_ALLOC_REGTYPE_WIN
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_ofs = ofs;
	r[0].r_val = io;

	baser = ((io >> PCI_BRIDGE_STATIO_IOBASE_SHIFT) &
	    PCI_BRIDGE_STATIO_IOBASE_MASK) >> 4;
	limitr = ((io >> PCI_BRIDGE_STATIO_IOLIMIT_SHIFT) &
	    PCI_BRIDGE_STATIO_IOLIMIT_MASK) >> 4;

	if (PCI_BRIDGE_IO_32BITS(io)) {
		pcireg_t baseh, limith;

		r[1].r_mask = ~(pcireg_t)0;
		r[1].r_ofs = ofshigh;
		r[1].r_val = iohigh;

		baseh = (iohigh >> PCI_BRIDGE_IOHIGH_BASE_SHIFT) & PCI_BRIDGE_IOHIGH_BASE_MASK;
		limith = (iohigh >> PCI_BRIDGE_IOHIGH_LIMIT_SHIFT) & PCI_BRIDGE_IOHIGH_LIMIT_MASK;

		baser |= baseh << 4;
		limitr |= limith << 4;
	}

	/* XXX check with the PCI standard */
	if (baser > limitr)
		return true;

	pal.pal_addr = baser * fourkb;
	pal.pal_size = (limitr - baser + 1) * fourkb;

	aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return io_range_extend(ric, &pal);
}

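/*
 * Record a CardBus bridge I/O window described by the base/limit
 * register pair at ofs and ofs + 4.
 */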
static bool
io_range_extend_by_cbwin(struct range_infer_ctx *ric,
    int bus, int dev, int fun, int ofs, pcireg_t base0, pcireg_t limit0)
{
	pcireg_t base, limit;
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_IO
		, .pal_type = PCI_ALLOC_REGTYPE_CBWIN
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }, {
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_ofs = ofs;
	r[0].r_val = base0;
	r[1].r_ofs = ofs + 4;
	r[1].r_val = limit0;

	base = base0 & __BITS(31, 2);
	limit = limit0 & __BITS(31, 2);

	if (base > limit)
		return true;

	pal.pal_addr = base;
	pal.pal_size = limit - base + 4;	/* XXX */

	aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return io_range_extend(ric, &pal);
}

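/*
 * pci_device_foreach_min() callback: infer the I/O space consumed by
 * one device or bridge.  Bridge I/O windows and the legacy VGA ranges
 * are recorded first; then each I/O BAR is sized by writing all-ones,
 * reading the register back, and restoring the original value.
 */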
static void
io_range_infer(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
{
	struct range_infer_ctx *ric = ctx;
	pcireg_t bhlcr, limit, io;
	int bar, bus, dev, fun, hdrtype, nbar;
	bool ok = true;

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);

	hdrtype = PCI_HDRTYPE_TYPE(bhlcr);

	pci_decompose_tag(pc, tag, &bus, &dev, &fun);

	switch (hdrtype) {
	case PCI_HDRTYPE_PPB:
		nbar = 2;
		/* Extract I/O windows */
		ok = ok && io_range_extend_by_win(ric, bus, dev, fun,
		    PCI_BRIDGE_STATIO_REG,
		    PCI_BRIDGE_IOHIGH_REG,
		    pci_conf_read(pc, tag, PCI_BRIDGE_STATIO_REG),
		    pci_conf_read(pc, tag, PCI_BRIDGE_IOHIGH_REG));
		ok = ok && io_range_extend_by_vga_enable(ric, bus, dev, fun,
		    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG),
		    pci_conf_read(pc, tag, PCI_BRIDGE_CONTROL_REG));
		break;
	case PCI_HDRTYPE_PCB:
		/* Extract I/O windows */
		io = pci_conf_read(pc, tag, PCI_CB_IOBASE0);
		limit = pci_conf_read(pc, tag, PCI_CB_IOLIMIT0);
		ok = ok && io_range_extend_by_cbwin(ric, bus, dev, fun,
		    PCI_CB_IOBASE0, io, limit);
		io = pci_conf_read(pc, tag, PCI_CB_IOBASE1);
		limit = pci_conf_read(pc, tag, PCI_CB_IOLIMIT1);
		ok = ok && io_range_extend_by_cbwin(ric, bus, dev, fun,
		    PCI_CB_IOBASE1, io, limit);
		nbar = 1;
		break;
	case PCI_HDRTYPE_DEVICE:
		nbar = 6;
		break;
	default:
		aprint_debug("%s: unknown header type %d at %d.%d.%d\n",
		    __func__, hdrtype, bus, dev, fun);
		return;
	}

	for (bar = 0; bar < nbar; bar++) {
		pcireg_t basebar, sizebar;

		basebar = pci_conf_read(pc, tag, PCI_BAR(bar));
		pci_conf_write(pc, tag, PCI_BAR(bar), 0xffffffff);
		sizebar = pci_conf_read(pc, tag, PCI_BAR(bar));
		pci_conf_write(pc, tag, PCI_BAR(bar), basebar);

		if (sizebar == 0)
			continue;
		if (PCI_MAPREG_TYPE(sizebar) != PCI_MAPREG_TYPE_IO)
			continue;

		ok = ok && io_range_extend_by_bar(ric, bus, dev, fun,
		    PCI_BAR(bar), basebar, sizebar);
	}
	if (!ok) {
		aprint_verbose("I/O range inference failed at PCI %d.%d.%d\n",
		    bus, dev, fun);
	}
}

static bool
mmio_range_extend(struct range_infer_ctx *ric, const pci_alloc_t *pal)
{
	if (ric->ric_mmio_bottom > pal->pal_addr)
		ric->ric_mmio_bottom = pal->pal_addr;
	if (ric->ric_mmio_top < pal->pal_addr + pal->pal_size)
		ric->ric_mmio_top = pal->pal_addr + pal->pal_size;

	return pci_alloc_linkdup(&ric->ric_pals, pal);
}

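/*
 * Record a memory BAR.  32-bit and 64-bit BARs are handled (only the
 * low half of a 64-bit BAR is examined here); the below-1MB type is
 * ignored.
 */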
static bool
mmio_range_extend_by_bar(struct range_infer_ctx *ric, int bus, int dev, int fun,
    int ofs, pcireg_t curbar, pcireg_t sizebar)
{
	int type;
	bool prefetchable;
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_MEM
		, .pal_type = PCI_ALLOC_REGTYPE_BAR
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r->r_ofs = ofs;
	r->r_val = curbar;

	pal.pal_addr = PCI_MAPREG_MEM_ADDR(curbar);

	type = PCI_MAPREG_MEM_TYPE(curbar);
	prefetchable = PCI_MAPREG_MEM_PREFETCHABLE(curbar);

	if (prefetchable)
		pal.pal_flags |= PCI_ALLOC_F_PREFETCHABLE;

	switch (type) {
	case PCI_MAPREG_MEM_TYPE_32BIT:
		pal.pal_size = PCI_MAPREG_MEM_SIZE(sizebar);
		break;
	case PCI_MAPREG_MEM_TYPE_64BIT:
		pal.pal_size = PCI_MAPREG_MEM64_SIZE(sizebar);
		break;
	case PCI_MAPREG_MEM_TYPE_32BIT_1M:
	default:
		aprint_debug("%s: ignored memory type %d at %d.%d.%d\n",
		    __func__, type, bus, dev, fun);
		return false;
	}

	aprint_debug("%s: %d.%d.%d base at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return (pal.pal_size == 0) || mmio_range_extend(ric, &pal);
}

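/*
 * If a bridge forwards VGA cycles (memory decoding enabled and the
 * VGA-enable bit set), record the legacy VGA frame buffer range
 * 0xa0000-0xbffff behind it.
 */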
static bool
mmio_range_extend_by_vga_enable(struct range_infer_ctx *ric,
    int bus, int dev, int fun, pcireg_t csr, pcireg_t bcr)
{
	pci_alloc_reg_t *r;
	pci_alloc_t tpal = {
		  .pal_flags = PCI_ALLOC_F_PREFETCHABLE	/* XXX a guess */
		, .pal_space = PCI_ALLOC_SPACE_MEM
		, .pal_type = PCI_ALLOC_REGTYPE_VGA_EN
		, .pal_reg = {{
			  .r_ofs = PCI_COMMAND_STATUS_REG
			, .r_mask = PCI_COMMAND_MEM_ENABLE
		  }, {
			  .r_ofs = PCI_BRIDGE_CONTROL_REG
			, .r_mask =
			    PCI_BRIDGE_CONTROL_VGA << PCI_BRIDGE_CONTROL_SHIFT
		  }}
	}, pal;

	aprint_debug("%s: %d.%d.%d enter\n", __func__, bus, dev, fun);

	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0 ||
	    (bcr & (PCI_BRIDGE_CONTROL_VGA << PCI_BRIDGE_CONTROL_SHIFT)) == 0) {
		aprint_debug("%s: %d.%d.%d memory or VGA disabled\n",
		    __func__, bus, dev, fun);
		return true;
	}

	r = &tpal.pal_reg[0];
	tpal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_val = csr;
	r[1].r_val = bcr;

	pal = tpal;

	pal.pal_addr = 0xa0000;
	pal.pal_size = 0xbffff - 0xa0000 + 1;

	return mmio_range_extend(ric, &pal);
}

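/*
 * Record a PCI-PCI bridge non-prefetchable memory window; base and
 * limit are kept in 1MB units.
 */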
static bool
mmio_range_extend_by_win(struct range_infer_ctx *ric,
    int bus, int dev, int fun, int ofs, pcireg_t mem)
{
	const int onemeg = 1024 * 1024;
	pcireg_t baser, limitr;
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_MEM
		, .pal_type = PCI_ALLOC_REGTYPE_WIN
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r->r_ofs = ofs;
	r->r_val = mem;

	baser = (mem >> PCI_BRIDGE_MEMORY_BASE_SHIFT) &
	    PCI_BRIDGE_MEMORY_BASE_MASK;
	limitr = (mem >> PCI_BRIDGE_MEMORY_LIMIT_SHIFT) &
	    PCI_BRIDGE_MEMORY_LIMIT_MASK;

	/* XXX check with the PCI standard */
	if (baser > limitr || limitr == 0)
		return true;

	pal.pal_addr = baser * onemeg;
	pal.pal_size = (limitr - baser + 1) * onemeg;

	aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return mmio_range_extend(ric, &pal);
}

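/*
 * Record a PCI-PCI bridge prefetchable memory window.  Base and limit
 * are kept in 1MB units; when the window is 64-bit, the upper address
 * bits come from the high base/limit registers.
 */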
static bool
mmio_range_extend_by_prememwin(struct range_infer_ctx *ric,
    int bus, int dev, int fun, int ofs, pcireg_t mem,
    int hibaseofs, pcireg_t hibase,
    int hilimitofs, pcireg_t hilimit)
{
	const int onemeg = 1024 * 1024;
	uint64_t baser, limitr;
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = PCI_ALLOC_F_PREFETCHABLE
		, .pal_space = PCI_ALLOC_SPACE_MEM
		, .pal_type = PCI_ALLOC_REGTYPE_WIN
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_ofs = ofs;
	r[0].r_val = mem;

	baser = (mem >> PCI_BRIDGE_PREFETCHMEM_BASE_SHIFT) &
	    PCI_BRIDGE_PREFETCHMEM_BASE_MASK;
	limitr = (mem >> PCI_BRIDGE_PREFETCHMEM_LIMIT_SHIFT) &
	    PCI_BRIDGE_PREFETCHMEM_LIMIT_MASK;

	if (PCI_BRIDGE_PREFETCHMEM_64BITS(mem)) {
		r[1].r_mask = r[2].r_mask = ~(pcireg_t)0;
		r[1].r_ofs = hibaseofs;
		r[1].r_val = hibase;
		r[2].r_ofs = hilimitofs;
		r[2].r_val = hilimit;

		baser |= (uint64_t)hibase << 12;
		limitr |= (uint64_t)hilimit << 12;
	}

	/* XXX check with the PCI standard */
	if (baser > limitr || limitr == 0)
		return true;

	pal.pal_addr = baser * onemeg;
	pal.pal_size = (limitr - baser + 1) * onemeg;

	aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return mmio_range_extend(ric, &pal);
}

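/*
 * Record a CardBus bridge memory window described by the base/limit
 * register pair at ofs and ofs + 4; memory windows are 4KB-granular.
 */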
static bool
mmio_range_extend_by_cbwin(struct range_infer_ctx *ric,
    int bus, int dev, int fun, int ofs, pcireg_t base, pcireg_t limit,
    bool prefetchable)
{
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_MEM
		, .pal_type = PCI_ALLOC_REGTYPE_CBWIN
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }, {
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	if (prefetchable)
		pal.pal_flags |= PCI_ALLOC_F_PREFETCHABLE;

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_ofs = ofs;
	r[0].r_val = base;
	r[1].r_ofs = ofs + 4;
	r[1].r_val = limit;

	if (base > limit)
		return true;

	if (limit == 0)
		return true;

	pal.pal_addr = base;
	pal.pal_size = limit - base + 4096;

	aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return mmio_range_extend(ric, &pal);
}

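/*
 * pci_device_foreach_min() callback: infer the memory space consumed
 * by one device or bridge, mirroring io_range_infer(): bridge memory
 * and prefetchable-memory windows, CardBus windows, the legacy VGA
 * range, and each memory BAR sized with the all-ones probe.
 */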
static void
mmio_range_infer(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
{
	struct range_infer_ctx *ric = ctx;
	pcireg_t bcr, bhlcr, limit, mem, premem, hiprebase, hiprelimit;
	int bar, bus, dev, fun, hdrtype, nbar;
	bool ok = true;

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);

	hdrtype = PCI_HDRTYPE_TYPE(bhlcr);

	pci_decompose_tag(pc, tag, &bus, &dev, &fun);

	switch (hdrtype) {
	case PCI_HDRTYPE_PPB:
		nbar = 2;
		/* Extract memory windows */
		ok = ok && mmio_range_extend_by_win(ric, bus, dev, fun,
		    PCI_BRIDGE_MEMORY_REG,
		    pci_conf_read(pc, tag, PCI_BRIDGE_MEMORY_REG));
		premem = pci_conf_read(pc, tag, PCI_BRIDGE_PREFETCHMEM_REG);
		if (PCI_BRIDGE_PREFETCHMEM_64BITS(premem)) {
			aprint_debug("%s: 64-bit prefetchable memory window "
			    "at %d.%d.%d\n", __func__, bus, dev, fun);
			hiprebase = pci_conf_read(pc, tag,
			    PCI_BRIDGE_PREFETCHBASE32_REG);
			hiprelimit = pci_conf_read(pc, tag,
			    PCI_BRIDGE_PREFETCHLIMIT32_REG);
		} else
			hiprebase = hiprelimit = 0;
		ok = ok &&
		    mmio_range_extend_by_prememwin(ric, bus, dev, fun,
		        PCI_BRIDGE_PREFETCHMEM_REG, premem,
		        PCI_BRIDGE_PREFETCHBASE32_REG, hiprebase,
		        PCI_BRIDGE_PREFETCHLIMIT32_REG, hiprelimit) &&
		    mmio_range_extend_by_vga_enable(ric, bus, dev, fun,
		        pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG),
		        pci_conf_read(pc, tag, PCI_BRIDGE_CONTROL_REG));
		break;
	case PCI_HDRTYPE_PCB:
		/* Extract memory windows */
		bcr = pci_conf_read(pc, tag, PCI_BRIDGE_CONTROL_REG);
		mem = pci_conf_read(pc, tag, PCI_CB_MEMBASE0);
		limit = pci_conf_read(pc, tag, PCI_CB_MEMLIMIT0);
		ok = ok && mmio_range_extend_by_cbwin(ric, bus, dev, fun,
		    PCI_CB_MEMBASE0, mem, limit,
		    (bcr & CB_BCR_PREFETCH_MEMWIN0) != 0);
		mem = pci_conf_read(pc, tag, PCI_CB_MEMBASE1);
		limit = pci_conf_read(pc, tag, PCI_CB_MEMLIMIT1);
		ok = ok && mmio_range_extend_by_cbwin(ric, bus, dev, fun,
		    PCI_CB_MEMBASE1, mem, limit,
		    (bcr & CB_BCR_PREFETCH_MEMWIN1) != 0);
		nbar = 1;
		break;
	case PCI_HDRTYPE_DEVICE:
		nbar = 6;
		break;
	default:
		aprint_debug("%s: unknown header type %d at %d.%d.%d\n",
		    __func__, hdrtype, bus, dev, fun);
		return;
	}

	for (bar = 0; bar < nbar; bar++) {
		pcireg_t basebar, sizebar;

		basebar = pci_conf_read(pc, tag, PCI_BAR(bar));
		pci_conf_write(pc, tag, PCI_BAR(bar), 0xffffffff);
		sizebar = pci_conf_read(pc, tag, PCI_BAR(bar));
		pci_conf_write(pc, tag, PCI_BAR(bar), basebar);

		if (sizebar == 0)
			continue;
		if (PCI_MAPREG_TYPE(sizebar) != PCI_MAPREG_TYPE_MEM)
			continue;

		ok = ok && mmio_range_extend_by_bar(ric, bus, dev, fun,
		    PCI_BAR(bar), basebar, sizebar);
	}
	if (!ok) {
		aprint_verbose("MMIO range inference failed at PCI %d.%d.%d\n",
		    bus, dev, fun);
	}
}

static const char *
pci_alloc_regtype_string(const pci_alloc_regtype_t t)
{
	switch (t) {
	case PCI_ALLOC_REGTYPE_BAR:
		return "bar";
	case PCI_ALLOC_REGTYPE_WIN:
	case PCI_ALLOC_REGTYPE_CBWIN:
		return "window";
	case PCI_ALLOC_REGTYPE_VGA_EN:
		return "vga-enable";
	default:
		return "<unknown>";
	}
}

static void
pci_alloc_print(pci_chipset_tag_t pc, const pci_alloc_t *pal)
{
	int bus, dev, fun;
	const pci_alloc_reg_t *r;

	pci_decompose_tag(pc, pal->pal_tag, &bus, &dev, &fun);
	r = &pal->pal_reg[0];

	aprint_normal("%s range [0x%08" PRIx64 ", 0x%08" PRIx64 ")"
	    " at %d.%d.%d %s%s 0x%02x\n",
	    (pal->pal_space == PCI_ALLOC_SPACE_IO) ? "IO" : "MMIO",
	    pal->pal_addr, pal->pal_addr + pal->pal_size,
	    bus, dev, fun,
	    (pal->pal_flags & PCI_ALLOC_F_PREFETCHABLE) ? "prefetchable " : "",
	    pci_alloc_regtype_string(pal->pal_type),
	    r->r_ofs);
}

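/*
 * Resource dictionary published by pci_ranges_infer(): "memory" and
 * "io" sub-dictionaries, each holding the inferred "start"/"size" and
 * a "bios-reservations" array.
 */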
prop_dictionary_t pci_rsrc_dict = NULL;

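/*
 * Externalize every allocation record in the given address space as a
 * property dictionary (register list, type, address, size, and
 * bus/device/function) and append it to the rsvns array.
 */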
static bool
pci_range_record(pci_chipset_tag_t pc, prop_array_t rsvns,
    pci_alloc_list_t *pals, pci_alloc_space_t space)
{
	int bus, dev, fun, i;
	prop_array_t regs;
	prop_dictionary_t reg;
	const pci_alloc_t *pal;
	const pci_alloc_reg_t *r;
	prop_dictionary_t rsvn;

	TAILQ_FOREACH(pal, pals, pal_link) {
		bool ok = true;

		r = &pal->pal_reg[0];

		if (pal->pal_space != space)
			continue;

		if ((rsvn = prop_dictionary_create()) == NULL)
			return false;

		if ((regs = prop_array_create()) == NULL) {
			prop_object_release(rsvn);
			return false;
		}

		if (!prop_dictionary_set(rsvn, "regs", regs)) {
			prop_object_release(rsvn);
			prop_object_release(regs);
			return false;
		}

		for (i = 0; i < __arraycount(pal->pal_reg); i++) {
			r = &pal->pal_reg[i];

			if (r->r_mask == 0)
				break;

			ok = (reg = prop_dictionary_create()) != NULL;
			if (!ok)
				break;

			ok = prop_dictionary_set_uint16(reg, "offset",
			        r->r_ofs) &&
			    prop_dictionary_set_uint32(reg, "val", r->r_val) &&
			    prop_dictionary_set_uint32(reg, "mask",
			        r->r_mask) && prop_array_add(regs, reg);
			if (!ok) {
				prop_object_release(reg);
				break;
			}
			/* the regs array retains its own reference to reg */
			prop_object_release(reg);
		}
		/* rsvn retains its own reference to regs */
		prop_object_release(regs);

		pci_decompose_tag(pc, pal->pal_tag, &bus, &dev, &fun);

		ok = ok &&
		    prop_dictionary_set_cstring_nocopy(rsvn, "type",
		        pci_alloc_regtype_string(pal->pal_type)) &&
		    prop_dictionary_set_uint64(rsvn, "address",
		        pal->pal_addr) &&
		    prop_dictionary_set_uint64(rsvn, "size", pal->pal_size) &&
		    prop_dictionary_set_uint8(rsvn, "bus", bus) &&
		    prop_dictionary_set_uint8(rsvn, "device", dev) &&
		    prop_dictionary_set_uint8(rsvn, "function", fun) &&
		    prop_array_add(rsvns, rsvn);
		prop_object_release(rsvn);
		if (!ok)
			return false;
	}
	return true;
}

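/*
 * Return a copy of the resource dictionary rsrcs0 from which every
 * BIOS reservation rejected by (*predicate)() has been removed, in
 * both the "memory" and "io" branches.  Return NULL on failure.
 */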
prop_dictionary_t
pci_rsrc_filter(prop_dictionary_t rsrcs0,
    bool (*predicate)(void *, prop_dictionary_t), void *arg)
{
	int i, space;
	prop_dictionary_t rsrcs;
	prop_array_t rsvns;
	ppath_t *op, *p;

	if ((rsrcs = prop_dictionary_copy(rsrcs0)) == NULL)
		return NULL;

	for (space = 0; space < 2; space++) {
		op = p = ppath_create();
		p = ppath_push_key(p, (space == 0) ? "memory" : "io");
		p = ppath_push_key(p, "bios-reservations");
		if (p == NULL) {
			ppath_release(op);
			prop_object_release(rsrcs);
			return NULL;
		}
		if ((rsvns = ppath_lookup(rsrcs0, p)) == NULL) {
			printf("%s: reservations not found\n", __func__);
			ppath_release(p);
			prop_object_release(rsrcs);
			return NULL;
		}
		for (i = prop_array_count(rsvns); --i >= 0; ) {
			prop_dictionary_t rsvn;

			if ((p = ppath_push_idx(p, i)) == NULL) {
				printf("%s: ppath_push_idx\n", __func__);
				ppath_release(op);
				prop_object_release(rsrcs);
				return NULL;
			}

			rsvn = ppath_lookup(rsrcs0, p);

			KASSERT(rsvn != NULL);

			if (!(*predicate)(arg, rsvn)) {
				ppath_copydel_object((prop_object_t)rsrcs0,
				    (prop_object_t *)&rsrcs, p);
			}

			if ((p = ppath_pop(p, NULL)) == NULL) {
				printf("%s: ppath_pop\n", __func__);
				ppath_release(op);
				prop_object_release(rsrcs);
				return NULL;
			}
		}
		ppath_release(op);
	}
	return rsrcs;
}

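/*
 * Scan buses minbus..maxbus, infer the overall PCI I/O and memory
 * ranges programmed by the firmware, return them through the optional
 * out-parameters, and publish the details in pci_rsrc_dict.
 */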
void
pci_ranges_infer(pci_chipset_tag_t pc, int minbus, int maxbus,
    bus_addr_t *iobasep, bus_size_t *iosizep,
    bus_addr_t *membasep, bus_size_t *memsizep)
{
	prop_dictionary_t iodict = NULL, memdict = NULL;
	prop_array_t iorsvns = NULL, memrsvns = NULL;
	struct range_infer_ctx ric = {
		  .ric_io_bottom = ~((bus_addr_t)0)
		, .ric_io_top = 0
		, .ric_mmio_bottom = ~((bus_addr_t)0)
		, .ric_mmio_top = 0
		, .ric_pals = TAILQ_HEAD_INITIALIZER(ric.ric_pals)
	};
	const pci_alloc_t *pal;

	ric.ric_pc = pc;
	pci_device_foreach_min(pc, minbus, maxbus, mmio_range_infer, &ric);
	pci_device_foreach_min(pc, minbus, maxbus, io_range_infer, &ric);
	if (membasep != NULL)
		*membasep = ric.ric_mmio_bottom;
	if (memsizep != NULL)
		*memsizep = ric.ric_mmio_top - ric.ric_mmio_bottom;
	if (iobasep != NULL)
		*iobasep = ric.ric_io_bottom;
	if (iosizep != NULL)
		*iosizep = ric.ric_io_top - ric.ric_io_bottom;
	aprint_verbose("%s: inferred %" PRIuMAX
	    " bytes of memory-mapped PCI space at 0x%" PRIxMAX "\n", __func__,
	    (uintmax_t)(ric.ric_mmio_top - ric.ric_mmio_bottom),
	    (uintmax_t)ric.ric_mmio_bottom);
	aprint_verbose("%s: inferred %" PRIuMAX
	    " bytes of PCI I/O space at 0x%" PRIxMAX "\n", __func__,
	    (uintmax_t)(ric.ric_io_top - ric.ric_io_bottom),
	    (uintmax_t)ric.ric_io_bottom);
	TAILQ_FOREACH(pal, &ric.ric_pals, pal_link)
		pci_alloc_print(pc, pal);

	if ((memdict = prop_dictionary_create()) == NULL) {
		aprint_error("%s: could not create PCI MMIO "
		    "resources dictionary\n", __func__);
	} else if ((memrsvns = prop_array_create()) == NULL) {
		aprint_error("%s: could not create PCI BIOS memory "
		    "reservations array\n", __func__);
	} else if (!prop_dictionary_set(memdict, "bios-reservations",
	    memrsvns)) {
		aprint_error("%s: could not record PCI BIOS memory "
		    "reservations array\n", __func__);
	} else if (!pci_range_record(pc, memrsvns, &ric.ric_pals,
	    PCI_ALLOC_SPACE_MEM)) {
		aprint_error("%s: could not record PCI BIOS memory "
		    "reservations\n", __func__);
	} else if (!prop_dictionary_set_uint64(memdict,
	    "start", ric.ric_mmio_bottom) ||
	    !prop_dictionary_set_uint64(memdict, "size",
	     ric.ric_mmio_top - ric.ric_mmio_bottom)) {
		aprint_error("%s: could not record PCI memory min & max\n",
		    __func__);
	} else if ((iodict = prop_dictionary_create()) == NULL) {
		aprint_error("%s: could not create PCI I/O "
		    "resources dictionary\n", __func__);
	} else if ((iorsvns = prop_array_create()) == NULL) {
		aprint_error("%s: could not create PCI BIOS I/O "
		    "reservations array\n", __func__);
	} else if (!prop_dictionary_set(iodict, "bios-reservations",
	    iorsvns)) {
		aprint_error("%s: could not record PCI BIOS I/O "
		    "reservations array\n", __func__);
	} else if (!pci_range_record(pc, iorsvns, &ric.ric_pals,
	    PCI_ALLOC_SPACE_IO)) {
		aprint_error("%s: could not record PCI BIOS I/O "
		    "reservations\n", __func__);
	} else if (!prop_dictionary_set_uint64(iodict,
	    "start", ric.ric_io_bottom) ||
	    !prop_dictionary_set_uint64(iodict, "size",
	     ric.ric_io_top - ric.ric_io_bottom)) {
		aprint_error("%s: could not record PCI I/O min & max\n",
		    __func__);
	} else if ((pci_rsrc_dict = prop_dictionary_create()) == NULL) {
		aprint_error("%s: could not create PCI resources dictionary\n",
		    __func__);
	} else if (!prop_dictionary_set(pci_rsrc_dict, "memory", memdict) ||
	           !prop_dictionary_set(pci_rsrc_dict, "io", iodict)) {
		aprint_error("%s: could not record PCI memory- or I/O-"
		    "resources dictionary\n", __func__);
		prop_object_release(pci_rsrc_dict);
		pci_rsrc_dict = NULL;
	}

	if (iodict != NULL)
		prop_object_release(iodict);
	if (memdict != NULL)
		prop_object_release(memdict);
	/* drop our references; the resource dictionary keeps its own */
	if (iorsvns != NULL)
		prop_object_release(iorsvns);
	if (memrsvns != NULL)
		prop_object_release(memrsvns);
}
959