1 /* $NetBSD: isp_pci.c,v 1.68 2001/03/14 05:47:56 mjacob Exp $ */
2 /*
3  * This driver, which is contained in NetBSD in the files:
4  *
5  *	sys/dev/ic/isp.c
6  *	sys/dev/ic/isp_inline.h
7  *	sys/dev/ic/isp_netbsd.c
8  *	sys/dev/ic/isp_netbsd.h
9  *	sys/dev/ic/isp_target.c
10  *	sys/dev/ic/isp_target.h
11  *	sys/dev/ic/isp_tpublic.h
12  *	sys/dev/ic/ispmbox.h
13  *	sys/dev/ic/ispreg.h
14  *	sys/dev/ic/ispvar.h
15  *	sys/microcode/isp/asm_sbus.h
16  *	sys/microcode/isp/asm_1040.h
17  *	sys/microcode/isp/asm_1080.h
18  *	sys/microcode/isp/asm_12160.h
19  *	sys/microcode/isp/asm_2100.h
20  *	sys/microcode/isp/asm_2200.h
21  *	sys/pci/isp_pci.c
22  *	sys/sbus/isp_sbus.c
23  *
24  * is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
25  * This driver also shares source with the FreeBSD, OpenBSD, Linux and
26  * Solaris versions. This tends to be an interesting maintenance problem.
27  *
28  * Please coordinate with Matthew Jacob on changes you wish to make here.
29  */
30 /*
31  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
32  */
33 /*
34  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
35  * All rights reserved.
36  *
37  * Additional Copyright (C) 2000, 2001 by Matthew Jacob
38  *
39  * Redistribution and use in source and binary forms, with or without
40  * modification, are permitted provided that the following conditions
41  * are met:
42  * 1. Redistributions of source code must retain the above copyright
43  *    notice, this list of conditions and the following disclaimer.
44  * 2. The name of the author may not be used to endorse or promote products
45  *    derived from this software without specific prior written permission
46  *
47  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
51  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
56  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  */
58 
59 #include <dev/ic/isp_netbsd.h>
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 #include <dev/pci/pcidevs.h>
63 #include <uvm/uvm_extern.h>
64 #include <sys/reboot.h>
65 
66 static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
67 static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
68 #if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
69 static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
70 static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
71 #endif
72 static int isp_pci_mbxdma(struct ispsoftc *);
73 static int isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *,
74     u_int16_t *, u_int16_t);
75 static void isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);
76 static void isp_pci_reset1(struct ispsoftc *);
77 static void isp_pci_dumpregs(struct ispsoftc *, const char *);
78 static int isp_pci_intr(void *);
79 
80 #if	defined(ISP_DISABLE_1020_SUPPORT)
81 #define	ISP_1040_RISC_CODE	NULL
82 #else
83 #define	ISP_1040_RISC_CODE	isp_1040_risc_code
84 #include <dev/microcode/isp/asm_1040.h>
85 #endif
86 
87 #if	defined(ISP_DISABLE_1080_SUPPORT)
88 #define	ISP_1080_RISC_CODE	NULL
89 #else
90 #define	ISP_1080_RISC_CODE	isp_1080_risc_code
91 #include <dev/microcode/isp/asm_1080.h>
92 #endif
93 
94 #if	defined(ISP_DISABLE_12160_SUPPORT)
95 #define	ISP_12160_RISC_CODE	NULL
96 #else
97 #define	ISP_12160_RISC_CODE	isp_12160_risc_code
98 #include <dev/microcode/isp/asm_12160.h>
99 #endif
100 
101 #if	defined(ISP_DISABLE_2100_SUPPORT)
102 #define	ISP_2100_RISC_CODE	NULL
103 #else
104 #define	ISP_2100_RISC_CODE	isp_2100_risc_code
105 #include <dev/microcode/isp/asm_2100.h>
106 #endif
107 
108 #if	defined(ISP_DISABLE_2200_SUPPORT)
109 #define	ISP_2200_RISC_CODE	NULL
110 #else
111 #define	ISP_2200_RISC_CODE	isp_2200_risc_code
112 #include <dev/microcode/isp/asm_2200.h>
113 #endif
114 
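/*
 * Per-family machine-dependent vector tables.  Each ispmdvec hands the
 * machine-independent core the register accessors, the mailbox/command DMA
 * setup and teardown routines, the reset and register-dump hooks, the RISC
 * firmware image selected above and, for the SCSI parts, the BIU
 * configuration flags to program.
 */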
115 #ifndef	ISP_DISABLE_1020_SUPPORT
116 static struct ispmdvec mdvec = {
117 	isp_pci_rd_reg,
118 	isp_pci_wr_reg,
119 	isp_pci_mbxdma,
120 	isp_pci_dmasetup,
121 	isp_pci_dmateardown,
122 	NULL,
123 	isp_pci_reset1,
124 	isp_pci_dumpregs,
125 	ISP_1040_RISC_CODE,
126 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
127 };
128 #endif
129 
130 #ifndef	ISP_DISABLE_1080_SUPPORT
131 static struct ispmdvec mdvec_1080 = {
132 	isp_pci_rd_reg_1080,
133 	isp_pci_wr_reg_1080,
134 	isp_pci_mbxdma,
135 	isp_pci_dmasetup,
136 	isp_pci_dmateardown,
137 	NULL,
138 	isp_pci_reset1,
139 	isp_pci_dumpregs,
140 	ISP_1080_RISC_CODE,
141 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
142 };
143 #endif
144 
145 #ifndef	ISP_DISABLE_12160_SUPPORT
146 static struct ispmdvec mdvec_12160 = {
147 	isp_pci_rd_reg_1080,
148 	isp_pci_wr_reg_1080,
149 	isp_pci_mbxdma,
150 	isp_pci_dmasetup,
151 	isp_pci_dmateardown,
152 	NULL,
153 	isp_pci_reset1,
154 	isp_pci_dumpregs,
155 	ISP_12160_RISC_CODE,
156 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
157 };
158 #endif
159 
160 #ifndef	ISP_DISABLE_2100_SUPPORT
161 static struct ispmdvec mdvec_2100 = {
162 	isp_pci_rd_reg,
163 	isp_pci_wr_reg,
164 	isp_pci_mbxdma,
165 	isp_pci_dmasetup,
166 	isp_pci_dmateardown,
167 	NULL,
168 	isp_pci_reset1,
169 	isp_pci_dumpregs,
170 	ISP_2100_RISC_CODE
171 };
172 #endif
173 
174 #ifndef	ISP_DISABLE_2200_SUPPORT
175 static struct ispmdvec mdvec_2200 = {
176 	isp_pci_rd_reg,
177 	isp_pci_wr_reg,
178 	isp_pci_mbxdma,
179 	isp_pci_dmasetup,
180 	isp_pci_dmateardown,
181 	NULL,
182 	isp_pci_reset1,
183 	isp_pci_dumpregs,
184 	ISP_2200_RISC_CODE
185 };
186 #endif
187 
188 #ifndef	PCI_VENDOR_QLOGIC
189 #define	PCI_VENDOR_QLOGIC	0x1077
190 #endif
191 
192 #ifndef	PCI_PRODUCT_QLOGIC_ISP1020
193 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
194 #endif
195 
196 #ifndef	PCI_PRODUCT_QLOGIC_ISP1080
197 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
198 #endif
199 
200 #ifndef	PCI_PRODUCT_QLOGIC_ISP1240
201 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
202 #endif
203 
204 #ifndef	PCI_PRODUCT_QLOGIC_ISP1280
205 #define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
206 #endif
207 
208 #ifndef	PCI_PRODUCT_QLOGIC_ISP12160
209 #define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
210 #endif
211 
212 #ifndef	PCI_PRODUCT_QLOGIC_ISP2100
213 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
214 #endif
215 
216 #ifndef	PCI_PRODUCT_QLOGIC_ISP2200
217 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
218 #endif
219 
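/*
 * Composite <product:16 | vendor:16> identifiers.  This matches the layout
 * of the PCI ID register (and hence pa->pa_id), so the probe routine can
 * switch on a single 32-bit value per supported chip.
 */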
220 #define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
221 
222 #define	PCI_QLOGIC_ISP1080	\
223 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
224 
225 #define	PCI_QLOGIC_ISP1240	\
226 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
227 
228 #define	PCI_QLOGIC_ISP1280	\
229 	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
230 
231 #define	PCI_QLOGIC_ISP12160	\
232 	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
233 
234 #define	PCI_QLOGIC_ISP2100	\
235 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
236 
237 #define	PCI_QLOGIC_ISP2200	\
238 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
239 
240 #define	IO_MAP_REG	0x10
241 #define	MEM_MAP_REG	0x14
242 #define	PCIR_ROMADDR	0x30
243 
244 #define	PCI_DFLT_LTNCY	0x40
245 #define	PCI_DFLT_LNSZ	0x10
246 
247 
248 static int isp_pci_probe(struct device *, struct cfdata *, void *);
249 static void isp_pci_attach(struct device *, struct device *, void *);
250 
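/*
 * PCI-specific softc.  The shared ispsoftc must come first so the routines
 * below can cast between the two freely.  The remaining members hold the
 * bus space and DMA handles, the DMA maps for the request/result queues and
 * the FC scratch area, one transfer map per outstanding command, the
 * interrupt handle, and the per-block register offsets used by the
 * register accessors.
 */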
251 struct isp_pcisoftc {
252 	struct ispsoftc		pci_isp;
253 	pci_chipset_tag_t	pci_pc;
254 	pcitag_t		pci_tag;
255 	bus_space_tag_t		pci_st;
256 	bus_space_handle_t	pci_sh;
257 	bus_dma_tag_t		pci_dmat;
258 	bus_dmamap_t		pci_scratch_dmap;	/* for fcp only */
259 	bus_dmamap_t		pci_rquest_dmap;
260 	bus_dmamap_t		pci_result_dmap;
261 	bus_dmamap_t		*pci_xfer_dmap;
262 	void *			pci_ih;
263 	int16_t			pci_poff[_NREG_BLKS];
264 };
265 
266 struct cfattach isp_pci_ca = {
267 	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
268 };
269 
270 #ifdef	DEBUG
271 const char vstring[] =
272     "Qlogic ISP Driver, NetBSD (pci) Platform Version %d.%d Core Version %d.%d";
273 #endif
274 
275 static int
276 isp_pci_probe(struct device *parent, struct cfdata *match, void *aux)
277 {
278 	struct pci_attach_args *pa = aux;
279 	switch (pa->pa_id) {
280 #ifndef	ISP_DISABLE_1020_SUPPORT
281 	case PCI_QLOGIC_ISP:
282 		return (1);
283 #endif
284 #ifndef	ISP_DISABLE_1080_SUPPORT
285 	case PCI_QLOGIC_ISP1080:
286 	case PCI_QLOGIC_ISP1240:
287 	case PCI_QLOGIC_ISP1280:
288 		return (1);
289 #endif
290 #ifndef	ISP_DISABLE_12160_SUPPORT
291 	case PCI_QLOGIC_ISP12160:
292 		return (1);
293 #endif
294 #ifndef	ISP_DISABLE_2100_SUPPORT
295 	case PCI_QLOGIC_ISP2100:
296 		return (1);
297 #endif
298 #ifndef	ISP_DISABLE_2200_SUPPORT
299 	case PCI_QLOGIC_ISP2200:
300 		return (1);
301 #endif
302 	default:
303 		return (0);
304 	}
305 }
306 
307 
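/*
 * Attach: map the chip registers (preferring memory space, falling back to
 * I/O space), record the per-block register offsets, pick the mdvec and
 * parameter block for the chip found, sanitize the PCI command, latency
 * timer, cache line size and expansion ROM settings, hook up the interrupt,
 * and then run the MI reset/init/attach sequence.
 */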
308 static void
309 isp_pci_attach(struct device *parent, struct device *self, void *aux)
310 {
311 #ifdef	DEBUG
312 	static char oneshot = 1;
313 #endif
314 	static const char nomem[] = "%s: no mem for sdparam table\n";
315 	u_int32_t data, rev, linesz = PCI_DFLT_LNSZ;
316 	struct pci_attach_args *pa = aux;
317 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
318 	struct ispsoftc *isp = &pcs->pci_isp;
319 	bus_space_tag_t st, iot, memt;
320 	bus_space_handle_t sh, ioh, memh;
321 	pci_intr_handle_t ih;
322 	const char *intrstr;
323 	int ioh_valid, memh_valid;
324 
325 	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
326 	    PCI_MAPREG_TYPE_IO, 0,
327 	    &iot, &ioh, NULL, NULL) == 0);
328 	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
329 	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
330 	    &memt, &memh, NULL, NULL) == 0);
331 	if (memh_valid) {
332 		st = memt;
333 		sh = memh;
334 	} else if (ioh_valid) {
335 		st = iot;
336 		sh = ioh;
337 	} else {
338 		printf(": unable to map device registers\n");
339 		return;
340 	}
341 	printf("\n");
342 
343 	pcs->pci_st = st;
344 	pcs->pci_sh = sh;
345 	pcs->pci_dmat = pa->pa_dmat;
346 	pcs->pci_pc = pa->pa_pc;
347 	pcs->pci_tag = pa->pa_tag;
348 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
349 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
350 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
351 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
352 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
353 	rev = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG) & 0xff;
354 
355 #ifndef	ISP_DISABLE_1020_SUPPORT
356 	if (pa->pa_id == PCI_QLOGIC_ISP) {
357 		isp->isp_mdvec = &mdvec;
358 		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
359 		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
360 		if (isp->isp_param == NULL) {
361 			printf(nomem, isp->isp_name);
362 			return;
363 		}
364 		bzero(isp->isp_param, sizeof (sdparam));
365 	}
366 #endif
367 #ifndef	ISP_DISABLE_1080_SUPPORT
368 	if (pa->pa_id == PCI_QLOGIC_ISP1080) {
369 		isp->isp_mdvec = &mdvec_1080;
370 		isp->isp_type = ISP_HA_SCSI_1080;
371 		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
372 		if (isp->isp_param == NULL) {
373 			printf(nomem, isp->isp_name);
374 			return;
375 		}
376 		bzero(isp->isp_param, sizeof (sdparam));
377 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
378 		    ISP1080_DMA_REGS_OFF;
379 	}
380 	if (pa->pa_id == PCI_QLOGIC_ISP1240) {
381 		isp->isp_mdvec = &mdvec_1080;
382 		isp->isp_type = ISP_HA_SCSI_1240;
383 		isp->isp_param =
384 		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
385 		if (isp->isp_param == NULL) {
386 			printf(nomem, isp->isp_name);
387 			return;
388 		}
389 		bzero(isp->isp_param, 2 * sizeof (sdparam));
390 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
391 		    ISP1080_DMA_REGS_OFF;
392 	}
393 	if (pa->pa_id == PCI_QLOGIC_ISP1280) {
394 		isp->isp_mdvec = &mdvec_1080;
395 		isp->isp_type = ISP_HA_SCSI_1280;
396 		isp->isp_param =
397 		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
398 		if (isp->isp_param == NULL) {
399 			printf(nomem, isp->isp_name);
400 			return;
401 		}
402 		bzero(isp->isp_param, 2 * sizeof (sdparam));
403 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
404 		    ISP1080_DMA_REGS_OFF;
405 	}
406 #endif
407 #ifndef	ISP_DISABLE_12160_SUPPORT
408 	if (pa->pa_id == PCI_QLOGIC_ISP12160) {
409 		isp->isp_mdvec = &mdvec_12160;
410 		isp->isp_type = ISP_HA_SCSI_12160;
411 		isp->isp_param =
412 		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
413 		if (isp->isp_param == NULL) {
414 			printf(nomem, isp->isp_name);
415 			return;
416 		}
417 		bzero(isp->isp_param, 2 * sizeof (sdparam));
418 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
419 		    ISP1080_DMA_REGS_OFF;
420 	}
421 #endif
422 #ifndef	ISP_DISABLE_2100_SUPPORT
423 	if (pa->pa_id == PCI_QLOGIC_ISP2100) {
424 		isp->isp_mdvec = &mdvec_2100;
425 		isp->isp_type = ISP_HA_FC_2100;
426 		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
427 		if (isp->isp_param == NULL) {
428 			printf(nomem, isp->isp_name);
429 			return;
430 		}
431 		bzero(isp->isp_param, sizeof (fcparam));
432 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
433 		    PCI_MBOX_REGS2100_OFF;
434 		if (rev < 3) {
435 			/*
436 			 * XXX: Need to get the actual revision
437 			 * XXX: number of the 2100 FB. At any rate,
438 			 * XXX: lower cache line size for early revision
439 			 * XXX: boards.
440 			 */
441 			linesz = 1;
442 		}
443 	}
444 #endif
445 #ifndef	ISP_DISABLE_2200_SUPPORT
446 	if (pa->pa_id == PCI_QLOGIC_ISP2200) {
447 		isp->isp_mdvec = &mdvec_2200;
448 		isp->isp_type = ISP_HA_FC_2200;
449 		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
450 		if (isp->isp_param == NULL) {
451 			printf(nomem, isp->isp_name);
452 			return;
453 		}
454 		bzero(isp->isp_param, sizeof (fcparam));
455 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
456 		    PCI_MBOX_REGS2100_OFF;
457 		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
458 	}
459 #endif
460 	/*
461 	 * Set up logging levels.
462 	 */
463 #ifdef	ISP_LOGDEFAULT
464 	isp->isp_dblev = ISP_LOGDEFAULT;
465 #else
466 	isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
467 	if (bootverbose)
468 		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
469 #ifdef	SCSIDEBUG
470 	isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
471 #endif
472 #ifdef	DEBUG
473 	isp->isp_dblev |= ISP_LOGDEBUG0;
474 #endif
475 #endif
476 
477 #ifdef	DEBUG
478 	if (oneshot) {
479 		oneshot = 0;
480 		isp_prt(isp, ISP_LOGCONFIG, vstring,
481 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
482 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
483 	}
484 #endif
485 
486 	isp->isp_revision = rev;
487 
488 	/*
489 	 * Make sure that the command register is set sanely.
490 	 */
491 	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
492 	data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;
493 
494 	/*
495 	 * Not so sure about these, but it seems important that parity and
496 	 * SERR reporting get enabled.
497 	 */
498 	data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
499 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);
500 
501 	/*
502 	 * Make sure that the latency timer and cache line size are set
503 	 * sanely and that the expansion ROM is disabled.
504 	 */
505 	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
506 	data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
507 	data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
508 	data |= (PCI_DFLT_LTNCY	<< PCI_LATTIMER_SHIFT);
509 	data |= (linesz << PCI_CACHELINE_SHIFT);
510 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
511 
512 	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
513 	data &= ~1;
514 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);
515 
516 	if (pci_intr_map(pa, &ih)) {
517 		printf("%s: couldn't map interrupt\n", isp->isp_name);
518 		free(isp->isp_param, M_DEVBUF);
519 		return;
520 	}
521 	intrstr = pci_intr_string(pa->pa_pc, ih);
522 	if (intrstr == NULL)
523 		intrstr = "<I dunno>";
524 	pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
525 	    isp_pci_intr, isp);
526 	if (pcs->pci_ih == NULL) {
527 		printf("%s: couldn't establish interrupt at %s\n",
528 			isp->isp_name, intrstr);
529 		free(isp->isp_param, M_DEVBUF);
530 		return;
531 	}
532 
533 	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);
534 
535 	if (IS_FC(isp)) {
536 		DEFAULT_NODEWWN(isp) = 0x400000007F000002;
537 		DEFAULT_PORTWWN(isp) = 0x400000007F000002;
538 	}
539 
540 	isp->isp_confopts = self->dv_cfdata->cf_flags;
541 	isp->isp_role = ISP_DEFAULT_ROLES;
542 	ISP_LOCK(isp);
543 	isp->isp_osinfo.no_mbox_ints = 1;
544 	isp_reset(isp);
545 	if (isp->isp_state != ISP_RESETSTATE) {
546 		ISP_UNLOCK(isp);
547 		free(isp->isp_param, M_DEVBUF);
548 		return;
549 	}
550 	ENABLE_INTS(isp);
551 	isp_init(isp);
552 	if (isp->isp_state != ISP_INITSTATE) {
553 		isp_uninit(isp);
554 		ISP_UNLOCK(isp);
555 		free(isp->isp_param, M_DEVBUF);
556 		return;
557 	}
558 	/*
559 	 * Do platform attach.
560 	 */
561 	ISP_UNLOCK(isp);
562 	isp_attach(isp);
563 	if (isp->isp_state != ISP_RUNSTATE) {
564 		ISP_LOCK(isp);
565 		isp_uninit(isp);
566 		free(isp->isp_param, M_DEVBUF);
567 		ISP_UNLOCK(isp);
568 	}
569 }
570 
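/*
 * Basic register accessors.  The MI register offset encodes a block in its
 * upper bits; pci_poff[] maps that block to a bus_space offset, to which
 * the low byte of the offset is added.  SXP block accesses require flipping
 * BIU_PCI_CONF1_SXP in BIU_CONF1 around the access (the RISC is assumed to
 * be paused while this happens).
 */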
571 static u_int16_t
572 isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
573 {
574 	u_int16_t rv;
575 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
576 	int offset, oldconf = 0;
577 
578 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
579 		/*
580 		 * We will assume that someone has paused the RISC processor.
581 		 */
582 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
583 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
584 		delay(250);
585 	}
586 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
587 	offset += (regoff & 0xff);
588 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
589 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
590 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
591 		delay(250);
592 	}
593 	return (rv);
594 }
595 
596 static void
597 isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
598 {
599 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
600 	int offset, oldconf = 0;
601 
602 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
603 		/*
604 		 * We will assume that someone has paused the RISC processor.
605 		 */
606 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
607 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
608 		delay(250);
609 	}
610 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
611 	offset += (regoff & 0xff);
612 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
613 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
614 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
615 		delay(250);
616 	}
617 }
618 
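/*
 * 1080/1280/12160 variants: these chips additionally bank-switch between
 * the SXP channels and the DMA registers via BIU_CONF1, so the accessors
 * select the appropriate bank before the access and restore the original
 * configuration afterwards.
 */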
619 #if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
620 static u_int16_t
621 isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
622 {
623 	u_int16_t rv, oc = 0;
624 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
625 	int offset;
626 
627 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
628 		u_int16_t tc;
629 		/*
630 		 * We will assume that someone has paused the RISC processor.
631 		 */
632 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
633 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
634 		if (IS_1280(isp)) {
635 			if (regoff & SXP_BANK1_SELECT)
636 				tc |= BIU_PCI1080_CONF1_SXP0;
637 			else
638 				tc |= BIU_PCI1080_CONF1_SXP1;
639 		} else {
640 			tc |= BIU_PCI1080_CONF1_SXP0;
641 		}
642 		isp_pci_wr_reg(isp, BIU_CONF1, tc);
643 		delay(250);
644 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
645 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
646 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
647 		delay(250);
648 	}
649 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
650 	offset += (regoff & 0xff);
651 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
652 	/*
653 	 * Restore BIU_CONF1 if we changed it; okay since it is always nonzero
654 	 */
655 	if (oc) {
656 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
657 		delay(250);
658 	}
659 	return (rv);
660 }
661 
662 static void
663 isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
664 {
665 	u_int16_t oc = 0;
666 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
667 	int offset;
668 
669 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
670 		u_int16_t tc;
671 		/*
672 		 * We will assume that someone has paused the RISC processor.
673 		 */
674 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
675 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
676 		if (IS_1280(isp)) {
677 			if (regoff & SXP_BANK1_SELECT)
678 				tc |= BIU_PCI1080_CONF1_SXP0;
679 			else
680 				tc |= BIU_PCI1080_CONF1_SXP1;
681 		} else {
682 			tc |= BIU_PCI1080_CONF1_SXP0;
683 		}
684 		isp_pci_wr_reg(isp, BIU_CONF1, tc);
685 		delay(250);
686 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
687 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
688 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
689 		delay(250);
690 	}
691 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
692 	offset += (regoff & 0xff);
693 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
694 	/*
695 	 * Restore BIU_CONF1 if we changed it; okay since it is always nonzero
696 	 */
697 	if (oc) {
698 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
699 		delay(250);
700 	}
701 }
702 #endif
703 
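/*
 * Set up the DMA resources shared by all commands: the xflist lookup array,
 * one bus_dmamap per outstanding command, DMA-safe memory for the request
 * and result queues and, for Fibre Channel chips, the scratch area.  The
 * resulting bus addresses are recorded in isp_rquest_dma, isp_result_dma
 * and isp_scdma for the chip to use.
 */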
704 static int
705 isp_pci_mbxdma(struct ispsoftc *isp)
706 {
707 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
708 	bus_dma_tag_t dmat = pcs->pci_dmat;
709 	bus_dma_segment_t sg;
710 	bus_size_t len;
711 	fcparam *fcp;
712 	int rs, i;
713 
714 	if (isp->isp_rquest_dma)	/* been here before? */
715 		return (0);
716 
717 	len = isp->isp_maxcmds * sizeof (XS_T);
718 	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK);
719 	if (isp->isp_xflist == NULL) {
720 		isp_prt(isp, ISP_LOGERR, "cannot malloc xflist array");
721 		return (1);
722 	}
723 	bzero(isp->isp_xflist, len);
724 	len = isp->isp_maxcmds * sizeof (bus_dmamap_t);
725 	pcs->pci_xfer_dmap = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
726 	if (pcs->pci_xfer_dmap == NULL) {
727 		free(isp->isp_xflist, M_DEVBUF);
728 		isp->isp_xflist = NULL;
729 		isp_prt(isp, ISP_LOGERR, "cannot malloc dma map array");
730 		return (1);
731 	}
732 	for (i = 0; i < isp->isp_maxcmds; i++) {
733 		if (bus_dmamap_create(dmat, MAXPHYS, (MAXPHYS / PAGE_SIZE) + 1,
734 		    MAXPHYS, 0, BUS_DMA_NOWAIT, &pcs->pci_xfer_dmap[i])) {
735 			isp_prt(isp, ISP_LOGERR, "cannot create dma maps");
736 			break;
737 		}
738 	}
739 	if (i < isp->isp_maxcmds) {
740 		while (--i >= 0) {
741 			bus_dmamap_destroy(dmat, pcs->pci_xfer_dmap[i]);
742 		}
743 		free(isp->isp_xflist, M_DEVBUF);
744 		free(pcs->pci_xfer_dmap, M_DEVBUF);
745 		isp->isp_xflist = NULL;
746 		pcs->pci_xfer_dmap = NULL;
747 		return (1);
748 	}
749 
750 	/*
751 	 * Allocate and map the request queue.
752 	 */
753 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
754 	if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
755 			     BUS_DMA_NOWAIT) ||
756 	    bus_dmamem_map(pcs->pci_dmat, &sg, rs, len,
757 	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
758 		goto dmafail;
759 	}
760 
761 	if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
762 	    &pcs->pci_rquest_dmap) || bus_dmamap_load(dmat,
763 	    pcs->pci_rquest_dmap, (caddr_t)isp->isp_rquest, len, NULL,
764 	    BUS_DMA_NOWAIT)) {
765 		goto dmafail;
766 	}
767 
768 	isp->isp_rquest_dma = pcs->pci_rquest_dmap->dm_segs[0].ds_addr;
769 
770 	/*
771 	 * Allocate and map the result queue.
772 	 */
773 	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
774 	if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
775 			     BUS_DMA_NOWAIT) ||
776 	    bus_dmamem_map(dmat, &sg, rs, len, (caddr_t *)&isp->isp_result,
777 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
778 		goto dmafail;
779 	}
780 	if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
781 	    &pcs->pci_result_dmap) || bus_dmamap_load(pcs->pci_dmat,
782 	    pcs->pci_result_dmap, (caddr_t)isp->isp_result, len, NULL,
783 	    BUS_DMA_NOWAIT)) {
784 		goto dmafail;
785 	}
786 	isp->isp_result_dma = pcs->pci_result_dmap->dm_segs[0].ds_addr;
787 
788 	if (IS_SCSI(isp)) {
789 		return (0);
790 	}
791 
792 	fcp = isp->isp_param;
793 	len = ISP2100_SCRLEN;
794 	if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
795 			     BUS_DMA_NOWAIT) ||
796 	    bus_dmamem_map(dmat, &sg, rs, len, (caddr_t *)&fcp->isp_scratch,
797 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
798 		goto dmafail;
799 	}
800 	if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
801 	    &pcs->pci_scratch_dmap) || bus_dmamap_load(dmat,
802 	    pcs->pci_scratch_dmap, (caddr_t)fcp->isp_scratch, len, NULL,
803 	    BUS_DMA_NOWAIT)) {
804 		goto dmafail;
805 	}
806 	fcp->isp_scdma = pcs->pci_scratch_dmap->dm_segs[0].ds_addr;
807 	return (0);
808 dmafail:
809 	isp_prt(isp, ISP_LOGERR, "mailbox dma setup failure");
810 	for (i = 0; i < isp->isp_maxcmds; i++) {
811 		bus_dmamap_destroy(dmat, pcs->pci_xfer_dmap[i]);
812 	}
813 	free(isp->isp_xflist, M_DEVBUF);
814 	free(pcs->pci_xfer_dmap, M_DEVBUF);
815 	isp->isp_xflist = NULL;
816 	pcs->pci_xfer_dmap = NULL;
817 	return (1);
818 }
819 
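/*
 * Load a command's data buffer into its per-handle DMA map and translate
 * the segment list into the request queue entry: up to ISP_RQDSEG (or
 * ISP_RQDSEG_T2 for FC) segments go into the request itself, and any
 * remainder spills into RQSTYPE_DATASEG continuation entries.  Segment
 * descriptors are byte-swapped on big-endian hosts, and both the data map
 * and the request queue are synced before the command is queued.
 */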
820 static int
821 isp_pci_dmasetup(struct ispsoftc *isp, struct scsipi_xfer *xs, ispreq_t *rq,
822     u_int16_t *iptrp, u_int16_t optr)
823 {
824 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
825 	bus_dmamap_t dmap;
826 	ispcontreq_t *crq;
827 	int segcnt, seg, error, ovseg, seglim, drq;
828 
829 	dmap = pcs->pci_xfer_dmap[isp_handle_index(rq->req_handle)];
830 
831 	if (xs->datalen == 0) {
832 		rq->req_seg_count = 1;
833 		goto mbxsync;
834 	}
835 	if (xs->xs_control & XS_CTL_DATA_IN) {
836 		drq = REQFLAG_DATA_IN;
837 	} else {
838 		drq = REQFLAG_DATA_OUT;
839 	}
840 
841 	if (IS_FC(isp)) {
842 		seglim = ISP_RQDSEG_T2;
843 		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
844 		((ispreqt2_t *)rq)->req_flags |= drq;
845 	} else {
846 		rq->req_flags |= drq;
847 		if (XS_CDBLEN(xs) > 12) {
848 			seglim = 0;
849 		} else {
850 			seglim = ISP_RQDSEG;
851 		}
852 	}
853 	error = bus_dmamap_load(pcs->pci_dmat, dmap, xs->data, xs->datalen,
854 	    NULL, ((xs->xs_control & XS_CTL_NOSLEEP) ?
855 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING);
856 	if (error) {
857 		XS_SETERR(xs, HBA_BOTCH);
858 		return (CMD_COMPLETE);
859 	}
860 
861 	segcnt = dmap->dm_nsegs;
862 
863 	isp_prt(isp, ISP_LOGDEBUG2, "%d byte %s %p in %d segs",
864 	    xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)? "read to" :
865 	    "write from", xs->data, segcnt);
866 
867 	for (seg = 0, rq->req_seg_count = 0;
868 	    seglim && seg < segcnt && rq->req_seg_count < seglim;
869 	    seg++, rq->req_seg_count++) {
870 		if (IS_FC(isp)) {
871 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
872 #if	_BYTE_ORDER == _BIG_ENDIAN
873 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
874 			    bswap32(dmap->dm_segs[seg].ds_len);
875 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
876 			    bswap32(dmap->dm_segs[seg].ds_addr);
877 #else
878 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
879 			    dmap->dm_segs[seg].ds_len;
880 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
881 			    dmap->dm_segs[seg].ds_addr;
882 #endif
883 		} else {
884 #if	_BYTE_ORDER == _BIG_ENDIAN
885 			rq->req_dataseg[rq->req_seg_count].ds_count =
886 			    bswap32(dmap->dm_segs[seg].ds_len);
887 			rq->req_dataseg[rq->req_seg_count].ds_base =
888 			    bswap32(dmap->dm_segs[seg].ds_addr);
889 #else
890 			rq->req_dataseg[rq->req_seg_count].ds_count =
891 			    dmap->dm_segs[seg].ds_len;
892 			rq->req_dataseg[rq->req_seg_count].ds_base =
893 			    dmap->dm_segs[seg].ds_addr;
894 #endif
895 		}
896 		isp_prt(isp, ISP_LOGDEBUG2, "seg0.[%d]={0x%lx,%lu}",
897 		    rq->req_seg_count, (long) dmap->dm_segs[seg].ds_addr,
898 		    (unsigned long) dmap->dm_segs[seg].ds_len);
899 	}
900 
901 	if (seg == segcnt)
902 		goto dmasync;
903 
904 	do {
905 		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
906 		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN(isp));
907 		if (*iptrp == optr) {
908 			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
909 			bus_dmamap_unload(pcs->pci_dmat, dmap);
910 			XS_SETERR(xs, HBA_BOTCH);
911 			return (CMD_EAGAIN);
912 		}
913 		rq->req_header.rqs_entry_count++;
914 		bzero((void *)crq, sizeof (*crq));
915 		crq->req_header.rqs_entry_count = 1;
916 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
917 
918 		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
919 		    rq->req_seg_count++, seg++, ovseg++) {
920 #if	_BYTE_ORDER == _BIG_ENDIAN
921 			crq->req_dataseg[ovseg].ds_count =
922 			    bswap32(dmap->dm_segs[seg].ds_len);
923 			crq->req_dataseg[ovseg].ds_base =
924 			    bswap32(dmap->dm_segs[seg].ds_addr);
925 #else
926 			crq->req_dataseg[ovseg].ds_count =
927 			    dmap->dm_segs[seg].ds_len;
928 			crq->req_dataseg[ovseg].ds_base =
929 			    dmap->dm_segs[seg].ds_addr;
930 #endif
931 			isp_prt(isp, ISP_LOGDEBUG2, "seg%d.[%d]={0x%lx,%lu}",
932 			    rq->req_header.rqs_entry_count - 1,
933 			    rq->req_seg_count, (long)dmap->dm_segs[seg].ds_addr,
934 			    (unsigned long) dmap->dm_segs[seg].ds_len);
935 		}
936 	} while (seg < segcnt);
937 
938 
939 dmasync:
940 	bus_dmamap_sync(pcs->pci_dmat, dmap, 0, dmap->dm_mapsize,
941 	    (xs->xs_control & XS_CTL_DATA_IN) ?  BUS_DMASYNC_PREREAD :
942 	    BUS_DMASYNC_PREWRITE);
943 
944 mbxsync:
945 	ISP_SWIZZLE_REQUEST(isp, rq);
946 	bus_dmamap_sync(pcs->pci_dmat, pcs->pci_rquest_dmap, 0,
947 	    pcs->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
948 	return (CMD_QUEUED);
949 }
950 
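/*
 * Interrupt wrapper: sync the result queue so the core reads fresh
 * completions, and flag that we are on the interrupt stack while
 * isp_intr() does the real work.
 */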
951 static int
952 isp_pci_intr(void *arg)
953 {
954 	int rv;
955 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)arg;
956 	bus_dmamap_sync(pcs->pci_dmat, pcs->pci_result_dmap, 0,
957 	    pcs->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
958 	pcs->pci_isp.isp_osinfo.onintstack = 1;
959 	rv = isp_intr(arg);
960 	pcs->pci_isp.isp_osinfo.onintstack = 0;
961 	return (rv);
962 }
963 
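/*
 * Post-transfer cleanup: sync the data map in the completion direction and
 * unload it so the map can be reused by the next command with this handle.
 */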
964 static void
965 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
966 {
967 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
968 	bus_dmamap_t dmap = pcs->pci_xfer_dmap[isp_handle_index(handle)];
969 	bus_dmamap_sync(pcs->pci_dmat, dmap, 0, dmap->dm_mapsize,
970 	    xs->xs_control & XS_CTL_DATA_IN ?
971 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
972 	bus_dmamap_unload(pcs->pci_dmat, dmap);
973 }
974 
975 static void
976 isp_pci_reset1(struct ispsoftc *isp)
977 {
978 	/* Make sure the BIOS is disabled */
979 	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
980 }
981 
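/*
 * Debug helper: dump the BIU, DMA, SXP and mailbox registers along with the
 * PCI command/status register, pausing the RISC around the SCSI-only
 * register reads.
 */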
982 static void
983 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
984 {
985 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
986 	if (msg)
987 		printf("%s: %s\n", isp->isp_name, msg);
988 	if (IS_SCSI(isp))
989 		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
990 	else
991 		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
992 	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
993 	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
994 	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
995 
996 
997 	if (IS_SCSI(isp)) {
998 		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
999 		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
1000 			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
1001 			ISP_READ(isp, CDMA_FIFO_STS));
1002 		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
1003 			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
1004 			ISP_READ(isp, DDMA_FIFO_STS));
1005 		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
1006 			ISP_READ(isp, SXP_INTERRUPT),
1007 			ISP_READ(isp, SXP_GROSS_ERR),
1008 			ISP_READ(isp, SXP_PINS_CTRL));
1009 		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
1010 	}
1011 	printf("    mbox regs: %x %x %x %x %x\n",
1012 	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
1013 	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
1014 	    ISP_READ(isp, OUTMAILBOX4));
1015 	printf("    PCI Status Command/Status=%x\n",
1016 	    pci_conf_read(pcs->pci_pc, pcs->pci_tag, PCI_COMMAND_STATUS_REG));
1017 }
1018