xref: /netbsd-src/sys/dev/pci/isp_pci.c (revision 8b0f9554ff8762542c4defc4f70e1eb76fb508fa)
1 /* $NetBSD: isp_pci.c,v 1.103 2007/06/26 20:58:27 mjacob Exp $ */
2 /*
3  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
4  * All rights reserved.
5  *
6  * Additional Copyright (C) 2000-2007 by Matthew Jacob
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
33  */
34 
35 /*
36  * 24XX 4Gb material support provided by MetrumRG Associates.
37  * Many thanks are due to them.
38  */
39 
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: isp_pci.c,v 1.103 2007/06/26 20:58:27 mjacob Exp $");
42 
43 #include <dev/ic/isp_netbsd.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pcidevs.h>
47 #include <uvm/uvm_extern.h>
48 #include <sys/reboot.h>
49 
50 static uint32_t isp_pci_rd_reg(struct ispsoftc *, int);
51 static void isp_pci_wr_reg(struct ispsoftc *, int, uint32_t);
52 #if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
53 static uint32_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
54 static void isp_pci_wr_reg_1080(struct ispsoftc *, int, uint32_t);
55 #endif
56 #if !defined(ISP_DISABLE_2100_SUPPORT) && \
57 	 !defined(ISP_DISABLE_2200_SUPPORT) && \
58 	 !defined(ISP_DISABLE_1020_SUPPORT) && \
59 	 !defined(ISP_DISABLE_1080_SUPPORT) && \
60 	 !defined(ISP_DISABLE_12160_SUPPORT)
61 static int
62 isp_pci_rd_isr(struct ispsoftc *, uint32_t *, uint16_t *, uint16_t *);
63 #endif
64 #if !defined(ISP_DISABLE_2300_SUPPORT)
65 static int
66 isp_pci_rd_isr_2300(struct ispsoftc *, uint32_t *, uint16_t *, uint16_t *);
67 #endif
68 #if !defined(ISP_DISABLE_2400_SUPPORT)
69 static uint32_t isp_pci_rd_reg_2400(struct ispsoftc *, int);
70 static void isp_pci_wr_reg_2400(struct ispsoftc *, int, uint32_t);
71 static int
72 isp_pci_rd_isr_2400(struct ispsoftc *, uint32_t *, uint16_t *, uint16_t *);
73 static int isp2400_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *,
74     uint32_t *, uint32_t);
75 #endif
76 static int isp_pci_mbxdma(struct ispsoftc *);
77 static int isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *,
78     uint32_t *, uint32_t);
79 static void isp_pci_dmateardown(struct ispsoftc *, XS_T *, uint32_t);
80 static void isp_pci_reset0(struct ispsoftc *);
81 static void isp_pci_reset1(struct ispsoftc *);
82 static void isp_pci_dumpregs(struct ispsoftc *, const char *);
83 static int isp_pci_intr(void *);
84 
85 #if	defined(ISP_DISABLE_1020_SUPPORT) || defined(ISP_DISABLE_FW)
86 #define	ISP_1040_RISC_CODE	NULL
87 #else
88 #define	ISP_1040_RISC_CODE	(const uint16_t *) isp_1040_risc_code
89 #include <dev/microcode/isp/asm_1040.h>
90 #endif
91 
92 #if	defined(ISP_DISABLE_1080_SUPPORT) || defined(ISP_DISABLE_FW)
93 #define	ISP_1080_RISC_CODE	NULL
94 #else
95 #define	ISP_1080_RISC_CODE	(const uint16_t *) isp_1080_risc_code
96 #include <dev/microcode/isp/asm_1080.h>
97 #endif
98 
99 #if	defined(ISP_DISABLE_12160_SUPPORT) || defined(ISP_DISABLE_FW)
100 #define	ISP_12160_RISC_CODE	NULL
101 #else
102 #define	ISP_12160_RISC_CODE	(const uint16_t *) isp_12160_risc_code
103 #include <dev/microcode/isp/asm_12160.h>
104 #endif
105 
106 #if	defined(ISP_DISABLE_2100_SUPPORT) || defined(ISP_DISABLE_FW)
107 #define	ISP_2100_RISC_CODE	NULL
108 #else
109 #define	ISP_2100_RISC_CODE	(const uint16_t *) isp_2100_risc_code
110 #include <dev/microcode/isp/asm_2100.h>
111 #endif
112 
113 #if	defined(ISP_DISABLE_2200_SUPPORT) || defined(ISP_DISABLE_FW)
114 #define	ISP_2200_RISC_CODE	NULL
115 #else
116 #define	ISP_2200_RISC_CODE	(const uint16_t *) isp_2200_risc_code
117 #include <dev/microcode/isp/asm_2200.h>
118 #endif
119 
120 #if	defined(ISP_DISABLE_2300_SUPPORT) || defined(ISP_DISABLE_FW)
121 #define	ISP_2300_RISC_CODE	NULL
122 #define	ISP_2322_RISC_CODE	NULL
123 #else
124 #define	ISP_2300_RISC_CODE	(const uint16_t *) isp_2300_risc_code
125 #include <dev/microcode/isp/asm_2300.h>
126 #define	ISP_2322_RISC_CODE	(const uint16_t *) isp_2322_risc_code
127 #include <dev/microcode/isp/asm_2322.h>
128 #endif
129 
130 #if	defined(ISP_DISABLE_2400_SUPPORT) || defined(ISP_DISABLE_FW)
131 #define	ISP_2400_RISC_CODE	NULL
132 #else
133 #define	ISP_2400_RISC_CODE	(const uint32_t *) isp_2400_risc_code
134 #include <dev/microcode/isp/asm_2400.h>
135 #endif
136 
137 #ifndef	ISP_DISABLE_1020_SUPPORT
138 static struct ispmdvec mdvec = {
139 	isp_pci_rd_isr,
140 	isp_pci_rd_reg,
141 	isp_pci_wr_reg,
142 	isp_pci_mbxdma,
143 	isp_pci_dmasetup,
144 	isp_pci_dmateardown,
145 	isp_pci_reset0,
146 	isp_pci_reset1,
147 	isp_pci_dumpregs,
148 	ISP_1040_RISC_CODE,
149 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
150 	0
151 };
152 #endif
153 
154 #ifndef	ISP_DISABLE_1080_SUPPORT
155 static struct ispmdvec mdvec_1080 = {
156 	isp_pci_rd_isr,
157 	isp_pci_rd_reg_1080,
158 	isp_pci_wr_reg_1080,
159 	isp_pci_mbxdma,
160 	isp_pci_dmasetup,
161 	isp_pci_dmateardown,
162 	isp_pci_reset0,
163 	isp_pci_reset1,
164 	isp_pci_dumpregs,
165 	ISP_1080_RISC_CODE,
166 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
167 	0
168 };
169 #endif
170 
171 #ifndef	ISP_DISABLE_12160_SUPPORT
172 static struct ispmdvec mdvec_12160 = {
173 	isp_pci_rd_isr,
174 	isp_pci_rd_reg_1080,
175 	isp_pci_wr_reg_1080,
176 	isp_pci_mbxdma,
177 	isp_pci_dmasetup,
178 	isp_pci_dmateardown,
179 	isp_pci_reset0,
180 	isp_pci_reset1,
181 	isp_pci_dumpregs,
182 	ISP_12160_RISC_CODE,
183 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
184 	0
185 };
186 #endif
187 
188 #ifndef	ISP_DISABLE_2100_SUPPORT
189 static struct ispmdvec mdvec_2100 = {
190 	isp_pci_rd_isr,
191 	isp_pci_rd_reg,
192 	isp_pci_wr_reg,
193 	isp_pci_mbxdma,
194 	isp_pci_dmasetup,
195 	isp_pci_dmateardown,
196 	isp_pci_reset0,
197 	isp_pci_reset1,
198 	isp_pci_dumpregs,
199 	ISP_2100_RISC_CODE,
200 	0,
201 	0
202 };
203 #endif
204 
205 #ifndef	ISP_DISABLE_2200_SUPPORT
206 static struct ispmdvec mdvec_2200 = {
207 	isp_pci_rd_isr,
208 	isp_pci_rd_reg,
209 	isp_pci_wr_reg,
210 	isp_pci_mbxdma,
211 	isp_pci_dmasetup,
212 	isp_pci_dmateardown,
213 	isp_pci_reset0,
214 	isp_pci_reset1,
215 	isp_pci_dumpregs,
216 	ISP_2200_RISC_CODE,
217 	0,
218 	0
219 };
220 #endif
221 
222 #ifndef	ISP_DISABLE_2300_SUPPORT
223 static struct ispmdvec mdvec_2300 = {
224 	isp_pci_rd_isr_2300,
225 	isp_pci_rd_reg,
226 	isp_pci_wr_reg,
227 	isp_pci_mbxdma,
228 	isp_pci_dmasetup,
229 	isp_pci_dmateardown,
230 	isp_pci_reset0,
231 	isp_pci_reset1,
232 	isp_pci_dumpregs,
233 	ISP_2300_RISC_CODE,
234 	0,
235 	0
236 };
237 #endif
238 
239 #ifndef	ISP_DISABLE_2400_SUPPORT
240 static struct ispmdvec mdvec_2400 = {
241 	isp_pci_rd_isr_2400,
242 	isp_pci_rd_reg_2400,
243 	isp_pci_wr_reg_2400,
244 	isp_pci_mbxdma,
245 	isp2400_pci_dmasetup,
246 	isp_pci_dmateardown,
247 	isp_pci_reset0,
248 	isp_pci_reset1,
249 	NULL,
250 	ISP_2400_RISC_CODE,
251 	0,
252 	0
253 };
254 #endif
255 
256 #ifndef	PCI_VENDOR_QLOGIC
257 #define	PCI_VENDOR_QLOGIC	0x1077
258 #endif
259 
260 #ifndef	PCI_PRODUCT_QLOGIC_ISP1020
261 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
262 #endif
263 
264 #ifndef	PCI_PRODUCT_QLOGIC_ISP1080
265 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
266 #endif
267 
268 #ifndef	PCI_PRODUCT_QLOGIC_ISP1240
269 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
270 #endif
271 
272 #ifndef	PCI_PRODUCT_QLOGIC_ISP1280
273 #define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
274 #endif
275 
276 #ifndef	PCI_PRODUCT_QLOGIC_ISP10160
277 #define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
278 #endif
279 
280 #ifndef	PCI_PRODUCT_QLOGIC_ISP12160
281 #define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
282 #endif
283 
284 #ifndef	PCI_PRODUCT_QLOGIC_ISP2100
285 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
286 #endif
287 
288 #ifndef	PCI_PRODUCT_QLOGIC_ISP2200
289 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
290 #endif
291 
292 #ifndef	PCI_PRODUCT_QLOGIC_ISP2300
293 #define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
294 #endif
295 
296 #ifndef	PCI_PRODUCT_QLOGIC_ISP2312
297 #define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
298 #endif
299 
300 #ifndef	PCI_PRODUCT_QLOGIC_ISP2322
301 #define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
302 #endif
303 
304 #ifndef	PCI_PRODUCT_QLOGIC_ISP2422
305 #define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
306 #endif
307 
308 #ifndef	PCI_PRODUCT_QLOGIC_ISP2432
309 #define	PCI_PRODUCT_QLOGIC_ISP2432	0x2432
310 #endif
311 
312 #ifndef	PCI_PRODUCT_QLOGIC_ISP6312
313 #define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
314 #endif
315 
316 #ifndef	PCI_PRODUCT_QLOGIC_ISP6322
317 #define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
318 #endif
319 
320 
321 #define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
322 
323 #define	PCI_QLOGIC_ISP1080	\
324 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
325 
326 #define	PCI_QLOGIC_ISP10160	\
327 	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
328 
329 #define	PCI_QLOGIC_ISP12160	\
330 	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
331 
332 #define	PCI_QLOGIC_ISP1240	\
333 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
334 
335 #define	PCI_QLOGIC_ISP1280	\
336 	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
337 
338 #define	PCI_QLOGIC_ISP2100	\
339 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
340 
341 #define	PCI_QLOGIC_ISP2200	\
342 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
343 
344 #define	PCI_QLOGIC_ISP2300	\
345 	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
346 
347 #define	PCI_QLOGIC_ISP2312	\
348 	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
349 
350 #define	PCI_QLOGIC_ISP2322	\
351 	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)
352 
353 #define	PCI_QLOGIC_ISP2422	\
354 	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
355 
356 #define	PCI_QLOGIC_ISP2432	\
357 	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)
358 
359 #define	PCI_QLOGIC_ISP6312	\
360 	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
361 
362 #define	PCI_QLOGIC_ISP6322	\
363 	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
364 
365 #define	IO_MAP_REG	0x10
366 #define	MEM_MAP_REG	0x14
367 #define	PCIR_ROMADDR	0x30
368 
369 #define	PCI_DFLT_LTNCY	0x40
370 #define	PCI_DFLT_LNSZ	0x10
371 
372 static int isp_pci_probe(struct device *, struct cfdata *, void *);
373 static void isp_pci_attach(struct device *, struct device *, void *);
374 
375 struct isp_pcisoftc {
376 	struct ispsoftc		pci_isp;
377 	pci_chipset_tag_t	pci_pc;
378 	pcitag_t		pci_tag;
379 	bus_space_tag_t		pci_st;
380 	bus_space_handle_t	pci_sh;
381 	bus_dmamap_t		*pci_xfer_dmap;
382 	void *			pci_ih;
383 	int16_t			pci_poff[_NREG_BLKS];
384 };
385 
386 CFATTACH_DECL(isp_pci, sizeof (struct isp_pcisoftc),
387     isp_pci_probe, isp_pci_attach, NULL, NULL);
388 
389 #ifdef	DEBUG
390 const char vstring[] =
391     "Qlogic ISP Driver, NetBSD (pci) Platform Version %d.%d Core Version %d.%d";
392 #endif
393 
394 static int
395 isp_pci_probe(struct device *parent, struct cfdata *match, void *aux)
396 {
397 	struct pci_attach_args *pa = aux;
398 	switch (pa->pa_id) {
399 #ifndef	ISP_DISABLE_1020_SUPPORT
400 	case PCI_QLOGIC_ISP:
401 		return (1);
402 #endif
403 #ifndef	ISP_DISABLE_1080_SUPPORT
404 	case PCI_QLOGIC_ISP1080:
405 	case PCI_QLOGIC_ISP1240:
406 	case PCI_QLOGIC_ISP1280:
407 		return (1);
408 #endif
409 #ifndef	ISP_DISABLE_12160_SUPPORT
410 	case PCI_QLOGIC_ISP10160:
411 	case PCI_QLOGIC_ISP12160:
412 		return (1);
413 #endif
414 #ifndef	ISP_DISABLE_2100_SUPPORT
415 	case PCI_QLOGIC_ISP2100:
416 		return (1);
417 #endif
418 #ifndef	ISP_DISABLE_2200_SUPPORT
419 	case PCI_QLOGIC_ISP2200:
420 		return (1);
421 #endif
422 #ifndef	ISP_DISABLE_2300_SUPPORT
423 	case PCI_QLOGIC_ISP2300:
424 	case PCI_QLOGIC_ISP2312:
425 	case PCI_QLOGIC_ISP2322:
426 	case PCI_QLOGIC_ISP6312:
427 	case PCI_QLOGIC_ISP6322:
428 		return (1);
429 #endif
430 #ifndef	ISP_DISABLE_2400_SUPPORT
431 	case PCI_QLOGIC_ISP2422:
432 	case PCI_QLOGIC_ISP2432:
433 		return (1);
434 #endif
435 	default:
436 		return (0);
437 	}
438 }
439 
440 
441 static void
442 isp_pci_attach(struct device *parent, struct device *self, void *aux)
443 {
444 #ifdef	DEBUG
445 	static char oneshot = 1;
446 #endif
447 	static const char nomem[] = "\n%s: no mem for sdparam table\n";
448 	uint32_t data, rev, linesz = PCI_DFLT_LNSZ;
449 	struct pci_attach_args *pa = aux;
450 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
451 	struct ispsoftc *isp = &pcs->pci_isp;
452 	bus_space_tag_t st, iot, memt;
453 	bus_space_handle_t sh, ioh, memh;
454 	pci_intr_handle_t ih;
455 	pcireg_t mem_type;
456 	const char *dstring;
457 	const char *intrstr;
458 	int ioh_valid, memh_valid;
459 
460 	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
461 	    PCI_MAPREG_TYPE_IO, 0,
462 	    &iot, &ioh, NULL, NULL) == 0);
463 
464 	mem_type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, MEM_MAP_REG);
465 	if (PCI_MAPREG_TYPE(mem_type) != PCI_MAPREG_TYPE_MEM) {
466 		memh_valid = 0;
467 	} else if (PCI_MAPREG_MEM_TYPE(mem_type) != PCI_MAPREG_MEM_TYPE_32BIT &&
468 	    PCI_MAPREG_MEM_TYPE(mem_type) != PCI_MAPREG_MEM_TYPE_64BIT) {
469 		memh_valid = 0;
470 	} else {
471 		memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG, mem_type, 0,
472 		    &memt, &memh, NULL, NULL) == 0);
473 	}
474 	if (memh_valid) {
475 		st = memt;
476 		sh = memh;
477 	} else if (ioh_valid) {
478 		st = iot;
479 		sh = ioh;
480 	} else {
481 		printf(": unable to map device registers\n");
482 		return;
483 	}
484 	dstring = "\n";
485 
486 	pcs->pci_st = st;
487 	pcs->pci_sh = sh;
488 	pcs->pci_pc = pa->pa_pc;
489 	pcs->pci_tag = pa->pa_tag;
490 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
491 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
492 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
493 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
494 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
495 	rev = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG) & 0xff;
496 
497 #ifndef	ISP_DISABLE_1020_SUPPORT
498 	if (pa->pa_id == PCI_QLOGIC_ISP) {
499 		dstring = ": QLogic 1020 Fast Wide SCSI HBA\n";
500 		isp->isp_mdvec = &mdvec;
501 		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
502 		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
503 		if (isp->isp_param == NULL) {
504 			printf(nomem, isp->isp_name);
505 			return;
506 		}
507 		memset(isp->isp_param, 0, sizeof (sdparam));
508 	}
509 #endif
510 #ifndef	ISP_DISABLE_1080_SUPPORT
511 	if (pa->pa_id == PCI_QLOGIC_ISP1080) {
512 		dstring = ": QLogic 1080 Ultra-2 Wide SCSI HBA\n";
513 		isp->isp_mdvec = &mdvec_1080;
514 		isp->isp_type = ISP_HA_SCSI_1080;
515 		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
516 		if (isp->isp_param == NULL) {
517 			printf(nomem, isp->isp_name);
518 			return;
519 		}
520 		memset(isp->isp_param, 0, sizeof (sdparam));
521 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
522 		    ISP1080_DMA_REGS_OFF;
523 	}
524 	if (pa->pa_id == PCI_QLOGIC_ISP1240) {
525 		dstring = ": QLogic Dual Channel Ultra Wide SCSI HBA\n";
526 		isp->isp_mdvec = &mdvec_1080;
527 		isp->isp_type = ISP_HA_SCSI_1240;
528 		isp->isp_param =
529 		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
530 		if (isp->isp_param == NULL) {
531 			printf(nomem, isp->isp_name);
532 			return;
533 		}
534 		memset(isp->isp_param, 0, 2 * sizeof (sdparam));
535 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
536 		    ISP1080_DMA_REGS_OFF;
537 	}
538 	if (pa->pa_id == PCI_QLOGIC_ISP1280) {
539 		dstring = ": QLogic Dual Channel Ultra-2 Wide SCSI HBA\n";
540 		isp->isp_mdvec = &mdvec_1080;
541 		isp->isp_type = ISP_HA_SCSI_1280;
542 		isp->isp_param =
543 		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
544 		if (isp->isp_param == NULL) {
545 			printf(nomem, isp->isp_name);
546 			return;
547 		}
548 		memset(isp->isp_param, 0, 2 * sizeof (sdparam));
549 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
550 		    ISP1080_DMA_REGS_OFF;
551 	}
552 #endif
553 #ifndef	ISP_DISABLE_12160_SUPPORT
554 	if (pa->pa_id == PCI_QLOGIC_ISP10160) {
555 		dstring = ": QLogic Ultra-3 Wide SCSI HBA\n";
556 		isp->isp_mdvec = &mdvec_12160;
557 		isp->isp_type = ISP_HA_SCSI_10160;
558 		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
559 		if (isp->isp_param == NULL) {
560 			printf(nomem, isp->isp_name);
561 			return;
562 		}
563 		memset(isp->isp_param, 0, sizeof (sdparam));
564 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
565 		    ISP1080_DMA_REGS_OFF;
566 	}
567 	if (pa->pa_id == PCI_QLOGIC_ISP12160) {
568 		dstring = ": QLogic Dual Channel Ultra-3 Wide SCSI HBA\n";
569 		isp->isp_mdvec = &mdvec_12160;
570 		isp->isp_type = ISP_HA_SCSI_12160;
571 		isp->isp_param =
572 		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
573 		if (isp->isp_param == NULL) {
574 			printf(nomem, isp->isp_name);
575 			return;
576 		}
577 		memset(isp->isp_param, 0, 2 * sizeof (sdparam));
578 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
579 		    ISP1080_DMA_REGS_OFF;
580 	}
581 #endif
582 #ifndef	ISP_DISABLE_2100_SUPPORT
583 	if (pa->pa_id == PCI_QLOGIC_ISP2100) {
584 		dstring = ": QLogic FC-AL HBA\n";
585 		isp->isp_mdvec = &mdvec_2100;
586 		isp->isp_type = ISP_HA_FC_2100;
587 		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
588 		if (isp->isp_param == NULL) {
589 			printf(nomem, isp->isp_name);
590 			return;
591 		}
592 		memset(isp->isp_param, 0, sizeof (fcparam));
593 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
594 		    PCI_MBOX_REGS2100_OFF;
595 		if (rev < 3) {
596 			/*
597 			 * XXX: Need to get the actual revision
598 			 * XXX: number of the 2100 FB. At any rate,
599 			 * XXX: lower cache line size for early revision
600 			 * XXX; boards.
601 			 */
602 			linesz = 1;
603 		}
604 	}
605 #endif
606 #ifndef	ISP_DISABLE_2200_SUPPORT
607 	if (pa->pa_id == PCI_QLOGIC_ISP2200) {
608 		dstring = ": QLogic FC-AL and Fabric HBA\n";
609 		isp->isp_mdvec = &mdvec_2200;
610 		isp->isp_type = ISP_HA_FC_2200;
611 		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
612 		if (isp->isp_param == NULL) {
613 			printf(nomem, isp->isp_name);
614 			return;
615 		}
616 		memset(isp->isp_param, 0, sizeof (fcparam));
617 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
618 		    PCI_MBOX_REGS2100_OFF;
619 		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
620 	}
621 #endif
622 #ifndef	ISP_DISABLE_2300_SUPPORT
623 	if (pa->pa_id == PCI_QLOGIC_ISP2300 ||
624 	    pa->pa_id == PCI_QLOGIC_ISP2312 ||
625 	    pa->pa_id == PCI_QLOGIC_ISP6312) {
626 		isp->isp_mdvec = &mdvec_2300;
627 		if (pa->pa_id == PCI_QLOGIC_ISP2300 ||
628 		    pa->pa_id == PCI_QLOGIC_ISP6312) {
629 			dstring = ": QLogic FC-AL and 2Gbps Fabric HBA\n";
630 			isp->isp_type = ISP_HA_FC_2300;
631 		} else {
632 			dstring =
633 			    ": QLogic Dual Port FC-AL and 2Gbps Fabric HBA\n";
634 			isp->isp_port = pa->pa_function;
635 		}
636 		isp->isp_type = ISP_HA_FC_2312;
637 		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
638 		if (isp->isp_param == NULL) {
639 			printf(nomem, isp->isp_name);
640 			return;
641 		}
642 		memset(isp->isp_param, 0, sizeof (fcparam));
643 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
644 		    PCI_MBOX_REGS2300_OFF;
645 		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
646 	}
647 	if (pa->pa_id == PCI_QLOGIC_ISP2322 ||
648 	    pa->pa_id == PCI_QLOGIC_ISP6322) {
649 		isp->isp_mdvec = &mdvec_2300;
650 		dstring = ": QLogic FC-AL and 2Gbps Fabric PCI-E HBA\n";
651 		isp->isp_type = ISP_HA_FC_2322;
652 		isp->isp_port = pa->pa_function;
653 		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
654 		if (isp->isp_param == NULL) {
655 			printf(nomem, isp->isp_name);
656 			return;
657 		}
658 		memset(isp->isp_param, 0, sizeof (fcparam));
659 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
660 		    PCI_MBOX_REGS2300_OFF;
661 		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
662 	}
663 #endif
664 #ifndef	ISP_DISABLE_2400_SUPPORT
665 	if (pa->pa_id == PCI_QLOGIC_ISP2422 ||
666 	    pa->pa_id == PCI_QLOGIC_ISP2432) {
667 		isp->isp_mdvec = &mdvec_2400;
668 		if (pa->pa_id == PCI_QLOGIC_ISP2422) {
669 			dstring = ": QLogic FC-AL and 4Gbps Fabric PCI-X HBA\n";
670 		} else {
671 			dstring = ": QLogic FC-AL and 4Gbps Fabric PCI-E HBA\n";
672 		}
673 		isp->isp_type = ISP_HA_FC_2400;
674 		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
675 		if (isp->isp_param == NULL) {
676 			printf(nomem, isp->isp_name);
677 			return;
678 		}
679 		memset(isp->isp_param, 0, sizeof (fcparam));
680 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
681 		    PCI_MBOX_REGS2400_OFF;
682 		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
683 	}
684 #endif
685 	/*
686 	 * Set up logging levels.
687 	 */
688 #ifdef	ISP_LOGDEFAULT
689 	isp->isp_dblev = ISP_LOGDEFAULT;
690 #else
691 	isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
692 	if (bootverbose)
693 		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
694 #ifdef	SCSIDEBUG
695 	isp->isp_dblev |= ISP_LOGDEBUG0|ISP_LOGDEBUG1|ISP_LOGDEBUG2;
696 #endif
697 #endif
698 	if (isp->isp_dblev & ISP_LOGCONFIG) {
699 		printf("\n");
700 	} else {
701 		printf(dstring);
702 	}
703 
704 #ifdef	DEBUG
705 	if (oneshot) {
706 		oneshot = 0;
707 		isp_prt(isp, ISP_LOGCONFIG, vstring,
708 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
709 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
710 	}
711 #endif
712 
713 	isp->isp_dmatag = pa->pa_dmat;
714 	isp->isp_revision = rev;
715 
716 	/*
717 	 * Make sure that command register set sanely.
718 	 */
719 	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
720 	data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;
721 
722 	/*
723 	 * Not so sure about these- but I think it's important that they get
724 	 * enabled......
725 	 */
726 	data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
727 	if (IS_2300(isp)) {	/* per QLogic errata */
728 		data &= ~PCI_COMMAND_INVALIDATE_ENABLE;
729 	}
730 	if (IS_23XX(isp)) {
731 		isp->isp_touched = 1;
732 	}
733 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);
734 
735 	/*
736 	 * Make sure that the latency timer, cache line size,
737 	 * and ROM is disabled.
738 	 */
739 	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
740 	data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
741 	data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
742 	data |= (PCI_DFLT_LTNCY	<< PCI_LATTIMER_SHIFT);
743 	data |= (linesz << PCI_CACHELINE_SHIFT);
744 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
745 
746 	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
747 	data &= ~1;
748 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);
749 
750 	if (pci_intr_map(pa, &ih)) {
751 		printf("%s: couldn't map interrupt\n", isp->isp_name);
752 		free(isp->isp_param, M_DEVBUF);
753 		return;
754 	}
755 	intrstr = pci_intr_string(pa->pa_pc, ih);
756 	if (intrstr == NULL)
757 		intrstr = "<I dunno>";
758 	pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
759 	    isp_pci_intr, isp);
760 	if (pcs->pci_ih == NULL) {
761 		printf("%s: couldn't establish interrupt at %s\n",
762 			isp->isp_name, intrstr);
763 		free(isp->isp_param, M_DEVBUF);
764 		return;
765 	}
766 
767 	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);
768 
769 	if (IS_FC(isp)) {
770 		DEFAULT_NODEWWN(isp) = 0x400000007F000002ULL;
771 		DEFAULT_PORTWWN(isp) = 0x400000007F000002ULL;
772 	}
773 
774 	isp->isp_confopts = self->dv_cfdata->cf_flags;
775 	isp->isp_role = ISP_DEFAULT_ROLES;
776 	ISP_LOCK(isp);
777 	isp_reset(isp);
778 	if (isp->isp_state != ISP_RESETSTATE) {
779 		ISP_UNLOCK(isp);
780 		free(isp->isp_param, M_DEVBUF);
781 		return;
782 	}
783 	isp_init(isp);
784 	if (isp->isp_state != ISP_INITSTATE) {
785 		isp_uninit(isp);
786 		ISP_UNLOCK(isp);
787 		free(isp->isp_param, M_DEVBUF);
788 		return;
789 	}
790 	/*
791 	 * Do platform attach.
792 	 */
793 	ISP_UNLOCK(isp);
794 	isp_attach(isp);
795 	if (isp->isp_state != ISP_RUNSTATE) {
796 		ISP_LOCK(isp);
797 		isp_uninit(isp);
798 		free(isp->isp_param, M_DEVBUF);
799 		ISP_UNLOCK(isp);
800 	}
801 }
802 
803 #define	IspVirt2Off(a, x)	\
804 	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
805 	_BLK_REG_SHFT] + ((x) & 0xff))
806 
807 #define	BXR2(pcs, off)		\
808 	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
809 #define	BXW2(pcs, off, v)	\
810 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
811 #define	BXR4(pcs, off)		\
812 	bus_space_read_4(pcs->pci_st, pcs->pci_sh, off)
813 #define	BXW4(pcs, off, v)	\
814 	bus_space_write_4(pcs->pci_st, pcs->pci_sh, off, v)
815 
816 
817 static int
818 isp_pci_rd_debounced(struct ispsoftc *isp, int off, uint16_t *rp)
819 {
820 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
821 	uint16_t val0, val1;
822 	int i = 0;
823 
824 	do {
825 		val0 = BXR2(pcs, IspVirt2Off(isp, off));
826 		val1 = BXR2(pcs, IspVirt2Off(isp, off));
827 	} while (val0 != val1 && ++i < 1000);
828 	if (val0 != val1) {
829 		return (1);
830 	}
831 	*rp = val0;
832 	return (0);
833 }
834 
835 #if !defined(ISP_DISABLE_2100_SUPPORT) && \
836 	 !defined(ISP_DISABLE_2200_SUPPORT) && \
837 	 !defined(ISP_DISABLE_1020_SUPPORT) && \
838 	 !defined(ISP_DISABLE_1080_SUPPORT) && \
839 	 !defined(ISP_DISABLE_12160_SUPPORT)
840 static int
841 isp_pci_rd_isr(struct ispsoftc *isp, uint32_t *isrp,
842     uint16_t *semap, uint16_t *mbp)
843 {
844 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
845 	uint16_t isr, sema;
846 
847 	if (IS_2100(isp)) {
848 		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
849 		    return (0);
850 		}
851 		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
852 		    return (0);
853 		}
854 	} else {
855 		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
856 		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
857 	}
858 	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
859 	isr &= INT_PENDING_MASK(isp);
860 	sema &= BIU_SEMA_LOCK;
861 	if (isr == 0 && sema == 0) {
862 		return (0);
863 	}
864 	*isrp = isr;
865 	if ((*semap = sema) != 0) {
866 		if (IS_2100(isp)) {
867 			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
868 				return (0);
869 			}
870 		} else {
871 			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
872 		}
873 	}
874 	return (1);
875 }
876 #endif
877 
878 #ifndef	ISP_DISABLE_2300_SUPPORT
879 static int
880 isp_pci_rd_isr_2300(struct ispsoftc *isp, uint32_t *isrp,
881     uint16_t *semap, uint16_t *mbox0p)
882 {
883 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
884 	uint32_t r2hisr;
885 
886 	if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
887 		*isrp = 0;
888 		return (0);
889 	}
890 	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
891 	    IspVirt2Off(pcs, BIU_R2HSTSLO));
892 	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
893 	if ((r2hisr & BIU_R2HST_INTR) == 0) {
894 		*isrp = 0;
895 		return (0);
896 	}
897 	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
898 	case ISPR2HST_ROM_MBX_OK:
899 	case ISPR2HST_ROM_MBX_FAIL:
900 	case ISPR2HST_MBX_OK:
901 	case ISPR2HST_MBX_FAIL:
902 	case ISPR2HST_ASYNC_EVENT:
903 		*isrp = r2hisr & 0xffff;
904 		*mbox0p = (r2hisr >> 16);
905 		*semap = 1;
906 		return (1);
907 	case ISPR2HST_RIO_16:
908 		*isrp = r2hisr & 0xffff;
909 		*mbox0p = ASYNC_RIO1;
910 		*semap = 1;
911 		return (1);
912 	case ISPR2HST_FPOST:
913 		*isrp = r2hisr & 0xffff;
914 		*mbox0p = ASYNC_CMD_CMPLT;
915 		*semap = 1;
916 		return (1);
917 	case ISPR2HST_FPOST_CTIO:
918 		*isrp = r2hisr & 0xffff;
919 		*mbox0p = ASYNC_CTIO_DONE;
920 		*semap = 1;
921 		return (1);
922 	case ISPR2HST_RSPQ_UPDATE:
923 		*isrp = r2hisr & 0xffff;
924 		*mbox0p = 0;
925 		*semap = 0;
926 		return (1);
927 	default:
928 		return (0);
929 	}
930 }
931 #endif
932 
933 #ifndef	ISP_DISABLE_2400_SUPPORT
934 static int
935 isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp,
936     uint16_t *semap, uint16_t *mbox0p)
937 {
938 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
939 	uint32_t r2hisr;
940 
941 	r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU2400_R2HSTSLO));
942 	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
943 	if ((r2hisr & BIU2400_R2HST_INTR) == 0) {
944 		*isrp = 0;
945 		return (0);
946 	}
947 	switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) {
948 	case ISP2400R2HST_ROM_MBX_OK:
949 	case ISP2400R2HST_ROM_MBX_FAIL:
950 	case ISP2400R2HST_MBX_OK:
951 	case ISP2400R2HST_MBX_FAIL:
952 	case ISP2400R2HST_ASYNC_EVENT:
953 		*isrp = r2hisr & 0xffff;
954 		*mbox0p = (r2hisr >> 16);
955 		*semap = 1;
956 		return (1);
957 	case ISP2400R2HST_RSPQ_UPDATE:
958 	case ISP2400R2HST_ATIO_RSPQ_UPDATE:
959 	case ISP2400R2HST_ATIO_RQST_UPDATE:
960 		*isrp = r2hisr & 0xffff;
961 		*mbox0p = 0;
962 		*semap = 0;
963 		return (1);
964 	default:
965 		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
966 		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
967 		return (0);
968 	}
969 }
970 
971 static uint32_t
972 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
973 {
974 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
975 	uint32_t rv;
976 	int block = regoff & _BLK_REG_MASK;
977 
978 	switch (block) {
979 	case BIU_BLOCK:
980 		break;
981 	case MBOX_BLOCK:
982 		return (BXR2(pcs, IspVirt2Off(pcs, regoff)));
983 	case SXP_BLOCK:
984 		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
985 		return (0xffffffff);
986 	case RISC_BLOCK:
987 		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
988 		return (0xffffffff);
989 	case DMA_BLOCK:
990 		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
991 		return (0xffffffff);
992 	default:
993 		isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
994 		return (0xffffffff);
995 	}
996 
997 
998 	switch (regoff) {
999 	case BIU2400_FLASH_ADDR:
1000 	case BIU2400_FLASH_DATA:
1001 	case BIU2400_ICR:
1002 	case BIU2400_ISR:
1003 	case BIU2400_CSR:
1004 	case BIU2400_REQINP:
1005 	case BIU2400_REQOUTP:
1006 	case BIU2400_RSPINP:
1007 	case BIU2400_RSPOUTP:
1008 	case BIU2400_PRI_RQINP:
1009 	case BIU2400_PRI_RSPINP:
1010 	case BIU2400_ATIO_RSPINP:
1011 	case BIU2400_ATIO_REQINP:
1012 	case BIU2400_HCCR:
1013 	case BIU2400_GPIOD:
1014 	case BIU2400_GPIOE:
1015 	case BIU2400_HSEMA:
1016 		rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
1017 		break;
1018 	case BIU2400_R2HSTSLO:
1019 		rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
1020 		break;
1021 	case BIU2400_R2HSTSHI:
1022 		rv = BXR4(pcs, IspVirt2Off(pcs, regoff)) >> 16;
1023 		break;
1024 	default:
1025 		isp_prt(isp, ISP_LOGERR,
1026 		    "isp_pci_rd_reg_2400: unknown offset %x", regoff);
1027 		rv = 0xffffffff;
1028 		break;
1029 	}
1030 	return (rv);
1031 }
1032 
1033 static void
1034 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
1035 {
1036 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1037 	int block = regoff & _BLK_REG_MASK;
1038 	volatile int junk;
1039 
1040 	switch (block) {
1041 	case BIU_BLOCK:
1042 		break;
1043 	case MBOX_BLOCK:
1044 		BXW2(pcs, IspVirt2Off(pcs, regoff), val);
1045 		junk = BXR2(pcs, IspVirt2Off(pcs, regoff));
1046 		return;
1047 	case SXP_BLOCK:
1048 		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
1049 		return;
1050 	case RISC_BLOCK:
1051 		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
1052 		return;
1053 	case DMA_BLOCK:
1054 		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
1055 		return;
1056 	default:
1057 		isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
1058 		    regoff);
1059 		break;
1060 	}
1061 
1062 	switch (regoff) {
1063 	case BIU2400_FLASH_ADDR:
1064 	case BIU2400_FLASH_DATA:
1065 	case BIU2400_ICR:
1066 	case BIU2400_ISR:
1067 	case BIU2400_CSR:
1068 	case BIU2400_REQINP:
1069 	case BIU2400_REQOUTP:
1070 	case BIU2400_RSPINP:
1071 	case BIU2400_RSPOUTP:
1072 	case BIU2400_PRI_RQINP:
1073 	case BIU2400_PRI_RSPINP:
1074 	case BIU2400_ATIO_RSPINP:
1075 	case BIU2400_ATIO_REQINP:
1076 	case BIU2400_HCCR:
1077 	case BIU2400_GPIOD:
1078 	case BIU2400_GPIOE:
1079 	case BIU2400_HSEMA:
1080 		BXW4(pcs, IspVirt2Off(pcs, regoff), val);
1081 		junk = BXR4(pcs, IspVirt2Off(pcs, regoff));
1082 		break;
1083 	default:
1084 		isp_prt(isp, ISP_LOGERR,
1085 		    "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
1086 		break;
1087 	}
1088 }
1089 #endif
1090 
1091 static uint32_t
1092 isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
1093 {
1094 	uint32_t rv;
1095 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1096 	int oldconf = 0;
1097 
1098 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1099 		/*
1100 		 * We will assume that someone has paused the RISC processor.
1101 		 */
1102 		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1103 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1104 		    oldconf | BIU_PCI_CONF1_SXP);
1105 	}
1106 	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
1107 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1108 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
1109 	}
1110 	return (rv);
1111 }
1112 
1113 static void
1114 isp_pci_wr_reg(struct ispsoftc *isp, int regoff, uint32_t val)
1115 {
1116 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1117 	int oldconf = 0;
1118 
1119 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1120 		/*
1121 		 * We will assume that someone has paused the RISC processor.
1122 		 */
1123 		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1124 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1125 		    oldconf | BIU_PCI_CONF1_SXP);
1126 	}
1127 	BXW2(pcs, IspVirt2Off(isp, regoff), val);
1128 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1129 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
1130 	}
1131 }
1132 
1133 #if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
1134 static uint32_t
1135 isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
1136 {
1137 	uint16_t rv, oc = 0;
1138 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1139 
1140 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1141 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1142 		uint16_t tc;
1143 		/*
1144 		 * We will assume that someone has paused the RISC processor.
1145 		 */
1146 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1147 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1148 		if (regoff & SXP_BANK1_SELECT)
1149 			tc |= BIU_PCI1080_CONF1_SXP1;
1150 		else
1151 			tc |= BIU_PCI1080_CONF1_SXP0;
1152 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1153 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1154 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1155 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1156 		    oc | BIU_PCI1080_CONF1_DMA);
1157 	}
1158 	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
1159 	if (oc) {
1160 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1161 	}
1162 	return (rv);
1163 }
1164 
1165 static void
1166 isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, uint32_t val)
1167 {
1168 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1169 	int oc = 0;
1170 
1171 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1172 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1173 		uint16_t tc;
1174 		/*
1175 		 * We will assume that someone has paused the RISC processor.
1176 		 */
1177 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1178 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1179 		if (regoff & SXP_BANK1_SELECT)
1180 			tc |= BIU_PCI1080_CONF1_SXP1;
1181 		else
1182 			tc |= BIU_PCI1080_CONF1_SXP0;
1183 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1184 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1185 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1186 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1187 		    oc | BIU_PCI1080_CONF1_DMA);
1188 	}
1189 	BXW2(pcs, IspVirt2Off(isp, regoff), val);
1190 	if (oc) {
1191 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1192 	}
1193 }
1194 #endif
1195 
1196 static int
1197 isp_pci_mbxdma(struct ispsoftc *isp)
1198 {
1199 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1200 	bus_dma_tag_t dmat = isp->isp_dmatag;
1201 	bus_dma_segment_t sg;
1202 	bus_size_t len, dbound;
1203 	fcparam *fcp;
1204 	int rs, i;
1205 
1206 	if (isp->isp_rquest_dma)	/* been here before? */
1207 		return (0);
1208 
1209 	if (isp->isp_type <= ISP_HA_SCSI_1040B) {
1210 		dbound = 1 << 24;
1211 	} else {
1212 		/*
1213 		 * For 32-bit PCI DMA, the range is 32 bits or zero :-)
1214 		 */
1215 		dbound = 0;
1216 	}
1217 	len = isp->isp_maxcmds * sizeof (XS_T *);
1218 	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK);
1219 	if (isp->isp_xflist == NULL) {
1220 		isp_prt(isp, ISP_LOGERR, "cannot malloc xflist array");
1221 		return (1);
1222 	}
1223 	memset(isp->isp_xflist, 0, len);
1224 	len = isp->isp_maxcmds * sizeof (bus_dmamap_t);
1225 	pcs->pci_xfer_dmap = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
1226 	if (pcs->pci_xfer_dmap == NULL) {
1227 		free(isp->isp_xflist, M_DEVBUF);
1228 		isp->isp_xflist = NULL;
1229 		isp_prt(isp, ISP_LOGERR, "cannot malloc DMA map array");
1230 		return (1);
1231 	}
1232 	for (i = 0; i < isp->isp_maxcmds; i++) {
1233 		if (bus_dmamap_create(dmat, MAXPHYS, (MAXPHYS / PAGE_SIZE) + 1,
1234 		    MAXPHYS, dbound, BUS_DMA_NOWAIT, &pcs->pci_xfer_dmap[i])) {
1235 			isp_prt(isp, ISP_LOGERR, "cannot create DMA maps");
1236 			break;
1237 		}
1238 	}
1239 	if (i < isp->isp_maxcmds) {
1240 		while (--i >= 0) {
1241 			bus_dmamap_destroy(dmat, pcs->pci_xfer_dmap[i]);
1242 		}
1243 		free(isp->isp_xflist, M_DEVBUF);
1244 		free(pcs->pci_xfer_dmap, M_DEVBUF);
1245 		isp->isp_xflist = NULL;
1246 		pcs->pci_xfer_dmap = NULL;
1247 		return (1);
1248 	}
1249 
1250 	/*
1251 	 * Allocate and map the request queue.
1252 	 */
1253 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1254 	if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs, 0)) {
1255 		goto dmafail;
1256 	}
1257  	if (bus_dmamem_map(isp->isp_dmatag, &sg, rs, len,
1258 	    (void *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
1259 		goto dmafail;
1260 	}
1261 	if (bus_dmamap_create(dmat, len, 1, len, dbound, BUS_DMA_NOWAIT,
1262 	    &isp->isp_rqdmap)) {
1263 		goto dmafail;
1264 	}
1265 	if (bus_dmamap_load(dmat, isp->isp_rqdmap, isp->isp_rquest, len, NULL,
1266 	    BUS_DMA_NOWAIT)) {
1267 		goto dmafail;
1268 	}
1269 	isp->isp_rquest_dma = isp->isp_rqdmap->dm_segs[0].ds_addr;
1270 
1271 	/*
1272 	 * Allocate and map the result queue.
1273 	 */
1274 	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1275 	if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
1276 	    BUS_DMA_NOWAIT)) {
1277 		goto dmafail;
1278 	}
1279 	if (bus_dmamem_map(dmat, &sg, rs, len,
1280 	    (void *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
1281 		goto dmafail;
1282 	}
1283 	if (bus_dmamap_create(dmat, len, 1, len, dbound, BUS_DMA_NOWAIT,
1284 	    &isp->isp_rsdmap)) {
1285 		goto dmafail;
1286 	}
1287 	if (bus_dmamap_load(dmat, isp->isp_rsdmap, isp->isp_result, len, NULL,
1288 	    BUS_DMA_NOWAIT)) {
1289 		goto dmafail;
1290 	}
1291 	isp->isp_result_dma = isp->isp_rsdmap->dm_segs[0].ds_addr;
1292 
1293 	if (IS_SCSI(isp)) {
1294 		return (0);
1295 	}
1296 
1297 	/*
1298 	 * Allocate and map an FC scratch area
1299 	 */
1300 	fcp = isp->isp_param;
1301 	len = ISP2100_SCRLEN;
1302 	if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
1303 	    BUS_DMA_NOWAIT)) {
1304 		goto dmafail;
1305 	}
1306 	if (bus_dmamem_map(dmat, &sg, rs, len,
1307 	    (void *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
1308 		goto dmafail;
1309 	}
1310 	if (bus_dmamap_create(dmat, len, 1, len, dbound, BUS_DMA_NOWAIT,
1311 	    &isp->isp_scdmap)) {
1312 		goto dmafail;
1313 	}
1314 	if (bus_dmamap_load(dmat, isp->isp_scdmap, fcp->isp_scratch, len, NULL,
1315 	    BUS_DMA_NOWAIT)) {
1316 		goto dmafail;
1317 	}
1318 	fcp->isp_scdma = isp->isp_scdmap->dm_segs[0].ds_addr;
1319 	return (0);
1320 dmafail:
1321 	isp_prt(isp, ISP_LOGERR, "mailbox DMA setup failure");
1322 	for (i = 0; i < isp->isp_maxcmds; i++) {
1323 		bus_dmamap_destroy(dmat, pcs->pci_xfer_dmap[i]);
1324 	}
1325 	free(isp->isp_xflist, M_DEVBUF);
1326 	free(pcs->pci_xfer_dmap, M_DEVBUF);
1327 	isp->isp_xflist = NULL;
1328 	pcs->pci_xfer_dmap = NULL;
1329 	return (1);
1330 }
1331 
1332 static int
1333 isp_pci_dmasetup(struct ispsoftc *isp, struct scsipi_xfer *xs, ispreq_t *rq,
1334     uint32_t *nxtip, uint32_t optr)
1335 {
1336 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1337 	bus_dmamap_t dmap;
1338 	uint32_t starti = isp->isp_reqidx, nxti = *nxtip;
1339 	ispreq_t *qep;
1340 	int segcnt, seg, error, ovseg, seglim, drq;
1341 
1342 	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, starti);
1343 	dmap = pcs->pci_xfer_dmap[isp_handle_index(rq->req_handle)];
1344 	if (xs->datalen == 0) {
1345 		rq->req_seg_count = 1;
1346 		goto mbxsync;
1347 	}
1348 	if (xs->xs_control & XS_CTL_DATA_IN) {
1349 		drq = REQFLAG_DATA_IN;
1350 	} else {
1351 		drq = REQFLAG_DATA_OUT;
1352 	}
1353 
1354 	if (IS_FC(isp)) {
1355 		seglim = ISP_RQDSEG_T2;
1356 		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
1357 		((ispreqt2_t *)rq)->req_flags |= drq;
1358 	} else {
1359 		rq->req_flags |= drq;
1360 		if (XS_CDBLEN(xs) > 12) {
1361 			seglim = 0;
1362 		} else {
1363 			seglim = ISP_RQDSEG;
1364 		}
1365 	}
1366 	error = bus_dmamap_load(isp->isp_dmatag, dmap, xs->data, xs->datalen,
1367 	    NULL, ((xs->xs_control & XS_CTL_NOSLEEP) ?
1368 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
1369 	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
1370 	if (error) {
1371 		isp_prt(isp, ISP_LOGWARN, "unable to load DMA (%d)", error);
1372 		XS_SETERR(xs, HBA_BOTCH);
1373 		if (error == EAGAIN || error == ENOMEM)
1374 			return (CMD_EAGAIN);
1375 		else
1376 			return (CMD_COMPLETE);
1377 	}
1378 
1379 	segcnt = dmap->dm_nsegs;
1380 
1381 	isp_prt(isp, ISP_LOGDEBUG2, "%d byte %s %p in %d segs",
1382 	    xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)? "read to" :
1383 	    "write from", xs->data, segcnt);
1384 
1385 	for (seg = 0, rq->req_seg_count = 0;
1386 	    seglim && seg < segcnt && rq->req_seg_count < seglim;
1387 	    seg++, rq->req_seg_count++) {
1388 		if (IS_FC(isp)) {
1389 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1390 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
1391 			    dmap->dm_segs[seg].ds_len;
1392 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
1393 			    dmap->dm_segs[seg].ds_addr;
1394 		} else {
1395 			rq->req_dataseg[rq->req_seg_count].ds_count =
1396 			    dmap->dm_segs[seg].ds_len;
1397 			rq->req_dataseg[rq->req_seg_count].ds_base =
1398 			    dmap->dm_segs[seg].ds_addr;
1399 		}
1400 		isp_prt(isp, ISP_LOGDEBUG2, "seg0.[%d]={0x%lx,%lu}",
1401 		    rq->req_seg_count, (long) dmap->dm_segs[seg].ds_addr,
1402 		    (unsigned long) dmap->dm_segs[seg].ds_len);
1403 	}
1404 
1405 	if (seg == segcnt) {
1406 		goto dmasync;
1407 	}
1408 
1409 	do {
1410 		uint32_t onxti;
1411 		ispcontreq_t *crq, *cqe, local;
1412 
1413 		crq = &local;
1414 
1415 		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1416 		onxti = nxti;
1417 		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
1418 		if (nxti == optr) {
1419 			isp_prt(isp, ISP_LOGERR, "Request Queue Overflow++");
1420 			bus_dmamap_unload(isp->isp_dmatag, dmap);
1421 			XS_SETERR(xs, HBA_BOTCH);
1422 			return (CMD_EAGAIN);
1423 		}
1424 		rq->req_header.rqs_entry_count++;
1425 		memset((void *)crq, 0, sizeof (*crq));
1426 		crq->req_header.rqs_entry_count = 1;
1427 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1428 
1429 		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
1430 		    rq->req_seg_count++, seg++, ovseg++) {
1431 			crq->req_dataseg[ovseg].ds_count =
1432 			    dmap->dm_segs[seg].ds_len;
1433 			crq->req_dataseg[ovseg].ds_base =
1434 			    dmap->dm_segs[seg].ds_addr;
1435 			isp_prt(isp, ISP_LOGDEBUG2, "seg%d.[%d]={0x%lx,%lu}",
1436 			    rq->req_header.rqs_entry_count - 1,
1437 			    rq->req_seg_count, (long)dmap->dm_segs[seg].ds_addr,
1438 			    (unsigned long) dmap->dm_segs[seg].ds_len);
1439 		}
1440 		isp_put_cont_req(isp, crq, cqe);
1441 		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
1442 	} while (seg < segcnt);
1443 
1444 
1445 dmasync:
1446 	bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
1447 	    (xs->xs_control & XS_CTL_DATA_IN) ?  BUS_DMASYNC_PREREAD :
1448 	    BUS_DMASYNC_PREWRITE);
1449 
1450 mbxsync:
1451 	switch (rq->req_header.rqs_entry_type) {
1452 	case RQSTYPE_REQUEST:
1453 		isp_put_request(isp, rq, qep);
1454 		break;
1455 	case RQSTYPE_CMDONLY:
1456 		isp_put_extended_request(isp, (ispextreq_t *)rq,
1457 		    (ispextreq_t *)qep);
1458 		break;
1459 	case RQSTYPE_T2RQS:
1460 		if (FCPARAM(isp)->isp_2klogin) {
1461 			isp_put_request_t2e(isp,
1462 			    (ispreqt2e_t *) rq, (ispreqt2e_t *) qep);
1463 		} else {
1464 			isp_put_request_t2(isp,
1465 			    (ispreqt2_t *) rq, (ispreqt2_t *) qep);
1466 		}
1467 		break;
1468 	}
1469 	*nxtip = nxti;
1470 	return (CMD_QUEUED);
1471 }
1472 
1473 
1474 #if !defined(ISP_DISABLE_2400_SUPPORT)
1475 static int
1476 isp2400_pci_dmasetup(struct ispsoftc *isp, struct scsipi_xfer *xs,
1477     ispreq_t *ispreq, uint32_t *nxtip, uint32_t optr)
1478 {
1479 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1480 	bus_dmamap_t dmap;
1481 	bus_dma_segment_t *dm_segs, *eseg;
1482 	uint32_t starti = isp->isp_reqidx, nxti = *nxtip;
1483 	ispreqt7_t *rq;
1484 	void *qep;
1485 	int nseg, datalen, error, seglim;
1486 
1487 	rq = (ispreqt7_t *) ispreq;
1488 	qep = ISP_QUEUE_ENTRY(isp->isp_rquest, starti);
1489 	dmap = pcs->pci_xfer_dmap[isp_handle_index(rq->req_handle)];
1490 	if (xs->datalen == 0) {
1491 		rq->req_seg_count = 1;
1492 		goto mbxsync;
1493 	}
1494 
1495 	error = bus_dmamap_load(isp->isp_dmatag, dmap, xs->data, xs->datalen,
1496 	    NULL, ((xs->xs_control & XS_CTL_NOSLEEP) ?
1497 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
1498 	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
1499 	if (error) {
1500 		isp_prt(isp, ISP_LOGWARN, "unable to load DMA (%d)", error);
1501 		XS_SETERR(xs, HBA_BOTCH);
1502 		if (error == EAGAIN || error == ENOMEM) {
1503 			return (CMD_EAGAIN);
1504 		} else {
1505 			return (CMD_COMPLETE);
1506 		}
1507 	}
1508 
1509 	nseg = dmap->dm_nsegs;
1510 	dm_segs = dmap->dm_segs;
1511 
1512 	isp_prt(isp, ISP_LOGDEBUG2, "%d byte %s %p in %d segs",
1513 	    xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)? "read to" :
1514 	    "write from", xs->data, nseg);
1515 
1516 	/*
1517 	 * We're passed an initial partially filled in entry that
1518 	 * has most fields filled in except for data transfer
1519 	 * related values.
1520 	 *
1521 	 * Our job is to fill in the initial request queue entry and
1522 	 * then to start allocating and filling in continuation entries
1523 	 * until we've covered the entire transfer.
1524 	 */
1525 	rq->req_header.rqs_entry_type = RQSTYPE_T7RQS;
1526 	rq->req_dl = xs->datalen;
1527 	datalen = xs->datalen;
1528 	if (xs->xs_control & XS_CTL_DATA_IN) {
1529 		rq->req_alen_datadir = 0x2;
1530 	} else {
1531 		rq->req_alen_datadir = 0x1;
1532 	}
1533 
1534 	eseg = dm_segs + nseg;
1535 
1536 	rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr);
1537 	rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr);
1538 	rq->req_dataseg.ds_count = dm_segs->ds_len;
1539 
1540 	datalen -= dm_segs->ds_len;
1541 
1542 	dm_segs++;
1543 	rq->req_seg_count++;
1544 
1545 	while (datalen > 0 && dm_segs != eseg) {
1546 		uint32_t onxti;
1547 		ispcontreq64_t local, *crq = &local, *cqe;
1548 
1549 		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1550 		onxti = nxti;
1551 		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
1552 		if (nxti == optr) {
1553 			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
1554 			return (CMD_EAGAIN);
1555 		}
1556 		rq->req_header.rqs_entry_count++;
1557 		MEMZERO((void *)crq, sizeof (*crq));
1558 		crq->req_header.rqs_entry_count = 1;
1559 		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
1560 
1561 		seglim = 0;
1562 		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
1563 			crq->req_dataseg[seglim].ds_base =
1564 			    DMA_LO32(dm_segs->ds_addr);
1565 			crq->req_dataseg[seglim].ds_basehi =
1566 			    DMA_HI32(dm_segs->ds_addr);
1567 			crq->req_dataseg[seglim].ds_count =
1568 			    dm_segs->ds_len;
1569 			rq->req_seg_count++;
1570 			dm_segs++;
1571 			seglim++;
1572 			datalen -= dm_segs->ds_len;
1573 		}
1574 		if (isp->isp_dblev & ISP_LOGDEBUG1) {
1575 			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
1576 		}
1577 		isp_put_cont64_req(isp, crq, cqe);
1578 		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
1579 	}
1580 	*nxtip = nxti;
1581 
1582 
1583 	bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
1584 	    (xs->xs_control & XS_CTL_DATA_IN) ?  BUS_DMASYNC_PREREAD :
1585 	    BUS_DMASYNC_PREWRITE);
1586 
1587 mbxsync:
1588 	isp_put_request_t7(isp, rq, qep);
1589 	*nxtip = nxti;
1590 	return (CMD_QUEUED);
1591 }
1592 #endif
1593 
1594 static int
1595 isp_pci_intr(void *arg)
1596 {
1597 	uint32_t isr;
1598 	uint16_t sema, mbox;
1599 	struct ispsoftc *isp = arg;
1600 
1601 	isp->isp_intcnt++;
1602 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
1603 		isp->isp_intbogus++;
1604 		return (0);
1605 	} else {
1606 		isp->isp_osinfo.onintstack = 1;
1607 		isp_intr(isp, isr, sema, mbox);
1608 		isp->isp_osinfo.onintstack = 0;
1609 		return (1);
1610 	}
1611 }
1612 
1613 static void
1614 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, uint32_t handle)
1615 {
1616 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1617 	bus_dmamap_t dmap = pcs->pci_xfer_dmap[isp_handle_index(handle)];
1618 	bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
1619 	    xs->xs_control & XS_CTL_DATA_IN ?
1620 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1621 	bus_dmamap_unload(isp->isp_dmatag, dmap);
1622 }
1623 
1624 static void
1625 isp_pci_reset0(ispsoftc_t *isp)
1626 {
1627 	ISP_DISABLE_INTS(isp);
1628 }
1629 
1630 static void
1631 isp_pci_reset1(ispsoftc_t *isp)
1632 {
1633 	if (!IS_24XX(isp)) {
1634 		/* Make sure the BIOS is disabled */
1635 		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1636 	}
1637 	/* and enable interrupts */
1638 	ISP_ENABLE_INTS(isp);
1639 }
1640 
1641 static void
1642 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
1643 {
1644 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1645 	if (msg)
1646 		printf("%s: %s\n", isp->isp_name, msg);
1647 	if (IS_SCSI(isp))
1648 		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
1649 	else
1650 		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
1651 	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
1652 	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
1653 	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
1654 
1655 
1656 	if (IS_SCSI(isp)) {
1657 		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
1658 		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
1659 			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
1660 			ISP_READ(isp, CDMA_FIFO_STS));
1661 		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
1662 			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
1663 			ISP_READ(isp, DDMA_FIFO_STS));
1664 		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
1665 			ISP_READ(isp, SXP_INTERRUPT),
1666 			ISP_READ(isp, SXP_GROSS_ERR),
1667 			ISP_READ(isp, SXP_PINS_CTRL));
1668 		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
1669 	}
1670 	printf("    mbox regs: %x %x %x %x %x\n",
1671 	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
1672 	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
1673 	    ISP_READ(isp, OUTMAILBOX4));
1674 	printf("    PCI Status Command/Status=%x\n",
1675 	    pci_conf_read(pcs->pci_pc, pcs->pci_tag, PCI_COMMAND_STATUS_REG));
1676 }
1677