xref: /netbsd-src/sys/dev/pci/isp_pci.c (revision 3b435a73967be44dfb4a27315acd72bfacde430c)
1 /* $NetBSD: isp_pci.c,v 1.45 1999/10/17 02:40:26 mjacob Exp $ */
2 /*
3  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
4  * Matthew Jacob (mjacob@nas.nasa.gov)
5  */
6 /*
7  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. The name of the author may not be used to endorse or promote products
19  *    derived from this software without specific prior written permission
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <dev/ic/isp_netbsd.h>
34 #include <dev/microcode/isp/asm_pci.h>
35 
36 #include <dev/pci/pcireg.h>
37 #include <dev/pci/pcivar.h>
38 #include <dev/pci/pcidevs.h>
39 
/*
 * Local function prototypes (old-style __P() wrappers for pre-ANSI
 * compiler compatibility).
 */
static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
#ifndef	ISP_DISABLE_1080_SUPPORT
/* ISP1080/1240 register accessors that also route DMA block accesses */
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
#endif
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int8_t *, u_int8_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));
static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));
54 
#ifndef	ISP_DISABLE_1020_SUPPORT
/*
 * Bus method vector and firmware image for plain SCSI (ISP1020-class)
 * boards; selected in isp_pci_attach() for PCI_QLOGIC_ISP devices.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_RISC_CODE,
	ISP_CODE_LENGTH,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,	/* BIU CONF1 setup bits */
	0
};
#endif
73 
#ifndef	ISP_DISABLE_1080_SUPPORT
/*
 * Bus method vector and firmware image for ISP1080/ISP1240 boards;
 * uses the 1080-specific register accessors that handle the moved
 * DMA register block.
 */
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP1080_RISC_CODE,
	ISP1080_CODE_LENGTH,
	ISP1080_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,	/* BIU CONF1 setup bits */
	0
};
#endif
92 
#ifndef	ISP_DISABLE_2100_SUPPORT
/*
 * Bus method vector and firmware image for ISP2100 Fibre Channel
 * boards; no BIU CONF1 setup bits are applied for this chip.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP2100_RISC_CODE,
	ISP2100_CODE_LENGTH,
	ISP2100_CODE_ORG,
	0,
	0,
	0
};
#endif
111 
#ifndef	ISP_DISABLE_2200_SUPPORT
/*
 * Bus method vector and firmware image for ISP2200 Fibre Channel
 * boards; identical shape to the 2100 vector but with 2200 firmware.
 */
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP2200_RISC_CODE,
	ISP2200_CODE_LENGTH,
	ISP2200_CODE_ORG,
	0,
	0,
	0
};
#endif
130 
/*
 * PCI vendor/product IDs for the Qlogic ISP family.  Fall back to
 * local definitions when pcidevs.h does not supply them.
 */
#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

/*
 * Composite (product << 16 | vendor) values in the same format as
 * pa->pa_id, for direct comparison in probe/attach.
 */
#define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

/* PCI configuration space offsets: I/O BAR, memory BAR, expansion ROM */
#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14
#define	PCIR_ROMADDR	0x30

/* Default latency timer and cache line size programmed at attach time */
#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10
175 
176 
static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));

/*
 * PCI-specific softc.  The generic ispsoftc must be the first member
 * so that casts between struct ispsoftc * and struct isp_pcisoftc *
 * (used throughout this file) are valid.
 */
struct isp_pcisoftc {
	struct ispsoftc		pci_isp;	/* generic softc; must be first */
	pci_chipset_tag_t	pci_pc;		/* PCI chipset tag */
	pcitag_t		pci_tag;	/* PCI device tag */
	bus_space_tag_t		pci_st;		/* register space tag */
	bus_space_handle_t	pci_sh;		/* register space handle */
	bus_dma_tag_t		pci_dmat;	/* DMA tag from attach args */
	bus_dmamap_t		pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t		pci_rquest_dmap;	/* request queue map */
	bus_dmamap_t		pci_result_dmap;	/* result queue map */
	bus_dmamap_t		*pci_xfer_dmap;	/* per-command data maps */
	void *			pci_ih;		/* interrupt handle */
	int16_t			pci_poff[_NREG_BLKS];	/* register block offsets */
};

/* autoconfiguration glue */
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
198 
199 static int
200 isp_pci_probe(parent, match, aux)
201 	struct device *parent;
202 	struct cfdata *match;
203 	void *aux;
204 {
205 	struct pci_attach_args *pa = aux;
206 	switch (pa->pa_id) {
207 #ifndef	ISP_DISABLE_1020_SUPPORT
208 	case PCI_QLOGIC_ISP:
209 		return (1);
210 #endif
211 #ifndef	ISP_DISABLE_1080_SUPPORT
212 	case PCI_QLOGIC_ISP1080:
213 	case PCI_QLOGIC_ISP1240:
214 		return (1);
215 #endif
216 #ifndef	ISP_DISABLE_2100_SUPPORT
217 	case PCI_QLOGIC_ISP2100:
218 		return (1);
219 #endif
220 #ifndef	ISP_DISABLE_2200_SUPPORT
221 	case PCI_QLOGIC_ISP2200:
222 		return (1);
223 #endif
224 	default:
225 		return (0);
226 	}
227 }
228 
229 
/*
 * Attach routine: map chip registers, pick the method vector and
 * parameter block matching the chip type, sanitize the PCI
 * configuration registers, hook up the interrupt, then reset and
 * initialize the chip and hand off to the generic isp_attach().
 */
static void
isp_pci_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#ifdef	DEBUG
	static char oneshot = 1;	/* print version banner only once */
#endif
	static char *nomem = "%s: no mem for sdparam table\n";
	u_int32_t data, linesz = PCI_DFLT_LNSZ;
	struct pci_attach_args *pa = aux;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
	struct ispsoftc *isp = &pcs->pci_isp;
	bus_space_tag_t st, iot, memt;
	bus_space_handle_t sh, ioh, memh;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ioh_valid, memh_valid, i;
	long foo;
	ISP_LOCKVAL_DECL;

	/* Try to map both the I/O and 32-bit memory register windows. */
	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	/* Prefer memory space when available; fall back to I/O space. */
	if (memh_valid) {
		st = memt;
		sh = memh;
	} else if (ioh_valid) {
		st = iot;
		sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}
	printf("\n");

	pcs->pci_st = st;
	pcs->pci_sh = sh;
	pcs->pci_dmat = pa->pa_dmat;
	pcs->pci_pc = pa->pa_pc;
	pcs->pci_tag = pa->pa_tag;
	/* Default register block offsets; chip-specific cases adjust below. */
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

#ifndef	ISP_DISABLE_1020_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP) {
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
	}
#endif
#ifndef	ISP_DISABLE_1080_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP1080) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
		/* the 1080 has its DMA registers at a different offset */
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pa->pa_id == PCI_QLOGIC_ISP1240) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_12X0;
		/* two sdparam blocks -- presumably one per channel; verify */
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef	ISP_DISABLE_2100_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2100) {
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		/* FC chips have the mailbox registers at a 2100 offset */
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
		if ((data & 0xff) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX; boards.
			 */
			linesz = 1;
		}
	}
#endif
#ifndef	ISP_DISABLE_2200_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2200) {
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	}
#endif

	/*
	 * Make sure that command register set sanely.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;

	/*
	 * Not so sure about these- but I think it's important that they get
	 * enabled......
	 */
	data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);

	/*
	 * Make sure that the latency timer, cache line size,
	 * and ROM is disabled.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
	data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
	data |= (PCI_DFLT_LTNCY	<< PCI_LATTIMER_SHIFT);
	data |= (linesz << PCI_CACHELINE_SHIFT);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);

	/* Disable the expansion ROM (clear the enable bit). */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
	data &= ~1;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);

#ifdef DEBUG
	if (oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	/* Map and establish the interrupt at IPL_BIO. */
	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", isp->isp_name);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr == NULL)
		intrstr = "<I dunno>";
	pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    isp_pci_intr, isp);
	if (pcs->pci_ih == NULL) {
		printf("%s: couldn't establish interrupt at %s\n",
			isp->isp_name, intrstr);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);

	if (IS_FC(isp)) {
		/*
		 * This isn't very random, but it's the best we can do for
		 * the real edge case of cards that don't have WWNs.
		 */
		foo = (long) isp;
		foo >>= 4;
		foo &= 0x7;
		while (version[foo])
			isp->isp_osinfo.seed += (int) version[foo++];
		isp->isp_osinfo.seed <<= 8;
		isp->isp_osinfo.seed += (isp->isp_osinfo._dev.dv_unit + 1);
	}

	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}



	/*
	 * Create the DMA maps for the data transfers.
	 *
	 * NOTE(review): this indexes pcs->pci_xfer_dmap, which is
	 * allocated by isp_pci_mbxdma() -- presumably reached through
	 * isp_init() above; verify that ordering holds.
	 */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &pcs->pci_xfer_dmap[i])) {
			printf("%s: can't create dma maps\n",
			    isp->isp_name);
			/* NOTE(review): isp_param is leaked on this path */
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			return;
		}
	}
	/*
	 * Do Generic attach now.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		free(isp->isp_param, M_DEVBUF);
	}
	ISP_UNLOCK(isp);
}
471 
472 static u_int16_t
473 isp_pci_rd_reg(isp, regoff)
474 	struct ispsoftc *isp;
475 	int regoff;
476 {
477 	u_int16_t rv;
478 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
479 	int offset, oldconf = 0;
480 
481 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
482 		/*
483 		 * We will assume that someone has paused the RISC processor.
484 		 */
485 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
486 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
487 	}
488 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
489 	offset += (regoff & 0xff);
490 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
491 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
492 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
493 	}
494 	return (rv);
495 }
496 
497 static void
498 isp_pci_wr_reg(isp, regoff, val)
499 	struct ispsoftc *isp;
500 	int regoff;
501 	u_int16_t val;
502 {
503 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
504 	int offset, oldconf = 0;
505 
506 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
507 		/*
508 		 * We will assume that someone has paused the RISC processor.
509 		 */
510 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
511 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
512 	}
513 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
514 	offset += (regoff & 0xff);
515 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
516 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
517 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
518 	}
519 }
520 
521 #ifndef	ISP_DISABLE_1080_SUPPORT
522 static u_int16_t
523 isp_pci_rd_reg_1080(isp, regoff)
524 	struct ispsoftc *isp;
525 	int regoff;
526 {
527 	u_int16_t rv;
528 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
529 	int offset, oc = 0;
530 
531 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
532 		/*
533 		 * We will assume that someone has paused the RISC processor.
534 		 */
535 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
536 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
537 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
538 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
539 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
540 	}
541 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
542 	offset += (regoff & 0xff);
543 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
544 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
545 	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
546 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
547 	}
548 	return (rv);
549 }
550 
551 static void
552 isp_pci_wr_reg_1080(isp, regoff, val)
553 	struct ispsoftc *isp;
554 	int regoff;
555 	u_int16_t val;
556 {
557 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
558 	int offset, oc = 0;
559 
560 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
561 		/*
562 		 * We will assume that someone has paused the RISC processor.
563 		 */
564 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
565 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
566 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
567 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
568 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
569 	}
570 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
571 	offset += (regoff & 0xff);
572 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
573 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
574 	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
575 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
576 	}
577 }
578 #endif
579 
580 static int
581 isp_pci_mbxdma(isp)
582 	struct ispsoftc *isp;
583 {
584 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
585 	bus_dma_segment_t seg;
586 	bus_size_t len;
587 	fcparam *fcp;
588 	int rseg;
589 
590 	if (isp->isp_rquest_dma)	/* been here before? */
591 		return (0);
592 
593 	len = isp->isp_maxcmds * sizeof (ISP_SCSI_XFER_T);
594 	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
595 	if (isp->isp_xflist == NULL) {
596 		printf("%s: cannot malloc xflist array\n", isp->isp_name);
597 		return (1);
598 	}
599 	bzero(isp->isp_xflist, len);
600 	len = isp->isp_maxcmds * sizeof (bus_dmamap_t);
601 	pci->pci_xfer_dmap = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
602 	if (pci->pci_xfer_dmap == NULL) {
603 		printf("%s: cannot malloc xflist array\n", isp->isp_name);
604 		return (1);
605 	}
606 
607 	/*
608 	 * Allocate and map the request queue.
609 	 */
610 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
611 	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
612 	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
613 	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
614 		return (1);
615 	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
616 	    &pci->pci_rquest_dmap) || bus_dmamap_load(pci->pci_dmat,
617 	    pci->pci_rquest_dmap, (caddr_t)isp->isp_rquest, len, NULL,
618 	    BUS_DMA_NOWAIT))
619 		return (1);
620 
621 	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;
622 
623 	/*
624 	 * Allocate and map the result queue.
625 	 */
626 	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
627 	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
628 	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
629 	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
630 		return (1);
631 	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
632 	    &pci->pci_result_dmap) || bus_dmamap_load(pci->pci_dmat,
633 	    pci->pci_result_dmap, (caddr_t)isp->isp_result, len, NULL,
634 	    BUS_DMA_NOWAIT))
635 		return (1);
636 	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;
637 
638 	if (IS_SCSI(isp)) {
639 		return (0);
640 	}
641 
642 	fcp = isp->isp_param;
643 	len = ISP2100_SCRLEN;
644 	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
645 	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
646 	    (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
647 		return (1);
648 	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
649 	    &pci->pci_scratch_dmap) || bus_dmamap_load(pci->pci_dmat,
650 	    pci->pci_scratch_dmap, (caddr_t)fcp->isp_scratch, len, NULL,
651 	    BUS_DMA_NOWAIT))
652 		return (1);
653 	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
654 	return (0);
655 }
656 
657 static int
658 isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
659 	struct ispsoftc *isp;
660 	struct scsipi_xfer *xs;
661 	ispreq_t *rq;
662 	u_int8_t *iptrp;
663 	u_int8_t optr;
664 {
665 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
666 	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
667 	ispcontreq_t *crq;
668 	int segcnt, seg, error, ovseg, seglim, drq;
669 
670 	if (xs->datalen == 0) {
671 		rq->req_seg_count = 1;
672 		goto mbxsync;
673 	}
674 	assert(rq->req_handle != 0 && rq->req_handle <= isp->isp_maxcmds);
675 	if (xs->xs_control & XS_CTL_DATA_IN) {
676 		drq = REQFLAG_DATA_IN;
677 	} else {
678 		drq = REQFLAG_DATA_OUT;
679 	}
680 
681 	if (IS_FC(isp)) {
682 		seglim = ISP_RQDSEG_T2;
683 		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
684 		((ispreqt2_t *)rq)->req_flags |= drq;
685 	} else {
686 		seglim = ISP_RQDSEG;
687 		rq->req_flags |= drq;
688 	}
689 	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
690 	    NULL, xs->xs_control & XS_CTL_NOSLEEP ?
691 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
692 	if (error) {
693 		XS_SETERR(xs, HBA_BOTCH);
694 		return (CMD_COMPLETE);
695 	}
696 
697 	segcnt = dmap->dm_nsegs;
698 
699 	for (seg = 0, rq->req_seg_count = 0;
700 	    seg < segcnt && rq->req_seg_count < seglim;
701 	    seg++, rq->req_seg_count++) {
702 		if (IS_FC(isp)) {
703 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
704 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
705 			    dmap->dm_segs[seg].ds_len;
706 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
707 			    dmap->dm_segs[seg].ds_addr;
708 		} else {
709 			rq->req_dataseg[rq->req_seg_count].ds_count =
710 			    dmap->dm_segs[seg].ds_len;
711 			rq->req_dataseg[rq->req_seg_count].ds_base =
712 			    dmap->dm_segs[seg].ds_addr;
713 		}
714 	}
715 
716 	if (seg == segcnt)
717 		goto dmasync;
718 
719 	do {
720 		crq = (ispcontreq_t *)
721 			ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
722 		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
723 		if (*iptrp == optr) {
724 			printf("%s: Request Queue Overflow++\n", isp->isp_name);
725 			bus_dmamap_unload(pci->pci_dmat, dmap);
726 			XS_SETERR(xs, HBA_BOTCH);
727 			return (CMD_COMPLETE);
728 		}
729 		rq->req_header.rqs_entry_count++;
730 		bzero((void *)crq, sizeof (*crq));
731 		crq->req_header.rqs_entry_count = 1;
732 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
733 
734 		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
735 		    rq->req_seg_count++, seg++, ovseg++) {
736 			crq->req_dataseg[ovseg].ds_count =
737 			    dmap->dm_segs[seg].ds_len;
738 			crq->req_dataseg[ovseg].ds_base =
739 			    dmap->dm_segs[seg].ds_addr;
740 		}
741 	} while (seg < segcnt);
742 
743 dmasync:
744 	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
745 	    (xs->xs_control & XS_CTL_DATA_IN) ?  BUS_DMASYNC_PREREAD :
746 	    BUS_DMASYNC_PREWRITE);
747 
748 mbxsync:
749 	ISP_SWIZZLE_REQUEST(isp, rq);
750 	bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
751 	    pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
752 	return (CMD_QUEUED);
753 }
754 
755 static int
756 isp_pci_intr(arg)
757 	void *arg;
758 {
759 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
760 	bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
761 	    pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
762 	return (isp_intr(arg));
763 }
764 
765 static void
766 isp_pci_dmateardown(isp, xs, handle)
767 	struct ispsoftc *isp;
768 	struct scsipi_xfer *xs;
769 	u_int32_t handle;
770 {
771 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
772 	bus_dmamap_t dmap;
773 	assert(handle != 0 && handle <= isp->isp_maxcmds);
774 	dmap = pci->pci_xfer_dmap[handle-1];
775 	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
776 	    xs->xs_control & XS_CTL_DATA_IN ?
777 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
778 	bus_dmamap_unload(pci->pci_dmat, dmap);
779 }
780 
/*
 * Chip-specific post-reset fixup invoked through the method vector
 * after the core reset sequence.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
788 
/*
 * Debug helper: print the PCI command/status configuration register
 * for this device.
 */
static void
isp_pci_dumpregs(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
	    pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
}
797