xref: /netbsd-src/sys/dev/pci/isp_pci.c (revision dc306354b0b29af51801a7632f1e95265a68cd81)
1 /* $NetBSD: isp_pci.c,v 1.33 1999/01/10 03:41:47 mjacob Exp $ */
2 /* release_12_28_98_A+ */
3 /*
4  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
5  *
6  *---------------------------------------
7  * Copyright (c) 1997, 1998 by Matthew Jacob
8  * NASA/Ames Research Center
9  * All rights reserved.
10  *---------------------------------------
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice immediately at the beginning of the file, without modification,
17  *    this list of conditions, and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. The name of the author may not be used to endorse or promote products
22  *    derived from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
28  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  */
37 
38 #include <dev/ic/isp_netbsd.h>
39 #include <dev/microcode/isp/asm_pci.h>
40 
41 #include <dev/pci/pcireg.h>
42 #include <dev/pci/pcivar.h>
43 #include <dev/pci/pcidevs.h>
44 
/*
 * Forward declarations for the bus-specific backend routines that are
 * handed to the core driver through the ispmdvec dispatch tables below.
 */
static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int8_t *, u_int8_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));
static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));
55 
/*
 * Backend dispatch vector for the parallel-SCSI (ISP10x0) boards:
 * register accessors, DMA hooks, reset/dump routines, and the firmware
 * image to be downloaded.  Field order must match struct ispmdvec.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_RISC_CODE,		/* firmware image for the RISC processor */
	ISP_CODE_LENGTH,
	ISP_CODE_ORG,
	ISP_CODE_VERSION,
	/* BIU CONF1 bits (burst + 64-byte FIFO) — presumably applied at reset */
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
72 
/*
 * Backend dispatch vector for the Fibre Channel (ISP2100) boards.
 * Same hooks as mdvec but with the 2100 firmware image; the CONF1
 * field is unused on this chip.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP2100_RISC_CODE,
	ISP2100_CODE_LENGTH,
	ISP2100_CODE_ORG,
	ISP2100_CODE_VERSION,
	0,				/* Irrelevant to the 2100 */
	0
};
89 
/*
 * Full PCI ID words (product in the high half, vendor in the low half)
 * as they appear in pa->pa_id; matched in isp_pci_probe().
 */
#define	PCI_QLOGIC_ISP	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

/* Older pcidevs tables may lack the 2100 product ID; supply it. */
#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif
#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

/* Config-space offsets of the I/O-space and memory-space BARs. */
#define IO_MAP_REG	0x10
#define MEM_MAP_REG	0x14
101 
102 
/* Autoconfiguration match/attach entry points. */
static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));
105 
/*
 * Per-instance softc for the PCI attachment.  The generic ispsoftc
 * must be the first member so the core driver and the backend can
 * cast between the two freely.
 */
struct isp_pcisoftc {
	struct ispsoftc		pci_isp;	/* generic softc; must be first */
	pci_chipset_tag_t	pci_pc;		/* chipset tag (config space access) */
	pcitag_t		pci_tag;	/* our config space tag */
	bus_space_tag_t		pci_st;		/* register space tag */
	bus_space_handle_t	pci_sh;		/* register space handle */
	bus_dma_tag_t		pci_dmat;	/* DMA tag from the bus */
	bus_dmamap_t		pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t		pci_rquest_dmap;	/* request queue map */
	bus_dmamap_t		pci_result_dmap;	/* result queue map */
	bus_dmamap_t		pci_xfer_dmap[MAXISPREQUEST]; /* per-command data maps */
	void *			pci_ih;		/* interrupt handle cookie */
};
119 
/* Autoconfiguration glue: softc size plus match/attach entry points. */
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
123 
124 static int
125 isp_pci_probe(parent, match, aux)
126         struct device *parent;
127         struct cfdata *match;
128 	void *aux;
129 {
130         struct pci_attach_args *pa = aux;
131 
132 	if (pa->pa_id == PCI_QLOGIC_ISP ||
133 	    pa->pa_id == PCI_QLOGIC_ISP2100) {
134 		return (1);
135 	} else {
136 		return (0);
137 	}
138 }
139 
140 
141 static void
142 isp_pci_attach(parent, self, aux)
143         struct device *parent, *self;
144         void *aux;
145 {
146 #ifdef	DEBUG
147 	static char oneshot = 1;
148 #endif
149 	struct pci_attach_args *pa = aux;
150 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
151 	struct ispsoftc *isp = &pcs->pci_isp;
152 	bus_space_tag_t st, iot, memt;
153 	bus_space_handle_t sh, ioh, memh;
154 	pci_intr_handle_t ih;
155 	const char *intrstr;
156 	int ioh_valid, memh_valid, i;
157 	ISP_LOCKVAL_DECL;
158 
159 	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
160 	    PCI_MAPREG_TYPE_IO, 0,
161 	    &iot, &ioh, NULL, NULL) == 0);
162 	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
163 	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
164 	    &memt, &memh, NULL, NULL) == 0);
165 
166 	if (memh_valid) {
167 		st = memt;
168 		sh = memh;
169 	} else if (ioh_valid) {
170 		st = iot;
171 		sh = ioh;
172 	} else {
173 		printf(": unable to map device registers\n");
174 		return;
175 	}
176 	printf("\n");
177 
178 	pcs->pci_st = st;
179 	pcs->pci_sh = sh;
180 	pcs->pci_dmat = pa->pa_dmat;
181 	pcs->pci_pc = pa->pa_pc;
182 	pcs->pci_tag = pa->pa_tag;
183 	if (pa->pa_id == PCI_QLOGIC_ISP) {
184 		isp->isp_mdvec = &mdvec;
185 		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
186 		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
187 		if (isp->isp_param == NULL) {
188 			printf("%s: couldn't allocate sdparam table\n",
189 			       isp->isp_name);
190 			return;
191 		}
192 		bzero(isp->isp_param, sizeof (sdparam));
193 	} else if (pa->pa_id == PCI_QLOGIC_ISP2100) {
194 		u_int32_t data;
195 		isp->isp_mdvec = &mdvec_2100;
196 		if (ioh_valid == 0) {
197 			printf("%s: warning, ISP2100 cannot use I/O Space"
198 				" Mappings\n", isp->isp_name);
199 		} else {
200 			pcs->pci_st = iot;
201 			pcs->pci_sh = ioh;
202 		}
203 
204 		isp->isp_type = ISP_HA_FC_2100;
205 		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
206 		if (isp->isp_param == NULL) {
207 			printf("%s: couldn't allocate fcparam table\n",
208 			       isp->isp_name);
209 			return;
210 		}
211 		bzero(isp->isp_param, sizeof (fcparam));
212 
213 		data = pci_conf_read(pa->pa_pc, pa->pa_tag,
214 			PCI_COMMAND_STATUS_REG);
215 		data |= PCI_COMMAND_MASTER_ENABLE |
216 			PCI_COMMAND_INVALIDATE_ENABLE;
217 		pci_conf_write(pa->pa_pc, pa->pa_tag,
218 			PCI_COMMAND_STATUS_REG, data);
219 	} else {
220 		return;
221 	}
222 #ifdef DEBUG
223 	if (oneshot) {
224 		oneshot = 0;
225 		printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
226 		    "%d.%d Core Version %d.%d\n",
227 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
228 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
229 	}
230 #endif
231 	ISP_LOCK(isp);
232 	isp_reset(isp);
233 	if (isp->isp_state != ISP_RESETSTATE) {
234 		ISP_UNLOCK(isp);
235 		free(isp->isp_param, M_DEVBUF);
236 		return;
237 	}
238 	isp_init(isp);
239 	if (isp->isp_state != ISP_INITSTATE) {
240 		isp_uninit(isp);
241 		ISP_UNLOCK(isp);
242 		free(isp->isp_param, M_DEVBUF);
243 		return;
244 	}
245 
246 	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
247 			 pa->pa_intrline, &ih)) {
248 		printf("%s: couldn't map interrupt\n", isp->isp_name);
249 		isp_uninit(isp);
250 		ISP_UNLOCK(isp);
251 		free(isp->isp_param, M_DEVBUF);
252 		return;
253 	}
254 
255 	intrstr = pci_intr_string(pa->pa_pc, ih);
256 	if (intrstr == NULL)
257 		intrstr = "<I dunno>";
258 	pcs->pci_ih =
259 	  pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_pci_intr, isp);
260 	if (pcs->pci_ih == NULL) {
261 		printf("%s: couldn't establish interrupt at %s\n",
262 			isp->isp_name, intrstr);
263 		isp_uninit(isp);
264 		ISP_UNLOCK(isp);
265 		free(isp->isp_param, M_DEVBUF);
266 		return;
267 	}
268 	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);
269 
270 	/*
271 	 * Create the DMA maps for the data transfers.
272 	 */
273 	for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
274 		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
275 		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
276 		    &pcs->pci_xfer_dmap[i])) {
277 			printf("%s: can't create dma maps\n",
278 			    isp->isp_name);
279 			isp_uninit(isp);
280 			ISP_UNLOCK(isp);
281 			return;
282 		}
283 	}
284 	/*
285 	 * Do Generic attach now.
286 	 */
287 	isp_attach(isp);
288 	if (isp->isp_state != ISP_RUNSTATE) {
289 		isp_uninit(isp);
290 		free(isp->isp_param, M_DEVBUF);
291 	}
292 	ISP_UNLOCK(isp);
293 }
294 
295 #define  PCI_BIU_REGS_OFF		BIU_REGS_OFF
296 
297 static u_int16_t
298 isp_pci_rd_reg(isp, regoff)
299 	struct ispsoftc *isp;
300 	int regoff;
301 {
302 	u_int16_t rv;
303 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
304 	int offset, oldsxp = 0;
305 
306 	if ((regoff & BIU_BLOCK) != 0) {
307 		offset = PCI_BIU_REGS_OFF;
308 	} else if ((regoff & MBOX_BLOCK) != 0) {
309 		if (isp->isp_type & ISP_HA_SCSI)
310 			offset = PCI_MBOX_REGS_OFF;
311 		else
312 			offset = PCI_MBOX_REGS2100_OFF;
313 	} else if ((regoff & SXP_BLOCK) != 0) {
314 		offset = PCI_SXP_REGS_OFF;
315 		/*
316 		 * We will assume that someone has paused the RISC processor.
317 		 */
318 		oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
319 		isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
320 	} else {
321 		offset = PCI_RISC_REGS_OFF;
322 	}
323 	regoff &= 0xff;
324 	offset += regoff;
325 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
326 	if ((regoff & SXP_BLOCK) != 0) {
327 		isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
328 	}
329 	return (rv);
330 }
331 
332 static void
333 isp_pci_wr_reg(isp, regoff, val)
334 	struct ispsoftc *isp;
335 	int regoff;
336 	u_int16_t val;
337 {
338 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
339 	int offset, oldsxp = 0;
340 	if ((regoff & BIU_BLOCK) != 0) {
341 		offset = PCI_BIU_REGS_OFF;
342 	} else if ((regoff & MBOX_BLOCK) != 0) {
343 		if (isp->isp_type & ISP_HA_SCSI)
344 			offset = PCI_MBOX_REGS_OFF;
345 		else
346 			offset = PCI_MBOX_REGS2100_OFF;
347 	} else if ((regoff & SXP_BLOCK) != 0) {
348 		offset = PCI_SXP_REGS_OFF;
349 		/*
350 		 * We will assume that someone has paused the RISC processor.
351 		 */
352 		oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
353 		isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
354 	} else {
355 		offset = PCI_RISC_REGS_OFF;
356 	}
357 	regoff &= 0xff;
358 	offset += regoff;
359 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
360 	if ((regoff & SXP_BLOCK) != 0) {
361 		isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
362 	}
363 }
364 
/*
 * Allocate, map, and load DMA-able memory for the request and result
 * queues (and, on Fibre Channel adapters, the scratch area), recording
 * the resulting bus addresses where the core driver expects them.
 *
 * Returns 0 on success, 1 on failure.
 *
 * NOTE(review): on a partial failure the memory/maps allocated by the
 * earlier steps are not released before returning 1 — presumably the
 * caller treats this as fatal to the attachment; verify.
 */
static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;
	bus_size_t len;
	fcparam *fcp;
	int rseg;

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	      BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_rquest_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
	      (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
		return (1);

	/* Bus address from which the chip fetches request entries. */
	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	      BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_result_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
	      (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	/* Bus address into which the chip deposits result entries. */
	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	/* SCSI adapters need no scratch area; we are done. */
	if (isp->isp_type & ISP_HA_SCSI) {
		return (0);
	}

	/* FC adapters additionally need the fcparam scratch area. */
	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
		BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_scratch_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_scratch_dmap,
	      (caddr_t)fcp->isp_scratch, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
	return (0);
}
427 
/*
 * Set up DMA for one command: load the transfer buffer into this
 * command's DMA map, fill the request entry's data segment list
 * (spilling into continuation entries when the map has more segments
 * than fit in one request entry), then sync the data map and the
 * request queue ahead of submission.
 *
 * Returns CMD_QUEUED on success, or CMD_COMPLETE with XS_SETERR set to
 * HBA_BOTCH when the buffer cannot be mapped or the request queue
 * overflows while adding continuation entries.
 */
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;		/* transfer being started */
	ispreq_t *rq;			/* request entry to fill in */
	u_int8_t *iptrp;		/* in/out: request queue input pointer */
	u_int8_t optr;			/* request queue output pointer */
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	/* req_handle is 1-based; the map array is 0-based. */
	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
	ispcontreq_t *crq;
	int segcnt, seg, error, ovseg, seglim, drq;

	/* No data phase: nothing to map, but one segment is reported. */
	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	if (rq->req_handle > RQUEST_QUEUE_LEN || rq->req_handle < 1) {
		panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
		    isp->isp_name, rq->req_handle);
		/* NOTREACHED */
	}

	if (xs->flags & SCSI_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	/* FC uses type-2 requests: total byte count + larger segment list. */
	if (isp->isp_type & ISP_HA_FC) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}
	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
	    NULL, xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	segcnt = dmap->dm_nsegs;

	/* Copy as many segments as fit into the request entry itself. */
	for (seg = 0, rq->req_seg_count = 0;
	     seg < segcnt && rq->req_seg_count < seglim;
	     seg++, rq->req_seg_count++) {
		if (isp->isp_type & ISP_HA_FC) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	}

	if (seg == segcnt)
		goto dmasync;

	/*
	 * Overflow segments go into continuation entries appended to
	 * the request queue immediately behind the main entry.
	 */
	do {
		crq = (ispcontreq_t *)
			ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
		if (*iptrp == optr) {
			/* Queue full: unmap and fail the command. */
			printf("%s: Request Queue Overflow++\n",
			       isp->isp_name);
			bus_dmamap_unload(pci->pci_dmat, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_COMPLETE);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
		    rq->req_seg_count++, seg++, ovseg++) {
			crq->req_dataseg[ovseg].ds_count =
			    dmap->dm_segs[seg].ds_len;
			crq->req_dataseg[ovseg].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	} while (seg < segcnt);

dmasync:
	/* Make the data buffer visible to the device. */
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ?  BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

mbxsync:
	/* Flush the request queue entries out to memory for the chip. */
	bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
	    pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (CMD_QUEUED);
}
531 
532 static int
533 isp_pci_intr(arg)
534 	void *arg;
535 {
536 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
537 	bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
538 	    pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
539 	return (isp_intr(arg));
540 }
541 
/*
 * Undo the DMA work done by isp_pci_dmasetup() when a transfer
 * completes: sync the buffer for the CPU and unload the map.
 *
 * NOTE(review): this indexes pci_xfer_dmap[handle] directly, while
 * isp_pci_dmasetup() uses pci_xfer_dmap[rq->req_handle - 1]; verify
 * that the core driver passes a zero-based handle here, or these two
 * routines operate on different maps.
 */
static void
isp_pci_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];

	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    xs->flags & SCSI_DATA_IN ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(pci->pci_dmat, dmap);
}
556 
/*
 * Bus-specific post-reset fixup hook, called by the core after a chip
 * reset.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
564 
565 static void
566 isp_pci_dumpregs(isp)
567 	struct ispsoftc *isp;
568 {
569 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
570 	printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
571 	    pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
572 }
573