xref: /netbsd-src/sys/dev/sbus/isp_sbus.c (revision 3b01aba77a7a698587faaae455bbfe740923c1f5)
1 /* $NetBSD: isp_sbus.c,v 1.44 2001/07/06 16:09:38 mjacob Exp $ */
2 /*
3  * This driver, which is contained in NetBSD in the files:
4  *
5  *	sys/dev/ic/isp.c
6  *	sys/dev/ic/isp_inline.h
7  *	sys/dev/ic/isp_netbsd.c
8  *	sys/dev/ic/isp_netbsd.h
9  *	sys/dev/ic/isp_target.c
10  *	sys/dev/ic/isp_target.h
11  *	sys/dev/ic/isp_tpublic.h
12  *	sys/dev/ic/ispmbox.h
13  *	sys/dev/ic/ispreg.h
14  *	sys/dev/ic/ispvar.h
15  *	sys/microcode/isp/asm_sbus.h
16  *	sys/microcode/isp/asm_1040.h
17  *	sys/microcode/isp/asm_1080.h
18  *	sys/microcode/isp/asm_12160.h
19  *	sys/microcode/isp/asm_2100.h
20  *	sys/microcode/isp/asm_2200.h
21  *	sys/pci/isp_pci.c
22  *	sys/sbus/isp_sbus.c
23  *
24  * Is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
25  * This driver is also shared source with the FreeBSD, OpenBSD, Linux,
26  * and Solaris versions. This tends to be an interesting maintenance problem.
27  *
28  * Please coordinate with Matthew Jacob on changes you wish to make here.
29  */
30 /*
31  * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
32  *
33  * Copyright (c) 1997, 2001 by Matthew Jacob
34  * NASA AMES Research Center
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice immediately at the beginning of the file, without modification,
42  *    this list of conditions, and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  *
47  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
48  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
51  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57  * SUCH DAMAGE.
58  *
59  */
60 
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/device.h>
64 #include <sys/kernel.h>
65 #include <sys/malloc.h>
66 #include <sys/queue.h>
67 
68 #include <machine/bus.h>
69 #include <machine/intr.h>
70 #include <machine/autoconf.h>
71 
72 #include <dev/ic/isp_netbsd.h>
73 #include <dev/microcode/isp/asm_sbus.h>
74 #include <dev/sbus/sbusvar.h>
75 #include <sys/reboot.h>
76 
77 static int isp_sbus_intr(void *);
78 static u_int16_t isp_sbus_rd_reg(struct ispsoftc *, int);
79 static void isp_sbus_wr_reg(struct ispsoftc *, int, u_int16_t);
80 static int isp_sbus_mbxdma(struct ispsoftc *);
81 static int isp_sbus_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *,
82     u_int16_t);
83 static void isp_sbus_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);
84 
85 #ifndef	ISP_1000_RISC_CODE
86 #define	ISP_1000_RISC_CODE	NULL
87 #endif
88 
89 static struct ispmdvec mdvec = {
90 	isp_sbus_rd_reg,
91 	isp_sbus_wr_reg,
92 	isp_sbus_mbxdma,
93 	isp_sbus_dmasetup,
94 	isp_sbus_dmateardown,
95 	NULL,
96 	NULL,
97 	NULL,
98 	ISP_1000_RISC_CODE
99 };
100 
101 struct isp_sbussoftc {
102 	struct ispsoftc	sbus_isp;	/* generic ISP softc; must be first */
103 	struct sbusdev	sbus_sd;	/* SBus device glue */
104 	sdparam		sbus_dev;	/* SCSI channel parameters (isp_param) */
105 	bus_space_tag_t	sbus_bustag;	/* register space tag */
106 	bus_dma_tag_t	sbus_dmatag;	/* DMA tag from the parent SBus */
107 	bus_space_handle_t sbus_reg;	/* mapped register space */
108 	int		sbus_node;	/* OpenPROM node */
109 	int		sbus_pri;	/* interrupt priority */
110 	struct ispmdvec	sbus_mdvec;	/* per-instance copy of mdvec */
111 	bus_dmamap_t	*sbus_dmamap;	/* per-command DMA maps */
112 	bus_dmamap_t	sbus_rquest_dmamap;	/* request queue map */
113 	bus_dmamap_t	sbus_result_dmamap;	/* result queue map */
114 	int16_t		sbus_poff[_NREG_BLKS];	/* register block offsets */
115 };
116 
117 
118 static int isp_match(struct device *, struct cfdata *, void *);
119 static void isp_sbus_attach(struct device *, struct device *, void *);
120 struct cfattach isp_sbus_ca = {
121 	sizeof (struct isp_sbussoftc), isp_match, isp_sbus_attach
122 };
123 
124 static int
125 isp_match(struct device *parent, struct cfdata *cf, void *aux)
126 {
127 	int rv;
128 #ifdef DEBUG
129 	static int oneshot = 1;
130 #endif
131 	struct sbus_attach_args *sa = aux;
132 
133 	rv = (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0 ||
134 		strcmp("PTI,ptisp", sa->sa_name) == 0 ||
135 		strcmp("ptisp", sa->sa_name) == 0 ||
136 		strcmp("SUNW,isp", sa->sa_name) == 0 ||
137 		strcmp("QLGC,isp", sa->sa_name) == 0);
138 #ifdef DEBUG
139 	if (rv && oneshot) {
140 		oneshot = 0;
141 		printf("Qlogic ISP Driver, NetBSD (sbus) Platform Version "
142 		    "%d.%d Core Version %d.%d\n",
143 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
144 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
145 	}
146 #endif
147 	return (rv);
148 }
149 
150 
151 static void
152 isp_sbus_attach(struct device *parent, struct device *self, void *aux)
153 {
154 	int freq, ispburst, sbusburst;
155 	struct sbus_attach_args *sa = aux;
156 	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self;
157 	struct ispsoftc *isp = &sbc->sbus_isp;
158 
159 	printf(" for %s\n", sa->sa_name);
160 
161 	sbc->sbus_bustag = sa->sa_bustag;
162 	sbc->sbus_dmatag = sa->sa_dmatag;
163 	if (sa->sa_nintr != 0)
164 		sbc->sbus_pri = sa->sa_pri;
165 	sbc->sbus_mdvec = mdvec;
166 
167 	if (sa->sa_npromvaddrs != 0) {
168 		sbc->sbus_reg = (bus_space_handle_t)sa->sa_promvaddrs[0];
169 	} else {
170 		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
171 				 sa->sa_size, BUS_SPACE_MAP_LINEAR, 0,
172 				 &sbc->sbus_reg) != 0) {
173 			printf("%s: cannot map registers\n", self->dv_xname);
174 			return;
175 		}
176 	}
177 	sbc->sbus_node = sa->sa_node;
178 
179 	freq = getpropint(sa->sa_node, "clock-frequency", 0);
180 	if (freq) {
181 		/*
182 		 * Convert from Hz to MHz, rounding to the nearest MHz.
183 		 */
184 		freq = (freq + 500000)/1000000;
185 #if	0
186 		printf("%s: %d MHz\n", self->dv_xname, freq);
187 #endif
188 	}
189 	sbc->sbus_mdvec.dv_clock = freq;
190 
191 	/*
192 	 * Now figure out the proper burst sizes, etc., to use.
193 	 * Unfortunately, there is no equivalent of Solaris' ddi_dma_burstsizes
194 	 * here that walks up the device tree finding the limiting burst size
195 	 * node (if any).
196 	 */
197 	sbusburst = ((struct sbus_softc *)parent)->sc_burst;
198 	if (sbusburst == 0)
199 		sbusburst = SBUS_BURST_32 - 1;
200 	ispburst = getpropint(sa->sa_node, "burst-sizes", -1);
201 	if (ispburst == -1) {
202 		ispburst = sbusburst;
203 	}
204 	ispburst &= sbusburst;
205 	ispburst &= ~(1 << 7);
206 	ispburst &= ~(1 << 6);
207 	sbc->sbus_mdvec.dv_conf1 =  0;
208 	if (ispburst & (1 << 5)) {
209 		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32;
210 	} else if (ispburst & (1 << 4)) {
211 		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16;
212 	} else if (ispburst & (1 << 3)) {
213 		sbc->sbus_mdvec.dv_conf1 =
214 		    BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8;
215 	}
216 	if (sbc->sbus_mdvec.dv_conf1) {
217 		sbc->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE;
218 	}
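	/*
	 * Worked example (illustrative values, not from any particular
	 * machine): if the parent SBus reports sc_burst 0x3f and the node's
	 * "burst-sizes" property is 0x37 (one bit per power-of-two burst
	 * size, as in the SBUS_BURST_* masks), the intersection is 0x37;
	 * clearing bits 7 and 6 (128- and 64-byte bursts) leaves it at 0x37,
	 * and bit 5 being set selects BIU_SBUS_CONF1_FIFO_32, after which
	 * BIU_BURST_ENABLE is ORed in.
	 */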
219 
220 	/*
221 	 * Some early versions of the PTI SBus adapter would fail when we
222 	 * tried to download firmware to them (via poking), so don't supply
223 	 * any firmware for those boards and let them run their resident code.
224 	 */
225 	if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
226 	    strcmp("ptisp", sa->sa_name) == 0) {
227 		sbc->sbus_mdvec.dv_ispfw = NULL;
228 	}
229 
230 	isp->isp_mdvec = &sbc->sbus_mdvec;
231 	isp->isp_bustype = ISP_BT_SBUS;
232 	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
233 	isp->isp_param = &sbc->sbus_dev;
234 	bzero(isp->isp_param, sizeof (sdparam));
235 
236 	sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
237 	sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
238 	sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
239 	sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
240 	sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
241 
242 	/* Establish interrupt channel */
243 	bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO, 0,
244 	    isp_sbus_intr, sbc);
245 	sbus_establish(&sbc->sbus_sd, &sbc->sbus_isp.isp_osinfo._dev);
246 
247 	/*
248 	 * Set up logging levels.
249 	 */
250 #ifdef	ISP_LOGDEFAULT
251 	isp->isp_dblev = ISP_LOGDEFAULT;
252 #else
253 	isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
254 	if (bootverbose)
255 		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
256 #ifdef	SCSIDEBUG
257 	isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
258 #endif
259 #ifdef	DEBUG
260 	isp->isp_dblev |= ISP_LOGDEBUG0;
261 #endif
262 #endif
263 
264 	isp->isp_confopts = self->dv_cfdata->cf_flags;
265 	isp->isp_role = ISP_DEFAULT_ROLES;
266 
267 	/*
268 	 * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
269 	 */
270 	isp->isp_confopts |= ISP_CFG_NONVRAM;
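	/*
	 * Bring-up happens in three stages, each gated on the state the
	 * common code leaves behind: isp_reset() must reach ISP_RESETSTATE,
	 * isp_init() must reach ISP_INITSTATE, and isp_attach() then takes
	 * the HBA to ISP_RUNSTATE and registers it with the SCSI midlayer.
	 * A failure at any stage unwinds via isp_uninit() where appropriate
	 * and leaves the device unattached.
	 */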
271 	ISP_LOCK(isp);
272 	isp->isp_osinfo.no_mbox_ints = 1;
273 	isp_reset(isp);
274 	if (isp->isp_state != ISP_RESETSTATE) {
275 		ISP_UNLOCK(isp);
276 		return;
277 	}
278 	ENABLE_INTS(isp);
279 	isp_init(isp);
280 	if (isp->isp_state != ISP_INITSTATE) {
281 		isp_uninit(isp);
282 		ISP_UNLOCK(isp);
283 		return;
284 	}
285 
286 	/*
287 	 * do generic attach.
288 	 */
289 	ISP_UNLOCK(isp);
290 	isp_attach(isp);
291 	if (isp->isp_state != ISP_RUNSTATE) {
292 		ISP_LOCK(isp);
293 		isp_uninit(isp);
294 		ISP_UNLOCK(isp);
295 	}
296 }
297 
298 static int
299 isp_sbus_intr(void *arg)
300 {
301 	int rv;
302 	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *)arg;
303 	bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_result_dmamap, 0,
304 	    sbc->sbus_result_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
305 	sbc->sbus_isp.isp_osinfo.onintstack = 1;
306 	rv = isp_intr(arg);
307 	sbc->sbus_isp.isp_osinfo.onintstack = 0;
308 	return (rv);
309 }
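/*
 * The POSTREAD sync above makes response-queue entries the chip has DMA'd
 * visible to the CPU before the bus-independent isp_intr() parses them; the
 * matching PREWRITE sync of the request queue happens in isp_sbus_dmasetup()
 * before new entries are handed to the chip.
 */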
310 
311 static u_int16_t
312 isp_sbus_rd_reg(struct ispsoftc *isp, int regoff)
313 {
314 	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
315 	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
316 	offset += (regoff & 0xff);
317 	return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
318 }
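/*
 * Illustrative note on the translation above: a logical register offset
 * from ispreg.h encodes its block in the bits covered by _BLK_REG_MASK, so
 * a mailbox register (OUTMAILBOX0, for example) resolves to
 *
 *	sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] + (regoff & 0xff)
 *	    == SBUS_MBOX_REGS_OFF + (regoff & 0xff)
 *
 * which is how the bus-independent core addresses the BIU, mailbox, SXP,
 * RISC and DMA blocks without knowing the SBus register layout.
 */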
319 
320 static void
321 isp_sbus_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
322 {
323 	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
324 	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
325 	offset += (regoff & 0xff);
326 	bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
327 }
328 
329 static int
330 isp_sbus_mbxdma(struct ispsoftc *isp)
331 {
332 	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
333 	bus_dma_tag_t dmatag = sbc->sbus_dmatag;
334 	bus_dma_segment_t reqseg, rspseg;
335 	int reqrs, rsprs, i, progress;
336 	size_t n;
337 	bus_size_t len;
338 
339 	if (isp->isp_rquest_dma)
340 		return (0);
341 
342 	n = isp->isp_maxcmds * sizeof (XS_T *);
343 	isp->isp_xflist = (XS_T **) malloc(n, M_DEVBUF, M_WAITOK);
344 	if (isp->isp_xflist == NULL) {
345 		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
346 		return (1);
347 	}
348 	bzero(isp->isp_xflist, n);
349 	n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
350 	sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
351 	if (sbc->sbus_dmamap == NULL) {
352 		free(isp->isp_xflist, M_DEVBUF);
353 		isp->isp_xflist = NULL;
354 		isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
355 		return (1);
356 	}
357 	for (i = 0; i < isp->isp_maxcmds; i++) {
358 		/* Allocate a DMA handle */
359 		if (bus_dmamap_create(dmatag, MAXPHYS, 1, MAXPHYS, 0,
360 		    BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
361 			isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
362 			break;
363 		}
364 	}
365 	if (i < isp->isp_maxcmds) {
366 		while (--i >= 0) {
367 			bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
368 		}
369 		free(isp->isp_xflist, M_DEVBUF);
370 		free(sbc->sbus_dmamap, M_DEVBUF);
371 		isp->isp_xflist = NULL;
372 		sbc->sbus_dmamap = NULL;
373 		return (1);
374 	}
375 
376 	/*
377 	 * Allocate and map the request and response queues
378 	 */
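	/*
	 * Both queues follow the usual four-step bus_dma(9) sequence:
	 * bus_dmamem_alloc() for the raw memory, bus_dmamem_map() for a
	 * kernel virtual address, bus_dmamap_create() for the map, and
	 * bus_dmamap_load() to get the DVMA address the chip will use.
	 * The progress counter records how far we got so that the dmafail
	 * path below can unwind in reverse order.
	 */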
379 	progress = 0;
380 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
381 	if (bus_dmamem_alloc(dmatag, len, 0, 0, &reqseg, 1, &reqrs,
382 	    BUS_DMA_NOWAIT)) {
383 		goto dmafail;
384 	}
385 	progress++;
386 	if (bus_dmamem_map(dmatag, &reqseg, reqrs, len,
387 	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
388 		goto dmafail;
389 	}
390 	progress++;
391 	if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
392 	    &sbc->sbus_rquest_dmamap) != 0) {
393 		goto dmafail;
394 	}
395 	progress++;
396 	if (bus_dmamap_load(dmatag, sbc->sbus_rquest_dmamap,
397 	    isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT) != 0) {
398 		goto dmafail;
399 	}
400 	progress++;
401 	isp->isp_rquest_dma = sbc->sbus_rquest_dmamap->dm_segs[0].ds_addr;
402 
403 	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
404 	if (bus_dmamem_alloc(dmatag, len, 0, 0, &rspseg, 1, &rsprs,
405 	    BUS_DMA_NOWAIT)) {
406 		goto dmafail;
407 	}
408 	progress++;
409 	if (bus_dmamem_map(dmatag, &rspseg, rsprs, len,
410 	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
411 		goto dmafail;
412 	}
413 	progress++;
414 	if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
415 	    &sbc->sbus_result_dmamap) != 0) {
416 		goto dmafail;
417 	}
418 	progress++;
419 	if (bus_dmamap_load(dmatag, sbc->sbus_result_dmamap,
420 	    isp->isp_result, len, NULL, BUS_DMA_NOWAIT) != 0) {
421 		goto dmafail;
422 	}
423 	isp->isp_result_dma = sbc->sbus_result_dmamap->dm_segs[0].ds_addr;
424 
425 	return (0);
426 
427 dmafail:
428 	isp_prt(isp, ISP_LOGERR, "Mailbox DMA Setup Failure");
429 
430 	if (progress >= 8) {
431 		bus_dmamap_unload(dmatag, sbc->sbus_result_dmamap);
432 	}
433 	if (progress >= 7) {
434 		bus_dmamap_destroy(dmatag, sbc->sbus_result_dmamap);
435 	}
436 	if (progress >= 6) {
437 		bus_dmamem_unmap(dmatag,
438 		    isp->isp_result, ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)));
439 	}
440 	if (progress >= 5) {
441 		bus_dmamem_free(dmatag, &rspseg, rsprs);
442 	}
443 
444 	if (progress >= 4) {
445 		bus_dmamap_unload(dmatag, sbc->sbus_rquest_dmamap);
446 	}
447 	if (progress >= 3) {
448 		bus_dmamap_destroy(dmatag, sbc->sbus_rquest_dmamap);
449 	}
450 	if (progress >= 2) {
451 		bus_dmamem_unmap(dmatag,
452 		    isp->isp_rquest, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)));
453 	}
454 	if (progress >= 1) {
455 		bus_dmamem_free(dmatag, &reqseg, reqrs);
456 	}
457 
458 	for (i = 0; i < isp->isp_maxcmds; i++) {
459 		bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
460 	}
461 	free(sbc->sbus_dmamap, M_DEVBUF);
462 	free(isp->isp_xflist, M_DEVBUF);
463 	isp->isp_xflist = NULL;
464 	sbc->sbus_dmamap = NULL;
465 	return (1);
466 }
467 
468 /*
469  * Map a DMA request.
470  * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
471  */
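/*
 * A sketch of the association assumed below (isp_handle_index() lives with
 * the core driver in isp_inline.h): handles run from 1 to isp_maxcmds and
 * index the per-command sbus_dmamap[] array allocated in isp_sbus_mbxdma(),
 * so each outstanding command owns exactly one DMA map for the duration of
 * its transfer.
 */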
472 
473 static int
474 isp_sbus_dmasetup(struct ispsoftc *isp, XS_T *xs, ispreq_t *rq,
475     u_int16_t *iptrp, u_int16_t optr)
476 {
477 	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
478 	bus_dmamap_t dmap;
479 	ispcontreq_t *crq;
480 	int cansleep = (xs->xs_control & XS_CTL_NOSLEEP) == 0;
481 	int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;
482 
483 	if (xs->datalen == 0) {
484 		rq->req_seg_count = 1;
485 		goto mbxsync;
486 	}
487 
488 	dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
489 	if (dmap->dm_nsegs != 0) {
490 		panic("%s: dma map already allocated\n", isp->isp_name);
491 		/* NOTREACHED */
492 	}
493 	if (bus_dmamap_load(sbc->sbus_dmatag, dmap, xs->data, xs->datalen,
494 	    NULL, (cansleep ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) |
495 	    BUS_DMA_STREAMING) != 0) {
496 		XS_SETERR(xs, HBA_BOTCH);
497 		return (CMD_COMPLETE);
498 	}
499 
500 	bus_dmamap_sync(sbc->sbus_dmatag, dmap, 0, xs->datalen,
501 	    in? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
502 
503 	if (in) {
504 		rq->req_flags |= REQFLAG_DATA_IN;
505 	} else {
506 		rq->req_flags |= REQFLAG_DATA_OUT;
507 	}
508 
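	/*
	 * For CDBs longer than 12 bytes the single data segment is not
	 * placed in the request entry itself; instead the request gets an
	 * empty segment, a continuation entry carries the real segment, and
	 * req_seg_count is set to 2 to cover both.  (The apparent reason,
	 * that a long CDB leaves no room for an embedded data segment in
	 * the request, is an inference from this code, not from firmware
	 * documentation.)
	 */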
509 	if (XS_CDBLEN(xs) > 12) {
510 		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
511 		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN(isp));
512 		if (*iptrp == optr) {
513 			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
514 			bus_dmamap_unload(sbc->sbus_dmatag, dmap);
515 			XS_SETERR(xs, HBA_BOTCH);
516 			return (CMD_EAGAIN);
517 		}
518 		rq->req_seg_count = 2;
519 		rq->req_dataseg[0].ds_count = 0;
520 		rq->req_dataseg[0].ds_base =  0;
521 		bzero((void *)crq, sizeof (*crq));
522 		crq->req_header.rqs_entry_count = 1;
523 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
524 		crq->req_dataseg[0].ds_count = xs->datalen;
525 		crq->req_dataseg[0].ds_base =  dmap->dm_segs[0].ds_addr;
526 		ISP_SBUSIFY_ISPHDR(isp, &crq->req_header)
527 	} else {
528 		rq->req_dataseg[0].ds_count = xs->datalen;
529 		rq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
530 		rq->req_seg_count = 1;
531 	}
532 
533 mbxsync:
534 	ISP_SWIZZLE_REQUEST(isp, rq);
535 	bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_rquest_dmamap, 0,
536 	     sbc->sbus_rquest_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
537 	return (CMD_QUEUED);
538 }
539 
540 static void
541 isp_sbus_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
542 {
543 	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
544 	bus_dmamap_t dmap;
545 
546 	dmap = sbc->sbus_dmamap[isp_handle_index(handle)];
547 
548 	if (dmap->dm_nsegs == 0) {
549 		panic("%s: dma map not already allocated\n", isp->isp_name);
550 		/* NOTREACHED */
551 	}
552 	bus_dmamap_sync(sbc->sbus_dmatag, dmap, 0,
553 	    xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)?
554 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
555 	bus_dmamap_unload(sbc->sbus_dmatag, dmap);
556 }
557