/* $NetBSD: isp_sbus.c,v 1.78 2009/09/07 13:39:19 tsutsui Exp $ */
/*
 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Additional Copyright (C) 2000-2007 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isp_sbus.c,v 1.78 2009/09/07 13:39:19 tsutsui Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <dev/ic/isp_netbsd.h>
#include <sys/intr.h>
#include <machine/autoconf.h>
#include <dev/sbus/sbusvar.h>
#include <sys/reboot.h>

static void isp_sbus_reset0(ispsoftc_t *);
static void isp_sbus_reset1(ispsoftc_t *);
static int isp_sbus_intr(void *);
static int isp_sbus_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static uint32_t isp_sbus_rd_reg(ispsoftc_t *, int);
static void isp_sbus_wr_reg(ispsoftc_t *, int, uint32_t);
static int isp_sbus_mbxdma(ispsoftc_t *);
static int isp_sbus_dmasetup(ispsoftc_t *, XS_T *, void *);
static void isp_sbus_dmateardown(ispsoftc_t *, XS_T *, uint32_t);

#ifndef	ISP_DISABLE_FW
#include <dev/microcode/isp/asm_sbus.h>
#else
#define	ISP_1000_RISC_CODE	NULL
#endif

static const struct ispmdvec mdvec = {
	isp_sbus_rd_isr,
	isp_sbus_rd_reg,
	isp_sbus_wr_reg,
	isp_sbus_mbxdma,
	isp_sbus_dmasetup,
	isp_sbus_dmateardown,
	isp_sbus_reset0,
	isp_sbus_reset1,
	NULL,
	ISP_1000_RISC_CODE,
	0,
	0
};
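
/*
 * The positional initializers above are expected to line up with the
 * struct ispmdvec members declared in <dev/ic/ispvar.h> of this
 * vintage -- roughly dv_rd_isr, dv_rd_reg, dv_wr_reg, dv_mbxdma,
 * dv_dmaset, dv_dmaclr, dv_reset0, dv_reset1, dv_dregs (unused here,
 * hence NULL), dv_ispfw, dv_conf1 and dv_clock; check ispvar.h for the
 * authoritative layout.  dv_conf1 and dv_clock are recomputed at attach
 * time from PROM properties (see isp_sbus_attach() below).
 */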
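
/*
 * The ispsoftc_t must remain the first member of the SBus softc: the
 * core driver hands the isp_sbus_* methods a plain ispsoftc_t pointer,
 * which they cast straight back to a struct isp_sbussoftc pointer.
 */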
struct isp_sbussoftc {
	ispsoftc_t	sbus_isp;
	struct sbusdev	sbus_sd;
	sdparam		sbus_dev;
	struct scsipi_channel sbus_chan;
	bus_space_tag_t	sbus_bustag;
	bus_space_handle_t sbus_reg;
	int		sbus_node;
	int		sbus_pri;
	struct ispmdvec	sbus_mdvec;
	bus_dmamap_t	*sbus_dmamap;
	int16_t		sbus_poff[_NREG_BLKS];
};


static int isp_match(device_t, cfdata_t, void *);
static void isp_sbus_attach(device_t, device_t, void *);
CFATTACH_DECL_NEW(isp_sbus, sizeof (struct isp_sbussoftc),
    isp_match, isp_sbus_attach, NULL, NULL);

static int
isp_match(device_t parent, cfdata_t cf, void *aux)
{
	int rv;
	struct sbus_attach_args *sa = aux;

	rv = (strcmp(cf->cf_name, sa->sa_name) == 0 ||
		strcmp("PTI,ptisp", sa->sa_name) == 0 ||
		strcmp("ptisp", sa->sa_name) == 0 ||
		strcmp("SUNW,isp", sa->sa_name) == 0 ||
		strcmp("QLGC,isp", sa->sa_name) == 0);

	return (rv);
}


static void
isp_sbus_attach(device_t parent, device_t self, void *aux)
{
	int freq, ispburst, sbusburst;
	struct sbus_attach_args *sa = aux;
	struct isp_sbussoftc *sbc = device_private(self);
	struct sbus_softc *sbsc = device_private(parent);
	ispsoftc_t *isp = &sbc->sbus_isp;

	isp->isp_osinfo.dev = self;

	printf(" for %s\n", sa->sa_name);

	isp->isp_nchan = isp->isp_osinfo.adapter.adapt_nchannels = 1;

	sbc->sbus_bustag = sa->sa_bustag;
	if (sa->sa_nintr != 0)
		sbc->sbus_pri = sa->sa_pri;
	sbc->sbus_mdvec = mdvec;

	if (sa->sa_npromvaddrs) {
		sbus_promaddr_to_handle(sa->sa_bustag,
			sa->sa_promvaddrs[0], &sbc->sbus_reg);
	} else {
		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
			sa->sa_size, 0, &sbc->sbus_reg) != 0) {
			aprint_error_dev(self, "cannot map registers\n");
			return;
		}
	}
	sbc->sbus_node = sa->sa_node;

	freq = prom_getpropint(sa->sa_node, "clock-frequency", 0);
	if (freq) {
		/*
		 * Convert from Hz to MHz, rounding to the nearest MHz.
		 */
		freq = (freq + 500000)/1000000;
	}
	sbc->sbus_mdvec.dv_clock = freq;
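	/*
	 * For example, an ISP on a 25 MHz SBus typically carries a
	 * clock-frequency property of 25000000, which becomes
	 * (25000000 + 500000) / 1000000 == 25 above.
	 */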

	/*
	 * Now figure out the proper burst sizes, etc., to use.
	 * Unfortunately, there is no ddi_dma_burstsizes here which
	 * walks up the tree finding the limiting burst size node (if
	 * any).
	 */
	sbusburst = sbsc->sc_burst;
	if (sbusburst == 0)
		sbusburst = SBUS_BURST_32 - 1;
	ispburst = prom_getpropint(sa->sa_node, "burst-sizes", -1);
	if (ispburst == -1) {
		ispburst = sbusburst;
	}
	ispburst &= sbusburst;
	ispburst &= ~(1 << 7);
	ispburst &= ~(1 << 6);
	sbc->sbus_mdvec.dv_conf1 = 0;
	if (ispburst & (1 << 5)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32;
	} else if (ispburst & (1 << 4)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16;
	} else if (ispburst & (1 << 3)) {
		sbc->sbus_mdvec.dv_conf1 =
		    BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8;
	}
	if (sbc->sbus_mdvec.dv_conf1) {
		sbc->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE;
	}
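	/*
	 * Worked example with assumed (illustrative) property values: if
	 * the node's "burst-sizes" reads 0x7f and the parent SBus allows
	 * 0x3f, the intersection is 0x3f; bits 7 and 6 (128- and 64-byte
	 * bursts, which the code masks off above) are already clear, bit
	 * 5 wins, and dv_conf1 ends up as
	 * BIU_SBUS_CONF1_FIFO_32 | BIU_BURST_ENABLE.
	 */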

	isp->isp_mdvec = &sbc->sbus_mdvec;
	isp->isp_bustype = ISP_BT_SBUS;
	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
	isp->isp_param = &sbc->sbus_dev;
	isp->isp_dmatag = sa->sa_dmatag;
	ISP_MEMZERO(isp->isp_param, sizeof (sdparam));
	isp->isp_osinfo.chan = &sbc->sbus_chan;

	sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
	sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
	sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
	sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	/* Establish interrupt channel */
	bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO,
	    isp_sbus_intr, sbc);
	sbus_establish(&sbc->sbus_sd, self);

	/*
	 * Set up logging levels.
	 */
#ifdef	ISP_LOGDEFAULT
	isp->isp_dblev = ISP_LOGDEFAULT;
#else
	isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
#ifdef	SCSIDEBUG
	isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#ifdef	DEBUG
	isp->isp_dblev |= ISP_LOGDEBUG0;
#endif
#endif
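	/*
	 * For example, a kernel configuration could pre-set the mask with
	 * something like
	 *
	 *	options ISP_LOGDEFAULT="(ISP_LOGWARN|ISP_LOGERR|ISP_LOGINFO)"
	 *
	 * using any combination of the ISP_LOG* bits.
	 */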

	isp->isp_confopts = device_cfdata(self)->cf_flags;
	SDPARAM(isp, 0)->role = ISP_DEFAULT_ROLES;

	/*
	 * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
	 */
	isp->isp_confopts |= ISP_CFG_NONVRAM;

	/*
	 * Mark things if we're a PTI SBus adapter.
	 */
	if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0) {
		SDPARAM(isp, 0)->isp_ptisp = 1;
	}
	ISP_LOCK(isp);
	isp_reset(isp, 1);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		return;
	}
	ISP_ENABLE_INTS(isp);
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		return;
	}

	/*
	 * do generic attach.
	 */
	ISP_UNLOCK(isp);
	isp_attach(isp);
}


static void
isp_sbus_reset0(ispsoftc_t *isp)
{
	ISP_DISABLE_INTS(isp);
}

static void
isp_sbus_reset1(ispsoftc_t *isp)
{
	ISP_ENABLE_INTS(isp);
}

static int
isp_sbus_intr(void *arg)
{
	uint32_t isr;
	uint16_t sema, mbox;
	ispsoftc_t *isp = arg;

	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
		return (0);
	} else {
		struct isp_sbussoftc *sbc = arg;
		sbc->sbus_isp.isp_osinfo.onintstack = 1;
		isp_intr(isp, isr, sema, mbox);
		sbc->sbus_isp.isp_osinfo.onintstack = 0;
		return (1);
	}
}

#define	IspVirt2Off(a, x)	\
	(((struct isp_sbussoftc *)a)->sbus_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(sbc, off)		\
	bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, off)
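
/*
 * Rough worked example of the offset translation: a "virtual" register
 * offset such as BIU_ISR encodes its register block in the upper bits
 * and its byte offset in the low byte, so IspVirt2Off() indexes
 * sbus_poff[] with the block (BIU_REGS_OFF for the BIU block, as set up
 * in isp_sbus_attach()) and adds the low byte, producing the bus_space
 * offset that BXR2() hands to bus_space_read_2().
 */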

static int
isp_sbus_rd_isr(ispsoftc_t *isp, uint32_t *isrp,
    uint16_t *semap, uint16_t *mbp)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	uint32_t isr;
	uint16_t sema;

	isr = BXR2(sbc, IspVirt2Off(isp, BIU_ISR));
	sema = BXR2(sbc, IspVirt2Off(isp, BIU_SEMA));
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	/* A set semaphore means a mailbox/async event completed; return its code. */
	if ((*semap = sema) != 0) {
		*mbp = BXR2(sbc, IspVirt2Off(isp, OUTMAILBOX0));
	}
	return (1);
}

static uint32_t
isp_sbus_rd_reg(ispsoftc_t *isp, int regoff)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
}

static void
isp_sbus_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
}

static int
isp_sbus_mbxdma(ispsoftc_t *isp)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dma_segment_t reqseg, rspseg;
	int reqrs, rsprs, i, progress;
	size_t n;
	bus_size_t len;

	if (isp->isp_rquest_dma)
		return (0);

	n = isp->isp_maxcmds * sizeof (XS_T *);
	isp->isp_xflist = (XS_T **) malloc(n, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	ISP_MEMZERO(isp->isp_xflist, n);
	n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
	if (sbc->sbus_dmamap == NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
		isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
		return (1);
	}
	for (i = 0; i < isp->isp_maxcmds; i++) {
		/* Allocate a DMA handle */
		if (bus_dmamap_create(isp->isp_dmatag, MAXPHYS, 1, MAXPHYS,
		    1 << 24, BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
			isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
			break;
		}
	}
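	/*
	 * For reference, the bus_dmamap_create(9) parameters above are
	 * (tag, size, nsegments, maxsegsz, boundary, flags, dmamp): each
	 * per-command map describes a single segment of at most MAXPHYS
	 * bytes that may not cross a 16 MB (1 << 24) boundary, presumably
	 * to suit the SBus ISP's DMA addressing limits.
	 */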
	if (i < isp->isp_maxcmds) {
		while (--i >= 0) {
			bus_dmamap_destroy(isp->isp_dmatag,
			    sbc->sbus_dmamap[i]);
		}
		free(isp->isp_xflist, M_DEVBUF);
		free(sbc->sbus_dmamap, M_DEVBUF);
		isp->isp_xflist = NULL;
		sbc->sbus_dmamap = NULL;
		return (1);
	}

	/*
	 * Allocate and map the request and response queues
	 */
	progress = 0;
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(isp->isp_dmatag, len, 0, 0, &reqseg, 1, &reqrs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamem_map(isp->isp_dmatag, &reqseg, reqrs, len,
	    (void *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_create(isp->isp_dmatag, len, 1, len, 1 << 24,
	    BUS_DMA_NOWAIT, &isp->isp_rqdmap) != 0) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_load(isp->isp_dmatag, isp->isp_rqdmap,
	    isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT) != 0) {
		goto dmafail;
	}
	progress++;
	isp->isp_rquest_dma = isp->isp_rqdmap->dm_segs[0].ds_addr;

	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(isp->isp_dmatag, len, 0, 0, &rspseg, 1, &rsprs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamem_map(isp->isp_dmatag, &rspseg, rsprs, len,
	    (void *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_create(isp->isp_dmatag, len, 1, len, 1 << 24,
	    BUS_DMA_NOWAIT, &isp->isp_rsdmap) != 0) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_load(isp->isp_dmatag, isp->isp_rsdmap,
	    isp->isp_result, len, NULL, BUS_DMA_NOWAIT) != 0) {
		goto dmafail;
	}
	isp->isp_result_dma = isp->isp_rsdmap->dm_segs[0].ds_addr;

	return (0);

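	/*
	 * Unwind keyed off how far we got: progress 1..4 cover the
	 * request queue (dmamem_alloc, dmamem_map, dmamap_create,
	 * dmamap_load) and 5..7 the response queue.  The response queue
	 * load is the last step and never bumps "progress", so this path
	 * is reached with progress <= 7; the progress >= 8 unload below
	 * is purely defensive.
	 */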
dmafail:
	isp_prt(isp, ISP_LOGERR, "Mailbox DMA Setup Failure");

	if (progress >= 8) {
		bus_dmamap_unload(isp->isp_dmatag, isp->isp_rsdmap);
	}
	if (progress >= 7) {
		bus_dmamap_destroy(isp->isp_dmatag, isp->isp_rsdmap);
	}
	if (progress >= 6) {
		bus_dmamem_unmap(isp->isp_dmatag,
		    isp->isp_result, ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)));
	}
	if (progress >= 5) {
		bus_dmamem_free(isp->isp_dmatag, &rspseg, rsprs);
	}

	if (progress >= 4) {
		bus_dmamap_unload(isp->isp_dmatag, isp->isp_rqdmap);
	}
	if (progress >= 3) {
		bus_dmamap_destroy(isp->isp_dmatag, isp->isp_rqdmap);
	}
	if (progress >= 2) {
		bus_dmamem_unmap(isp->isp_dmatag,
		    isp->isp_rquest, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)));
	}
	if (progress >= 1) {
		bus_dmamem_free(isp->isp_dmatag, &reqseg, reqrs);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		bus_dmamap_destroy(isp->isp_dmatag, sbc->sbus_dmamap[i]);
	}
	free(sbc->sbus_dmamap, M_DEVBUF);
	free(isp->isp_xflist, M_DEVBUF);
	isp->isp_xflist = NULL;
	sbc->sbus_dmamap = NULL;
	return (1);
}

/*
 * Map a DMA request.
 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
 */
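/*
 * Return values follow the usual isp(4) DMA setup convention: CMD_QUEUED
 * once the request (plus any continuation entries) is on the request
 * queue, CMD_EAGAIN for a transient resource shortage worth retrying,
 * and CMD_COMPLETE when the transfer has been failed outright (with
 * XS_SETERR() already recorded).
 */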

static int
isp_sbus_dmasetup(ispsoftc_t *isp, XS_T *xs, void *arg)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *)isp;
	ispreq_t *rq = arg;
	bus_dmamap_t dmap;
	bus_dma_segment_t *dm_segs;
	uint32_t nsegs;
	isp_ddir_t ddir;

	dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
	if (xs->datalen == 0) {
		ddir = ISP_NOXFR;
		nsegs = 0;
		dm_segs = NULL;
	} else {
		int error;
		uint32_t flag, flg2;

		if (xs->xs_control & XS_CTL_DATA_IN) {
			flg2 = BUS_DMASYNC_PREREAD;
			flag = BUS_DMA_READ;
			ddir = ISP_FROM_DEVICE;
		} else {
			flg2 = BUS_DMASYNC_PREWRITE;
			flag = BUS_DMA_WRITE;
			ddir = ISP_TO_DEVICE;
		}
		error = bus_dmamap_load(isp->isp_dmatag, dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ?
		     BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING | flag);
		if (error) {
			isp_prt(isp, ISP_LOGWARN, "unable to load DMA (%d)",
			    error);
			XS_SETERR(xs, HBA_BOTCH);
			if (error == EAGAIN || error == ENOMEM) {
				return (CMD_EAGAIN);
			} else {
				return (CMD_COMPLETE);
			}
		}
		dm_segs = dmap->dm_segs;
		nsegs = dmap->dm_nsegs;
		bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
		    flg2);
	}

	if (isp_send_cmd(isp, rq, dm_segs, nsegs, xs->datalen, ddir) !=
	    CMD_QUEUED) {
		return (CMD_EAGAIN);
	} else {
		return (CMD_QUEUED);
	}
}

static void
isp_sbus_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;

	dmap = sbc->sbus_dmamap[isp_handle_index(handle)];

	if (dmap->dm_nsegs == 0) {
		panic("%s: DMA map not already allocated",
		    device_xname(isp->isp_osinfo.dev));
		/* NOTREACHED */
	}
	bus_dmamap_sync(isp->isp_dmatag, dmap, 0,
	    xs->datalen, (xs->xs_control & XS_CTL_DATA_IN) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(isp->isp_dmatag, dmap);
}
553