/* $NetBSD: isp_sbus.c,v 1.80 2010/03/26 20:52:01 mjacob Exp $ */
/*
 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Additional Copyright (C) 2000-2007 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isp_sbus.c,v 1.80 2010/03/26 20:52:01 mjacob Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <dev/ic/isp_netbsd.h>
#include <sys/intr.h>
#include <machine/autoconf.h>
#include <dev/sbus/sbusvar.h>
#include <sys/reboot.h>

static void isp_sbus_reset0(ispsoftc_t *);
static void isp_sbus_reset1(ispsoftc_t *);
static int isp_sbus_intr(void *);
static int isp_sbus_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static uint32_t isp_sbus_rd_reg(ispsoftc_t *, int);
static void isp_sbus_wr_reg(ispsoftc_t *, int, uint32_t);
static int isp_sbus_mbxdma(ispsoftc_t *);
static int isp_sbus_dmasetup(ispsoftc_t *, XS_T *, void *);
static void isp_sbus_dmateardown(ispsoftc_t *, XS_T *, uint32_t);

#ifndef	ISP_DISABLE_FW
#include <dev/microcode/isp/asm_sbus.h>
#else
#define	ISP_1000_RISC_CODE	NULL
#endif

static const struct ispmdvec mdvec = {
	isp_sbus_rd_isr,
	isp_sbus_rd_reg,
	isp_sbus_wr_reg,
	isp_sbus_mbxdma,
	isp_sbus_dmasetup,
	isp_sbus_dmateardown,
	isp_sbus_reset0,
	isp_sbus_reset1,
	NULL,
	ISP_1000_RISC_CODE,
	0,
	0
};

struct isp_sbussoftc {
	ispsoftc_t	sbus_isp;
	sdparam		sbus_dev;
	struct scsipi_channel sbus_chan;
	bus_space_tag_t	sbus_bustag;
	bus_space_handle_t sbus_reg;
	int		sbus_node;
	int		sbus_pri;
	struct ispmdvec	sbus_mdvec;
	bus_dmamap_t	*sbus_dmamap;
	int16_t		sbus_poff[_NREG_BLKS];
};


static int isp_match(device_t, cfdata_t, void *);
static void isp_sbus_attach(device_t, device_t, void *);
CFATTACH_DECL_NEW(isp_sbus, sizeof (struct isp_sbussoftc),
    isp_match, isp_sbus_attach, NULL, NULL);

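/*
 * Match any of the PROM node names used by the Qlogic ISP1000 family of
 * SBus adapters (including the PTI rebadged boards), or our own config
 * name.
 */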
static int
isp_match(device_t parent, cfdata_t cf, void *aux)
{
	int rv;
	struct sbus_attach_args *sa = aux;

	rv = (strcmp(cf->cf_name, sa->sa_name) == 0 ||
		strcmp("PTI,ptisp", sa->sa_name) == 0 ||
		strcmp("ptisp", sa->sa_name) == 0 ||
		strcmp("SUNW,isp", sa->sa_name) == 0 ||
		strcmp("QLGC,isp", sa->sa_name) == 0);

	return (rv);
}


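/*
 * Attach: map the chip registers (preferring any PROM-provided virtual
 * address), derive the clock frequency and burst sizes from the PROM
 * properties, hook up the interrupt, then reset and initialize the chip
 * and hand off to the MI isp_attach().
 */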
static void
isp_sbus_attach(device_t parent, device_t self, void *aux)
{
	int freq, ispburst, sbusburst;
	struct sbus_attach_args *sa = aux;
	struct isp_sbussoftc *sbc = device_private(self);
	struct sbus_softc *sbsc = device_private(parent);
	ispsoftc_t *isp = &sbc->sbus_isp;

	isp->isp_osinfo.dev = self;

	printf(" for %s\n", sa->sa_name);

	isp->isp_nchan = isp->isp_osinfo.adapter.adapt_nchannels = 1;

	sbc->sbus_bustag = sa->sa_bustag;
	if (sa->sa_nintr != 0)
		sbc->sbus_pri = sa->sa_pri;
	sbc->sbus_mdvec = mdvec;

	if (sa->sa_npromvaddrs) {
		sbus_promaddr_to_handle(sa->sa_bustag,
			sa->sa_promvaddrs[0], &sbc->sbus_reg);
	} else {
		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
			sa->sa_size, 0, &sbc->sbus_reg) != 0) {
			aprint_error_dev(self, "cannot map registers\n");
			return;
		}
	}
	sbc->sbus_node = sa->sa_node;

	freq = prom_getpropint(sa->sa_node, "clock-frequency", 0);
	if (freq) {
		/*
		 * Convert from Hz to MHz, rounding to the nearest MHz.
		 */
		freq = (freq + 500000)/1000000;
	}
	sbc->sbus_mdvec.dv_clock = freq;

	/*
	 * Now figure out the proper burst sizes, etc., to use.
	 * Unfortunately, there is no ddi_dma_burstsizes here which
	 * walks up the tree finding the limiting burst size node (if
	 * any).
	 */
	sbusburst = sbsc->sc_burst;
	if (sbusburst == 0)
		sbusburst = SBUS_BURST_32 - 1;
	ispburst = prom_getpropint(sa->sa_node, "burst-sizes", -1);
	if (ispburst == -1) {
		ispburst = sbusburst;
	}
	ispburst &= sbusburst;
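	/*
	 * Only bursts of up to 32 bytes have corresponding FIFO settings
	 * in the BIU SBus configuration register below, so mask off the
	 * 64- and 128-byte burst bits (bits 6 and 7 of "burst-sizes").
	 */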
	ispburst &= ~(1 << 7);
	ispburst &= ~(1 << 6);
	sbc->sbus_mdvec.dv_conf1 = 0;
	if (ispburst & (1 << 5)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32;
	} else if (ispburst & (1 << 4)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16;
	} else if (ispburst & (1 << 3)) {
		sbc->sbus_mdvec.dv_conf1 =
		    BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8;
	}
	if (sbc->sbus_mdvec.dv_conf1) {
		sbc->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE;
	}

	isp->isp_mdvec = &sbc->sbus_mdvec;
	isp->isp_bustype = ISP_BT_SBUS;
	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
	isp->isp_param = &sbc->sbus_dev;
	isp->isp_dmatag = sa->sa_dmatag;
	ISP_MEMZERO(isp->isp_param, sizeof (sdparam));
	isp->isp_osinfo.chan = &sbc->sbus_chan;

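	/*
	 * Record the SBus-specific offsets of each register block so that
	 * the IspVirt2Off() translation and the register accessors below
	 * can find them.
	 */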
	sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
	sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
	sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
	sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	/* Establish interrupt channel */
	bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO,
	    isp_sbus_intr, sbc);

	/*
	 * Set up logging levels.
	 */
#ifdef	ISP_LOGDEFAULT
	isp->isp_dblev = ISP_LOGDEFAULT;
#else
	isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
#ifdef	SCSIDEBUG
	isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#ifdef	DEBUG
	isp->isp_dblev |= ISP_LOGDEBUG0;
#endif
#endif

	isp->isp_confopts = device_cfdata(self)->cf_flags;
	SDPARAM(isp, 0)->role = ISP_DEFAULT_ROLES;

	/*
	 * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
	 */
	isp->isp_confopts |= ISP_CFG_NONVRAM;

	/*
	 * Mark things if we're a PTI SBus adapter.
	 */
	if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0) {
		SDPARAM(isp, 0)->isp_ptisp = 1;
	}
	ISP_LOCK(isp);
	isp_reset(isp, 1);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		return;
	}
	ISP_ENABLE_INTS(isp);
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		return;
	}

	/*
	 * do generic attach.
	 */
	ISP_UNLOCK(isp);
	isp_attach(isp);
}


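/*
 * Bus-specific reset hooks called by the MI core around a chip reset:
 * interrupts are disabled before the reset and re-enabled afterwards.
 */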
static void
isp_sbus_reset0(ispsoftc_t *isp)
{
	ISP_DISABLE_INTS(isp);
}

static void
isp_sbus_reset1(ispsoftc_t *isp)
{
	ISP_ENABLE_INTS(isp);
}

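/*
 * Interrupt handler: poll the chip via ISP_READ_ISR and, if it is really
 * interrupting, note that we are on the interrupt stack and hand the
 * status off to the MI isp_intr().
 */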
static int
isp_sbus_intr(void *arg)
{
	uint32_t isr;
	uint16_t sema, mbox;
	ispsoftc_t *isp = arg;

	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
		return (0);
	} else {
		struct isp_sbussoftc *sbc = arg;
		sbc->sbus_isp.isp_osinfo.onintstack = 1;
		isp_intr(isp, isr, sema, mbox);
		sbc->sbus_isp.isp_osinfo.onintstack = 0;
		return (1);
	}
}

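/*
 * Translate a "virtual" register offset used by the MI code into the
 * SBus register offset recorded in sbus_poff, and read 16 bits at a time
 * through bus_space.
 */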
#define	IspVirt2Off(a, x)	\
	(((struct isp_sbussoftc *)a)->sbus_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(sbc, off)		\
	bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, off)

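/*
 * Check whether the chip has an interrupt pending.  Returns nonzero with
 * the interrupt status and semaphore registers filled in, plus outgoing
 * mailbox 0 when the semaphore lock is set; returns zero otherwise.
 */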
static int
isp_sbus_rd_isr(ispsoftc_t *isp, uint32_t *isrp,
    uint16_t *semap, uint16_t *mbp)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	uint32_t isr;
	uint16_t sema;

	isr = BXR2(sbc, IspVirt2Off(isp, BIU_ISR));
	sema = BXR2(sbc, IspVirt2Off(isp, BIU_SEMA));
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		*mbp = BXR2(sbc, IspVirt2Off(isp, OUTMAILBOX0));
	}
	return (1);
}

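/*
 * 16-bit register accessors: translate the MI register offset into the
 * SBus offset of the appropriate register block and go through bus_space.
 */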
static uint32_t
isp_sbus_rd_reg(ispsoftc_t *isp, int regoff)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
}

static void
isp_sbus_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
}

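/*
 * One-time DMA setup: allocate the command handle list and a DMA map for
 * each outstanding command, then allocate, map, and load DMA-safe memory
 * for the request and response queues.
 */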
static int
isp_sbus_mbxdma(ispsoftc_t *isp)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dma_segment_t reqseg, rspseg;
	int reqrs, rsprs, i, progress;
	size_t n;
	bus_size_t len;

	if (isp->isp_rquest_dma)
		return (0);

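	/*
	 * Allocate the command handle array and chain its entries together
	 * into a free list for the MI code.
	 */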
	n = isp->isp_maxcmds * sizeof (isp_hdl_t);
	isp->isp_xflist = (isp_hdl_t *) malloc(n, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	ISP_MEMZERO(isp->isp_xflist, n);
	for (n = 0; n < isp->isp_maxcmds - 1; n++) {
		isp->isp_xflist[n].cmd = &isp->isp_xflist[n+1];
	}
	n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
	if (sbc->sbus_dmamap == NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
		isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
		return (1);
	}
	for (i = 0; i < isp->isp_maxcmds; i++) {
		/* Allocate a DMA handle */
		if (bus_dmamap_create(isp->isp_dmatag, MAXPHYS, 1, MAXPHYS,
		    1 << 24, BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
			isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
			break;
		}
	}
	if (i < isp->isp_maxcmds) {
		while (--i >= 0) {
			bus_dmamap_destroy(isp->isp_dmatag,
			    sbc->sbus_dmamap[i]);
		}
		free(isp->isp_xflist, M_DEVBUF);
		free(sbc->sbus_dmamap, M_DEVBUF);
		isp->isp_xflist = NULL;
		sbc->sbus_dmamap = NULL;
		return (1);
	}

	/*
	 * Allocate and map the request and response queues
	 */
	progress = 0;
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(isp->isp_dmatag, len, 0, 0, &reqseg, 1, &reqrs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamem_map(isp->isp_dmatag, &reqseg, reqrs, len,
	    (void *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_create(isp->isp_dmatag, len, 1, len, 1 << 24,
	    BUS_DMA_NOWAIT, &isp->isp_rqdmap) != 0) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_load(isp->isp_dmatag, isp->isp_rqdmap,
	    isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT) != 0) {
		goto dmafail;
	}
	progress++;
	isp->isp_rquest_dma = isp->isp_rqdmap->dm_segs[0].ds_addr;

	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(isp->isp_dmatag, len, 0, 0, &rspseg, 1, &rsprs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamem_map(isp->isp_dmatag, &rspseg, rsprs, len,
	    (void *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_create(isp->isp_dmatag, len, 1, len, 1 << 24,
	    BUS_DMA_NOWAIT, &isp->isp_rsdmap) != 0) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_load(isp->isp_dmatag, isp->isp_rsdmap,
	    isp->isp_result, len, NULL, BUS_DMA_NOWAIT) != 0) {
		goto dmafail;
	}
	isp->isp_result_dma = isp->isp_rsdmap->dm_segs[0].ds_addr;

	return (0);

dmafail:
	isp_prt(isp, ISP_LOGERR, "Mailbox DMA Setup Failure");

	if (progress >= 8) {
		bus_dmamap_unload(isp->isp_dmatag, isp->isp_rsdmap);
	}
	if (progress >= 7) {
		bus_dmamap_destroy(isp->isp_dmatag, isp->isp_rsdmap);
	}
	if (progress >= 6) {
		bus_dmamem_unmap(isp->isp_dmatag,
		    isp->isp_result, ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)));
	}
	if (progress >= 5) {
		bus_dmamem_free(isp->isp_dmatag, &rspseg, rsprs);
	}

	if (progress >= 4) {
		bus_dmamap_unload(isp->isp_dmatag, isp->isp_rqdmap);
	}
	if (progress >= 3) {
		bus_dmamap_destroy(isp->isp_dmatag, isp->isp_rqdmap);
	}
	if (progress >= 2) {
		bus_dmamem_unmap(isp->isp_dmatag,
		    isp->isp_rquest, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)));
	}
	if (progress >= 1) {
		bus_dmamem_free(isp->isp_dmatag, &reqseg, reqrs);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		bus_dmamap_destroy(isp->isp_dmatag, sbc->sbus_dmamap[i]);
	}
	free(sbc->sbus_dmamap, M_DEVBUF);
	free(isp->isp_xflist, M_DEVBUF);
	isp->isp_xflist = NULL;
	sbc->sbus_dmamap = NULL;
	return (1);
}

/*
 * Map a DMA request.
 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
 */

static int
isp_sbus_dmasetup(struct ispsoftc *isp, struct scsipi_xfer *xs, void *arg)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *)isp;
	ispreq_t *rq = arg;
	bus_dmamap_t dmap;
	bus_dma_segment_t *dm_segs;
	uint32_t nsegs, hidx;
	isp_ddir_t ddir;

	hidx = isp_handle_index(isp, rq->req_handle);
	if (hidx == ISP_BAD_HANDLE_INDEX) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}
	dmap = sbc->sbus_dmamap[hidx];
	if (xs->datalen == 0) {
		ddir = ISP_NOXFR;
		nsegs = 0;
		dm_segs = NULL;
	} else {
		int error;
		uint32_t flag, flg2;

		if (xs->xs_control & XS_CTL_DATA_IN) {
			flg2 = BUS_DMASYNC_PREREAD;
			flag = BUS_DMA_READ;
			ddir = ISP_FROM_DEVICE;
		} else {
			flg2 = BUS_DMASYNC_PREWRITE;
			flag = BUS_DMA_WRITE;
			ddir = ISP_TO_DEVICE;
		}
		error = bus_dmamap_load(isp->isp_dmatag, dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING | flag);
		if (error) {
			isp_prt(isp, ISP_LOGWARN, "unable to load DMA (%d)",
			    error);
			XS_SETERR(xs, HBA_BOTCH);
			if (error == EAGAIN || error == ENOMEM) {
				return (CMD_EAGAIN);
			} else {
				return (CMD_COMPLETE);
			}
		}
		dm_segs = dmap->dm_segs;
		nsegs = dmap->dm_nsegs;
		bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
		    flg2);
	}

	if (isp_send_cmd(isp, rq, dm_segs, nsegs, xs->datalen, ddir) !=
	    CMD_QUEUED) {
		return (CMD_EAGAIN);
	} else {
		return (CMD_QUEUED);
	}
}

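/*
 * Undo a DMA mapping once a command completes: sync the per-command map
 * for the direction of the transfer and unload it.
 */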
static void
isp_sbus_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;
	uint32_t hidx;

	hidx = isp_handle_index(isp, handle);
	if (hidx == ISP_BAD_HANDLE_INDEX) {
		isp_xs_prt(isp, xs, ISP_LOGERR, "bad handle on teardown");
		return;
	}
	dmap = sbc->sbus_dmamap[hidx];
	bus_dmamap_sync(isp->isp_dmatag, dmap, 0,
	    xs->datalen, (xs->xs_control & XS_CTL_DATA_IN) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(isp->isp_dmatag, dmap);
}
559