/* $NetBSD: isp_sbus.c,v 1.48 2001/12/14 00:13:47 mjacob Exp $ */
/*
 * This driver, which is contained in NetBSD in the files:
 *
 *	sys/dev/ic/isp.c
 *	sys/dev/ic/isp_inline.h
 *	sys/dev/ic/isp_netbsd.c
 *	sys/dev/ic/isp_netbsd.h
 *	sys/dev/ic/isp_target.c
 *	sys/dev/ic/isp_target.h
 *	sys/dev/ic/isp_tpublic.h
 *	sys/dev/ic/ispmbox.h
 *	sys/dev/ic/ispreg.h
 *	sys/dev/ic/ispvar.h
 *	sys/microcode/isp/asm_sbus.h
 *	sys/microcode/isp/asm_1040.h
 *	sys/microcode/isp/asm_1080.h
 *	sys/microcode/isp/asm_12160.h
 *	sys/microcode/isp/asm_2100.h
 *	sys/microcode/isp/asm_2200.h
 *	sys/pci/isp_pci.c
 *	sys/sbus/isp_sbus.c
 *
 * is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
 * This driver also shares source with the FreeBSD, OpenBSD, Linux, and
 * Solaris versions. This tends to be an interesting maintenance problem.
 *
 * Please coordinate with Matthew Jacob on changes you wish to make here.
 */
/*
 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (c) 1997, 2001 by Matthew Jacob
 * NASA AMES Research Center
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isp_sbus.c,v 1.48 2001/12/14 00:13:47 mjacob Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <dev/ic/isp_netbsd.h>
#include <machine/intr.h>
#include <machine/autoconf.h>
#include <dev/microcode/isp/asm_sbus.h>
#include <dev/sbus/sbusvar.h>
#include <sys/reboot.h>

static int isp_sbus_intr(void *);
static int
isp_sbus_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static u_int16_t isp_sbus_rd_reg(struct ispsoftc *, int);
static void isp_sbus_wr_reg(struct ispsoftc *, int, u_int16_t);
static int isp_sbus_mbxdma(struct ispsoftc *);
static int isp_sbus_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *,
    u_int16_t);
static void isp_sbus_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

#ifndef	ISP_1000_RISC_CODE
#define	ISP_1000_RISC_CODE	NULL
#endif

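/*
 * Machine-dependent dispatch vector handed to the bus-independent core:
 * SBus-specific register access and DMA routines, plus the (optional)
 * ISP1000 firmware image.
 */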
static struct ispmdvec mdvec = {
	isp_sbus_rd_isr,
	isp_sbus_rd_reg,
	isp_sbus_wr_reg,
	isp_sbus_mbxdma,
	isp_sbus_dmasetup,
	isp_sbus_dmateardown,
	NULL,
	NULL,
	NULL,
	ISP_1000_RISC_CODE
};

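/*
 * SBus-specific softc.  The generic ispsoftc must come first so that
 * pointers to it can be cast back to the bus-specific structure (and
 * vice versa), as the code below does.
 */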
struct isp_sbussoftc {
	struct ispsoftc	sbus_isp;
	struct sbusdev	sbus_sd;
	sdparam		sbus_dev;
	bus_space_tag_t	sbus_bustag;
	bus_space_handle_t sbus_reg;
	int		sbus_node;
	int		sbus_pri;
	struct ispmdvec	sbus_mdvec;
	bus_dmamap_t	*sbus_dmamap;
	int16_t		sbus_poff[_NREG_BLKS];
};


static int isp_match(struct device *, struct cfdata *, void *);
static void isp_sbus_attach(struct device *, struct device *, void *);
struct cfattach isp_sbus_ca = {
	sizeof (struct isp_sbussoftc), isp_match, isp_sbus_attach
};

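/*
 * Match any of the OpenPROM names under which Qlogic and PTI SBus ISP
 * boards show up, as well as the configured driver name itself.
 */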
static int
isp_match(struct device *parent, struct cfdata *cf, void *aux)
{
	int rv;
#ifdef DEBUG
	static int oneshot = 1;
#endif
	struct sbus_attach_args *sa = aux;

	rv = (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0 ||
		strcmp("PTI,ptisp", sa->sa_name) == 0 ||
		strcmp("ptisp", sa->sa_name) == 0 ||
		strcmp("SUNW,isp", sa->sa_name) == 0 ||
		strcmp("QLGC,isp", sa->sa_name) == 0);
#ifdef DEBUG
	if (rv && oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (sbus) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	return (rv);
}

150 
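/*
 * Attach: map the chip registers, work out the clock and burst-size
 * parameters, establish the interrupt, and then run the bus-independent
 * isp_reset()/isp_init()/isp_attach() sequence.
 */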
static void
isp_sbus_attach(struct device *parent, struct device *self, void *aux)
{
	int freq, ispburst, sbusburst;
	struct sbus_attach_args *sa = aux;
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self;
	struct ispsoftc *isp = &sbc->sbus_isp;

	printf(" for %s\n", sa->sa_name);

	sbc->sbus_bustag = sa->sa_bustag;
	if (sa->sa_nintr != 0)
		sbc->sbus_pri = sa->sa_pri;
	sbc->sbus_mdvec = mdvec;

	if (sa->sa_npromvaddrs != 0) {
		sbc->sbus_reg = (bus_space_handle_t)sa->sa_promvaddrs[0];
	} else {
		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
				 sa->sa_size, BUS_SPACE_MAP_LINEAR, 0,
				 &sbc->sbus_reg) != 0) {
			printf("%s: cannot map registers\n", self->dv_xname);
			return;
		}
	}
	sbc->sbus_node = sa->sa_node;

	freq = PROM_getpropint(sa->sa_node, "clock-frequency", 0);
	if (freq) {
		/*
		 * Convert from Hz to MHz, rounding to the nearest MHz.
		 */
		freq = (freq + 500000)/1000000;
#if	0
		printf("%s: %d MHz\n", self->dv_xname, freq);
#endif
	}
	sbc->sbus_mdvec.dv_clock = freq;

	/*
	 * Now figure out the proper burst sizes, etc., to use.
	 * Unfortunately, there is no ddi_dma_burstsizes here which
	 * walks up the tree finding the limiting burst size node (if
	 * any).
	 */
	sbusburst = ((struct sbus_softc *)parent)->sc_burst;
	if (sbusburst == 0)
		sbusburst = SBUS_BURST_32 - 1;
	ispburst = PROM_getpropint(sa->sa_node, "burst-sizes", -1);
	if (ispburst == -1) {
		ispburst = sbusburst;
	}
	ispburst &= sbusburst;
	ispburst &= ~(1 << 7);
	ispburst &= ~(1 << 6);
	sbc->sbus_mdvec.dv_conf1 = 0;
	if (ispburst & (1 << 5)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32;
	} else if (ispburst & (1 << 4)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16;
	} else if (ispburst & (1 << 3)) {
		sbc->sbus_mdvec.dv_conf1 =
		    BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8;
	}
	if (sbc->sbus_mdvec.dv_conf1) {
		sbc->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE;
	}

	/*
	 * Some early versions of the PTI SBus adapter would fail when
	 * we tried to download firmware to them (via poking), so give
	 * up on downloading firmware to those boards.
	 */
	if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0) {
		sbc->sbus_mdvec.dv_ispfw = NULL;
	}

	isp->isp_mdvec = &sbc->sbus_mdvec;
	isp->isp_bustype = ISP_BT_SBUS;
	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
	isp->isp_param = &sbc->sbus_dev;
	isp->isp_dmatag = sa->sa_dmatag;
	MEMZERO(isp->isp_param, sizeof (sdparam));

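	/*
	 * Per-block register offsets for the SBus flavor of the chip;
	 * the register accessors below use this table to translate the
	 * core code's virtual register offsets.
	 */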
	sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
	sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
	sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
	sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	/* Establish interrupt channel */
	bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO, 0,
	    isp_sbus_intr, sbc);
	sbus_establish(&sbc->sbus_sd, &sbc->sbus_isp.isp_osinfo._dev);

	/*
	 * Set up logging levels.
	 */
#ifdef	ISP_LOGDEFAULT
	isp->isp_dblev = ISP_LOGDEFAULT;
#else
	isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
#ifdef	SCSIDEBUG
	isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#ifdef	DEBUG
	isp->isp_dblev |= ISP_LOGDEBUG0;
#endif
#endif

	isp->isp_confopts = self->dv_cfdata->cf_flags;
	isp->isp_role = ISP_DEFAULT_ROLES;

	/*
	 * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
	 */
	isp->isp_confopts |= ISP_CFG_NONVRAM;
	ISP_LOCK(isp);
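	/*
	 * Poll for mailbox command completion (no mailbox interrupts)
	 * while we attach.
	 */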
	isp->isp_osinfo.no_mbox_ints = 1;
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		return;
	}
	ENABLE_INTS(isp);
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		return;
	}

	/*
	 * do generic attach.
	 */
	ISP_UNLOCK(isp);
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		ISP_LOCK(isp);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
	}
}

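/*
 * Interrupt handler: claim the interrupt only if the chip actually has
 * something pending, and note that we are running on the interrupt
 * stack while isp_intr() does its work.
 */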
static int
isp_sbus_intr(void *arg)
{
	u_int16_t isr, sema, mbox;
	struct ispsoftc *isp = arg;

	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
		return (0);
	} else {
		struct isp_sbussoftc *sbc = arg;
		sbc->sbus_isp.isp_osinfo.onintstack = 1;
		isp_intr(isp, isr, sema, mbox);
		sbc->sbus_isp.isp_osinfo.onintstack = 0;
		return (1);
	}
}

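/*
 * Translate one of the core code's "virtual" register offsets into an
 * offset into our bus_space mapping, using the per-block offset table
 * filled in at attach time.
 */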
#define	IspVirt2Off(a, x)	\
	(((struct isp_sbussoftc *)a)->sbus_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(sbc, off)		\
	bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, off)

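/*
 * Read the interrupt status and semaphore registers.  If nothing is
 * pending, return 0; otherwise hand back the ISR and semaphore values
 * and, when the semaphore is set, outgoing mailbox 0 as well.
 */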
static int
isp_sbus_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	u_int16_t isr, sema;

	isr = BXR2(sbc, IspVirt2Off(isp, BIU_ISR));
	sema = BXR2(sbc, IspVirt2Off(isp, BIU_SEMA));
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		*mbp = BXR2(sbc, IspVirt2Off(isp, OUTMAILBOX0));
	}
	return (1);
}

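/*
 * 16-bit register accessors: translate the virtual register offset and
 * perform the bus_space access.
 */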
static u_int16_t
isp_sbus_rd_reg(struct ispsoftc *isp, int regoff)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
}

static void
isp_sbus_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
}

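/*
 * Allocate the per-command DMA maps plus DMA-able memory for the
 * request and response queues, and record the queues' bus addresses.
 */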
static int
isp_sbus_mbxdma(struct ispsoftc *isp)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dma_segment_t reqseg, rspseg;
	int reqrs, rsprs, i, progress;
	size_t n;
	bus_size_t len;

	if (isp->isp_rquest_dma)
		return (0);

	n = isp->isp_maxcmds * sizeof (XS_T *);
	isp->isp_xflist = (XS_T **) malloc(n, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	MEMZERO(isp->isp_xflist, n);
	n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
	if (sbc->sbus_dmamap == NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
		isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
		return (1);
	}
	for (i = 0; i < isp->isp_maxcmds; i++) {
		/* Allocate a DMA handle */
		if (bus_dmamap_create(isp->isp_dmatag, MAXPHYS, 1, MAXPHYS, 0,
		    BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
			isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
			break;
		}
	}
	if (i < isp->isp_maxcmds) {
		while (--i >= 0) {
			bus_dmamap_destroy(isp->isp_dmatag,
			    sbc->sbus_dmamap[i]);
		}
		free(isp->isp_xflist, M_DEVBUF);
		free(sbc->sbus_dmamap, M_DEVBUF);
		isp->isp_xflist = NULL;
		sbc->sbus_dmamap = NULL;
		return (1);
	}

	/*
	 * Allocate and map the request and response queues
	 */
	progress = 0;
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(isp->isp_dmatag, len, 0, 0, &reqseg, 1, &reqrs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamem_map(isp->isp_dmatag, &reqseg, reqrs, len,
	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_create(isp->isp_dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &isp->isp_rqdmap) != 0) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_load(isp->isp_dmatag, isp->isp_rqdmap,
	    isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT) != 0) {
		goto dmafail;
	}
	progress++;
	isp->isp_rquest_dma = isp->isp_rqdmap->dm_segs[0].ds_addr;

	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(isp->isp_dmatag, len, 0, 0, &rspseg, 1, &rsprs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamem_map(isp->isp_dmatag, &rspseg, rsprs, len,
	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_create(isp->isp_dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &isp->isp_rsdmap) != 0) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_load(isp->isp_dmatag, isp->isp_rsdmap,
	    isp->isp_result, len, NULL, BUS_DMA_NOWAIT) != 0) {
		goto dmafail;
	}
	isp->isp_result_dma = isp->isp_rsdmap->dm_segs[0].ds_addr;

	return (0);

dmafail:
	isp_prt(isp, ISP_LOGERR, "Mailbox DMA Setup Failure");

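	/*
	 * Tear down whatever got set up, in the reverse order of the
	 * allocations above; "progress" records how far we got.
	 */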
	if (progress >= 8) {
		bus_dmamap_unload(isp->isp_dmatag, isp->isp_rsdmap);
	}
	if (progress >= 7) {
		bus_dmamap_destroy(isp->isp_dmatag, isp->isp_rsdmap);
	}
	if (progress >= 6) {
		bus_dmamem_unmap(isp->isp_dmatag,
		    isp->isp_result, ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)));
	}
	if (progress >= 5) {
		bus_dmamem_free(isp->isp_dmatag, &rspseg, rsprs);
	}

	if (progress >= 4) {
		bus_dmamap_unload(isp->isp_dmatag, isp->isp_rqdmap);
	}
	if (progress >= 3) {
		bus_dmamap_destroy(isp->isp_dmatag, isp->isp_rqdmap);
	}
	if (progress >= 2) {
		bus_dmamem_unmap(isp->isp_dmatag,
		    isp->isp_rquest, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)));
	}
	if (progress >= 1) {
		bus_dmamem_free(isp->isp_dmatag, &reqseg, reqrs);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		bus_dmamap_destroy(isp->isp_dmatag, sbc->sbus_dmamap[i]);
	}
	free(sbc->sbus_dmamap, M_DEVBUF);
	free(isp->isp_xflist, M_DEVBUF);
	isp->isp_xflist = NULL;
	sbc->sbus_dmamap = NULL;
	return (1);
}

/*
 * Map a DMA request.
 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
 */

static int
isp_sbus_dmasetup(struct ispsoftc *isp, XS_T *xs, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;
	ispreq_t *qep;
	int cansleep = (xs->xs_control & XS_CTL_NOSLEEP) == 0;
	int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
	if (dmap->dm_nsegs != 0) {
		panic("%s: dma map already allocated\n", isp->isp_name);
		/* NOTREACHED */
	}
	if (bus_dmamap_load(isp->isp_dmatag, dmap, xs->data, xs->datalen,
	    NULL, (cansleep ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) |
	    BUS_DMA_STREAMING) != 0) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	bus_dmamap_sync(isp->isp_dmatag, dmap, 0, xs->datalen,
	    in? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	if (in) {
		rq->req_flags |= REQFLAG_DATA_IN;
	} else {
		rq->req_flags |= REQFLAG_DATA_OUT;
	}

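	/*
	 * CDBs longer than 12 bytes are sent as extended requests, so
	 * the single data segment is described in a continuation entry
	 * of its own rather than in the request itself.
	 */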
	if (XS_CDBLEN(xs) > 12) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		onxti = *nxtip;
		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, onxti);
		*nxtip = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (*nxtip == optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			bus_dmamap_unload(isp->isp_dmatag, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_EAGAIN);
		}
		rq->req_seg_count = 2;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		crq->req_dataseg[0].ds_count = xs->datalen;
		crq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	} else {
		rq->req_seg_count = 1;
		rq->req_dataseg[0].ds_count = xs->datalen;
		rq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
	}

mbxsync:
	if (XS_CDBLEN(xs) > 12) {
		isp_put_extended_request(isp,
		    (ispextreq_t *)rq, (ispextreq_t *) qep);
	} else {
		isp_put_request(isp, rq, qep);
	}
	return (CMD_QUEUED);
}

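/*
 * Post-command cleanup: sync and unload the per-command DMA map.
 */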
static void
isp_sbus_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;

	dmap = sbc->sbus_dmamap[isp_handle_index(handle)];

	if (dmap->dm_nsegs == 0) {
		panic("%s: dma map not already allocated\n", isp->isp_name);
		/* NOTREACHED */
	}
	bus_dmamap_sync(isp->isp_dmatag, dmap, 0,
	    xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(isp->isp_dmatag, dmap);
}
598