1 /*	$NetBSD: dpt.c,v 1.43 2004/04/22 00:17:11 itojun Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
9  * Aerospace Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
42  * Copyright (c) 2000 Adaptec Corporation
43  * All rights reserved.
44  *
45  * TERMS AND CONDITIONS OF USE
46  *
47  * Redistribution and use in source form, with or without modification, are
48  * permitted provided that redistributions of source code must retain the
49  * above copyright notice, this list of conditions and the following disclaimer.
50  *
51  * This software is provided `as is' by Adaptec and any express or implied
52  * warranties, including, but not limited to, the implied warranties of
53  * merchantability and fitness for a particular purpose, are disclaimed. In no
54  * event shall Adaptec be liable for any direct, indirect, incidental, special,
55  * exemplary or consequential damages (including, but not limited to,
56  * procurement of substitute goods or services; loss of use, data, or profits;
57  * or business interruptions) however caused and on any theory of liability,
58  * whether in contract, strict liability, or tort (including negligence or
59  * otherwise) arising in any way out of the use of this driver software, even
60  * if advised of the possibility of such damage.
61  */
62 
63 /*
64  * Portions of this code fall under the following copyright:
65  *
66  * Originally written by Julian Elischer (julian@tfs.com)
67  * for TRW Financial Systems for use under the MACH(2.5) operating system.
68  *
69  * TRW Financial Systems, in accordance with their agreement with Carnegie
70  * Mellon University, makes this software available to CMU to distribute
71  * or use in any manner that they see fit as long as this message is kept with
72  * the software. For this reason TFS also grants any other persons or
73  * organisations permission to use or modify this software.
74  *
75  * TFS supplies this software to be publicly redistributed
76  * on the understanding that TFS is not responsible for the correct
77  * functioning of this software in any circumstances.
78  */
79 
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.43 2004/04/22 00:17:11 itojun Exp $");
82 
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/device.h>
86 #include <sys/queue.h>
87 #include <sys/buf.h>
88 #include <sys/endian.h>
89 #include <sys/conf.h>
90 
91 #include <uvm/uvm_extern.h>
92 
93 #include <machine/bus.h>
94 #ifdef i386
95 #include <machine/pio.h>
96 #endif
97 
98 #include <dev/scsipi/scsi_all.h>
99 #include <dev/scsipi/scsipi_all.h>
100 #include <dev/scsipi/scsiconf.h>
101 
102 #include <dev/ic/dptreg.h>
103 #include <dev/ic/dptvar.h>
104 
105 #include <dev/i2o/dptivar.h>
106 
107 #ifdef DEBUG
108 #define	DPRINTF(x)		printf x
109 #else
110 #define	DPRINTF(x)
111 #endif
112 
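/*
 * 8-bit register accessors layered over bus_space(9); for example,
 * dpt_outb(sc, HA_COMMAND, CP_RESET) issues a controller reset and
 * dpt_inb(sc, HA_STATUS) reads the status register back.
 */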
113 #define dpt_inb(x, o)		\
114     bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
115 #define dpt_outb(x, o, d)	\
116     bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))
117 
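/*
 * Model number prefixes and their marketing names, scanned in pairs by
 * dpt_init(); the NULL entry terminates the scan and maps unrecognized
 * boards to "<unknown>".
 */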
118 static const char * const dpt_cname[] = {
119 	"3334", "SmartRAID IV",
120 	"3332", "SmartRAID IV",
121 	"2144", "SmartCache IV",
122 	"2044", "SmartCache IV",
123 	"2142", "SmartCache IV",
124 	"2042", "SmartCache IV",
125 	"2041", "SmartCache IV",
126 	"3224", "SmartRAID III",
127 	"3222", "SmartRAID III",
128 	"3021", "SmartRAID III",
129 	"2124", "SmartCache III",
130 	"2024", "SmartCache III",
131 	"2122", "SmartCache III",
132 	"2022", "SmartCache III",
133 	"2021", "SmartCache III",
134 	"2012", "SmartCache Plus",
135 	"2011", "SmartCache Plus",
136 	NULL,   "<unknown>",
137 };
138 
139 static void	*dpt_sdh;
140 
141 dev_type_open(dptopen);
142 dev_type_ioctl(dptioctl);
143 
144 const struct cdevsw dpt_cdevsw = {
145 	dptopen, nullclose, noread, nowrite, dptioctl,
146 	nostop, notty, nopoll, nommap, nokqfilter,
147 };
148 
149 extern struct cfdriver dpt_cd;
150 
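/*
 * Driver signature, handed out to DPT management tools via the
 * DPT_SIGNATURE ioctl.  dsDescription is filled in by dpt_init().
 */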
151 static struct dpt_sig dpt_sig = {
152 	{ 'd', 'P', 't', 'S', 'i', 'G'},
153 	SIG_VERSION,
154 #if defined(i386)
155 	PROC_INTEL,
156 #elif defined(powerpc)
157 	PROC_POWERPC,
158 #elif defined(alpha)
159 	PROC_ALPHA,
160 #elif defined(__mips__)
161 	PROC_MIPS,
162 #elif defined(sparc64)
163 	PROC_ULTRASPARC,
164 #else
165 	0xff,
166 #endif
167 #if defined(i386)
168 	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
169 #else
170 	0,
171 #endif
172 	FT_HBADRVR,
173 	0,
174 	OEM_DPT,
175 	OS_FREE_BSD,	/* XXX */
176 	CAP_ABOVE16MB,
177 	DEV_ALL,
178 	ADF_ALL_EATA,
179 	0,
180 	0,
181 	DPT_VERSION,
182 	DPT_REVISION,
183 	DPT_SUBREVISION,
184 	DPT_MONTH,
185 	DPT_DAY,
186 	DPT_YEAR,
187 	""		/* Will be filled later */
188 };
189 
190 static void	dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
191 static void	dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
192 static int	dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
193 static int	dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
194 static void	dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
195 static int	dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
196 static void	dpt_ctlrinfo(struct dpt_softc *, struct dpt_eata_ctlrinfo *);
197 static void	dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
198 static void	dpt_minphys(struct buf *);
199 static int	dpt_passthrough(struct dpt_softc *, struct eata_ucp *,
200 				struct proc *);
201 static void	dpt_scsipi_request(struct scsipi_channel *,
202 				   scsipi_adapter_req_t, void *);
203 static void	dpt_shutdown(void *);
204 static void	dpt_sysinfo(struct dpt_softc *, struct dpt_sysinfo *);
205 static int	dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);
206 
207 static __inline__ struct dpt_ccb	*dpt_ccb_alloc(struct dpt_softc *);
208 static __inline__ void	dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);
209 
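/*
 * CCB allocation.  The free list should never be empty here: dpt_init()
 * advertises adapt_openings = sc_nccbs - 1, so the scsipi layer throttles
 * requests before the last CCB can be claimed.
 */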
210 static __inline__ struct dpt_ccb *
211 dpt_ccb_alloc(struct dpt_softc *sc)
212 {
213 	struct dpt_ccb *ccb;
214 	int s;
215 
216 	s = splbio();
217 	ccb = SLIST_FIRST(&sc->sc_ccb_free);
218 	SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
219 	splx(s);
220 
221 	return (ccb);
222 }
223 
224 static __inline__ void
225 dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
226 {
227 	int s;
228 
229 	ccb->ccb_flg = 0;
230 	ccb->ccb_savesp = NULL;
231 	s = splbio();
232 	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
233 	splx(s);
234 }
235 
236 /*
237  * Handle an interrupt from the HBA.
238  */
239 int
240 dpt_intr(void *cookie)
241 {
242 	struct dpt_softc *sc;
243 	struct dpt_ccb *ccb;
244 	struct eata_sp *sp;
245 	volatile int junk;
246 	int forus;
247 
248 	sc = cookie;
249 	sp = sc->sc_stp;
250 	forus = 0;
251 
252 	for (;;) {
253 		/*
254 		 * HBA might have interrupted while we were dealing with the
255 		 * last completed command, since we ACK before we deal; keep
256 		 * polling.
257 		 */
258 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
259 			break;
260 		forus = 1;
261 
262 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
263 		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);
264 
265 		/* Might have looped before HBA can reset HA_AUX_INTR. */
266 		if (sp->sp_ccbid == -1) {
267 			DELAY(50);
268 
269 			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
270 				return (0);
271 
272 			printf("%s: no status\n", sc->sc_dv.dv_xname);
273 
274 			/* Re-sync DMA map */
275 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
276 			    sc->sc_stpoff, sizeof(struct eata_sp),
277 			    BUS_DMASYNC_POSTREAD);
278 		}
279 
280 		/* Make sure CCB ID from status packet is realistic. */
281 		if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
282 			printf("%s: bogus status (returned CCB id %d)\n",
283 			    sc->sc_dv.dv_xname, sp->sp_ccbid);
284 
285 			/* Ack the interrupt */
286 			sp->sp_ccbid = -1;
287 			junk = dpt_inb(sc, HA_STATUS);
288 			continue;
289 		}
290 
291 		/* Sync up DMA map and cache cmd status. */
292 		ccb = sc->sc_ccbs + sp->sp_ccbid;
293 
294 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
295 		    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);
296 
297 		ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
298 		ccb->ccb_scsi_status = sp->sp_scsi_status;
299 		if (ccb->ccb_savesp != NULL)
300 			memcpy(ccb->ccb_savesp, sp, sizeof(*sp));
301 
302 		/*
303 		 * Ack the interrupt and process the CCB.  If this
304 		 * is a private CCB it's up to dpt_ccb_poll() to
305 		 * notice.
306 		 */
307 		sp->sp_ccbid = -1;
308 		ccb->ccb_flg |= CCB_INTR;
309 		junk = dpt_inb(sc, HA_STATUS);
310 		if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
311 			dpt_ccb_done(sc, ccb);
312 		else if ((ccb->ccb_flg & CCB_WAIT) != 0)
313 			wakeup(ccb);
314 	}
315 
316 	return (forus);
317 }
318 
319 /*
320  * Initialize and attach the HBA.  This is the entry point from bus
321  * specific probe-and-attach code.
322  */
323 void
324 dpt_init(struct dpt_softc *sc, const char *intrstr)
325 {
326 	struct scsipi_adapter *adapt;
327 	struct scsipi_channel *chan;
328 	struct eata_inquiry_data *ei;
329 	int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
330 	bus_dma_segment_t seg;
331 	struct eata_cfg *ec;
332 	struct dpt_ccb *ccb;
333 	char model[16];
334 
335 	ec = &sc->sc_ec;
336 	snprintf(dpt_sig.dsDescription, sizeof(dpt_sig.dsDescription),
337 	    "NetBSD %s DPT driver", osrelease);
338 
339 	/*
340 	 * Allocate the CCB/status packet/scratch DMA map and load.
341 	 */
342 	sc->sc_nccbs =
343 	    min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
344 	sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
345 	sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
346 	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
347 	    DPT_SCRATCH_SIZE + sizeof(struct eata_sp);
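	/*
	 * The single DMA area is laid out as sc_nccbs CCBs, then the
	 * status packet, then the scratch area; sc_stpoff and sc_scroff
	 * are the byte offsets of the latter two within sc_dmamap.
	 */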
348 
349 	if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
350 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
351 		aprint_error("%s: unable to allocate CCBs, rv = %d\n",
352 		    sc->sc_dv.dv_xname, rv);
353 		return;
354 	}
355 
356 	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
357 	    (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
358 		aprint_error("%s: unable to map CCBs, rv = %d\n",
359 		    sc->sc_dv.dv_xname, rv);
360 		return;
361 	}
362 
363 	if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
364 	    mapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
365 		aprint_error("%s: unable to create CCB DMA map, rv = %d\n",
366 		    sc->sc_dv.dv_xname, rv);
367 		return;
368 	}
369 
370 	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
371 	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
372 		aprint_error("%s: unable to load CCB DMA map, rv = %d\n",
373 		    sc->sc_dv.dv_xname, rv);
374 		return;
375 	}
376 
377 	sc->sc_stp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_stpoff);
378 	sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
379 	sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
380 	sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
381 	sc->sc_stp->sp_ccbid = -1;
382 
383 	/*
384 	 * Create the CCBs.
385 	 */
386 	SLIST_INIT(&sc->sc_ccb_free);
387 	memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);
388 
389 	for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
390 		rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
391 		    DPT_SG_SIZE, DPT_MAX_XFER, 0,
392 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
393 		    &ccb->ccb_dmamap_xfer);
394 		if (rv) {
395 			aprint_error("%s: can't create ccb dmamap (%d)\n",
396 			    sc->sc_dv.dv_xname, rv);
397 			break;
398 		}
399 
400 		ccb->ccb_id = i;
401 		ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
402 		    CCB_OFF(sc, ccb);
403 		SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
404 	}
405 
406 	if (i == 0) {
407 		aprint_error("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
408 		return;
409 	} else if (i != sc->sc_nccbs) {
410 		aprint_error("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname,
411 		    i, sc->sc_nccbs);
412 		sc->sc_nccbs = i;
413 	}
414 
415 	/* Set shutdownhook before we start any device activity. */
416 	if (dpt_sdh == NULL)
417 		dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);
418 
419 	/* Get the inquiry data from the HBA. */
420 	dpt_hba_inquire(sc, &ei);
421 
422 	/*
423 	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
424 	 * dpt0: interrupting at irq 10
425 	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
426 	 */
427 	for (i = 0; i < 8 && ei->ei_vendor[i] != ' '; i++)
428 		;
429 	ei->ei_vendor[i] = '\0';
430 
431 	for (i = 0; i < 7 && ei->ei_model[i] != ' '; i++)
432 		model[i] = ei->ei_model[i];
433 	for (j = 0; j < 7 && ei->ei_suffix[j] != ' '; i++, j++)
434 		model[i] = ei->ei_suffix[j];
435 	model[i] = '\0';
436 
437 	/* Find the marketing name for the board. */
438 	for (i = 0; dpt_cname[i] != NULL; i += 2)
439 		if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
440 			break;
441 
442 	aprint_normal("%s %s (%s)\n", ei->ei_vendor, dpt_cname[i + 1], model);
443 
444 	if (intrstr != NULL)
445 		aprint_normal("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
446 		    intrstr);
447 
448 	maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
449 	    EC_F3_MAX_CHANNEL_SHIFT;
450 	maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
451 	    EC_F3_MAX_TARGET_SHIFT;
452 
453 	aprint_normal("%s: %d queued commands, %d channel(s), adapter on ID(s)",
454 	    sc->sc_dv.dv_xname, sc->sc_nccbs, maxchannel + 1);
455 
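	/*
	 * The EATA config block appears to store the adapter's SCSI IDs
	 * in reverse channel order, hence the ec_hba[3 - i] indexing.
	 */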
456 	for (i = 0; i <= maxchannel; i++) {
457 		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
458 		aprint_normal(" %d", sc->sc_hbaid[i]);
459 	}
460 	aprint_normal("\n");
461 
462 	/*
463 	 * Reset the SCSI controller chip(s) and bus.  XXX Do we need to do
464 	 * this for each bus?
465 	 */
466 	if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
467 		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
468 
469 	/* Fill in the scsipi_adapter. */
470 	adapt = &sc->sc_adapt;
471 	memset(adapt, 0, sizeof(*adapt));
472 	adapt->adapt_dev = &sc->sc_dv;
473 	adapt->adapt_nchannels = maxchannel + 1;
474 	adapt->adapt_openings = sc->sc_nccbs - 1;
475 	adapt->adapt_max_periph = sc->sc_nccbs - 1;
476 	adapt->adapt_request = dpt_scsipi_request;
477 	adapt->adapt_minphys = dpt_minphys;
478 
479 	for (i = 0; i <= maxchannel; i++) {
480 		/* Fill in the scsipi_channel. */
481 		chan = &sc->sc_chans[i];
482 		memset(chan, 0, sizeof(*chan));
483 		chan->chan_adapter = adapt;
484 		chan->chan_bustype = &scsi_bustype;
485 		chan->chan_channel = i;
486 		chan->chan_ntargets = maxtarget + 1;
487 		chan->chan_nluns = ec->ec_maxlun + 1;
488 		chan->chan_id = sc->sc_hbaid[i];
489 		config_found(&sc->sc_dv, chan, scsiprint);
490 	}
491 }
492 
493 /*
494  * Read the EATA configuration from the HBA and perform some sanity checks.
495  */
496 int
497 dpt_readcfg(struct dpt_softc *sc)
498 {
499 	struct eata_cfg *ec;
500 	int i, j, stat;
501 	u_int16_t *p;
502 
503 	ec = &sc->sc_ec;
504 
505 	/* Older firmware may puke if we talk to it too soon after reset. */
506 	dpt_outb(sc, HA_COMMAND, CP_RESET);
507 	DELAY(750000);
508 
509 	for (i = 1000; i; i--) {
510 		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
511 			break;
512 		DELAY(2000);
513 	}
514 
515 	if (i == 0) {
516 		printf("%s: HBA not ready after reset (hba status:%02x)\n",
517 		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
518 		return (-1);
519 	}
520 
521 	while ((((stat = dpt_inb(sc, HA_STATUS))
522 	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
523 	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
524 	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
525 	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
526 		/* RAID drives still spinning up? */
527 		if (dpt_inb(sc, HA_ERROR) != 'D' ||
528 		   dpt_inb(sc, HA_ERROR + 1) != 'P' ||
529 		   dpt_inb(sc, HA_ERROR + 2) != 'T') {
530 			printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
531 			return (-1);
532 		}
533 	}
534 
535 	/*
536 	 * Issue the read-config command and wait for the data to appear.
537 	 *
538 	 * Apparently certain firmware revisions won't DMA later on if we
539 	 * request the config data using PIO, but we do it anyway since it
540 	 * is a lot easier (no DMA setup is required).
541 	 */
542 	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
543 	memset(ec, 0, sizeof(*ec));
544 	i = (offsetof(struct eata_cfg, ec_cfglen) +
545 	    sizeof(ec->ec_cfglen)) >> 1;
546 	p = (u_int16_t *)ec;
547 
548 	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
549 		printf("%s: cfg data didn't appear (hba status:%02x)\n",
550 		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
551 		return (-1);
552 	}
553 
554 	/* Begin reading. */
555 	while (i--)
556 		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
557 
558 	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
559 	    - offsetof(struct eata_cfg, ec_cfglen)
560 	    - sizeof(ec->ec_cfglen)))
561 		i = sizeof(struct eata_cfg)
562 		  - offsetof(struct eata_cfg, ec_cfglen)
563 		  - sizeof(ec->ec_cfglen);
564 
565 	j = i + offsetof(struct eata_cfg, ec_cfglen) +
566 	    sizeof(ec->ec_cfglen);
567 	i >>= 1;
568 
569 	while (i--)
570 		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
571 
572 	/* Flush until we have read 512 bytes. */
573 	i = (512 - j + 1) >> 1;
574 	while (i--)
575 		bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
576 
577 	/* Defaults for older firmware... */
578 	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
579 		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;
580 
581 	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
582 		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
583 		return (-1);
584 	}
585 
586 	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
587 		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
588 		return (-1);
589 	}
590 
591 	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
592 		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
593 		return (-1);
594 	}
595 
596 	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
597 		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
598 		return (-1);
599 	}
600 
601 	return (0);
602 }
603 
604 /*
605  * Our `shutdownhook' to cleanly shut down the HBA.  The HBA must flush all
606  * data from it's cache and mark array groups as clean.
607  *
608  * XXX This doesn't always work (i.e., the HBA may still be flushing after
609  * we tell root that it's safe to power off).
610  */
611 static void
612 dpt_shutdown(void *cookie)
613 {
614 	extern struct cfdriver dpt_cd;
615 	struct dpt_softc *sc;
616 	int i;
617 
618 	printf("shutting down dpt devices...");
619 
620 	for (i = 0; i < dpt_cd.cd_ndevs; i++) {
621 		if ((sc = device_lookup(&dpt_cd, i)) == NULL)
622 			continue;
623 		dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
624 	}
625 
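	/* Allow roughly ten seconds for the HBAs to flush their caches. */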
626 	delay(10000*1000);
627 	printf(" done\n");
628 }
629 
630 /*
631  * Send an EATA command to the HBA.
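 *
 * The CCB's physical address is loaded into the HA_DMA_BASE registers;
 * immediate commands pass 0 there and carry a sub-code in HA_ICMD.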
632  */
633 static int
634 dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
635 {
636 	u_int32_t pa;
637 	int i, s;
638 
639 	s = splbio();
640 
641 	for (i = 20000; i != 0; i--) {
642 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
643 			break;
644 		DELAY(50);
645 	}
646 	if (i == 0) {
647 		splx(s);
648 		return (-1);
649 	}
650 
651 	pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
652 	dpt_outb(sc, HA_DMA_BASE + 0, (pa      ) & 0xff);
653 	dpt_outb(sc, HA_DMA_BASE + 1, (pa >>  8) & 0xff);
654 	dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
655 	dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);
656 
657 	if (eatacmd == CP_IMMEDIATE)
658 		dpt_outb(sc, HA_ICMD, icmd);
659 
660 	dpt_outb(sc, HA_COMMAND, eatacmd);
661 
662 	splx(s);
663 	return (0);
664 }
665 
666 /*
667  * Wait for the HBA status register to reach a specific state.
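 * For example, dpt_wait(sc, HA_ST_BUSY, 0, 2000) polls for up to 2000ms
 * waiting for the busy bit to clear.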
668  */
669 static int
670 dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
671 {
672 
673 	for (ms *= 10; ms != 0; ms--) {
674 		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
675 			return (0);
676 		DELAY(100);
677 	}
678 
679 	return (-1);
680 }
681 
682 /*
683  * Spin waiting for a command to finish.  The timeout value from the CCB is
684  * used.  The CCB must be marked with CCB_PRIVATE, otherwise it will get
685  * recycled before we get a look at it.
686  */
687 static int
688 dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
689 {
690 	int i, s;
691 
692 #ifdef DEBUG
693 	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
694 		panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
695 #endif
696 
697 	s = splbio();
698 
699 	if ((ccb->ccb_flg & CCB_INTR) != 0) {
700 		splx(s);
701 		return (0);
702 	}
703 
704 	for (i = ccb->ccb_timeout * 20; i != 0; i--) {
705 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
706 			dpt_intr(sc);
707 		if ((ccb->ccb_flg & CCB_INTR) != 0)
708 			break;
709 		DELAY(50);
710 	}
711 
712 	splx(s);
713 	return (i == 0);
714 }
715 
716 /*
717  * We have a command which has been processed by the HBA, so now we look to
718  * see how the operation went.  CCBs marked CCB_PRIVATE are not passed here
719  * by dpt_intr().
720  */
721 static void
722 dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
723 {
724 	struct scsipi_xfer *xs;
725 
726 	xs = ccb->ccb_xs;
727 
728 	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));
729 
730 	/*
731 	 * If we were a data transfer, unload the map that described the
732 	 * data buffer.
733 	 */
734 	if (xs->datalen != 0)
735 		dpt_ccb_unmap(sc, ccb);
736 
737 	if (xs->error == XS_NOERROR) {
738 		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
739 			switch (ccb->ccb_hba_status) {
740 			case SP_HBA_ERROR_SEL_TO:
741 				xs->error = XS_SELTIMEOUT;
742 				break;
743 			case SP_HBA_ERROR_RESET:
744 				xs->error = XS_RESET;
745 				break;
746 			default:
747 				printf("%s: HBA status %x\n",
748 				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
749 				xs->error = XS_DRIVER_STUFFUP;
750 				break;
751 			}
752 		} else if (ccb->ccb_scsi_status != SCSI_OK) {
753 			switch (ccb->ccb_scsi_status) {
754 			case SCSI_CHECK:
755 				memcpy(&xs->sense.scsi_sense, &ccb->ccb_sense,
756 				    sizeof(xs->sense.scsi_sense));
757 				xs->error = XS_SENSE;
758 				break;
759 			case SCSI_BUSY:
760 			case SCSI_QUEUE_FULL:
761 				xs->error = XS_BUSY;
762 				break;
763 			default:
764 				scsipi_printaddr(xs->xs_periph);
765 				printf("SCSI status %x\n",
766 				    ccb->ccb_scsi_status);
767 				xs->error = XS_DRIVER_STUFFUP;
768 				break;
769 			}
770 		} else
771 			xs->resid = 0;
772 
773 		xs->status = ccb->ccb_scsi_status;
774 	}
775 
776 	/* Free up the CCB and mark the command as done. */
777 	dpt_ccb_free(sc, ccb);
778 	scsipi_done(xs);
779 }
780 
781 /*
782  * Specified CCB has timed out, abort it.
783  */
784 static void
785 dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
786 {
787 	struct scsipi_periph *periph;
788 	struct scsipi_xfer *xs;
789 	int s;
790 
791 	xs = ccb->ccb_xs;
792 	periph = xs->xs_periph;
793 
794 	scsipi_printaddr(periph);
795 	printf("timed out (status:%02x aux status:%02x)",
796 	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));
797 
798 	s = splbio();
799 
800 	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
801 		/* Abort timed out, reset the HBA */
802 		printf(" AGAIN, resetting HBA\n");
803 		dpt_outb(sc, HA_COMMAND, CP_RESET);
804 		DELAY(750000);
805 	} else {
806 		/* Abort the operation that has timed out */
807 		printf("\n");
808 		xs->error = XS_TIMEOUT;
809 		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
810 		ccb->ccb_flg |= CCB_ABORT;
811 		/* Start the abort */
812 		if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
813 			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
814 	}
815 
816 	splx(s);
817 }
818 
819 /*
820  * Map a data transfer.
821  */
822 static int
823 dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
824 {
825 	struct scsipi_xfer *xs;
826 	bus_dmamap_t xfer;
827 	bus_dma_segment_t *ds;
828 	struct eata_sg *sg;
829 	struct eata_cp *cp;
830 	int rv, i;
831 
832 	xs = ccb->ccb_xs;
833 	xfer = ccb->ccb_dmamap_xfer;
834 	cp = &ccb->ccb_eata_cp;
835 
836 	rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
837 	    ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
838 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
839 	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
840 
841 	switch (rv) {
842 	case 0:
843 		break;
844 	case ENOMEM:
845 	case EAGAIN:
846 		xs->error = XS_RESOURCE_SHORTAGE;
847 		break;
848 	default:
849 		xs->error = XS_DRIVER_STUFFUP;
850 		printf("%s: error %d loading map\n", sc->sc_dv.dv_xname, rv);
851 		break;
852 	}
853 
854 	if (xs->error != XS_NOERROR) {
855 		dpt_ccb_free(sc, ccb);
856 		scsipi_done(xs);
857 		return (-1);
858 	}
859 
860 	bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
861 	    (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
862 	    BUS_DMASYNC_PREWRITE);
863 
864 	/* Don't bother using scatter/gather for just 1 seg */
865 	if (xfer->dm_nsegs == 1) {
866 		cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
867 		cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
868 	} else {
869 		/*
870 		 * Load the hardware scatter/gather map with
871 		 * the contents of the DMA map.
872 		 */
873 		sg = ccb->ccb_sg;
874 		ds = xfer->dm_segs;
875 		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
876  			sg->sg_addr = htobe32(ds->ds_addr);
877  			sg->sg_len =  htobe32(ds->ds_len);
878  		}
879 	 	cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
880 		    sc->sc_dmamap->dm_segs[0].ds_addr +
881 		    offsetof(struct dpt_ccb, ccb_sg));
882 		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
883 		cp->cp_ctl0 |= CP_C0_SCATTER;
884 	}
885 
886 	return (0);
887 }
888 
889 /*
890  * Unmap a transfer.
891  */
892 static void
893 dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
894 {
895 
896 	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
897 	    ccb->ccb_dmamap_xfer->dm_mapsize,
898 	    (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
899 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
900 	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
901 }
902 
903 /*
904  * Adjust the size of each I/O before it passes to the SCSI layer.
905  */
906 static void
907 dpt_minphys(struct buf *bp)
908 {
909 
910 	if (bp->b_bcount > DPT_MAX_XFER)
911 		bp->b_bcount = DPT_MAX_XFER;
912 	minphys(bp);
913 }
914 
915 /*
916  * Start a SCSI command.
917  */
918 static void
919 dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
920 		   void *arg)
921 {
922 	struct dpt_softc *sc;
923 	struct scsipi_xfer *xs;
924 	int flags;
925 	struct scsipi_periph *periph;
926 	struct dpt_ccb *ccb;
927 	struct eata_cp *cp;
928 
929 	sc = (struct dpt_softc *)chan->chan_adapter->adapt_dev;
930 
931 	switch (req) {
932 	case ADAPTER_REQ_RUN_XFER:
933 		xs = arg;
934 		periph = xs->xs_periph;
935 		flags = xs->xs_control;
936 
937 #ifdef DIAGNOSTIC
938 		/* Cmds must be no more than 12 bytes for us. */
939 		if (xs->cmdlen > 12) {
940 			xs->error = XS_DRIVER_STUFFUP;
941 			scsipi_done(xs);
942 			break;
943 		}
944 #endif
945 		/*
946 		 * XXX We can't reset devices just yet.  Apparently some
947 		 * older firmware revisions don't even support it.
948 		 */
949 		if ((flags & XS_CTL_RESET) != 0) {
950 			xs->error = XS_DRIVER_STUFFUP;
951 			scsipi_done(xs);
952 			break;
953 		}
954 
955 		/*
956 		 * Get a CCB and fill it.
957 		 */
958 		ccb = dpt_ccb_alloc(sc);
959 		ccb->ccb_xs = xs;
960 		ccb->ccb_timeout = xs->timeout;
961 
962 		cp = &ccb->ccb_eata_cp;
963 		memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
964 		cp->cp_ccbid = ccb->ccb_id;
965 		cp->cp_senselen = sizeof(ccb->ccb_sense);
966 		cp->cp_stataddr = htobe32(sc->sc_stppa);
967 		cp->cp_ctl0 = CP_C0_AUTO_SENSE;
968 		cp->cp_ctl1 = 0;
969 		cp->cp_ctl2 = 0;
970 		cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
971 		cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
972 		cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
973 		cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;
974 
975 		if ((flags & XS_CTL_DATA_IN) != 0)
976 			cp->cp_ctl0 |= CP_C0_DATA_IN;
977 		if ((flags & XS_CTL_DATA_OUT) != 0)
978 			cp->cp_ctl0 |= CP_C0_DATA_OUT;
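		/*
		 * Commands addressed to the HBA's own ID are interpreted
		 * by the controller itself rather than sent out on the bus.
		 */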
979 		if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
980 			cp->cp_ctl0 |= CP_C0_INTERPRET;
981 
982 		/* Synchronous xfers mustn't write back through the cache. */
983 		if (xs->bp != NULL)
984 			if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
985 				cp->cp_ctl2 |= CP_C2_NO_CACHE;
986 
987 		cp->cp_senseaddr =
988 		    htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
989 		    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
990 
991 		if (xs->datalen != 0) {
992 			if (dpt_ccb_map(sc, ccb))
993 				break;
994 		} else {
995 			cp->cp_dataaddr = 0;
996 			cp->cp_datalen = 0;
997 		}
998 
999 		/* Sync up CCB and status packet. */
1000 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1001 		    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
1002 		    BUS_DMASYNC_PREWRITE);
1003 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
1004 		    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
1005 
1006 		/*
1007 		 * Start the command.
1008 		 */
1009 		if ((xs->xs_control & XS_CTL_POLL) != 0)
1010 			ccb->ccb_flg |= CCB_PRIVATE;
1011 
1012 		if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
1013 			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
1014 			xs->error = XS_DRIVER_STUFFUP;
1015 			if (xs->datalen != 0)
1016 				dpt_ccb_unmap(sc, ccb);
1017 			dpt_ccb_free(sc, ccb);
1018 			break;
1019 		}
1020 
1021 		if ((xs->xs_control & XS_CTL_POLL) == 0)
1022 			break;
1023 
1024 		if (dpt_ccb_poll(sc, ccb)) {
1025 			dpt_ccb_abort(sc, ccb);
1026 			/* Wait for abort to complete... */
1027 			if (dpt_ccb_poll(sc, ccb))
1028 				dpt_ccb_abort(sc, ccb);
1029 		}
1030 
1031 		dpt_ccb_done(sc, ccb);
1032 		break;
1033 
1034 	case ADAPTER_REQ_GROW_RESOURCES:
1035 		/*
1036 		 * Not supported, since we allocate the maximum number of
1037 		 * CCBs up front.
1038 		 */
1039 		break;
1040 
1041 	case ADAPTER_REQ_SET_XFER_MODE:
1042 		/*
1043 		 * This will be handled by the HBA itself, and we can't
1044 		 * modify that (ditto for tagged queueing).
1045 		 */
1046 		break;
1047 	}
1048 }
1049 
1050 /*
1051  * Get inquiry data from the adapter.
1052  */
1053 static void
1054 dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
1055 {
1056 	struct dpt_ccb *ccb;
1057 	struct eata_cp *cp;
1058 
1059 	*ei = (struct eata_inquiry_data *)sc->sc_scr;
1060 
1061 	/* Get a CCB and mark as private */
1062 	ccb = dpt_ccb_alloc(sc);
1063 	ccb->ccb_flg |= CCB_PRIVATE;
1064 	ccb->ccb_timeout = 200;
1065 
1066 	/* Put all the arguments into the CCB. */
1067 	cp = &ccb->ccb_eata_cp;
1068 	cp->cp_ccbid = ccb->ccb_id;
1069 	cp->cp_senselen = sizeof(ccb->ccb_sense);
1070 	cp->cp_senseaddr = 0;
1071 	cp->cp_stataddr = htobe32(sc->sc_stppa);
1072 	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
1073 	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
1074 	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
1075 	cp->cp_ctl1 = 0;
1076 	cp->cp_ctl2 = 0;
1077 	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
1078 	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;
1079 
1080 	/* Put together the SCSI inquiry command. */
1081 	memset(&cp->cp_cdb_cmd, 0, 12);
1082 	cp->cp_cdb_cmd = INQUIRY;
1083 	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);
1084 
1085 	/* Sync up CCB, status packet and scratch area. */
1086 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
1087 	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
1088 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
1089 	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
1090 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
1091 	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);
1092 
1093 	/* Start the command and poll on completion. */
1094 	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
1095 		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
1096 
1097 	if (dpt_ccb_poll(sc, ccb))
1098 		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);
1099 
1100 	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
1101 	    ccb->ccb_scsi_status != SCSI_OK)
1102 		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
1103 		    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
1104 		    ccb->ccb_scsi_status);
1105 
1106 	/* Sync up the DMA map and free CCB, returning. */
1107 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
1108 	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
1109 	dpt_ccb_free(sc, ccb);
1110 }
1111 
1112 int
1113 dptopen(dev_t dev, int flag, int mode, struct proc *p)
1114 {
1115 
1116 	if (securelevel > 1)
1117 		return (EPERM);
1118 	if (device_lookup(&dpt_cd, minor(dev)) == NULL)
1119 		return (ENXIO);
1120 
1121 	return (0);
1122 }
1123 
1124 int
1125 dptioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
1126 {
1127 	struct dpt_softc *sc;
1128 	int rv;
1129 
1130 	sc = device_lookup(&dpt_cd, minor(dev));
1131 
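	/*
	 * The DPT command codes live in the low 16 bits of the ioctl
	 * number; mask off the IOC direction and length bits.
	 */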
1132 	switch (cmd & 0xffff) {
1133 	case DPT_SIGNATURE:
1134 		memcpy(data, &dpt_sig, min(IOCPARM_LEN(cmd), sizeof(dpt_sig)));
1135 		break;
1136 
1137 	case DPT_CTRLINFO:
1138 		dpt_ctlrinfo(sc, (struct dpt_eata_ctlrinfo *)data);
1139 		break;
1140 
1141 	case DPT_SYSINFO:
1142 		dpt_sysinfo(sc, (struct dpt_sysinfo *)data);
1143 		break;
1144 
1145 	case DPT_BLINKLED:
1146 		/*
1147 		 * XXX Don't know how to get this from EATA boards.  I think
1148 		 * it involves waiting for a "DPT" sequence from HA_ERROR
1149 		 * and then reading one of the HA_ICMD registers.
1150 		 */
1151 		*(int *)data = 0;
1152 		break;
1153 
1154 	case DPT_EATAUSRCMD:
1155 		if (IOCPARM_LEN(cmd) < sizeof(struct eata_ucp)) {
1156 			DPRINTF(("%s: ucp %lu vs %lu bytes\n",
1157 			    sc->sc_dv.dv_xname, IOCPARM_LEN(cmd),
1158 			    (unsigned long int)sizeof(struct eata_ucp)));
1159 			return (EINVAL);
1160 		}
1161 
1162 		if (sc->sc_uactive++)
1163 			tsleep(&sc->sc_uactive, PRIBIO, "dptslp", 0);
1164 
1165 		rv = dpt_passthrough(sc, (struct eata_ucp *)data, p);
1166 
1167 		sc->sc_uactive--;
1168 		wakeup_one(&sc->sc_uactive);
1169 		return (rv);
1170 
1171 	default:
1172 		DPRINTF(("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd));
1173 		return (ENOTTY);
1174 	}
1175 
1176 	return (0);
1177 }
1178 
1179 void
1180 dpt_ctlrinfo(struct dpt_softc *sc, struct dpt_eata_ctlrinfo *info)
1181 {
1182 
1183 	memset(info, 0, sizeof(*info));
1184 	info->id = sc->sc_hbaid[0];
1185 	info->vect = sc->sc_isairq;
1186 	info->base = sc->sc_isaport;
1187 	info->qdepth = sc->sc_nccbs;
1188 	info->sgsize = DPT_SG_SIZE * sizeof(struct eata_sg);
1189 	info->heads = 16;
1190 	info->sectors = 63;
1191 	info->do_drive32 = 1;
1192 	info->primary = 1;
1193 	info->cpLength = sizeof(struct eata_cp);
1194 	info->spLength = sizeof(struct eata_sp);
1195 	info->drqNum = sc->sc_isadrq;
1196 }
1197 
1198 void
1199 dpt_sysinfo(struct dpt_softc *sc, struct dpt_sysinfo *info)
1200 {
1201 #ifdef i386
1202 	int i, j;
1203 #endif
1204 
1205 	memset(info, 0, sizeof(*info));
1206 
1207 #ifdef i386
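	/*
	 * Fetch the drive types and memory sizes from the PC CMOS RTC
	 * (index port 0x70, data port 0x71).
	 */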
1208 	outb (0x70, 0x12);
1209 	i = inb(0x71);
1210 	j = i >> 4;
1211 	if (j == 0x0f) {
1212 		outb (0x70, 0x19);
1213 		j = inb (0x71);
1214 	}
1215 	info->drive0CMOS = j;
1216 
1217 	j = i & 0x0f;
1218 	if (j == 0x0f) {
1219 		outb (0x70, 0x1a);
1220 		j = inb (0x71);
1221 	}
1222 	info->drive1CMOS = j;
1223 	info->processorFamily = dpt_sig.dsProcessorFamily;
1224 
1225 	/*
1226 	 * Get the conventional memory size from CMOS.
1227 	 */
1228 	outb(0x70, 0x16);
1229 	j = inb(0x71);
1230 	j <<= 8;
1231 	outb(0x70, 0x15);
1232 	j |= inb(0x71);
1233 	info->conventionalMemSize = j;
1234 
1235 	/*
1236 	 * Get the extended memory size from CMOS.
1237 	 */
1238 	outb(0x70, 0x31);
1239 	j = inb(0x71);
1240 	j <<= 8;
1241 	outb(0x70, 0x30);
1242 	j |= inb(0x71);
1243 	info->extendedMemSize = j;
1244 
1245 	switch (cpu_class) {
1246 	case CPUCLASS_386:
1247 		info->processorType = PROC_386;
1248 		break;
1249 	case CPUCLASS_486:
1250 		info->processorType = PROC_486;
1251 		break;
1252 	case CPUCLASS_586:
1253 		info->processorType = PROC_PENTIUM;
1254 		break;
1255 	case CPUCLASS_686:
1256 	default:
1257 		info->processorType = PROC_SEXIUM;
1258 		break;
1259 	}
1260 
1261 	info->flags = SI_CMOS_Valid | SI_BusTypeValid |
1262 	    SI_MemorySizeValid | SI_NO_SmartROM;
1263 #else
1264 	info->flags = SI_BusTypeValid | SI_NO_SmartROM;
1265 #endif
1266 
1267 	info->busType = sc->sc_bustype;
1268 }
1269 
1270 int
1271 dpt_passthrough(struct dpt_softc *sc, struct eata_ucp *ucp, struct proc *proc)
1272 {
1273 	struct dpt_ccb *ccb;
1274 	struct eata_sp sp;
1275 	struct eata_cp *cp;
1276 	struct eata_sg *sg;
1277 	bus_dmamap_t xfer = 0; /* XXX: gcc */
1278 	bus_dma_segment_t *ds;
1279 	int datain = 0, s, rv = 0, i, uslen; /* XXX: gcc */
1280 
1281 	/*
1282 	 * Get a CCB and fill.
1283 	 */
1284 	ccb = dpt_ccb_alloc(sc);
1285 	ccb->ccb_flg |= CCB_PRIVATE | CCB_WAIT;
1286 	ccb->ccb_timeout = 0;
1287 	ccb->ccb_savesp = &sp;
1288 
1289 	cp = &ccb->ccb_eata_cp;
1290 	memcpy(cp, ucp->ucp_cp, sizeof(ucp->ucp_cp));
1291 	uslen = cp->cp_senselen;
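	/*
	 * Remember the caller's sense length; the sense copyout at the end
	 * is clamped to min(uslen, sizeof(ccb->ccb_sense)).
	 */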
1292 	cp->cp_ccbid = ccb->ccb_id;
1293 	cp->cp_senselen = sizeof(ccb->ccb_sense);
1294 	cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
1295 	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
1296 	cp->cp_stataddr = htobe32(sc->sc_stppa);
1297 
1298 	/*
1299 	 * Map data transfers.
1300 	 */
1301 	if (ucp->ucp_dataaddr && ucp->ucp_datalen) {
1302 		xfer = ccb->ccb_dmamap_xfer;
1303 		datain = ((cp->cp_ctl0 & CP_C0_DATA_IN) != 0);
1304 
1305 		if (ucp->ucp_datalen > DPT_MAX_XFER) {
1306 			DPRINTF(("%s: xfer too big\n", sc->sc_dv.dv_xname));
1307 			dpt_ccb_free(sc, ccb);
1308 			return (EFBIG);
1309 		}
1310 		rv = bus_dmamap_load(sc->sc_dmat, xfer,
1311 		    ucp->ucp_dataaddr, ucp->ucp_datalen, proc,
1312 		    BUS_DMA_WAITOK | BUS_DMA_STREAMING |
1313 		    (datain ? BUS_DMA_READ : BUS_DMA_WRITE));
1314 		if (rv != 0) {
1315 			DPRINTF(("%s: map failed; %d\n", sc->sc_dv.dv_xname,
1316 			    rv));
1317 			dpt_ccb_free(sc, ccb);
1318 			return (rv);
1319 		}
1320 
1321 		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
1322 		    (datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
1323 
1324 		sg = ccb->ccb_sg;
1325 		ds = xfer->dm_segs;
1326 		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
1327 	 		sg->sg_addr = htobe32(ds->ds_addr);
1328 	 		sg->sg_len = htobe32(ds->ds_len);
1329  		}
1330 		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
1331 		    sc->sc_dmamap->dm_segs[0].ds_addr +
1332 		    offsetof(struct dpt_ccb, ccb_sg));
1333 		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
1334 		cp->cp_ctl0 |= CP_C0_SCATTER;
1335 	} else {
1336 		cp->cp_dataaddr = 0;
1337 		cp->cp_datalen = 0;
1338 	}
1339 
1340 	/*
1341 	 * Start the command and sleep on completion.
1342 	 */
1343 	PHOLD(curlwp);	/* XXXJRT curlwp */
1344 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
1345 	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
1346 	s = splbio();
1347 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
1348 	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
1349 	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
1350 		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
1351 	tsleep(ccb, PWAIT, "dptucmd", 0);
1352 	splx(s);
1353 	PRELE(curlwp);	/* XXXJRT curlwp */
1354 
1355 	/*
1356 	 * Sync up the DMA map and copy out results.
1357 	 */
1358 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
1359 	    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);
1360 
1361 	if (cp->cp_datalen != 0) {
1362 		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
1363 		    (datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
1364 		bus_dmamap_unload(sc->sc_dmat, xfer);
1365 	}
1366 
1367 	if (ucp->ucp_stataddr != NULL) {
1368 		rv = copyout(&sp, ucp->ucp_stataddr, sizeof(sp));
1369 		if (rv != 0)
1370 			DPRINTF(("%s: sp copyout() failed\n",
1371 			    sc->sc_dv.dv_xname));
1372 	}
1373 	if (rv == 0 && ucp->ucp_senseaddr != NULL) {
1374 		i = min(uslen, sizeof(ccb->ccb_sense));
1375 		rv = copyout(&ccb->ccb_sense, ucp->ucp_senseaddr, i);
1376 		if (rv != 0)
1377 			DPRINTF(("%s: sense copyout() failed\n",
1378 			    sc->sc_dv.dv_xname));
1379 	}
1380 
1381 	ucp->ucp_hstatus = (u_int8_t)ccb->ccb_hba_status;
1382 	ucp->ucp_tstatus = (u_int8_t)ccb->ccb_scsi_status;
1383 	dpt_ccb_free(sc, ccb);
1384 	return (rv);
1385 }
1386