/*	$NetBSD: dpt.c,v 1.70 2014/03/16 05:20:27 dholland Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
 * Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
 * Copyright (c) 2000 Adaptec Corporation
 * All rights reserved.
 *
 * TERMS AND CONDITIONS OF USE
 *
 * Redistribution and use in source form, with or without modification, are
 * permitted provided that redistributions of source code must retain the
 * above copyright notice, this list of conditions and the following disclaimer.
 *
 * This software is provided `as is' by Adaptec and any express or implied
 * warranties, including, but not limited to, the implied warranties of
 * merchantability and fitness for a particular purpose, are disclaimed. In no
 * event shall Adaptec be liable for any direct, indirect, incidental, special,
 * exemplary or consequential damages (including, but not limited to,
 * procurement of substitute goods or services; loss of use, data, or profits;
 * or business interruptions) however caused and on any theory of liability,
 * whether in contract, strict liability, or tort (including negligence or
 * otherwise) arising in any way out of the use of this driver software, even
 * if advised of the possibility of such damage.
 */

/*
 * Portions of this code fall under the following copyright:
 *
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.70 2014/03/16 05:20:27 dholland Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/proc.h>
#include <sys/mutex.h>

#include <sys/bus.h>
#ifdef i386
#include <machine/pio.h>
#include <machine/cputypes.h>
#endif

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/dptreg.h>
#include <dev/ic/dptvar.h>

#include <dev/i2o/dptivar.h>

#ifdef DEBUG
#define	DPRINTF(x)		printf x
#else
#define	DPRINTF(x)
#endif

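/* Byte-wide PIO access to the HBA's EATA register window. */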
#define dpt_inb(x, o)		\
    bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
#define dpt_outb(x, o, d)	\
    bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))

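/*
 * Model-number fragment/marketing-name pairs, terminated by a NULL
 * sentinel; dpt_init() matches four characters of the EATA inquiry
 * model field against the fragments below.
 */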
static const char * const dpt_cname[] = {
	"3334", "SmartRAID IV",
	"3332", "SmartRAID IV",
	"2144", "SmartCache IV",
	"2044", "SmartCache IV",
	"2142", "SmartCache IV",
	"2042", "SmartCache IV",
	"2041", "SmartCache IV",
	"3224", "SmartRAID III",
	"3222", "SmartRAID III",
	"3021", "SmartRAID III",
	"2124", "SmartCache III",
	"2024", "SmartCache III",
	"2122", "SmartCache III",
	"2022", "SmartCache III",
	"2021", "SmartCache III",
	"2012", "SmartCache Plus",
	"2011", "SmartCache Plus",
	NULL,   "<unknown>",
};

static void	*dpt_sdh;

dev_type_open(dptopen);
dev_type_ioctl(dptioctl);

const struct cdevsw dpt_cdevsw = {
	.d_open = dptopen,
	.d_close = nullclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = dptioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_flag = D_OTHER,
};

extern struct cfdriver dpt_cd;

static struct dpt_sig dpt_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'},
	SIG_VERSION,
#if defined(i386)
	PROC_INTEL,
#elif defined(powerpc)
	PROC_POWERPC,
#elif defined(alpha)
	PROC_ALPHA,
#elif defined(__mips__)
	PROC_MIPS,
#elif defined(sparc64)
	PROC_ULTRASPARC,
#else
	0xff,
#endif
#if defined(i386)
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#else
	0,
#endif
	FT_HBADRVR,
	0,
	OEM_DPT,
	OS_FREE_BSD,	/* XXX */
	CAP_ABOVE16MB,
	DEV_ALL,
	ADF_ALL_EATA,
	0,
	0,
	DPT_VERSION,
	DPT_REVISION,
	DPT_SUBREVISION,
	DPT_MONTH,
	DPT_DAY,
	DPT_YEAR,
	""		/* Will be filled later */
};

static void	dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
static void	dpt_ctlrinfo(struct dpt_softc *, struct dpt_eata_ctlrinfo *);
static void	dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
static void	dpt_minphys(struct buf *);
static int	dpt_passthrough(struct dpt_softc *, struct eata_ucp *,
				struct lwp *);
static void	dpt_scsipi_request(struct scsipi_channel *,
				   scsipi_adapter_req_t, void *);
static void	dpt_shutdown(void *);
static void	dpt_sysinfo(struct dpt_softc *, struct dpt_sysinfo *);
static int	dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);

static inline struct dpt_ccb	*dpt_ccb_alloc(struct dpt_softc *);
static inline void	dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);

static inline struct dpt_ccb *
dpt_ccb_alloc(struct dpt_softc *sc)
{
	struct dpt_ccb *ccb;
	int s;

	s = splbio();
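	/*
	 * The free list is assumed to be non-empty here: we advertise
	 * only sc_nccbs - 1 openings to the midlayer, which leaves at
	 * least one CCB for private commands.
	 */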
	ccb = SLIST_FIRST(&sc->sc_ccb_free);
	SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
	splx(s);

	return (ccb);
}

static inline void
dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int s;

	ccb->ccb_flg = 0;
	ccb->ccb_savesp = NULL;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	splx(s);
}

/*
 * Handle an interrupt from the HBA.
 */
int
dpt_intr(void *cookie)
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	int forus;

	sc = cookie;
	sp = sc->sc_stp;
	forus = 0;

	for (;;) {
		/*
		 * HBA might have interrupted while we were dealing with the
		 * last completed command, since we ACK before we deal; keep
		 * polling.
		 */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
			break;
		forus = 1;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before HBA can reset HBA_AUX_INTR. */
		if (sp->sp_ccbid == -1) {
			DELAY(50);

			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);

			printf("%s: no status\n", device_xname(sc->sc_dev));

			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    sc->sc_stpoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic. */
		if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
			printf("%s: bogus status (returned CCB id %d)\n",
			    device_xname(sc->sc_dev), sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			(void)dpt_inb(sc, HA_STATUS);
			continue;
		}

		/* Sync up DMA map and cache cmd status. */
		ccb = sc->sc_ccbs + sp->sp_ccbid;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
		    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

		ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
		ccb->ccb_scsi_status = sp->sp_scsi_status;
		if (ccb->ccb_savesp != NULL)
			memcpy(ccb->ccb_savesp, sp, sizeof(*sp));

		/*
		 * Ack the interrupt and process the CCB.  If this
		 * is a private CCB it's up to dpt_ccb_poll() to
		 * notice.
		 */
		sp->sp_ccbid = -1;
		ccb->ccb_flg |= CCB_INTR;
		(void)dpt_inb(sc, HA_STATUS);
		if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
			dpt_ccb_done(sc, ccb);
		else if ((ccb->ccb_flg & CCB_WAIT) != 0)
			wakeup(ccb);
	}

	return (forus);
}

/*
 * Initialize and attach the HBA.  This is the entry point from bus
 * specific probe-and-attach code.
 */
void
dpt_init(struct dpt_softc *sc, const char *intrstr)
{
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;
	struct eata_inquiry_data *ei;
	int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
	bus_dma_segment_t seg;
	struct eata_cfg *ec;
	struct dpt_ccb *ccb;
	char model[__arraycount(ei->ei_model) + __arraycount(ei->ei_suffix) + 1];
	char vendor[__arraycount(ei->ei_vendor) + 1];

	ec = &sc->sc_ec;
	snprintf(dpt_sig.dsDescription, sizeof(dpt_sig.dsDescription),
	    "NetBSD %s DPT driver", osrelease);
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/*
	 * Allocate the CCB/status packet/scratch DMA map and load.
	 */
	sc->sc_nccbs =
	    min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
	sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
	sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
	    DPT_SCRATCH_SIZE + sizeof(struct eata_sp);

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate CCBs, rv = %d\n", rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
	    (void **)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to map CCBs, rv = %d\n",
		    rv);
		return;
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
	    mapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create CCB DMA map, rv = %d\n", rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load CCB DMA map, rv = %d\n", rv);
		return;
	}

	sc->sc_stp = (struct eata_sp *)((char *)sc->sc_ccbs + sc->sc_stpoff);
	sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
	sc->sc_scr = (char *)sc->sc_ccbs + sc->sc_scroff;
	sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
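	/* A CCB id of -1 marks the status packet as not yet posted. */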
	sc->sc_stp->sp_ccbid = -1;

	/*
	 * Create the CCBs.
	 */
	SLIST_INIT(&sc->sc_ccb_free);
	memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);

	for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
		rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
		    DPT_SG_SIZE, DPT_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "can't create ccb dmamap (%d)\n", rv);
			break;
		}

		ccb->ccb_id = i;
		ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb);
		SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	}

	if (i == 0) {
		aprint_error_dev(sc->sc_dev, "unable to create CCBs\n");
		return;
	} else if (i != sc->sc_nccbs) {
		aprint_error_dev(sc->sc_dev, "%d/%d CCBs created!\n",
		    i, sc->sc_nccbs);
		sc->sc_nccbs = i;
	}

	/* Set shutdownhook before we start any device activity. */
	if (dpt_sdh == NULL)
		dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);

	/* Get the inquiry data from the HBA. */
	dpt_hba_inquire(sc, &ei);

	/*
	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
	 * dpt0: interrupting at irq 10
	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
	 */
	for (i = 0; i < __arraycount(ei->ei_vendor) && ei->ei_vendor[i] != ' ';
	    i++)
		vendor[i] = ei->ei_vendor[i];
	vendor[i] = '\0';

	for (i = 0; i < __arraycount(ei->ei_model) && ei->ei_model[i] != ' ';
	    i++)
		model[i] = ei->ei_model[i];
	for (j = 0; j < __arraycount(ei->ei_suffix) && ei->ei_suffix[j] != ' ';
	    i++, j++)
		model[i] = ei->ei_suffix[j];
	model[i] = '\0';

	/* Find the marketing name for the board. */
	for (i = 0; dpt_cname[i] != NULL; i += 2)
		if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
			break;

	aprint_normal("%s %s (%s)\n", vendor, dpt_cname[i + 1], model);

	if (intrstr != NULL)
		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n",
		    intrstr);

	maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
	    EC_F3_MAX_CHANNEL_SHIFT;
	maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
	    EC_F3_MAX_TARGET_SHIFT;

	aprint_normal_dev(sc->sc_dev,
	    "%d queued commands, %d channel(s), adapter on ID(s)",
	    sc->sc_nccbs, maxchannel + 1);

	for (i = 0; i <= maxchannel; i++) {
		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
		aprint_normal(" %d", sc->sc_hbaid[i]);
	}
	aprint_normal("\n");

	/*
	 * Reset the SCSI controller chip(s) and bus.  XXX Do we need to do
	 * this for each bus?
	 */
	if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
		panic("%s: dpt_cmd failed", device_xname(sc->sc_dev));

	/* Fill in the scsipi_adapter. */
	adapt = &sc->sc_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = maxchannel + 1;
	adapt->adapt_openings = sc->sc_nccbs - 1;
	adapt->adapt_max_periph = sc->sc_nccbs - 1;
	adapt->adapt_request = dpt_scsipi_request;
	adapt->adapt_minphys = dpt_minphys;

	for (i = 0; i <= maxchannel; i++) {
		/* Fill in the scsipi_channel. */
		chan = &sc->sc_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = maxtarget + 1;
		chan->chan_nluns = ec->ec_maxlun + 1;
		chan->chan_id = sc->sc_hbaid[i];
		config_found(sc->sc_dev, chan, scsiprint);
	}
}

/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 */
int
dpt_readcfg(struct dpt_softc *sc)
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset. */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset (hba status:%02x)\n",
		    device_xname(sc->sc_dev), dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	while ((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
		/* RAID drives still spinning up? */
		if (dpt_inb(sc, HA_ERROR) != 'D' ||
		    dpt_inb(sc, HA_ERROR + 1) != 'P' ||
		    dpt_inb(sc, HA_ERROR + 2) != 'T') {
			printf("%s: HBA not ready\n", device_xname(sc->sc_dev));
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 *
	 * Apparently certain firmware revisions won't DMA later on if we
	 * request the config data using PIO, but it makes it a lot easier
	 * as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
	i = (offsetof(struct eata_cfg, ec_cfglen) +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear (hba status:%02x)\n",
		    device_xname(sc->sc_dev), dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/* Begin reading. */
	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
	    - offsetof(struct eata_cfg, ec_cfglen)
	    - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg)
		  - offsetof(struct eata_cfg, ec_cfglen)
		  - sizeof(ec->ec_cfglen);

	j = i + offsetof(struct eata_cfg, ec_cfglen) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;

	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		(void)bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Defaults for older firmware... */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		aprint_error_dev(sc->sc_dev, "HBA error\n");
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		aprint_error_dev(sc->sc_dev, "EATA signature mismatch\n");
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
		aprint_error_dev(sc->sc_dev, "ec_hba field invalid\n");
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
		aprint_error_dev(sc->sc_dev, "DMA not supported\n");
		return (-1);
	}

	return (0);
}

/*
 * Our `shutdownhook' to cleanly shut down the HBA.  The HBA must flush all
 * data from its cache and mark array groups as clean.
 *
 * XXX This doesn't always work (i.e., the HBA may still be flushing after
 * we tell root that it's safe to power off).
 */
static void
dpt_shutdown(void *cookie)
{
	extern struct cfdriver dpt_cd;
	struct dpt_softc *sc;
	int i;

	printf("shutting down dpt devices...");

	for (i = 0; i < dpt_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&dpt_cd, i)) == NULL)
			continue;
		dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
	}

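	/* Give the HBAs ten seconds to finish flushing their caches. */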
	delay(10000*1000);
	printf(" done\n");
}

/*
 * Send an EATA command to the HBA.
 */
static int
dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
{
	u_int32_t pa;
	int i, s;

	s = splbio();

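	/* Wait up to ~1 second (20000 polls of 50us) for the HBA to go idle. */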
	for (i = 20000; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == 0) {
		splx(s);
		return (-1);
	}

	pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
	dpt_outb(sc, HA_DMA_BASE + 0, (pa      ) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 1, (pa >>  8) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);

	if (eatacmd == CP_IMMEDIATE)
		dpt_outb(sc, HA_ICMD, icmd);

	dpt_outb(sc, HA_COMMAND, eatacmd);

	splx(s);
	return (0);
}

/*
 * Wait for the HBA status register to reach a specific state.
 */
static int
dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
{

	for (ms *= 10; ms != 0; ms--) {
		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
			return (0);
		DELAY(100);
	}

	return (-1);
}

/*
 * Spin waiting for a command to finish.  The timeout value from the CCB is
 * used.  The CCB must be marked with CCB_PRIVATE, otherwise it'll get
 * recycled before we get a look at it.
 */
static int
dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int i, s;

#ifdef DEBUG
	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
		panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
#endif

	s = splbio();

	if ((ccb->ccb_flg & CCB_INTR) != 0) {
		splx(s);
		return (0);
	}

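	/* ccb_timeout is in milliseconds; we poll every 50us. */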
	for (i = ccb->ccb_timeout * 20; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
			dpt_intr(sc);
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			break;
		DELAY(50);
	}

	splx(s);
	return (i == 0);
}

/*
 * We have a command which has been processed by the HBA, so now we look to
 * see how the operation went.  CCBs marked CCB_PRIVATE are not passed here
 * by dpt_intr().
 */
static void
dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;

	xs = ccb->ccb_xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen != 0)
		dpt_ccb_unmap(sc, ccb);

	if (xs->error == XS_NOERROR) {
		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case SP_HBA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case SP_HBA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:
				printf("%s: HBA status %x\n",
				    device_xname(sc->sc_dev),
				    ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				memcpy(&xs->sense.scsi_sense, &ccb->ccb_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
			case SCSI_QUEUE_FULL:
				xs->error = XS_BUSY;
				break;
			default:
				scsipi_printaddr(xs->xs_periph);
				printf("SCSI status %x\n",
				    ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->resid = 0;

		xs->status = ccb->ccb_scsi_status;
	}

	/* Free up the CCB and mark the command as done. */
	dpt_ccb_free(sc, ccb);
	scsipi_done(xs);
}

/*
 * Specified CCB has timed out, abort it.
 */
static void
dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	int s;

	xs = ccb->ccb_xs;
	periph = xs->xs_periph;

	scsipi_printaddr(periph);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
			aprint_error_dev(sc->sc_dev, "dpt_cmd failed\n");
	}

	splx(s);
}

/*
 * Map a data transfer.
 */
static int
dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;
	bus_dmamap_t xfer;
	bus_dma_segment_t *ds;
	struct eata_sg *sg;
	struct eata_cp *cp;
	int rv, i;

	xs = ccb->ccb_xs;
	xfer = ccb->ccb_dmamap_xfer;
	cp = &ccb->ccb_eata_cp;

	rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
	    ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));

	switch (rv) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		printf("%s: error %d loading map\n",
		    device_xname(sc->sc_dev), rv);
		break;
	}

	if (xs->error != XS_NOERROR) {
		dpt_ccb_free(sc, ccb);
		scsipi_done(xs);
		return (-1);
	}

	bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	/* Don't bother using scatter/gather for just 1 seg */
	if (xfer->dm_nsegs == 1) {
		cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
		cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
	} else {
		/*
		 * Load the hardware scatter/gather map with
		 * the contents of the DMA map.
		 */
		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	}

	return (0);
}

/*
 * Unmap a transfer.
 */
static void
dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
{

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_dmamap_xfer->dm_mapsize,
	    (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
dpt_minphys(struct buf *bp)
{

	if (bp->b_bcount > DPT_MAX_XFER)
		bp->b_bcount = DPT_MAX_XFER;
	minphys(bp);
}

/*
 * Start a SCSI command.
 */
static void
dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		   void *arg)
{
	struct dpt_softc *sc;
	struct scsipi_xfer *xs;
	int flags;
	struct scsipi_periph *periph;
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	sc = device_private(chan->chan_adapter->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

#ifdef DIAGNOSTIC
		/* Cmds must be no more than 12 bytes for us. */
		if (xs->cmdlen > 12) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif
		/*
		 * XXX We can't reset devices just yet.  Apparently some
		 * older firmware revisions don't even support it.
		 */
		if ((flags & XS_CTL_RESET) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}

		/*
		 * Get a CCB and fill it.
		 */
		ccb = dpt_ccb_alloc(sc);
		ccb->ccb_xs = xs;
		ccb->ccb_timeout = xs->timeout;

		cp = &ccb->ccb_eata_cp;
		memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
		cp->cp_ccbid = ccb->ccb_id;
		cp->cp_senselen = sizeof(ccb->ccb_sense);
		cp->cp_stataddr = htobe32(sc->sc_stppa);
		cp->cp_ctl0 = CP_C0_AUTO_SENSE;
		cp->cp_ctl1 = 0;
		cp->cp_ctl2 = 0;
		cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
		cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
		cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
		cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;

		if ((flags & XS_CTL_DATA_IN) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_IN;
		if ((flags & XS_CTL_DATA_OUT) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_OUT;
		if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
			cp->cp_ctl0 |= CP_C0_INTERPRET;

		/* Synchronous xfers mustn't write back through the cache. */
		if (xs->bp != NULL)
			if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
				cp->cp_ctl2 |= CP_C2_NO_CACHE;

		cp->cp_senseaddr =
		    htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));

		if (xs->datalen != 0) {
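			/*
			 * On failure, dpt_ccb_map() frees the CCB and
			 * completes the xfer itself.
			 */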
			if (dpt_ccb_map(sc, ccb))
				break;
		} else {
			cp->cp_dataaddr = 0;
			cp->cp_datalen = 0;
		}

		/* Sync up CCB and status packet. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);

		/*
		 * Start the command.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0)
			ccb->ccb_flg |= CCB_PRIVATE;

		if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
			aprint_error_dev(sc->sc_dev, "dpt_cmd failed\n");
			xs->error = XS_DRIVER_STUFFUP;
			if (xs->datalen != 0)
				dpt_ccb_unmap(sc, ccb);
			dpt_ccb_free(sc, ccb);
			break;
		}

		if ((xs->xs_control & XS_CTL_POLL) == 0)
			break;

		if (dpt_ccb_poll(sc, ccb)) {
			dpt_ccb_abort(sc, ccb);
			/* Wait for abort to complete... */
			if (dpt_ccb_poll(sc, ccb))
				dpt_ccb_abort(sc, ccb);
		}

		dpt_ccb_done(sc, ccb);
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported, since we allocate the maximum number of
		 * CCBs up front.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * This will be handled by the HBA itself, and we can't
		 * modify that (ditto for tagged queueing).
		 */
		break;
	}
}

/*
 * Get inquiry data from the adapter.
 */
static void
dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	*ei = (struct eata_inquiry_data *)sc->sc_scr;

	/* Get a CCB and mark as private */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE;
	ccb->ccb_timeout = 200;

	/* Put all the arguments into the CCB. */
	cp = &ccb->ccb_eata_cp;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = 0;
	cp->cp_stataddr = htobe32(sc->sc_stppa);
	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
	cp->cp_ctl1 = 0;
	cp->cp_ctl2 = 0;
	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;

	/* Put together the SCSI inquiry command. */
	memset(&cp->cp_cdb_cmd, 0, 12);
	cp->cp_cdb_cmd = INQUIRY;
	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion. */
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", device_xname(sc->sc_dev));

	if (dpt_ccb_poll(sc, ccb))
		panic("%s: inquiry timed out", device_xname(sc->sc_dev));

	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    device_xname(sc->sc_dev), ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free CCB, returning. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_ccb_free(sc, ccb);
}

int
dptopen(dev_t dev, int flag, int mode, struct lwp *l)
{

	if (device_lookup(&dpt_cd, minor(dev)) == NULL)
		return (ENXIO);

	return (0);
}

int
dptioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct dpt_softc *sc;
	int rv;

	sc = device_lookup_private(&dpt_cd, minor(dev));

	switch (cmd & 0xffff) {
	case DPT_SIGNATURE:
		memcpy(data, &dpt_sig, min(IOCPARM_LEN(cmd), sizeof(dpt_sig)));
		break;

	case DPT_CTRLINFO:
		dpt_ctlrinfo(sc, (struct dpt_eata_ctlrinfo *)data);
		break;

	case DPT_SYSINFO:
		dpt_sysinfo(sc, (struct dpt_sysinfo *)data);
		break;

	case DPT_BLINKLED:
		/*
		 * XXX Don't know how to get this from EATA boards.  I think
		 * it involves waiting for a "DPT" sequence from HA_ERROR
		 * and then reading one of the HA_ICMD registers.
		 */
		*(int *)data = 0;
		break;

	case DPT_EATAUSRCMD:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			return (rv);

		if (IOCPARM_LEN(cmd) < sizeof(struct eata_ucp)) {
			DPRINTF(("%s: ucp %lu vs %lu bytes\n",
			    device_xname(sc->sc_dev), IOCPARM_LEN(cmd),
			    (unsigned long int)sizeof(struct eata_ucp)));
			return (EINVAL);
		}

		mutex_enter(&sc->sc_lock);
		rv = dpt_passthrough(sc, (struct eata_ucp *)data, l);
		mutex_exit(&sc->sc_lock);

		return (rv);

	default:
		DPRINTF(("%s: unknown ioctl %lx\n",
		    device_xname(sc->sc_dev), cmd));
		return (ENOTTY);
	}

	return (0);
}

void
dpt_ctlrinfo(struct dpt_softc *sc, struct dpt_eata_ctlrinfo *info)
{

	memset(info, 0, sizeof(*info));
	info->id = sc->sc_hbaid[0];
	info->vect = sc->sc_isairq;
	info->base = sc->sc_isaport;
	info->qdepth = sc->sc_nccbs;
	info->sgsize = DPT_SG_SIZE * sizeof(struct eata_sg);
	info->heads = 16;
	info->sectors = 63;
	info->do_drive32 = 1;
	info->primary = 1;
	info->cpLength = sizeof(struct eata_cp);
	info->spLength = sizeof(struct eata_sp);
	info->drqNum = sc->sc_isadrq;
}

void
dpt_sysinfo(struct dpt_softc *sc, struct dpt_sysinfo *info)
{
#ifdef i386
	int i, j;
#endif

	memset(info, 0, sizeof(*info));

#ifdef i386
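	/*
	 * Read drive type and memory size information from the PC
	 * RTC/CMOS NVRAM via the index/data ports 0x70/0x71.
	 */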
	/*
	 * Register 0x12 holds the drive type nibbles; a nibble of 0x0f
	 * redirects to the extended registers 0x19/0x1a.
	 */
	outb(0x70, 0x12);
	i = inb(0x71);
	j = i >> 4;
	if (j == 0x0f) {
		outb(0x70, 0x19);
		j = inb(0x71);
	}
	info->drive0CMOS = j;

	j = i & 0x0f;
	if (j == 0x0f) {
		outb(0x70, 0x1a);
		j = inb(0x71);
	}
	info->drive1CMOS = j;
	info->processorFamily = dpt_sig.dsProcessorFamily;

	/*
	 * Get the conventional memory size from CMOS.
	 */
	outb(0x70, 0x16);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x15);
	j |= inb(0x71);
	info->conventionalMemSize = j;

	/*
	 * Get the extended memory size from CMOS.
	 */
	outb(0x70, 0x31);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x30);
	j |= inb(0x71);
	info->extendedMemSize = j;

	switch (cpu_class) {
	case CPUCLASS_386:
		info->processorType = PROC_386;
		break;
	case CPUCLASS_486:
		info->processorType = PROC_486;
		break;
	case CPUCLASS_586:
		info->processorType = PROC_PENTIUM;
		break;
	case CPUCLASS_686:
	default:
		info->processorType = PROC_SEXIUM;
		break;
	}

	info->flags = SI_CMOS_Valid | SI_BusTypeValid |
	    SI_MemorySizeValid | SI_NO_SmartROM;
#else
	info->flags = SI_BusTypeValid | SI_NO_SmartROM;
#endif

	info->busType = sc->sc_bustype;
}

int
dpt_passthrough(struct dpt_softc *sc, struct eata_ucp *ucp, struct lwp *l)
{
	struct dpt_ccb *ccb;
	struct eata_sp sp;
	struct eata_cp *cp;
	struct eata_sg *sg;
	bus_dmamap_t xfer = 0; /* XXX: gcc */
	bus_dma_segment_t *ds;
	int datain = 0, s, rv = 0, i, uslen; /* XXX: gcc */

	/*
	 * Get a CCB and fill.
	 */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE | CCB_WAIT;
	ccb->ccb_timeout = 0;
	ccb->ccb_savesp = &sp;

	cp = &ccb->ccb_eata_cp;
	memcpy(cp, ucp->ucp_cp, sizeof(ucp->ucp_cp));
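	/* Save the caller's sense length; we substitute our own buffer. */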
	uslen = cp->cp_senselen;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
	cp->cp_stataddr = htobe32(sc->sc_stppa);

	/*
	 * Map data transfers.
	 */
	if (ucp->ucp_dataaddr && ucp->ucp_datalen) {
		xfer = ccb->ccb_dmamap_xfer;
		datain = ((cp->cp_ctl0 & CP_C0_DATA_IN) != 0);

		if (ucp->ucp_datalen > DPT_MAX_XFER) {
			DPRINTF(("%s: xfer too big\n",
			    device_xname(sc->sc_dev)));
			dpt_ccb_free(sc, ccb);
			return (EFBIG);
		}
		rv = bus_dmamap_load(sc->sc_dmat, xfer,
		    ucp->ucp_dataaddr, ucp->ucp_datalen, l->l_proc,
		    BUS_DMA_WAITOK | BUS_DMA_STREAMING |
		    (datain ? BUS_DMA_READ : BUS_DMA_WRITE));
		if (rv != 0) {
			DPRINTF(("%s: map failed; %d\n",
			    device_xname(sc->sc_dev), rv));
			dpt_ccb_free(sc, ccb);
			return (rv);
		}

		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));

		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	} else {
		cp->cp_dataaddr = 0;
		cp->cp_datalen = 0;
	}

	/*
	 * Start the command and sleep on completion.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	s = splbio();
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", device_xname(sc->sc_dev));
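	/* dpt_intr() does wakeup(ccb) on completion, since CCB_WAIT is set. */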
	tsleep(ccb, PWAIT, "dptucmd", 0);
	splx(s);

	/*
	 * Sync up the DMA map and copy out results.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

	if (cp->cp_datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(sc->sc_dmat, xfer);
	}

	if (ucp->ucp_stataddr != NULL) {
		rv = copyout(&sp, ucp->ucp_stataddr, sizeof(sp));
		if (rv != 0) {
			DPRINTF(("%s: sp copyout() failed\n",
			    device_xname(sc->sc_dev)));
		}
	}
	if (rv == 0 && ucp->ucp_senseaddr != NULL) {
		i = min(uslen, sizeof(ccb->ccb_sense));
		rv = copyout(&ccb->ccb_sense, ucp->ucp_senseaddr, i);
		if (rv != 0) {
			DPRINTF(("%s: sense copyout() failed\n",
			    device_xname(sc->sc_dev)));
		}
	}

	ucp->ucp_hstatus = (u_int8_t)ccb->ccb_hba_status;
	ucp->ucp_tstatus = (u_int8_t)ccb->ccb_scsi_status;
	dpt_ccb_free(sc, ccb);
	return (rv);
}