1 /*	$NetBSD: xd.c,v 1.69 2014/03/16 05:20:26 dholland Exp $	*/
2 
3 /*
4  * Copyright (c) 1995 Charles D. Cranor
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  *
30  * x d . c   x y l o g i c s   7 5 3 / 7 0 5 3   v m e / s m d   d r i v e r
31  *
32  * author: Chuck Cranor <chuck@netbsd>
33  * id: &Id: xd.c,v 1.9 1995/09/25 20:12:44 chuck Exp &
34  * started: 27-Feb-95
35  * references: [1] Xylogics Model 753 User's Manual
36  *                 part number: 166-753-001, Revision B, May 21, 1988.
37  *                 "Your Partner For Performance"
38  *             [2] other NetBSD disk device drivers
39  *
40  * Special thanks go to Scott E. Campbell of Xylogics, Inc. for taking
41  * the time to answer some of my questions about the 753/7053.
42  *
43  * note: the 753 and the 7053 are programmed the same way, but are
44  * different sizes.   the 753 is a 6U VME card, while the 7053 is a 9U
45  * VME card (found in many VME based suns).
46  */
47 
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: xd.c,v 1.69 2014/03/16 05:20:26 dholland Exp $");
50 
51 #undef XDC_DEBUG		/* full debug */
52 #define XDC_DIAG		/* extra sanity checks */
53 #if defined(DIAGNOSTIC) && !defined(XDC_DIAG)
54 #define XDC_DIAG		/* link in with master DIAG option */
55 #endif
56 
57 #include <sys/param.h>
58 #include <sys/proc.h>
59 #include <sys/systm.h>
60 #include <sys/kernel.h>
61 #include <sys/file.h>
62 #include <sys/stat.h>
63 #include <sys/ioctl.h>
64 #include <sys/buf.h>
65 #include <sys/bufq.h>
66 #include <sys/uio.h>
67 #include <sys/malloc.h>
68 #include <sys/device.h>
69 #include <sys/disklabel.h>
70 #include <sys/disk.h>
71 #include <sys/syslog.h>
72 #include <sys/dkbad.h>
73 #include <sys/conf.h>
74 #include <sys/kauth.h>
75 
76 #include <uvm/uvm_extern.h>
77 
78 #include <machine/autoconf.h>
79 #include <machine/dvma.h>
80 
81 #include <dev/sun/disklabel.h>
82 
83 #include <sun3/dev/xdreg.h>
84 #include <sun3/dev/xdvar.h>
85 #include <sun3/dev/xio.h>
86 
87 #include "ioconf.h"
88 #include "locators.h"
89 
90 /*
91  * Print a complaint when no xd children were specified
92  * in the config file.  Better than a link error...
93  *
94  * XXX: Some folks say this driver should be split in two,
95  * but that seems pointless with ONLY one type of child.
96  */
97 #include "xd.h"
98 #if NXD == 0
99 #error "xdc but no xd?"
100 #endif
101 
102 /*
103  * macros
104  */
105 
106 /*
107  * XDC_TWAIT: add iorq "N" to tail of SC's wait queue
108  */
109 #define XDC_TWAIT(SC, N)						\
110 do {									\
111 	(SC)->waitq[(SC)->waitend] = (N);				\
112 	(SC)->waitend = ((SC)->waitend + 1) % XDC_MAXIOPB;		\
113 	(SC)->nwait++;							\
114 } while (/* CONSTCOND */ 0)
115 
116 /*
117  * XDC_HWAIT: add iorq "N" to head of SC's wait queue
118  */
119 #define XDC_HWAIT(SC, N)						\
120 do {									\
121 	(SC)->waithead = ((SC)->waithead == 0) ?			\
122 	    (XDC_MAXIOPB - 1) : ((SC)->waithead - 1);			\
123 	(SC)->waitq[(SC)->waithead] = (N);				\
124 	(SC)->nwait++;							\
125 } while (/* CONSTCOND */ 0)
126 
127 /*
128  * XDC_GET_WAITER: gets the first request waiting on the waitq
129  * and removes it (so it can be submitted)
130  */
131 #define XDC_GET_WAITER(XDCSC, RQ)					\
132 do {									\
133 	(RQ) = (XDCSC)->waitq[(XDCSC)->waithead];			\
134 	(XDCSC)->waithead = ((XDCSC)->waithead + 1) % XDC_MAXIOPB;	\
135 	(XDCSC)->nwait--;						\
136 } while (/* CONSTCOND */ 0)
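
/*
 * illustrative example (hypothetical XDC_MAXIOPB of 4, queue empty,
 * waithead == waitend == nwait == 0):
 *   XDC_TWAIT(sc, 1)        waitq[0] = 1, waitend = 1, nwait = 1
 *   XDC_TWAIT(sc, 2)        waitq[1] = 2, waitend = 2, nwait = 2
 *   XDC_HWAIT(sc, 3)        waithead = 3, waitq[3] = 3, nwait = 3
 *   XDC_GET_WAITER(sc, rq)  rq = 3, waithead = 0, nwait = 2
 * i.e. waitq[] is a ring of iorq numbers: waithead indexes the next
 * entry to dispatch and waitend the next free slot.
 */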
137 
138 /*
139  * XDC_FREE: add iorq "N" to SC's free list
140  */
141 #define XDC_FREE(SC, N)							\
142 do {									\
143 	(SC)->freereq[(SC)->nfree++] = (N);				\
144 	(SC)->reqs[N].mode = 0;						\
145 	if ((SC)->nfree == 1)						\
146 		wakeup(&(SC)->nfree);					\
147 } while (/* CONSTCOND */ 0)
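
/*
 * note: the wakeup above pairs with the tsleep on &xdcsc->nfree in
 * xdc_cmd's XD_SUB_WAIT path.  waiters only sleep while the free list
 * is empty, so they only need a wakeup on the 0 -> 1 transition.
 */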
148 
149 
150 /*
151  * XDC_RQALLOC: allocate an iorq off the free list (assume nfree > 0).
152  */
153 #define XDC_RQALLOC(XDCSC) (XDCSC)->freereq[--((XDCSC)->nfree)]
154 
155 /*
156  * XDC_GO: start iopb ADDR (DVMA addr in a u_long) on XDC
157  */
158 #define XDC_GO(XDC, ADDR)						\
159 do {									\
160 	(XDC)->xdc_iopbaddr0 = ((ADDR) & 0xff);				\
161 	(ADDR) = ((ADDR) >> 8);						\
162 	(XDC)->xdc_iopbaddr1 = ((ADDR) & 0xff);				\
163 	(ADDR) = ((ADDR) >> 8);						\
164 	(XDC)->xdc_iopbaddr2 = ((ADDR) & 0xff);				\
165 	(ADDR) = ((ADDR) >> 8);						\
166 	(XDC)->xdc_iopbaddr3 = (ADDR);					\
167 	(XDC)->xdc_iopbamod = XDC_ADDRMOD;				\
168 	(XDC)->xdc_csr = XDC_ADDIOPB; /* go! */				\
169 } while (/* CONSTCOND */ 0)
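
/*
 * e.g. (made-up DVMA address): starting XDC_GO with ADDR = 0x00fe1000
 * loads iopbaddr0 = 0x00, iopbaddr1 = 0x10, iopbaddr2 = 0xfe and
 * iopbaddr3 = 0x00, i.e. the iopb's DVMA address is handed to the
 * controller one byte at a time, least significant byte first.  note
 * that the macro consumes ADDR (shifts it) as a side effect.
 */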
170 
171 /*
172  * XDC_WAIT: wait for XDC's csr "BITS" to come on in "TIME".
173  *   LCV is a counter.  If it goes to zero then we timed out.
174  */
175 #define XDC_WAIT(XDC, LCV, TIME, BITS)					\
176 do {									\
177 	(LCV) = (TIME);							\
178 	while ((LCV) > 0) {						\
179 		if ((XDC)->xdc_csr & (BITS))				\
180 			break;						\
181 		(LCV) = (LCV) - 1;					\
182 		DELAY(1);						\
183 	}								\
184 } while (/* CONSTCOND */ 0)
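
/*
 * typical use (sketch of the pattern used by xdc_reset below): pass a
 * scratch counter and test it afterwards, e.g.
 *
 *	int del;
 *	XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET);
 *	if (del <= 0)
 *		... timed out: the bits never came on ...
 */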
185 
186 /*
187  * XDC_DONE: don't need IORQ, get error code and free (done after xdc_cmd)
188  */
189 #define XDC_DONE(SC,RQ,ER)						\
190 do {									\
191 	if ((RQ) == XD_ERR_FAIL) {					\
192 		(ER) = (RQ);						\
193 	} else {							\
194 		if ((SC)->ndone-- == XDC_SUBWAITLIM)			\
195 			wakeup(&(SC)->ndone);				\
196 		(ER) = (SC)->reqs[RQ].errno;				\
197 		XDC_FREE((SC), (RQ));					\
198 	}								\
199 } while (/* CONSTCOND */ 0)
200 
201 /*
202  * XDC_ADVANCE: advance iorq's pointers by a number of sectors
203  */
204 #define XDC_ADVANCE(IORQ, N)						\
205 do {									\
206 	if (N) {							\
207 		(IORQ)->sectcnt -= (N);					\
208 		(IORQ)->blockno += (N);					\
209 		(IORQ)->dbuf += ((N) * XDFM_BPS);			\
210 	}								\
211 } while (/* CONSTCOND */ 0)
212 
213 /*
214  * note - addresses you can sleep on:
215  *   [1] & of xd_softc's "state" (waiting for a chance to attach a drive)
216  *   [2] & of xdc_softc's "nfree" (waiting for a free iorq/iopb)
217  *   [3] & of xdc_softc's "ndone" (waiting for number of done iorq/iopb's
218  *                                 to drop below XDC_SUBWAITLIM)
219  *   [4] & an iorq (waiting for an XD_SUB_WAIT iorq to finish)
220  */
221 
222 
223 /*
224  * function prototypes
225  * "xdc_*" functions are internal, all others are external interfaces
226  */
227 
228 /* internals */
229 int	xdc_cmd(struct xdc_softc *, int, int, int, int, int, char *, int);
230 const char *xdc_e2str(int);
231 int	xdc_error(struct xdc_softc *, struct xd_iorq *, struct xd_iopb *, int,
232 	    int);
233 int	xdc_ioctlcmd(struct xd_softc *, dev_t dev, struct xd_iocmd *);
234 void	xdc_perror(struct xd_iorq *, struct xd_iopb *, int);
235 int	xdc_piodriver(struct xdc_softc *, int, int);
236 int	xdc_remove_iorq(struct xdc_softc *);
237 int	xdc_reset(struct xdc_softc *, int, int, int, struct xd_softc *);
238 inline void xdc_rqinit(struct xd_iorq *, struct xdc_softc *, struct xd_softc *,
239 	    int, u_long, int, void *, struct buf *);
240 void	xdc_rqtopb(struct xd_iorq *, struct xd_iopb *, int, int);
241 void	xdc_start(struct xdc_softc *, int);
242 int	xdc_startbuf(struct xdc_softc *, struct xd_softc *, struct buf *);
243 int	xdc_submit_iorq(struct xdc_softc *, int, int);
244 void	xdc_tick(void *);
245 void	xdc_xdreset(struct xdc_softc *, struct xd_softc *);
246 
247 /* machine interrupt hook */
248 int	xdcintr(void *);
249 
250 /* autoconf */
251 static int	xdcmatch(device_t, cfdata_t, void *);
252 static void	xdcattach(device_t, device_t, void *);
253 static int	xdc_print(void *, const char *);
254 
255 static int	xdmatch(device_t, cfdata_t, void *);
256 static void	xdattach(device_t, device_t, void *);
257 static void	xd_init(struct xd_softc *);
258 
259 static	void xddummystrat(struct buf *);
260 int	xdgetdisklabel(struct xd_softc *, void *);
261 
262 /*
263  * cfattach's: device driver interface to autoconfig
264  */
265 
266 CFATTACH_DECL_NEW(xdc, sizeof(struct xdc_softc),
267     xdcmatch, xdcattach, NULL, NULL);
268 
269 CFATTACH_DECL_NEW(xd, sizeof(struct xd_softc),
270     xdmatch, xdattach, NULL, NULL);
271 
272 struct xdc_attach_args {	/* this is the "aux" args to xdattach */
273 	int	driveno;	/* unit number */
274 	char	*dvmabuf;	/* scratch buffer for reading disk label */
275 	int	fullmode;	/* submit mode */
276 	int	booting;	/* are we booting or not? */
277 };
278 
279 dev_type_open(xdopen);
280 dev_type_close(xdclose);
281 dev_type_read(xdread);
282 dev_type_write(xdwrite);
283 dev_type_ioctl(xdioctl);
284 dev_type_strategy(xdstrategy);
285 dev_type_dump(xddump);
286 dev_type_size(xdsize);
287 
288 const struct bdevsw xd_bdevsw = {
289 	.d_open = xdopen,
290 	.d_close = xdclose,
291 	.d_strategy = xdstrategy,
292 	.d_ioctl = xdioctl,
293 	.d_dump = xddump,
294 	.d_psize = xdsize,
295 	.d_flag = D_DISK
296 };
297 
298 const struct cdevsw xd_cdevsw = {
299 	.d_open = xdopen,
300 	.d_close = xdclose,
301 	.d_read = xdread,
302 	.d_write = xdwrite,
303 	.d_ioctl = xdioctl,
304 	.d_stop = nostop,
305 	.d_tty = notty,
306 	.d_poll = nopoll,
307 	.d_mmap = nommap,
308 	.d_kqfilter = nokqfilter,
309 	.d_flag = D_DISK
310 };
311 
312 /*
313  * dkdriver
314  */
315 
316 struct dkdriver xddkdriver = { xdstrategy };
317 
318 /*
319  * start: disk label fix code (XXX)
320  */
321 
322 static void *xd_labeldata;
323 
324 static void
325 xddummystrat(struct buf *bp)
326 {
327 	if (bp->b_bcount != XDFM_BPS)
328 		panic("%s: b_bcount", __func__);
329 	memcpy(bp->b_data, xd_labeldata, XDFM_BPS);
330 	bp->b_oflags |= BO_DONE;
331 	bp->b_cflags &= ~BC_BUSY;
332 }
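
/*
 * how the hack works: xd_init() has already read the label sector into
 * a DVMA buffer, but readdisklabel() insists on fetching the label
 * through a strategy routine.  so xdgetdisklabel() below stashes the
 * buffer in xd_labeldata and passes xddummystrat, which just copies the
 * cached sector into the buf and marks it done -- no real I/O happens.
 */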
333 
334 int
335 xdgetdisklabel(struct xd_softc *xd, void *b)
336 {
337 	const char *err;
338 	struct sun_disklabel *sdl;
339 
340 	/* We already have the label data in `b'; setup for dummy strategy */
341 	xd_labeldata = b;
342 
343 	/* Required parameter for readdisklabel() */
344 	xd->sc_dk.dk_label->d_secsize = XDFM_BPS;
345 
346 	err = readdisklabel(MAKEDISKDEV(0, device_unit(xd->sc_dev), RAW_PART),
347 	    xddummystrat, xd->sc_dk.dk_label, xd->sc_dk.dk_cpulabel);
348 	if (err) {
349 		printf("%s: %s\n", device_xname(xd->sc_dev), err);
350 		return XD_ERR_FAIL;
351 	}
352 
353 	/* Ok, we have the label; fill in `pcyl' if there's SunOS magic */
354 	sdl = (struct sun_disklabel *)xd->sc_dk.dk_cpulabel->cd_block;
355 	if (sdl->sl_magic == SUN_DKMAGIC)
356 		xd->pcyl = sdl->sl_pcyl;
357 	else {
358 		printf("%s: WARNING: no `pcyl' in disk label.\n",
359 		    device_xname(xd->sc_dev));
360 		xd->pcyl = xd->sc_dk.dk_label->d_ncylinders +
361 		    xd->sc_dk.dk_label->d_acylinders;
362 		printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n",
363 		    device_xname(xd->sc_dev), xd->pcyl);
364 	}
365 
366 	xd->ncyl = xd->sc_dk.dk_label->d_ncylinders;
367 	xd->acyl = xd->sc_dk.dk_label->d_acylinders;
368 	xd->nhead = xd->sc_dk.dk_label->d_ntracks;
369 	xd->nsect = xd->sc_dk.dk_label->d_nsectors;
370 	xd->sectpercyl = xd->nhead * xd->nsect;
371 	xd->sc_dk.dk_label->d_secsize = XDFM_BPS; /* not handled by
372 						  * sun->bsd */
373 	return XD_ERR_AOK;
374 }
375 
376 /*
377  * end: disk label fix code (XXX)
378  */
379 
380 /*
381  * a u t o c o n f i g   f u n c t i o n s
382  */
383 
384 /*
385  * xdcmatch: determine if xdc is present or not.   we probe the
386  * board with bus_peek() to detect the xdc.
387  */
388 
389 int
390 xdcmatch(device_t parent, cfdata_t cf, void *aux)
391 {
392 	struct confargs *ca = aux;
393 
394 	/* No default VME address. */
395 	if (ca->ca_paddr == -1)
396 		return 0;
397 
398 	/* Make sure something is there... */
399 	if (bus_peek(ca->ca_bustype, ca->ca_paddr + 11, 1) == -1)
400 		return 0;
401 
402 	/* Default interrupt priority. */
403 	if (ca->ca_intpri == -1)
404 		ca->ca_intpri = 2;
405 
406 	return 1;
407 }
408 
409 /*
410  * xdcattach: attach controller
411  */
412 void
413 xdcattach(device_t parent, device_t self, void *aux)
414 {
415 	struct xdc_softc *xdc = device_private(self);
416 	struct confargs *ca = aux;
417 	struct xdc_attach_args xa;
418 	int lcv, rqno, err;
419 	struct xd_iopb_ctrl *ctl;
420 
421 	/* get addressing and intr level stuff from autoconfig and load it
422 	 * into our xdc_softc. */
423 
424 	xdc->sc_dev = self;
425 	xdc->xdc = (struct xdc *)bus_mapin(ca->ca_bustype, ca->ca_paddr,
426 	    sizeof(struct xdc));
427 	xdc->bustype = ca->ca_bustype;
428 	xdc->ipl     = ca->ca_intpri;
429 	xdc->vector  = ca->ca_intvec;
430 
431 	for (lcv = 0; lcv < XDC_MAXDEV; lcv++)
432 		xdc->sc_drives[lcv] = NULL;
433 
434 	/* allocate and zero buffers
435 	 *
436 	 * note: we simplify the code by allocating the max number of iopbs and
437 	 * iorq's up front.   thus, we avoid linked lists and the costs
438 	 * associated with them in exchange for wasting a little memory. */
439 
440 	xdc->iopbase = (struct xd_iopb *)dvma_malloc(XDC_MAXIOPB *
441 	    sizeof(struct xd_iopb));	/* KVA */
442 	memset(xdc->iopbase, 0, XDC_MAXIOPB * sizeof(struct xd_iopb));
443 	xdc->dvmaiopb = (struct xd_iopb *)dvma_kvtopa(xdc->iopbase,
444 	    xdc->bustype);
445 	xdc->reqs = malloc(XDC_MAXIOPB * sizeof(struct xd_iorq),
446 	    M_DEVBUF, M_NOWAIT | M_ZERO);
447 	if (xdc->reqs == NULL)
448 		panic("xdc malloc");
449 
450 	/* init free list, iorq to iopb pointers, and non-zero fields in the
451 	 * iopb which never change. */
452 
453 	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
454 		xdc->reqs[lcv].iopb = &xdc->iopbase[lcv];
455 		xdc->freereq[lcv] = lcv;
456 		xdc->iopbase[lcv].fixd = 1;	/* always the same */
457 		xdc->iopbase[lcv].naddrmod = XDC_ADDRMOD; /* always the same */
458 		xdc->iopbase[lcv].intr_vec = xdc->vector; /* always the same */
459 	}
460 	xdc->nfree = XDC_MAXIOPB;
461 	xdc->nrun = 0;
462 	xdc->waithead = xdc->waitend = xdc->nwait = 0;
463 	xdc->ndone = 0;
464 
465 	/* init queue of waiting bufs */
466 
467 	bufq_alloc(&xdc->sc_wq, "fcfs", 0);
468 	callout_init(&xdc->sc_tick_ch, 0);
469 
470 	/*
471 	 * section 7 of the manual tells us how to init the controller:
472 	 * - read controller parameters (6/0)
473 	 * - write controller parameters (5/0)
474 	 */
475 
476 	/* read controller parameters and ensure we have a 753/7053 */
477 
478 	rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL);
479 	if (rqno == XD_ERR_FAIL) {
480 		aprint_error(": couldn't read controller params\n");
481 		return;		/* shouldn't ever happen */
482 	}
483 	ctl = (struct xd_iopb_ctrl *)&xdc->iopbase[rqno];
484 	if (ctl->ctype != XDCT_753) {
485 		if (xdc->reqs[rqno].errno)
486 			aprint_error(": %s: ",
487 			    xdc_e2str(xdc->reqs[rqno].errno));
488 		aprint_error(": doesn't identify as a 753/7053\n");
489 		XDC_DONE(xdc, rqno, err);
490 		return;
491 	}
492 	aprint_normal(": Xylogics 753/7053, PROM=0x%x.%02x.%02x\n",
493 	    ctl->eprom_partno, ctl->eprom_lvl, ctl->eprom_rev);
494 	XDC_DONE(xdc, rqno, err);
495 
496 	/* now write controller parameters (xdc_cmd sets all params for us) */
497 
498 	rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL);
499 	XDC_DONE(xdc, rqno, err);
500 	if (err) {
501 		aprint_error_dev(self, "controller config error: %s\n",
502 		    xdc_e2str(err));
503 		return;
504 	}
505 
506 	/* link in interrupt with higher level software */
507 	isr_add_vectored(xdcintr, xdc, ca->ca_intpri, ca->ca_intvec);
508 	evcnt_attach_dynamic(&xdc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
509 	    device_xname(self), "intr");
510 
511 	/* now we must look for disks using autoconfig */
512 	xa.booting = 1;
513 	for (xa.driveno = 0; xa.driveno < XDC_MAXDEV; xa.driveno++)
514 		(void)config_found(self, (void *)&xa, xdc_print);
515 
516 	/* start the watchdog clock */
517 	callout_reset(&xdc->sc_tick_ch, XDC_TICKCNT, xdc_tick, xdc);
518 }
519 
520 int
521 xdc_print(void *aux, const char *name)
522 {
523 	struct xdc_attach_args *xa = aux;
524 
525 	if (name != NULL)
526 		aprint_normal("%s: ", name);
527 
528 	if (xa->driveno != -1)
529 		aprint_normal(" drive %d", xa->driveno);
530 
531 	return UNCONF;
532 }
533 
534 /*
535  * xdmatch: probe for disk.
536  *
537  * note: we almost always say disk is present.   this allows us to
538  * spin up and configure a disk after the system is booted (we can
539  * call xdattach!).  Also, wire down the relationship between the
540  * xd* and xdc* devices, to simplify boot device identification.
541  */
542 int
543 xdmatch(device_t parent, cfdata_t cf, void *aux)
544 {
545 	struct xdc_attach_args *xa = aux;
546 	int xd_unit;
547 
548 	/* Match only on the "wired-down" controller+disk. */
549 	xd_unit = device_unit(parent) * 2 + xa->driveno;
550 	if (cf->cf_unit != xd_unit)
551 		return 0;
552 
553 	return 1;
554 }
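
/*
 * e.g. with the wiring above (two drives per controller), drive 0 on
 * xdc0 attaches as xd0, drive 1 on xdc0 as xd1, drive 0 on xdc1 as xd2,
 * drive 1 on xdc1 as xd3, and so on.
 */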
555 
556 /*
557  * xdattach: attach a disk.
558  */
559 void
560 xdattach(device_t parent, device_t self, void *aux)
561 {
562 	struct xd_softc *xd = device_private(self);
563 	struct xdc_softc *xdc = device_private(parent);
564 	struct xdc_attach_args *xa = aux;
565 
566 	xd->sc_dev = self;
567 	aprint_normal("\n");
568 
569 	/*
570 	 * Always re-initialize the disk structure.  We want statistics
571 	 * to start with a clean slate.
572 	 */
573 	memset(&xd->sc_dk, 0, sizeof(xd->sc_dk));
574 	disk_init(&xd->sc_dk, device_xname(self), &xddkdriver);
575 
576 	xd->state = XD_DRIVE_UNKNOWN;	/* to start */
577 	xd->flags = 0;
578 	xd->parent = xdc;
579 
580 	xd->xd_drive = xa->driveno;
581 	xdc->sc_drives[xa->driveno] = xd;
582 
583 	/* Do init work common to attach and open. */
584 	xd_init(xd);
585 }
586 
587 /*
588  * end of autoconfig functions
589  */
590 
591 /*
592  * Initialize a disk.  This can be called both from autoconf and
593  * from xdopen/xdstrategy.
594  */
595 static void
596 xd_init(struct xd_softc *xd)
597 {
598 	struct xdc_softc *xdc;
599 	struct dkbad *dkb;
600 	struct xd_iopb_drive *driopb;
601 	void *dvmabuf;
602 	int rqno, err, spt, mb, blk, lcv, fullmode, newstate;
603 
604 	xdc = xd->parent;
605 	xd->state = XD_DRIVE_ATTACHING;
606 	newstate = XD_DRIVE_UNKNOWN;
607 	fullmode = (cold) ? XD_SUB_POLL : XD_SUB_WAIT;
608 	dvmabuf = dvma_malloc(XDFM_BPS);
609 
610 	/* first try and reset the drive */
611 	rqno = xdc_cmd(xdc, XDCMD_RST, 0, xd->xd_drive, 0, 0, 0, fullmode);
612 	XDC_DONE(xdc, rqno, err);
613 	if (err == XD_ERR_NRDY) {
614 		printf("%s: drive %d: off-line\n",
615 		    device_xname(xd->sc_dev), xd->xd_drive);
616 		goto done;
617 	}
618 	if (err) {
619 		printf("%s: ERROR 0x%02x (%s)\n",
620 		    device_xname(xd->sc_dev), err, xdc_e2str(err));
621 		goto done;
622 	}
623 	printf("%s: drive %d ready\n",
624 	    device_xname(xd->sc_dev), xd->xd_drive);
625 
626 	/* now set format parameters */
627 
628 	rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_FMT, xd->xd_drive,
629 	    0, 0, 0, fullmode);
630 	XDC_DONE(xdc, rqno, err);
631 	if (err) {
632 		printf("%s: write format parameters failed: %s\n",
633 		    device_xname(xd->sc_dev), xdc_e2str(err));
634 		goto done;
635 	}
636 
637 	/* get drive parameters */
638 	spt = 0;
639 	rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_DRV, xd->xd_drive,
640 	    0, 0, 0, fullmode);
641 	if (rqno != XD_ERR_FAIL) {
642 		driopb = (struct xd_iopb_drive *)&xdc->iopbase[rqno];
643 		spt = driopb->sectpertrk;
644 	}
645 	XDC_DONE(xdc, rqno, err);
646 	if (err) {
647 		printf("%s: read drive parameters failed: %s\n",
648 		    device_xname(xd->sc_dev), xdc_e2str(err));
649 		goto done;
650 	}
651 
652 	/*
653 	 * now set drive parameters (to semi-bogus values) so we can read the
654 	 * disk label.
655 	 */
656 	xd->pcyl = xd->ncyl = 1;
657 	xd->acyl = 0;
658 	xd->nhead = 1;
659 	xd->nsect = 1;
660 	xd->sectpercyl = 1;
661 	for (lcv = 0; lcv < 126; lcv++)	/* init empty bad144 table */
662 		xd->dkb.bt_bad[lcv].bt_cyl =
663 		    xd->dkb.bt_bad[lcv].bt_trksec = 0xffff;
664 	rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive,
665 	    0, 0, 0, fullmode);
666 	XDC_DONE(xdc, rqno, err);
667 	if (err) {
668 		printf("%s: write drive parameters failed: %s\n",
669 		    device_xname(xd->sc_dev), xdc_e2str(err));
670 		goto done;
671 	}
672 
673 	/* read disk label */
674 	rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive,
675 	    0, 1, dvmabuf, fullmode);
676 	XDC_DONE(xdc, rqno, err);
677 	if (err) {
678 		printf("%s: reading disk label failed: %s\n",
679 		    device_xname(xd->sc_dev), xdc_e2str(err));
680 		goto done;
681 	}
682 	newstate = XD_DRIVE_NOLABEL;
683 
684 	xd->hw_spt = spt;
685 	/* Attach the disk: must be before getdisklabel to malloc label */
686 	disk_attach(&xd->sc_dk);
687 
688 	if (xdgetdisklabel(xd, dvmabuf) != XD_ERR_AOK)
689 		goto done;
690 
691 	/* inform the user of what is up */
692 	printf("%s: <%s>, pcyl %d, hw_spt %d\n",
693 	    device_xname(xd->sc_dev), (char *)dvmabuf, xd->pcyl, spt);
694 	mb = xd->ncyl * (xd->nhead * xd->nsect) / (1048576 / XDFM_BPS);
695 	printf("%s: %dMB, %d cyl, %d head, %d sec\n",
696 	    device_xname(xd->sc_dev), mb,
697 	    xd->ncyl, xd->nhead, xd->nsect);
698 
699 	/* now set the real drive parameters! */
700 	rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive,
701 	    0, 0, 0, fullmode);
702 	XDC_DONE(xdc, rqno, err);
703 	if (err) {
704 		printf("%s: write real drive parameters failed: %s\n",
705 		    device_xname(xd->sc_dev), xdc_e2str(err));
706 		goto done;
707 	}
708 	newstate = XD_DRIVE_ONLINE;
709 
710 	/*
711 	 * read bad144 table. this table resides on the first sector of the
712 	 * last track of the disk (i.e. second cyl of "acyl" area).
713 	 */
714 	blk = (xd->ncyl + xd->acyl - 1) * (xd->nhead * xd->nsect) + /* last cyl */
715 	    (xd->nhead - 1) * xd->nsect;	/* last head */
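	/*
	 * e.g. (made-up geometry): ncyl = 800, acyl = 2, nhead = 15,
	 * nsect = 67 gives blk = 801 * 1005 + 14 * 67 = 805943, the
	 * first sector of the last track of the last alternate cylinder.
	 */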
716 	rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive,
717 				   blk, 1, dvmabuf, fullmode);
718 	XDC_DONE(xdc, rqno, err);
719 	if (err) {
720 		printf("%s: reading bad144 failed: %s\n",
721 		    device_xname(xd->sc_dev), xdc_e2str(err));
722 		goto done;
723 	}
724 
725 	/* check dkbad for sanity */
726 	dkb = (struct dkbad *)dvmabuf;
727 	for (lcv = 0; lcv < 126; lcv++) {
728 		if ((dkb->bt_bad[lcv].bt_cyl == 0xffff ||
729 		    dkb->bt_bad[lcv].bt_cyl == 0) &&
730 		    dkb->bt_bad[lcv].bt_trksec == 0xffff)
731 			continue;	/* blank */
732 		if (dkb->bt_bad[lcv].bt_cyl >= xd->ncyl)
733 			break;
734 		if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xd->nhead)
735 			break;
736 		if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xd->nsect)
737 			break;
738 	}
739 	if (lcv != 126) {
740 		printf("%s: warning: invalid bad144 sector!\n",
741 		    device_xname(xd->sc_dev));
742 	} else {
743 		memcpy(&xd->dkb, dvmabuf, XDFM_BPS);
744 	}
745 
746  done:
747 	xd->state = newstate;
748 	dvma_free(dvmabuf, XDFM_BPS);
749 }
750 
751 /*
752  * { b , c } d e v s w   f u n c t i o n s
753  */
754 
755 /*
756  * xdclose: close device
757  */
758 int
759 xdclose(dev_t dev, int flag, int fmt, struct lwp *l)
760 {
761 	struct xd_softc *xd = device_lookup_private(&xd_cd, DISKUNIT(dev));
762 	int part = DISKPART(dev);
763 
764 	/* clear mask bits */
765 
766 	switch (fmt) {
767 	case S_IFCHR:
768 		xd->sc_dk.dk_copenmask &= ~(1 << part);
769 		break;
770 	case S_IFBLK:
771 		xd->sc_dk.dk_bopenmask &= ~(1 << part);
772 		break;
773 	}
774 	xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
775 
776 	return 0;
777 }
778 
779 /*
780  * xddump: crash dump system
781  */
782 int
783 xddump(dev_t dev, daddr_t blkno, void *va, size_t sz)
784 {
785 	int unit, part;
786 	struct xd_softc *xd;
787 
788 	unit = DISKUNIT(dev);
789 	part = DISKPART(dev);
790 
791 	xd = device_lookup_private(&xd_cd, unit);
792 	if (xd == NULL)
793 		return ENXIO;
794 
795 	printf("%s%c: crash dump not supported (yet)\n",
796 	    device_xname(xd->sc_dev), 'a' + part);
797 
798 	return ENXIO;
799 
800 	/* outline: globals: "dumplo" == sector number of partition to start
801 	 * dump at (convert to physical sector with partition table)
802 	 * "dumpsize" == size of dump in clicks "physmem" == size of physical
803 	 * memory (clicks, ctob() to get bytes) (normal case: dumpsize ==
804 	 * physmem)
805 	 *
806 	 * dump a copy of physical memory to the dump device starting at sector
807 	 * "dumplo" in the swap partition (make sure > 0).   map in pages as
808 	 * we go.   use polled I/O.
809 	 *
810 	 * XXX how to handle NON_CONTIG?
811 	 */
812 }
813 
814 static enum kauth_device_req
815 xd_getkauthreq(u_char cmd)
816 {
817 	enum kauth_device_req req;
818 
819 	switch (cmd) {
820 	case XDCMD_WR:
821 	case XDCMD_XWR:
822 		req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITE;
823 		break;
824 
825 	case XDCMD_RD:
826 	case XDCMD_XRD:
827 		req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READ;
828 		break;
829 
830 	case XDCMD_RDP:
831 		req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READCONF;
832 		break;
833 
834 	case XDCMD_WRP:
835 	case XDCMD_RST:
836 		req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITECONF;
837 		break;
838 
839 	case XDCMD_NOP:
840 	case XDCMD_SK:
841 	case XDCMD_TST:
842 	default:
843 		req = 0;
844 		break;
845 	}
846 
847 	return req;
848 }
849 
850 /*
851  * xdioctl: ioctls on XD drives.   based on ioctl's of other netbsd disks.
852  */
853 int
854 xdioctl(dev_t dev, u_long command, void *addr, int flag, struct lwp *l)
855 {
856 	struct xd_softc *xd;
857 	struct xd_iocmd *xio;
858 	int     error, s, unit;
859 
860 	unit = DISKUNIT(dev);
861 
862 	xd = device_lookup_private(&xd_cd, unit);
863 	if (xd == NULL)
864 		return (ENXIO);
865 
866 	/* switch on ioctl type */
867 
868 	switch (command) {
869 	case DIOCSBAD:		/* set bad144 info */
870 		if ((flag & FWRITE) == 0)
871 			return EBADF;
872 		s = splbio();
873 		memcpy(&xd->dkb, addr, sizeof(xd->dkb));
874 		splx(s);
875 		return 0;
876 
877 	case DIOCGDINFO:	/* get disk label */
878 		memcpy(addr, xd->sc_dk.dk_label, sizeof(struct disklabel));
879 		return 0;
880 
881 	case DIOCGPART:	/* get partition info */
882 		((struct partinfo *)addr)->disklab = xd->sc_dk.dk_label;
883 		((struct partinfo *)addr)->part =
884 		    &xd->sc_dk.dk_label->d_partitions[DISKPART(dev)];
885 		return 0;
886 
887 	case DIOCSDINFO:	/* set disk label */
888 		if ((flag & FWRITE) == 0)
889 			return EBADF;
890 		error = setdisklabel(xd->sc_dk.dk_label,
891 		    (struct disklabel *)addr, /* xd->sc_dk.dk_openmask : */ 0,
892 		    xd->sc_dk.dk_cpulabel);
893 		if (error == 0) {
894 			if (xd->state == XD_DRIVE_NOLABEL)
895 				xd->state = XD_DRIVE_ONLINE;
896 		}
897 		return error;
898 
899 	case DIOCWLABEL:	/* change write status of disk label */
900 		if ((flag & FWRITE) == 0)
901 			return EBADF;
902 		if (*(int *)addr)
903 			xd->flags |= XD_WLABEL;
904 		else
905 			xd->flags &= ~XD_WLABEL;
906 		return 0;
907 
908 	case DIOCWDINFO:	/* write disk label */
909 		if ((flag & FWRITE) == 0)
910 			return EBADF;
911 		error = setdisklabel(xd->sc_dk.dk_label,
912 		    (struct disklabel *)addr, /* xd->sc_dk.dk_openmask : */ 0,
913 		    xd->sc_dk.dk_cpulabel);
914 		if (error == 0) {
915 			if (xd->state == XD_DRIVE_NOLABEL)
916 				xd->state = XD_DRIVE_ONLINE;
917 
918 			/* Simulate opening partition 0 so write succeeds. */
919 			xd->sc_dk.dk_openmask |= (1 << 0);
920 			error = writedisklabel(MAKEDISKDEV(major(dev),
921 			    DISKUNIT(dev), RAW_PART),
922 			    xdstrategy, xd->sc_dk.dk_label,
923 			    xd->sc_dk.dk_cpulabel);
924 			xd->sc_dk.dk_openmask =
925 			    xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
926 		}
927 		return error;
928 
929 	case DIOSXDCMD: {
930 		enum kauth_device_req req;
931 
932 		xio = (struct xd_iocmd *)addr;
933 		req = xd_getkauthreq(xio->cmd);
934 		if ((error = kauth_authorize_device_passthru(l->l_cred,
935 		    dev, req, xio)) != 0)
936 			return error;
937 		return xdc_ioctlcmd(xd, dev, xio);
938 		}
939 
940 	default:
941 		return ENOTTY;
942 	}
943 }
944 
945 /*
946  * xdopen: open drive
947  */
948 int
949 xdopen(dev_t dev, int flag, int fmt, struct lwp *l)
950 {
951 	int err, unit, part, s;
952 	struct xd_softc *xd;
953 
954 	/* first, could it be a valid target? */
955 	unit = DISKUNIT(dev);
956 	xd = device_lookup_private(&xd_cd, unit);
957 	if (xd == NULL)
958 		return ENXIO;
959 	part = DISKPART(dev);
960 	err = 0;
961 
962 	/*
963 	 * If some other process is doing init, sleep.
964 	 */
965 	s = splbio();
966 	while (xd->state == XD_DRIVE_ATTACHING) {
967 		if (tsleep(&xd->state, PRIBIO, "xdopen", 0)) {
968 			err = EINTR;
969 			goto done;
970 		}
971 	}
972 	/* Do we need to init the drive? */
973 	if (xd->state == XD_DRIVE_UNKNOWN) {
974 		xd_init(xd);
975 		wakeup(&xd->state);
976 	}
977 	/* Was the init successful? */
978 	if (xd->state == XD_DRIVE_UNKNOWN) {
979 		err = EIO;
980 		goto done;
981 	}
982 
983 	/* check for partition */
984 	if (part != RAW_PART &&
985 	    (part >= xd->sc_dk.dk_label->d_npartitions ||
986 		xd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
987 		err = ENXIO;
988 		goto done;
989 	}
990 
991 	/* set open masks */
992 	switch (fmt) {
993 	case S_IFCHR:
994 		xd->sc_dk.dk_copenmask |= (1 << part);
995 		break;
996 	case S_IFBLK:
997 		xd->sc_dk.dk_bopenmask |= (1 << part);
998 		break;
999 	}
1000 	xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
1001 
1002  done:
1003 	splx(s);
1004 	return err;
1005 }
1006 
1007 int
1008 xdread(dev_t dev, struct uio *uio, int flags)
1009 {
1010 
1011 	return physio(xdstrategy, NULL, dev, B_READ, minphys, uio);
1012 }
1013 
1014 int
1015 xdwrite(dev_t dev, struct uio *uio, int flags)
1016 {
1017 
1018 	return physio(xdstrategy, NULL, dev, B_WRITE, minphys, uio);
1019 }
1020 
1021 
1022 /*
1023  * xdsize: return size of a partition for a dump
1024  */
1025 int
1026 xdsize(dev_t dev)
1027 {
1028 	struct xd_softc *xdsc;
1029 	int unit, part, size, omask;
1030 
1031 	/* valid unit? */
1032 	unit = DISKUNIT(dev);
1033 	xdsc = device_lookup_private(&xd_cd, unit);
1034 	if (xdsc == NULL)
1035 		return -1;
1036 
1037 	part = DISKPART(dev);
1038 	omask = xdsc->sc_dk.dk_openmask & (1 << part);
1039 
1040 	if (omask == 0 && xdopen(dev, 0, S_IFBLK, NULL) != 0)
1041 		return -1;
1042 
1043 	/* do it */
1044 	if (xdsc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1045 		size = -1;	/* only give valid size for swap partitions */
1046 	else
1047 		size = xdsc->sc_dk.dk_label->d_partitions[part].p_size *
1048 		    (xdsc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1049 	if (omask == 0 && xdclose(dev, 0, S_IFBLK, NULL) != 0)
1050 		return -1;
1051 	return size;
1052 }
1053 
1054 /*
1055  * xdstrategy: buffering system interface to xd.
1056  */
1057 void
1058 xdstrategy(struct buf *bp)
1059 {
1060 	struct xd_softc *xd;
1061 	struct xdc_softc *parent;
1062 	int s, unit;
1063 
1064 	unit = DISKUNIT(bp->b_dev);
1065 
1066 	/* check for live device */
1067 
1068 	xd = device_lookup_private(&xd_cd, unit);
1069 	if (xd == NULL ||
1070 	    bp->b_blkno < 0 ||
1071 	    (bp->b_bcount % xd->sc_dk.dk_label->d_secsize) != 0) {
1072 		bp->b_error = EINVAL;
1073 		goto done;
1074 	}
1075 
1076 	/* There should always be an open first. */
1077 	if (xd->state == XD_DRIVE_UNKNOWN) {
1078 		bp->b_error = EIO;
1079 		goto done;
1080 	}
1081 
1082 	if (xd->state != XD_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) {
1083 		/* no I/O to unlabeled disks, unless raw partition */
1084 		bp->b_error = EIO;
1085 		goto done;
1086 	}
1087 	/* short circuit zero length request */
1088 
1089 	if (bp->b_bcount == 0)
1090 		goto done;
1091 
1092 	/* check bounds with label (disksubr.c).  Determine the size of the
1093 	 * transfer, and make sure it is within the boundaries of the
1094 	 * partition. Adjust transfer if needed, and signal errors or early
1095 	 * completion. */
1096 
1097 	if (bounds_check_with_label(&xd->sc_dk, bp,
1098 		(xd->flags & XD_WLABEL) != 0) <= 0)
1099 		goto done;
1100 
1101 	/*
1102 	 * now we know we have a valid buf structure that we need to do I/O
1103 	 * on.
1104 	 *
1105 	 * note that we don't disksort because the controller has a sorting
1106 	 * algorithm built into the hardware.
1107 	 */
1108 
1109 	s = splbio();		/* protect the queues */
1110 
1111 	/* first, give jobs in front of us a chance */
1112 	parent = xd->parent;
1113 	while (parent->nfree > 0 && bufq_peek(parent->sc_wq) != NULL)
1114 		if (xdc_startbuf(parent, NULL, NULL) != XD_ERR_AOK)
1115 			break;
1116 
1117 	/*
1118 	 * if there are no free iorq's, then we just queue and return. the
1119 	 * bufs will get picked up later by xdcintr().
1120 	 */
1121 	if (parent->nfree == 0) {
1122 		bufq_put(parent->sc_wq, bp);
1123 		splx(s);
1124 		return;
1125 	}
1126 
1127 	/* now we have free iopb's and we are at splbio... start 'em up */
1128 	if (xdc_startbuf(parent, xd, bp) != XD_ERR_AOK) {
1129 		splx(s);	/* buf was re-queued; don't leave splbio raised */
1130 		return;
1131 	}
1132 
1133 	/* done! */
1134 	splx(s);
1135 	return;
1136 
1137  done:
1138 	/* tells upper layers we are done with this buf */
1139 	bp->b_resid = bp->b_bcount;
1140 	biodone(bp);
1141 }
1142 /*
1143  * end of {b,c}devsw functions
1144  */
1145 
1146 /*
1147  * i n t e r r u p t   f u n c t i o n
1148  *
1149  * xdcintr: hardware interrupt.
1150  */
1151 int
1152 xdcintr(void *v)
1153 {
1154 	struct xdc_softc *xdcsc = v;
1155 
1156 	/* kick the event counter */
1157 	xdcsc->sc_intrcnt.ev_count++;
1158 
1159 	/* remove as many done IOPBs as possible */
1160 	xdc_remove_iorq(xdcsc);
1161 
1162 	/* start any iorq's already waiting */
1163 	xdc_start(xdcsc, XDC_MAXIOPB);
1164 
1165 	/* fill up any remaining iorq's with queued buffers */
1166 	while (xdcsc->nfree > 0 && bufq_peek(xdcsc->sc_wq) != NULL)
1167 		if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK)
1168 			break;
1169 
1170 	return 1;
1171 }
1172 /*
1173  * end of interrupt function
1174  */
1175 
1176 /*
1177  * i n t e r n a l   f u n c t i o n s
1178  */
1179 
1180 /*
1181  * xdc_rqinit: fill out the fields of an I/O request
1182  */
1183 
1184 inline void
1185 xdc_rqinit(struct xd_iorq *rq, struct xdc_softc *xdc, struct xd_softc *xd,
1186     int md, u_long blk, int cnt, void *db, struct buf *bp)
1187 {
1188 
1189 	rq->xdc = xdc;
1190 	rq->xd = xd;
1191 	rq->ttl = XDC_MAXTTL + 10;
1192 	rq->mode = md;
1193 	rq->tries = rq->errno = rq->lasterror = 0;
1194 	rq->blockno = blk;
1195 	rq->sectcnt = cnt;
1196 	rq->dbuf = rq->dbufbase = db;
1197 	rq->buf = bp;
1198 }
1199 
1200 /*
1201  * xdc_rqtopb: load up an IOPB based on an iorq
1202  */
1203 void
1204 xdc_rqtopb(struct xd_iorq *iorq, struct xd_iopb *iopb, int cmd, int subfun)
1205 {
1206 	u_long  block, dp;
1207 
1208 	/* standard stuff */
1209 
1210 	iopb->errs = iopb->done = 0;
1211 	iopb->comm = cmd;
1212 	iopb->errno = iopb->status = 0;
1213 	iopb->subfun = subfun;
1214 	if (iorq->xd)
1215 		iopb->unit = iorq->xd->xd_drive;
1216 	else
1217 		iopb->unit = 0;
1218 
1219 	/* check for alternate IOPB format */
1220 
1221 	if (cmd == XDCMD_WRP) {
1222 		switch (subfun) {
1223 		case XDFUN_CTL:{
1224 			struct xd_iopb_ctrl *ctrl =
1225 			    (struct xd_iopb_ctrl *)iopb;
1226 			iopb->lll = 0;
1227 			iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL)
1228 			    ? 0 : iorq->xdc->ipl;
1229 			ctrl->param_a = XDPA_TMOD | XDPA_DACF;
1230 			ctrl->param_b = XDPB_ROR | XDPB_TDT_3_2USEC;
1231 			ctrl->param_c = XDPC_OVS | XDPC_COP | XDPC_ASR |
1232 			    XDPC_RBC | XDPC_ECC2;
1233 			ctrl->throttle = XDC_THROTTLE;
1234 #ifdef sparc
1235 			if (CPU_ISSUN4 && cpuinfo.cpu_type == CPUTYP_4_300)
1236 				ctrl->delay = XDC_DELAY_4_300;
1237 			else
1238 				ctrl->delay = XDC_DELAY_SPARC;
1239 #endif
1240 #ifdef sun3
1241 			ctrl->delay = XDC_DELAY_SUN3;
1242 #endif
1243 			break;
1244 			}
1245 		case XDFUN_DRV:{
1246 			struct xd_iopb_drive *drv =
1247 			    (struct xd_iopb_drive *)iopb;
1248 			/* we assume that the disk label has the right
1249 			 * info */
1250 			if (XD_STATE(iorq->mode) == XD_SUB_POLL)
1251 				drv->dparam_ipl = (XDC_DPARAM << 3);
1252 			else
1253 				drv->dparam_ipl = (XDC_DPARAM << 3) |
1254 				    iorq->xdc->ipl;
1255 			drv->maxsect = iorq->xd->nsect - 1;
1256 			drv->maxsector = drv->maxsect;
1257 			/* note: maxsector != maxsect only if you are
1258 			 * doing cyl sparing */
1259 			drv->headoff = 0;
1260 			drv->maxcyl = iorq->xd->pcyl - 1;
1261 			drv->maxhead = iorq->xd->nhead - 1;
1262 			break;
1263 			}
1264 		case XDFUN_FMT:
1265 		    {
1266 			struct xd_iopb_format *form =
1267 			    (struct xd_iopb_format *)iopb;
1268 
1269 			if (XD_STATE(iorq->mode) == XD_SUB_POLL)
1270 				form->interleave_ipl = (XDC_INTERLEAVE << 3);
1271 			else
1272 				form->interleave_ipl = (XDC_INTERLEAVE << 3) |
1273 				    iorq->xdc->ipl;
1274 			form->field1 = XDFM_FIELD1;
1275 			form->field2 = XDFM_FIELD2;
1276 			form->field3 = XDFM_FIELD3;
1277 			form->field4 = XDFM_FIELD4;
1278 			form->bytespersec = XDFM_BPS;
1279 			form->field6 = XDFM_FIELD6;
1280 			form->field7 = XDFM_FIELD7;
1281 			break;
1282 		    }
1283 		}
1284 	} else {
1285 
1286 		/* normal IOPB case (harmless to RDP command) */
1287 
1288 		iopb->lll = 0;
1289 		iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL)
1290 		    ? 0 : iorq->xdc->ipl;
1291 		iopb->sectcnt = iorq->sectcnt;
1292 		block = iorq->blockno;
1293 		if (iorq->xd == NULL || block == 0) {
1294 			iopb->sectno = iopb->headno = iopb->cylno = 0;
1295 		} else {
1296 			iopb->sectno = block % iorq->xd->nsect;
1297 			block = block / iorq->xd->nsect;
1298 			iopb->headno = block % iorq->xd->nhead;
1299 			block = block / iorq->xd->nhead;
1300 			iopb->cylno = block;
1301 		}
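		/*
		 * e.g. (made-up geometry, nsect = 67, nhead = 15):
		 * absolute block 100000 maps to sectno = 100000 % 67 = 36,
		 * headno = (100000 / 67) % 15 = 1492 % 15 = 7 and
		 * cylno = 1492 / 15 = 99.
		 */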
1302 		iopb->daddr = dp = (iorq->dbuf == NULL) ? 0 :
1303 		    dvma_kvtopa(iorq->dbuf, iorq->xdc->bustype);
1304 		iopb->addrmod = XDC_ADDRMOD;
1305 	}
1306 }
1307 
1308 /*
1309  * xdc_cmd: front end for POLL'd and WAIT'd commands.  Returns rqno.
1310  * If you've already got an IORQ, you can call submit directly (currently
1311  * there is no need to do this).    NORM requests are handled separately.
1312  */
1313 int
1314 xdc_cmd(struct xdc_softc *xdcsc, int cmd, int subfn, int unit, int block,
1315     int scnt, char *dptr, int fullmode)
1316 {
1317 	struct xd_iorq *iorq;
1318 	struct xd_iopb *iopb;
1319 	int rqno, retry;
1320 	int submode = XD_STATE(fullmode);
1321 
1322 	/* get iorq/iopb */
1323 	switch (submode) {
1324 	case XD_SUB_POLL:
1325 		while (xdcsc->nfree == 0) {
1326 			if (xdc_piodriver(xdcsc, 0, 1) != XD_ERR_AOK)
1327 				return XD_ERR_FAIL;
1328 		}
1329 		break;
1330 	case XD_SUB_WAIT:
1331 		retry = 1;
1332 		while (retry) {
1333 			while (xdcsc->nfree == 0) {
1334 			    if (tsleep(&xdcsc->nfree, PRIBIO, "xdnfree", 0))
1335 				return XD_ERR_FAIL;
1336 			}
1337 			while (xdcsc->ndone > XDC_SUBWAITLIM) {
1338 			    if (tsleep(&xdcsc->ndone, PRIBIO, "xdsubwait", 0))
1339 				return XD_ERR_FAIL;
1340 			}
1341 			if (xdcsc->nfree)
1342 				retry = 0;	/* got it */
1343 		}
1344 		break;
1345 	default:
1346 		return XD_ERR_FAIL;	/* illegal */
1347 	}
1348 	if (xdcsc->nfree == 0)
1349 		panic("xdcmd nfree");
1350 	rqno = XDC_RQALLOC(xdcsc);
1351 	iorq = &xdcsc->reqs[rqno];
1352 	iopb = iorq->iopb;
1353 
1354 
1355 	/* init iorq/iopb */
1356 	xdc_rqinit(iorq, xdcsc,
1357 	    (unit == XDC_NOUNIT) ? NULL : xdcsc->sc_drives[unit],
1358 	    fullmode, block, scnt, dptr, NULL);
1359 
1360 	/* load IOPB from iorq */
1361 	xdc_rqtopb(iorq, iopb, cmd, subfn);
1362 
1363 	/* submit it for processing */
1364 	xdc_submit_iorq(xdcsc, rqno, fullmode);	/* error code will be in iorq */
1365 
1366 	return rqno;
1367 }
1368 
1369 /*
1370  * xdc_startbuf
1371  * start a buffer running, assumes nfree > 0
1372  */
1373 int
1374 xdc_startbuf(struct xdc_softc *xdcsc, struct xd_softc *xdsc, struct buf *bp)
1375 {
1376 	int rqno, partno;
1377 	struct xd_iorq *iorq;
1378 	struct xd_iopb *iopb;
1379 	u_long  block;
1380 	void *dbuf;
1381 
1382 	if (xdcsc->nfree == 0)
1383 		panic("xdc_startbuf free");
1384 	rqno = XDC_RQALLOC(xdcsc);
1385 	iorq = &xdcsc->reqs[rqno];
1386 	iopb = iorq->iopb;
1387 
1388 	/* get buf */
1389 
1390 	if (bp == NULL) {
1391 		bp = bufq_get(xdcsc->sc_wq);
1392 		if (bp == NULL)
1393 			panic("%s bp", __func__);
1394 		xdsc = xdcsc->sc_drives[DISKUNIT(bp->b_dev)];
1395 	}
1396 	partno = DISKPART(bp->b_dev);
1397 #ifdef XDC_DEBUG
1398 	printf("xdc_startbuf: %s%c: %s block %lld\n", device_xname(xdsc->sc_dev),
1399 	    'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", (long long)bp->b_blkno);
1400 	printf("%s: b_bcount %d, b_data %p\n", __func__,
1401 	    bp->b_bcount, bp->b_data);
1402 #endif
1403 
1404 	/*
1405 	 * load request.  we have to calculate the correct block number based
1406 	 * on partition info.
1407 	 *
1408 	 * also, note that there are two kinds of buf structures, those with
1409 	 * B_PHYS set and those without B_PHYS.   if B_PHYS is set, then it is
1410 	 * a raw I/O (to a cdevsw) and we are doing I/O directly to the users'
1411 	 * buffer which has already been mapped into DVMA space. (Not on sun3)
1412 	 * However, if B_PHYS is not set, then the buffer is a normal system
1413 	 * buffer which does *not* live in DVMA space.  In that case we call
1414 	 * dvma_mapin to map it into DVMA space so we can do the DMA to it.
1415 	 *
1416 	 * in cases where we do a dvma_mapin, note that iorq points to the
1417 	 * buffer as mapped into DVMA space, whereas the bp->b_data points
1418 	 * to its non-DVMA mapping.
1419 	 *
1420 	 * XXX - On the sun3, B_PHYS does NOT mean the buffer is mapped
1421 	 * into dvma space, only that it was remapped into the kernel.
1422 	 * We ALWAYS have to remap the kernel buf into DVMA space.
1423 	 * (It is done inexpensively, using whole segments!)
1424 	 */
1425 
1426 	block = bp->b_blkno + ((partno == RAW_PART) ? 0 :
1427 	    xdsc->sc_dk.dk_label->d_partitions[partno].p_offset);
1428 
1429 	dbuf = dvma_mapin(bp->b_data, bp->b_bcount, 0);
1430 	if (dbuf == NULL) {	/* out of DVMA space */
1431 		printf("%s: warning: out of DVMA space\n",
1432 		    device_xname(xdcsc->sc_dev));
1433 		XDC_FREE(xdcsc, rqno);
1434 		bufq_put(xdcsc->sc_wq, bp);
1435 		return XD_ERR_FAIL;	/* XXX: need some sort of
1436 		                         * call-back scheme here? */
1437 	}
1438 
1439 	/* init iorq and load iopb from it */
1440 
1441 	xdc_rqinit(iorq, xdcsc, xdsc, XD_SUB_NORM | XD_MODE_VERBO, block,
1442 	    bp->b_bcount / XDFM_BPS, dbuf, bp);
1443 
1444 	xdc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XDCMD_RD : XDCMD_WR, 0);
1445 
1446 	/* Instrumentation. */
1447 	disk_busy(&xdsc->sc_dk);
1448 
1449 	/* now submit [note that xdc_submit_iorq can never fail on NORM reqs] */
1450 
1451 	xdc_submit_iorq(xdcsc, rqno, XD_SUB_NORM);
1452 	return XD_ERR_AOK;
1453 }
1454 
1455 
1456 /*
1457  * xdc_submit_iorq: submit an iorq for processing.  returns XD_ERR_AOK
1458  * if ok.  if it fails it returns an error code.  type is XD_SUB_*.
1459  *
1460  * note: caller frees iorq in all cases except NORM
1461  *
1462  * return value:
1463  *   NORM: XD_AOK (req pending), XD_FAIL (couldn't submit request)
1464  *   WAIT: XD_AOK (success), <error-code> (failed)
1465  *   POLL: <same as WAIT>
1466  *   NOQ : <same as NORM>
1467  *
1468  * there are three sources for i/o requests:
1469  * [1] xdstrategy: normal block I/O, using "struct buf" system.
1470  * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts.
1471  * [3] open/ioctl: these are I/O requests done in the context of a process,
1472  *                 and the process should block until they are done.
1473  *
1474  * software state is stored in the iorq structure.  each iorq has an
1475  * iopb structure.  the hardware understands the iopb structure.
1476  * every command must go through an iopb.  a 7053 can only handle
1477  * XDC_MAXIOPB (31) active iopbs at one time.  iopbs are allocated in
1478  * DVMA space at boot up time.  what happens if we run out of iopb's?
1479  * for i/o type [1], the buffers are queued at the "buff" layer and
1480  * picked up later by the interrupt routine.  for case [2] the
1481  * programmed i/o driver is called with a special flag that says
1482  * return when one iopb is free.  for case [3] the process can sleep
1483  * on the iorq free list until some iopbs are available.
1484  */
1485 
1486 int
1487 xdc_submit_iorq(struct xdc_softc *xdcsc, int iorqno, int type)
1488 {
1489 	u_long  iopbaddr;
1490 	struct xd_iorq *iorq = &xdcsc->reqs[iorqno];
1491 
1492 #ifdef XDC_DEBUG
1493 	printf("xdc_submit_iorq(%s, no=%d, type=%d)\n",
1494 	    device_xname(xdcsc->sc_dev), iorqno, type);
1495 #endif
1496 
1497 	/* first check and see if controller is busy */
1498 	if (xdcsc->xdc->xdc_csr & XDC_ADDING) {
1499 #ifdef XDC_DEBUG
1500 		printf("%s: XDC not ready (ADDING)\n", __func__);
1501 #endif
1502 		if (type == XD_SUB_NOQ)
1503 			return XD_ERR_FAIL;	/* failed */
1504 		XDC_TWAIT(xdcsc, iorqno);	/* put at end of waitq */
1505 		switch (type) {
1506 		case XD_SUB_NORM:
1507 			return XD_ERR_AOK;	/* success */
1508 		case XD_SUB_WAIT:
1509 			while (iorq->iopb->done == 0) {
1510 				(void)tsleep(iorq, PRIBIO, "xdciorq", 0);
1511 			}
1512 			return iorq->errno;
1513 		case XD_SUB_POLL:
1514 			return xdc_piodriver(xdcsc, iorqno, 0);
1515 		default:
1516 			panic("%s adding", __func__);
1517 		}
1518 	}
1519 #ifdef XDC_DEBUG
1520 	{
1521 		u_char *rio = (u_char *)iorq->iopb;
1522 		int sz = sizeof(struct xd_iopb), lcv;
1523 		printf("%s: aio #%d [",
1524 		    device_xname(xdcsc->sc_dev), iorq - xdcsc->reqs);
1525 		for (lcv = 0; lcv < sz; lcv++)
1526 			printf(" %02x", rio[lcv]);
1527 		printf("]\n");
1528 	}
1529 #endif				/* XDC_DEBUG */
1530 
1531 	/* controller not busy, start command */
1532 	iopbaddr = dvma_kvtopa(iorq->iopb, xdcsc->bustype);
1533 	XDC_GO(xdcsc->xdc, iopbaddr);	/* go! */
1534 	xdcsc->nrun++;
1535 	/* command now running, wrap it up */
1536 	switch (type) {
1537 	case XD_SUB_NORM:
1538 	case XD_SUB_NOQ:
1539 		return XD_ERR_AOK;	/* success */
1540 	case XD_SUB_WAIT:
1541 		while (iorq->iopb->done == 0) {
1542 			(void)tsleep(iorq, PRIBIO, "xdciorq", 0);
1543 		}
1544 		return iorq->errno;
1545 	case XD_SUB_POLL:
1546 		return xdc_piodriver(xdcsc, iorqno, 0);
1547 	default:
1548 		panic("%s wrap up", __func__);
1549 	}
1550 	panic("%s: impossible", __func__);
1551 	return 0;	/* not reached */
1552 }
1553 
1554 
1555 /*
1556  * xdc_piodriver
1557  *
1558  * programmed i/o driver.   this function takes over the computer
1559  * and drains off all i/o requests.   it returns the status of the iorq
1560  * the caller is interested in.   if freeone is true, then it returns
1561  * when there is a free iorq.
1562  */
1563 int
1564 xdc_piodriver(struct xdc_softc *xdcsc, int iorqno, int freeone)
1565 {
1566 	int nreset = 0;
1567 	int retval = 0;
1568 	u_long  count;
1569 	struct xdc *xdc = xdcsc->xdc;
1570 #ifdef XDC_DEBUG
1571 	printf("%s(%s, %d, freeone=%d)\n", __func__,
1572 	    device_xname(xdcsc->sc_dev), iorqno, freeone);
1573 #endif
1574 
1575 	while (xdcsc->nwait || xdcsc->nrun) {
1576 #ifdef XDC_DEBUG
1577 		printf("%s: wait=%d, run=%d\n", __func__,
1578 		    xdcsc->nwait, xdcsc->nrun);
1579 #endif
1580 		XDC_WAIT(xdc, count, XDC_MAXTIME, (XDC_REMIOPB | XDC_F_ERROR));
1581 #ifdef XDC_DEBUG
1582 		printf("%s: done wait with count = %d\n", __func__, count);
1583 #endif
1584 		/* we expect some progress soon */
1585 		if (count == 0 && nreset >= 2) {
1586 			xdc_reset(xdcsc, 0, XD_RSET_ALL, XD_ERR_FAIL, 0);
1587 #ifdef XDC_DEBUG
1588 			printf("%s: timeout\n", __func__);
1589 #endif
1590 			return XD_ERR_FAIL;
1591 		}
1592 		if (count == 0) {
1593 			if (xdc_reset(xdcsc, 0,
1594 			    (nreset++ == 0) ? XD_RSET_NONE : iorqno,
1595 			    XD_ERR_FAIL, 0) == XD_ERR_FAIL)
1596 				return XD_ERR_FAIL;	/* flushes all but POLL
1597 							 * requests, resets */
1598 			continue;
1599 		}
1600 		xdc_remove_iorq(xdcsc);	/* could resubmit request */
1601 		if (freeone) {
1602 			if (xdcsc->nrun < XDC_MAXIOPB) {
1603 #ifdef XDC_DEBUG
1604 				printf("%s: done: one free\n", __func__);
1605 #endif
1606 				return XD_ERR_AOK;
1607 			}
1608 			continue;	/* don't xdc_start */
1609 		}
1610 		xdc_start(xdcsc, XDC_MAXIOPB);
1611 	}
1612 
1613 	/* get return value */
1614 
1615 	retval = xdcsc->reqs[iorqno].errno;
1616 
1617 #ifdef XDC_DEBUG
1618 	printf("%s: done, retval = 0x%x (%s)\n", __func__,
1619 	    xdcsc->reqs[iorqno].errno, xdc_e2str(xdcsc->reqs[iorqno].errno));
1620 #endif
1621 
1622 	/*
1623 	 * now that we've drained everything, start up any bufs that have
1624 	 * queued
1625 	 */
1626 
1627 	while (xdcsc->nfree > 0 && bufq_peek(xdcsc->sc_wq) != NULL)
1628 		if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK)
1629 			break;
1630 
1631 	return retval;
1632 }
1633 
1634 /*
1635  * xdc_xdreset: reset one drive.   NOTE: assumes xdc was just reset.
1636  * we steal iopb[0] for this, but we put it back when we are done.
1637  */
1638 void
1639 xdc_xdreset(struct xdc_softc *xdcsc, struct xd_softc *xdsc)
1640 {
1641 	struct xd_iopb tmpiopb;
1642 	u_long  addr;
1643 	int del;
1644 
1645 	memcpy(&tmpiopb, xdcsc->iopbase, sizeof(tmpiopb));
1646 	memset(xdcsc->iopbase, 0, sizeof(tmpiopb));
1647 	xdcsc->iopbase->comm = XDCMD_RST;
1648 	xdcsc->iopbase->unit = xdsc->xd_drive;
1649 	addr = (u_long)xdcsc->dvmaiopb;
1650 	XDC_GO(xdcsc->xdc, addr);	/* go! */
1651 	XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_REMIOPB);
1652 	if (del <= 0 || xdcsc->iopbase->errs) {
1653 		printf("%s: off-line: %s\n", device_xname(xdcsc->sc_dev),
1654 		    xdc_e2str(xdcsc->iopbase->errno));
1655 		xdcsc->xdc->xdc_csr = XDC_RESET;
1656 		XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET);
1657 		if (del <= 0)
1658 			panic("%s", __func__);
1659 	} else {
1660 		xdcsc->xdc->xdc_csr = XDC_CLRRIO;	/* clear RIO */
1661 	}
1662 	memcpy(xdcsc->iopbase, &tmpiopb, sizeof(tmpiopb));
1663 }
1664 
1665 
1666 /*
1667  * xdc_reset: reset everything: requests are marked as errors except
1668  * a polled request (which is resubmitted)
1669  */
1670 int
1671 xdc_reset(struct xdc_softc *xdcsc, int quiet, int blastmode, int error,
1672     struct xd_softc *xdsc)
1673 {
1674 	int del = 0, lcv, retval = XD_ERR_AOK;
1675 	int oldfree = xdcsc->nfree;
1676 	struct xd_iorq *iorq;
1677 
1678 	/* soft reset hardware */
1679 
1680 	if (quiet == 0)
1681 		printf("%s: soft reset\n", device_xname(xdcsc->sc_dev));
1682 	xdcsc->xdc->xdc_csr = XDC_RESET;
1683 	XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET);
1684 	if (del <= 0) {
1685 		blastmode = XD_RSET_ALL;	/* dead, flush all requests */
1686 		retval = XD_ERR_FAIL;
1687 	}
1688 	if (xdsc)
1689 		xdc_xdreset(xdcsc, xdsc);
1690 
1691 	/* fix queues based on "blast-mode" */
1692 
1693 	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
1694 		iorq = &xdcsc->reqs[lcv];
1695 
1696 		if (XD_STATE(iorq->mode) != XD_SUB_POLL &&
1697 		    XD_STATE(iorq->mode) != XD_SUB_WAIT &&
1698 		    XD_STATE(iorq->mode) != XD_SUB_NORM)
1699 			/* is it active? */
1700 			continue;
1701 
1702 		xdcsc->nrun--;	/* it isn't running any more */
1703 		if (blastmode == XD_RSET_ALL || blastmode != lcv) {
1704 			/* failed */
1705 			iorq->errno = error;
1706 			xdcsc->iopbase[lcv].done = xdcsc->iopbase[lcv].errs = 1;
1707 			switch (XD_STATE(iorq->mode)) {
1708 			case XD_SUB_NORM:
1709 				iorq->buf->b_error = EIO;
1710 				iorq->buf->b_resid = iorq->sectcnt * XDFM_BPS;
1711 				/* Sun3: map/unmap regardless of B_PHYS */
1712 				dvma_mapout(iorq->dbufbase,
1713 				    iorq->buf->b_bcount);
1714 				disk_unbusy(&iorq->xd->sc_dk,
1715 				    (iorq->buf->b_bcount - iorq->buf->b_resid),
1716 				    (iorq->buf->b_flags & B_READ));
1717 				biodone(iorq->buf);
1718 				XDC_FREE(xdcsc, lcv);	/* add to free list */
1719 				break;
1720 			case XD_SUB_WAIT:
1721 				wakeup(iorq);
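				/* FALLTHROUGH */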
1722 			case XD_SUB_POLL:
1723 				xdcsc->ndone++;
1724 				iorq->mode =
1725 				    XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
1726 				break;
1727 			}
1728 
1729 		} else {
1730 
1731 			/* resubmit, put at front of wait queue */
1732 			XDC_HWAIT(xdcsc, lcv);
1733 		}
1734 	}
1735 
1736 	/*
1737 	 * now, if stuff is waiting, start it.
1738 	 * since we just reset it should go
1739 	 */
1740 	xdc_start(xdcsc, XDC_MAXIOPB);
1741 
1742 	/* ok, we did it */
1743 	if (oldfree == 0 && xdcsc->nfree)
1744 		wakeup(&xdcsc->nfree);
1745 
1746 #ifdef XDC_DIAG
1747 	del = xdcsc->nwait + xdcsc->nrun + xdcsc->nfree + xdcsc->ndone;
1748 	if (del != XDC_MAXIOPB)
1749 		printf("%s: diag: xdc_reset miscount (%d should be %d)!\n",
1750 		    device_xname(xdcsc->sc_dev), del, XDC_MAXIOPB);
1751 	else
1752 		if (xdcsc->ndone > XDC_MAXIOPB - XDC_SUBWAITLIM)
1753 			printf("%s: diag: lots of done jobs (%d)\n",
1754 			    device_xname(xdcsc->sc_dev), xdcsc->ndone);
1755 #endif
1756 	printf("RESET DONE\n");
1757 	return retval;
1758 }
1759 
1760 /*
1761  * xdc_start: start all waiting buffers
1762  */
1763 void
1764 xdc_start(struct xdc_softc *xdcsc, int maxio)
1765 {
1766 	int rqno;
1767 	while (maxio && xdcsc->nwait &&
1768 		(xdcsc->xdc->xdc_csr & XDC_ADDING) == 0) {
1769 		XDC_GET_WAITER(xdcsc, rqno);	/* note: rqno is an "out"
1770 						 * param */
1771 		if (xdc_submit_iorq(xdcsc, rqno, XD_SUB_NOQ) != XD_ERR_AOK)
1772 			panic("%s", __func__);	/* should never happen */
1773 		maxio--;
1774 	}
1775 }
1776 
1777 /*
1778  * xdc_remove_iorq: remove "done" IOPB's.
1779  */
1780 int
1781 xdc_remove_iorq(struct xdc_softc *xdcsc)
1782 {
1783 	int errno, rqno, comm, errs;
1784 	struct xdc *xdc = xdcsc->xdc;
1785 	struct xd_iopb *iopb;
1786 	struct xd_iorq *iorq;
1787 	struct buf *bp;
1788 
1789 	if (xdc->xdc_csr & XDC_F_ERROR) {
1790 		/*
1791 		 * FATAL ERROR: should never happen under normal use. This
1792 		 * error is so bad, you can't even tell which IOPB is bad, so
1793 		 * we dump them all.
1794 		 */
1795 		errno = xdc->xdc_f_err;
1796 		printf("%s: fatal error 0x%02x: %s\n",
1797 		    device_xname(xdcsc->sc_dev), errno, xdc_e2str(errno));
1798 		if (xdc_reset(xdcsc, 0, XD_RSET_ALL, errno, 0) != XD_ERR_AOK) {
1799 			printf("%s: soft reset failed!\n",
1800 			    device_xname(xdcsc->sc_dev));
1801 			panic("%s: controller DEAD", __func__);
1802 		}
1803 		return XD_ERR_AOK;
1804 	}
1805 
1806 	/*
1807 	 * get iopb that is done
1808 	 *
1809 	 * hmm... I used to read the address of the done IOPB off the VME
1810 	 * registers and calculate the rqno directly from that.   that worked
1811 	 * until I started putting a load on the controller.   when loaded, i
1812 	 * would get interrupts but neither the REMIOPB nor the F_ERROR bit would
1813 	 * be set, even after DELAY'ing a while!   later on the timeout
1814 	 * routine would detect IOPBs that were marked "running" but their
1815 	 * "done" bit was set.   rather than dealing directly with this
1816 	 * problem, it is just easier to look at all running IOPB's for the
1817 	 * done bit.
1818 	 */
1819 	if (xdc->xdc_csr & XDC_REMIOPB) {
1820 		xdc->xdc_csr = XDC_CLRRIO;
1821 	}
1822 
1823 	for (rqno = 0; rqno < XDC_MAXIOPB; rqno++) {
1824 		iorq = &xdcsc->reqs[rqno];
1825 		if (iorq->mode == 0 || XD_STATE(iorq->mode) == XD_SUB_DONE)
1826 			continue;	/* free, or done */
1827 		iopb = &xdcsc->iopbase[rqno];
1828 		if (iopb->done == 0)
1829 			continue;	/* not done yet */
1830 
1831 #ifdef XDC_DEBUG
1832 		{
1833 			u_char *rio = (u_char *)iopb;
1834 			int sz = sizeof(struct xd_iopb), lcv;
1835 
1836 			printf("%s: rio #%d [",
1837 			    device_xname(xdcsc->sc_dev), rqno);
1838 			for (lcv = 0; lcv < sz; lcv++)
1839 				printf(" %02x", rio[lcv]);
1840 			printf("]\n");
1841 		}
1842 #endif				/* XDC_DEBUG */
1843 
1844 		xdcsc->nrun--;
1845 
1846 		comm = iopb->comm;
1847 		errs = iopb->errs;
1848 
1849 		if (errs)
1850 			iorq->errno = iopb->errno;
1851 		else
1852 			iorq->errno = 0;
1853 
1854 		/* handle non-fatal errors */
1855 
1856 		if (errs &&
1857 		    xdc_error(xdcsc, iorq, iopb, rqno, comm) == XD_ERR_AOK)
1858 			continue;	/* AOK: we resubmitted it */
1859 
1860 
1861 		/* this iorq is now done (hasn't been restarted or anything) */
1862 
1863 		if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror)
1864 			xdc_perror(iorq, iopb, 0);
1865 
1866 		/* now, if read/write, check to make sure we got all the data
1867 		 * we needed. (this may not be the case if we got an error in
1868 		 * the middle of a multisector request).   */
1869 
1870 		if ((iorq->mode & XD_MODE_B144) != 0 && errs == 0 &&
1871 		    (comm == XDCMD_RD || comm == XDCMD_WR)) {
1872 			/* we just successfully processed a bad144 sector.
1873 			 * note: if we are in bad144 mode, the pointers have
1874 			 * been advanced already (see above) and are pointing
1875 			 * at the bad144 sector.   to exit bad144 mode, we
1876 			 * must advance the pointers 1 sector and issue a new
1877 			 * request if there are still sectors left to process.
1878 			 *
1879 			 */
1880 			XDC_ADVANCE(iorq, 1);	/* advance 1 sector */
1881 
1882 			/* exit b144 mode */
1883 			iorq->mode = iorq->mode & (~XD_MODE_B144);
1884 
1885 			if (iorq->sectcnt) {	/* more to go! */
1886 				iorq->lasterror = iorq->errno = iopb->errno = 0;
1887 				iopb->errs = iopb->done = 0;
1888 				iorq->tries = 0;
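				/*
				 * recompute the cyl/head/sector address and
				 * the DMA address for the sector after the
				 * bad one, then requeue this IOPB and kick
				 * the controller.
				 */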
1889 				iopb->sectcnt = iorq->sectcnt;
1890 				iopb->cylno =
1891 				    iorq->blockno / iorq->xd->sectpercyl;
1892 				iopb->headno =
1893 				    (iorq->blockno / iorq->xd->nsect) %
1894 				    iorq->xd->nhead;
1895 				iopb->sectno = iorq->blockno % iorq->xd->nsect;
1896 				iopb->daddr =
1897 				    dvma_kvtopa(iorq->dbuf, xdcsc->bustype);
1898 				XDC_HWAIT(xdcsc, rqno);
1899 				xdc_start(xdcsc, 1);	/* resubmit */
1900 				continue;
1901 			}
1902 		}
1903 		/* final cleanup, totally done with this request */
1904 
1905 		switch (XD_STATE(iorq->mode)) {
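		/*
		 * NORM requests came from the buffer queue and are finished
		 * off with biodone(); WAIT requests have a sleeping submitter
		 * to wake up; POLL requests are reaped by the polling
		 * submitter itself, so we only mark them done.
		 */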
1906 		case XD_SUB_NORM:
1907 			bp = iorq->buf;
1908 			if (errs) {
1909 				bp->b_error = EIO;
1910 				bp->b_resid = iorq->sectcnt * XDFM_BPS;
1911 			} else {
1912 				bp->b_resid = 0;	/* done */
1913 			}
1914 			/* Sun3: map/unmap regardless of B_PHYS */
1915 			dvma_mapout(iorq->dbufbase, iorq->buf->b_bcount);
1916 			disk_unbusy(&iorq->xd->sc_dk,
1917 			    (bp->b_bcount - bp->b_resid),
1918 			    (bp->b_flags & B_READ));
1919 			XDC_FREE(xdcsc, rqno);
1920 			biodone(bp);
1921 			break;
1922 		case XD_SUB_WAIT:
1923 			iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
1924 			xdcsc->ndone++;
1925 			wakeup(iorq);
1926 			break;
1927 		case XD_SUB_POLL:
1928 			iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
1929 			xdcsc->ndone++;
1930 			break;
1931 		}
1932 	}
1933 
1934 	return XD_ERR_AOK;
1935 }
1936 
1937 /*
1938  * xdc_perror: print error.
1939  * - if still_trying is true: we got an error, retried and got a
1940  *   different error.  in that case lasterror is the old error,
1941  *   and errno is the new one.
1942  * - if still_trying is not true, then if we ever had an error it
1943  *   is in lasterror. also, if iorq->errno == 0, then we recovered
1944  *   from that error (otherwise iorq->errno == iorq->lasterror).
1945  */
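/*
 * for illustration only: with XD_MODE_VERBO set, a recovered read error on
 * a hypothetical drive xd0, partition a, would come out roughly as
 *
 *	xd0a: read 100/5/3: Drive not ready [recovered in 2 tries]
 *
 * (device name, partition, command, and cyl/head/sector are filled in from
 * the iorq/iopb as shown below).
 */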
1946 void
1947 xdc_perror(struct xd_iorq *iorq, struct xd_iopb *iopb, int still_trying)
1948 {
1949 	int error = iorq->lasterror;
1950 
1951 	printf("%s", (iorq->xd) ?
1952 	    device_xname(iorq->xd->sc_dev) :
1953 	    device_xname(iorq->xdc->sc_dev));
1954 	if (iorq->buf)
1955 		printf("%c: ", 'a' + (char)DISKPART(iorq->buf->b_dev));
1956 	if (iopb->comm == XDCMD_RD || iopb->comm == XDCMD_WR)
1957 		printf("%s %d/%d/%d: ",
1958 		    (iopb->comm == XDCMD_RD) ? "read" : "write",
1959 		    iopb->cylno, iopb->headno, iopb->sectno);
1960 	printf("%s", xdc_e2str(error));
1961 
1962 	if (still_trying)
1963 		printf(" [still trying, new error=%s]", xdc_e2str(iorq->errno));
1964 	else
1965 		if (iorq->errno == 0)
1966 			printf(" [recovered in %d tries]", iorq->tries);
1967 
1968 	printf("\n");
1969 }
1970 
1971 /*
1972  * xdc_error: non-fatal error encountered... recover.
1973  * return AOK if resubmitted, return FAIL if this iopb is done
1974  */
1975 int
1976 xdc_error(struct xdc_softc *xdcsc, struct xd_iorq *iorq, struct xd_iopb *iopb,
1977     int rqno, int comm)
1978 
1979 {
1980 	int errno = iorq->errno;
1981 	int erract = errno & XD_ERA_MASK;
1982 	int oldmode, advance, i;
1983 
1984 	if (erract == XD_ERA_RSET) {	/* some errors require a reset */
1985 		oldmode = iorq->mode;
1986 		iorq->mode = XD_SUB_DONE | (~XD_SUB_MASK & oldmode);
1987 		xdcsc->ndone++;
1988 		/* make xdc_start ignore us */
1989 		xdc_reset(xdcsc, 1, XD_RSET_NONE, errno, iorq->xd);
1990 		iorq->mode = oldmode;
1991 		xdcsc->ndone--;
1992 	}
1993 	/* check for read/write to a sector in bad144 table if bad: redirect
1994 	 * request to bad144 area */
1995 
1996 	if ((comm == XDCMD_RD || comm == XDCMD_WR) &&
1997 	    (iorq->mode & XD_MODE_B144) == 0) {
1998 		advance = iorq->sectcnt - iopb->sectcnt;
1999 		XDC_ADVANCE(iorq, advance);
2000 		if ((i = isbad(&iorq->xd->dkb,
2001 		    iorq->blockno / iorq->xd->sectpercyl,
2002 		    (iorq->blockno / iorq->xd->nsect) % iorq->xd->nhead,
2003 		    iorq->blockno % iorq->xd->nsect)) != -1) {
2004 			iorq->mode |= XD_MODE_B144;	/* enter bad144 mode &
2005 							 * redirect */
2006 			iopb->errno = iopb->done = iopb->errs = 0;
2007 			iopb->sectcnt = 1;
2008 			iopb->cylno = (iorq->xd->ncyl + iorq->xd->acyl) - 2;
2009 			/* second to last acyl */
2010 			i = iorq->xd->sectpercyl - 1 - i;	/* follow bad144
2011 								 * standard */
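			/*
			 * bad144 replacement sectors are allocated backwards
			 * from the end of the replacement cylinder, so table
			 * entry i maps to the i-th sector counting back from
			 * that cylinder's last sector; split the offset back
			 * into a head/sector pair.
			 */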
2012 			iopb->headno = i / iorq->xd->nsect;
2013 			iopb->sectno = i % iorq->xd->nsect;
2014 			XDC_HWAIT(xdcsc, rqno);
2015 			xdc_start(xdcsc, 1);	/* resubmit */
2016 			return XD_ERR_AOK;	/* recovered! */
2017 		}
2018 	}
2019 
2020 	/*
2021 	 * it isn't a bad144 sector, so it must be a real error.  see if we
2022 	 * can retry it.
2023 	 */
2024 	if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror)
2025 		xdc_perror(iorq, iopb, 1);	/* inform of error state
2026 						 * change */
2027 	iorq->lasterror = errno;
2028 
2029 	if ((erract == XD_ERA_RSET || erract == XD_ERA_HARD)
2030 	    && iorq->tries < XDC_MAXTRIES) {	/* retry? */
2031 		iorq->tries++;
2032 		iorq->errno = iopb->errno = iopb->done = iopb->errs = 0;
2033 		XDC_HWAIT(xdcsc, rqno);
2034 		xdc_start(xdcsc, 1);	/* restart */
2035 		return XD_ERR_AOK;	/* recovered! */
2036 	}
2037 
2038 	/* failed to recover from this error */
2039 	return XD_ERR_FAIL;
2040 }
2041 
2042 /*
2043  * xdc_tick: make sure xd is still alive and ticking (err, kicking).
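 *
 * runs periodically from callout context: under XDC_DIAG it cross-checks
 * the wait/run/free/done IOPB accounting, then it ages the "ttl" of every
 * active request and resets the controller if any request has been stuck
 * for too long.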
2044  */
2045 void
2046 xdc_tick(void *arg)
2047 {
2048 	struct xdc_softc *xdcsc = arg;
2049 	int     lcv, s, reset = 0;
2050 #ifdef XDC_DIAG
2051 	int     nwait, nrun, nfree, ndone, whd = 0;
2052 	uint8_t  fqc[XDC_MAXIOPB], wqc[XDC_MAXIOPB], mark[XDC_MAXIOPB];
2053 	s = splbio();
2054 	nwait = xdcsc->nwait;
2055 	nrun = xdcsc->nrun;
2056 	nfree = xdcsc->nfree;
2057 	ndone = xdcsc->ndone;
2058 	memcpy(wqc, xdcsc->waitq, sizeof(wqc));
2059 	memcpy(fqc, xdcsc->freereq, sizeof(fqc));
2060 	splx(s);
2061 	if (nwait + nrun + nfree + ndone != XDC_MAXIOPB) {
2062 		printf("%s: diag: IOPB miscount "
2063 		    "(got w/f/r/d %d/%d/%d/%d, wanted %d)\n",
2064 		    device_xname(xdcsc->sc_dev), nwait, nfree, nrun, ndone,
2065 		    XDC_MAXIOPB);
2066 		memset(mark, 0, sizeof(mark));
2067 		printf("FREE: ");
2068 		for (lcv = nfree; lcv > 0; lcv--) {
2069 			printf("%d ", fqc[lcv - 1]);
2070 			mark[fqc[lcv - 1]] = 1;
2071 		}
2072 		printf("\nWAIT: ");
2073 		lcv = nwait;
2074 		while (lcv > 0) {
2075 			printf("%d ", wqc[whd]);
2076 			mark[wqc[whd]] = 1;
2077 			whd = (whd + 1) % XDC_MAXIOPB;
2078 			lcv--;
2079 		}
2080 		printf("\n");
2081 		for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
2082 			if (mark[lcv] == 0) {
2083 				printf("MARK: running %d: mode %d done %d "
2084 				    "errs %d errno 0x%x ttl %d buf %p\n",
2085 				    lcv, xdcsc->reqs[lcv].mode,
2086 				    xdcsc->iopbase[lcv].done,
2087 				    xdcsc->iopbase[lcv].errs,
2088 				    xdcsc->iopbase[lcv].errno,
2089 				    xdcsc->reqs[lcv].ttl,
2090 				    xdcsc->reqs[lcv].buf);
2091 			}
2092 		}
2093 	} else
2094 		if (ndone > XDC_MAXIOPB - XDC_SUBWAITLIM)
2095 			printf("%s: diag: lots of done jobs (%d)\n",
2096 			    device_xname(xdcsc->sc_dev), ndone);
2097 
2098 #endif
2099 #ifdef XDC_DEBUG
2100 	printf("%s: tick: csr 0x%x, w/f/r/d %d/%d/%d/%d\n",
2101 	    device_xname(xdcsc->sc_dev),
2102 	    xdcsc->xdc->xdc_csr, xdcsc->nwait, xdcsc->nfree, xdcsc->nrun,
2103 	    xdcsc->ndone);
2104 	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
2105 		if (xdcsc->reqs[lcv].mode) {
2106 			printf("running %d: "
2107 			    "mode %d done %d errs %d errno 0x%x\n", lcv,
2108 			    xdcsc->reqs[lcv].mode, xdcsc->iopbase[lcv].done,
2109 			    xdcsc->iopbase[lcv].errs,
2110 			    xdcsc->iopbase[lcv].errno);
2111 		}
2112 	}
2113 #endif
2114 
2115 	/* reduce ttl for each request; if one goes to zero, reset the xdc */
2116 	s = splbio();
2117 	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
2118 		if (xdcsc->reqs[lcv].mode == 0 ||
2119 		    XD_STATE(xdcsc->reqs[lcv].mode) == XD_SUB_DONE)
2120 			continue;
2121 		xdcsc->reqs[lcv].ttl--;
2122 		if (xdcsc->reqs[lcv].ttl == 0)
2123 			reset = 1;
2124 	}
2125 	if (reset) {
2126 		printf("%s: watchdog timeout\n", device_xname(xdcsc->sc_dev));
2127 		xdc_reset(xdcsc, 0, XD_RSET_NONE, XD_ERR_FAIL, NULL);
2128 	}
2129 	splx(s);
2130 
2131 	/* until next time */
2132 
2133 	callout_reset(&xdcsc->sc_tick_ch, XDC_TICKCNT, xdc_tick, xdcsc);
2134 }
2135 
2136 /*
2137  * xdc_ioctlcmd: this function provides a user level interface to the
2138  * controller via ioctl.   this allows "format" programs to be written
2139  * in user code, and is also useful for some debugging.   we return
2140  * an error code.   called at user priority.
2141  */
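/*
 * a minimal sketch (illustration only) of how a user-level format or debug
 * program might drive this interface, assuming the struct xd_iocmd ioctl
 * exported by xio.h (called DIOSXDCMD here; check xio.h for the exact name)
 * and a raw xd device node.  most error handling is omitted:
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <err.h>
 *	#include <sun3/dev/xio.h>	   (userland include path may differ)
 *
 *	char buf[512];			   one sector (XDFM_BPS bytes)
 *	struct xd_iocmd xio;
 *	int fd;
 *
 *	fd = open("/dev/rxd0c", O_RDWR);
 *	if (fd == -1)
 *		err(1, "open");
 *	memset(&xio, 0, sizeof(xio));
 *	xio.cmd = XDCMD_RD;		   read ...
 *	xio.block = 0;			   ... starting at block 0
 *	xio.sectcnt = 1;		   ... one sector
 *	xio.dptr = buf;
 *	xio.dlen = sizeof(buf);		   must be sectcnt * XDFM_BPS
 *	if (ioctl(fd, DIOSXDCMD, &xio) == -1)
 *		err(1, "xd command");
 */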
2142 int
2143 xdc_ioctlcmd(struct xd_softc *xd, dev_t dev, struct xd_iocmd *xio)
2144 {
2145 	int s, err, rqno;
2146 	void *dvmabuf = NULL;
2147 	struct xdc_softc *xdcsc;
2148 
2149 	/* check sanity of requested command */
2150 
2151 	switch (xio->cmd) {
2152 
2153 	case XDCMD_NOP:	/* no op: everything should be zero */
2154 		if (xio->subfn || xio->dptr || xio->dlen ||
2155 		    xio->block || xio->sectcnt)
2156 			return EINVAL;
2157 		break;
2158 
2159 	case XDCMD_RD:		/* read / write sectors (up to XD_IOCMD_MAXS) */
2160 	case XDCMD_WR:
2161 		if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS ||
2162 		    xio->sectcnt * XDFM_BPS != xio->dlen || xio->dptr == NULL)
2163 			return EINVAL;
2164 		break;
2165 
2166 	case XDCMD_SK:		/* seek: doesn't seem useful to export this */
2167 		return EINVAL;
2168 
2169 	case XDCMD_WRP:	/* write parameters */
2170 		return EINVAL;	/* not useful, except maybe drive
2171 				 * parameters... but drive parameters should
2172 				 * go via disklabel changes */
2173 
2174 	case XDCMD_RDP:	/* read parameters */
2175 		if (xio->subfn != XDFUN_DRV ||
2176 		    xio->dlen || xio->block || xio->dptr)
2177 			return EINVAL;		/* allow read drive params to
2178 						 * get hw_spt */
2179 		xio->sectcnt = xd->hw_spt;	/* we already know the answer */
2180 		return 0;
2181 		break;
2182 
2183 	case XDCMD_XRD:	/* extended read/write */
2184 	case XDCMD_XWR:
2185 
2186 		switch (xio->subfn) {
2187 
2188 		case XDFUN_THD:/* track headers */
2189 			if (xio->sectcnt != xd->hw_spt ||
2190 			    (xio->block % xd->nsect) != 0 ||
2191 			    xio->dlen != XD_IOCMD_HSZ * xd->hw_spt ||
2192 			    xio->dptr == NULL)
2193 				return EINVAL;
2194 			xio->sectcnt = 0;
2195 			break;
2196 
2197 		case XDFUN_FMT:/* NOTE: also XDFUN_VFY */
2198 			if (xio->cmd == XDCMD_XRD)
2199 				return EINVAL;	/* no XDFUN_VFY */
2200 			if (xio->sectcnt || xio->dlen ||
2201 			    (xio->block % xd->nsect) != 0 || xio->dptr)
2202 				return EINVAL;
2203 			break;
2204 
2205 		case XDFUN_HDR:/* header, header verify, data, data ECC */
2206 			return EINVAL;	/* not yet */
2207 
2208 		case XDFUN_DM:	/* defect map */
2209 		case XDFUN_DMX:/* defect map (alternate location) */
2210 			if (xio->sectcnt || xio->dlen != XD_IOCMD_DMSZ ||
2211 			    (xio->block % xd->nsect) != 0 || xio->dptr == NULL)
2212 				return EINVAL;
2213 			break;
2214 
2215 		default:
2216 			return EINVAL;
2217 		}
2218 		break;
2219 
2220 	case XDCMD_TST:	/* diagnostics */
2221 		return EINVAL;
2222 
2223 	default:
2224 		return EINVAL;/* ??? */
2225 	}
2226 
2227 	/* create DVMA buffer for request if needed */
2228 
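	/*
	 * the controller DMAs through DVMA space, so user data is staged
	 * through a kernel DVMA buffer: copyin() before a write command is
	 * issued, copyout() after a read command completes.
	 */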
2229 	if (xio->dlen) {
2230 		dvmabuf = dvma_malloc(xio->dlen);
2231 		if (xio->cmd == XDCMD_WR || xio->cmd == XDCMD_XWR) {
2232 			err = copyin(xio->dptr, dvmabuf, xio->dlen);
2233 			if (err) {
2234 				dvma_free(dvmabuf, xio->dlen);
2235 				return err;
2236 			}
2237 		}
2238 	}
2239 	/* do it! */
2240 
2241 	err = 0;
2242 	xdcsc = xd->parent;
2243 	s = splbio();
2244 	rqno = xdc_cmd(xdcsc, xio->cmd, xio->subfn, xd->xd_drive, xio->block,
2245 	    xio->sectcnt, dvmabuf, XD_SUB_WAIT);
2246 	if (rqno == XD_ERR_FAIL) {
2247 		err = EIO;
2248 		goto done;
2249 	}
2250 	xio->errno = xdcsc->reqs[rqno].errno;
2251 	xio->tries = xdcsc->reqs[rqno].tries;
2252 	XDC_DONE(xdcsc, rqno, err);
2253 
2254 	if (xio->cmd == XDCMD_RD || xio->cmd == XDCMD_XRD)
2255 		err = copyout(dvmabuf, xio->dptr, xio->dlen);
2256 
2257  done:
2258 	splx(s);
2259 	if (dvmabuf)
2260 		dvma_free(dvmabuf, xio->dlen);
2261 	return err;
2262 }
2263 
2264 /*
2265  * xdc_e2str: convert error code number into an error string
2266  */
2267 const char *
2268 xdc_e2str(int no)
2269 {
2270 
2271 	switch (no) {
2272 	case XD_ERR_FAIL:
2273 		return "Software fatal error";
2274 	case XD_ERR_AOK:
2275 		return "Successful completion";
2276 	case XD_ERR_ICYL:
2277 		return "Illegal cylinder address";
2278 	case XD_ERR_IHD:
2279 		return "Illegal head address";
2280 	case XD_ERR_ISEC:
2281 		return "Illgal sector address";
2282 	case XD_ERR_CZER:
2283 		return "Count zero";
2284 	case XD_ERR_UIMP:
2285 		return "Unimplemented command";
2286 	case XD_ERR_IF1:
2287 		return "Illegal field length 1";
2288 	case XD_ERR_IF2:
2289 		return "Illegal field length 2";
2290 	case XD_ERR_IF3:
2291 		return "Illegal field length 3";
2292 	case XD_ERR_IF4:
2293 		return "Illegal field length 4";
2294 	case XD_ERR_IF5:
2295 		return "Illegal field length 5";
2296 	case XD_ERR_IF6:
2297 		return "Illegal field length 6";
2298 	case XD_ERR_IF7:
2299 		return "Illegal field length 7";
2300 	case XD_ERR_ISG:
2301 		return "Illegal scatter/gather length";
2302 	case XD_ERR_ISPT:
2303 		return "Not enough sectors per track";
2304 	case XD_ERR_ALGN:
2305 		return "Next IOPB address alignment error";
2306 	case XD_ERR_SGAL:
2307 		return "Scatter/gather address alignment error";
2308 	case XD_ERR_SGEC:
2309 		return "Scatter/gather with auto-ECC";
2310 	case XD_ERR_SECC:
2311 		return "Soft ECC corrected";
2312 	case XD_ERR_SIGN:
2313 		return "ECC ignored";
2314 	case XD_ERR_ASEK:
2315 		return "Auto-seek retry recovered";
2316 	case XD_ERR_RTRY:
2317 		return "Soft retry recovered";
2318 	case XD_ERR_HECC:
2319 		return "Hard data ECC";
2320 	case XD_ERR_NHDR:
2321 		return "Header not found";
2322 	case XD_ERR_NRDY:
2323 		return "Drive not ready";
2324 	case XD_ERR_TOUT:
2325 		return "Operation timeout";
2326 	case XD_ERR_VTIM:
2327 		return "VMEDMA timeout";
2328 	case XD_ERR_DSEQ:
2329 		return "Disk sequencer error";
2330 	case XD_ERR_HDEC:
2331 		return "Header ECC error";
2332 	case XD_ERR_RVFY:
2333 		return "Read verify";
2334 	case XD_ERR_VFER:
2335 		return "Fatail VMEDMA error";
2336 	case XD_ERR_VBUS:
2337 		return "VMEbus error";
2338 	case XD_ERR_DFLT:
2339 		return "Drive faulted";
2340 	case XD_ERR_HECY:
2341 		return "Header error/cyliner";
2342 	case XD_ERR_HEHD:
2343 		return "Header error/head";
2344 	case XD_ERR_NOCY:
2345 		return "Drive not on-cylinder";
2346 	case XD_ERR_SEEK:
2347 		return "Seek error";
2348 	case XD_ERR_ILSS:
2349 		return "Illegal sector size";
2350 	case XD_ERR_SEC:
2351 		return "Soft ECC";
2352 	case XD_ERR_WPER:
2353 		return "Write-protect error";
2354 	case XD_ERR_IRAM:
2355 		return "IRAM self test failure";
2356 	case XD_ERR_MT3:
2357 		return "Maintenance test 3 failure (DSKCEL RAM)";
2358 	case XD_ERR_MT4:
2359 		return "Maintenance test 4 failure (header shift reg)";
2360 	case XD_ERR_MT5:
2361 		return "Maintenance test 5 failure (VMEDMA regs)";
2362 	case XD_ERR_MT6:
2363 		return "Maintenance test 6 failure (REGCEL chip)";
2364 	case XD_ERR_MT7:
2365 		return "Maintenance test 7 failure (buffer parity)";
2366 	case XD_ERR_MT8:
2367 		return "Maintenance test 8 failure (disk FIFO)";
2368 	case XD_ERR_IOCK:
2369 		return "IOPB checksum miscompare";
2370 	case XD_ERR_IODM:
2371 		return "IOPB DMA fatal";
2372 	case XD_ERR_IOAL:
2373 		return "IOPB address alignment error";
2374 	case XD_ERR_FIRM:
2375 		return "Firmware error";
2376 	case XD_ERR_MMOD:
2377 		return "Illegal maintenance mode test number";
2378 	case XD_ERR_ACFL:
2379 		return "ACFAIL asserted";
2380 	default:
2381 		return "Unknown error";
2382 	}
2383 }
2384