1 /* $NetBSD: xd.c,v 1.10 1996/10/13 03:47:39 christos Exp $ */ 2 3 /* 4 * 5 * Copyright (c) 1995 Charles D. Cranor 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Charles D. Cranor. 19 * 4. The name of the author may not be used to endorse or promote products 20 * derived from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * 36 * x d . c x y l o g i c s 7 5 3 / 7 0 5 3 v m e / s m d d r i v e r 37 * 38 * author: Chuck Cranor <chuck@ccrc.wustl.edu> 39 * id: $NetBSD: xd.c,v 1.10 1996/10/13 03:47:39 christos Exp $ 40 * started: 27-Feb-95 41 * references: [1] Xylogics Model 753 User's Manual 42 * part number: 166-753-001, Revision B, May 21, 1988. 43 * "Your Partner For Performance" 44 * [2] other NetBSD disk device drivers 45 * 46 * Special thanks go to Scott E. Campbell of Xylogics, Inc. for taking 47 * the time to answer some of my questions about the 753/7053. 48 * 49 * note: the 753 and the 7053 are programmed the same way, but are 50 * different sizes. the 753 is a 6U VME card, while the 7053 is a 9U 51 * VME card (found in many VME based suns). 
52 */ 53 54 #undef XDC_DEBUG /* full debug */ 55 #define XDC_DIAG /* extra sanity checks */ 56 #if defined(DIAGNOSTIC) && !defined(XDC_DIAG) 57 #define XDC_DIAG /* link in with master DIAG option */ 58 #endif 59 60 #include <sys/param.h> 61 #include <sys/proc.h> 62 #include <sys/systm.h> 63 #include <sys/kernel.h> 64 #include <sys/conf.h> 65 #include <sys/file.h> 66 #include <sys/stat.h> 67 #include <sys/ioctl.h> 68 #include <sys/buf.h> 69 #include <sys/uio.h> 70 #include <sys/malloc.h> 71 #include <sys/device.h> 72 #include <sys/disklabel.h> 73 #include <sys/disk.h> 74 #include <sys/syslog.h> 75 #include <sys/dkbad.h> 76 #include <vm/vm.h> 77 #include <vm/vm_kern.h> 78 79 #include <machine/autoconf.h> 80 #include <machine/sun_disklabel.h> 81 #include <machine/dvma.h> 82 83 #include <sun3/dev/xdreg.h> 84 #include <sun3/dev/xdvar.h> 85 #include <sun3/dev/xio.h> 86 87 /* 88 * macros 89 */ 90 91 /* 92 * XDC_TWAIT: add iorq "N" to tail of SC's wait queue 93 */ 94 #define XDC_TWAIT(SC, N) { \ 95 (SC)->waitq[(SC)->waitend] = (N); \ 96 (SC)->waitend = ((SC)->waitend + 1) % XDC_MAXIOPB; \ 97 (SC)->nwait++; \ 98 } 99 100 /* 101 * XDC_HWAIT: add iorq "N" to head of SC's wait queue 102 */ 103 #define XDC_HWAIT(SC, N) { \ 104 (SC)->waithead = ((SC)->waithead == 0) ? \ 105 (XDC_MAXIOPB - 1) : ((SC)->waithead - 1); \ 106 (SC)->waitq[(SC)->waithead] = (N); \ 107 (SC)->nwait++; \ 108 } 109 110 /* 111 * XDC_GET_WAITER: gets the first request waiting on the waitq 112 * and removes it (so it can be submitted) 113 */ 114 #define XDC_GET_WAITER(XDCSC, RQ) { \ 115 (RQ) = (XDCSC)->waitq[(XDCSC)->waithead]; \ 116 (XDCSC)->waithead = ((XDCSC)->waithead + 1) % XDC_MAXIOPB; \ 117 xdcsc->nwait--; \ 118 } 119 120 /* 121 * XDC_FREE: add iorq "N" to SC's free list 122 */ 123 #define XDC_FREE(SC, N) { \ 124 (SC)->freereq[(SC)->nfree++] = (N); \ 125 (SC)->reqs[N].mode = 0; \ 126 if ((SC)->nfree == 1) wakeup(&(SC)->nfree); \ 127 } 128 129 130 /* 131 * XDC_RQALLOC: allocate an iorq off the free list (assume nfree > 0). 132 */ 133 #define XDC_RQALLOC(XDCSC) (XDCSC)->freereq[--((XDCSC)->nfree)] 134 135 /* 136 * XDC_GO: start iopb ADDR (DVMA addr in a u_long) on XDC 137 */ 138 #define XDC_GO(XDC, ADDR) { \ 139 (XDC)->xdc_iopbaddr0 = ((ADDR) & 0xff); \ 140 (ADDR) = ((ADDR) >> 8); \ 141 (XDC)->xdc_iopbaddr1 = ((ADDR) & 0xff); \ 142 (ADDR) = ((ADDR) >> 8); \ 143 (XDC)->xdc_iopbaddr2 = ((ADDR) & 0xff); \ 144 (ADDR) = ((ADDR) >> 8); \ 145 (XDC)->xdc_iopbaddr3 = (ADDR); \ 146 (XDC)->xdc_iopbamod = XDC_ADDRMOD; \ 147 (XDC)->xdc_csr = XDC_ADDIOPB; /* go! */ \ 148 } 149 150 /* 151 * XDC_WAIT: wait for XDC's csr "BITS" to come on in "TIME". 152 * LCV is a counter. If it goes to zero then we timed out. 
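 * (each pass of the wait loop calls DELAY(1), so TIME is roughly a count of microseconds)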
153 */ 154 #define XDC_WAIT(XDC, LCV, TIME, BITS) { \ 155 (LCV) = (TIME); \ 156 while ((LCV) > 0) { \ 157 if ((XDC)->xdc_csr & (BITS)) break; \ 158 (LCV) = (LCV) - 1; \ 159 DELAY(1); \ 160 } \ 161 } 162 163 /* 164 * XDC_DONE: don't need IORQ, get error code and free (done after xdc_cmd) 165 */ 166 #define XDC_DONE(SC,RQ,ER) { \ 167 if ((RQ) == XD_ERR_FAIL) { \ 168 (ER) = (RQ); \ 169 } else { \ 170 if ((SC)->ndone-- == XDC_SUBWAITLIM) \ 171 wakeup(&(SC)->ndone); \ 172 (ER) = (SC)->reqs[RQ].errno; \ 173 XDC_FREE((SC), (RQ)); \ 174 } \ 175 } 176 177 /* 178 * XDC_ADVANCE: advance iorq's pointers by a number of sectors 179 */ 180 #define XDC_ADVANCE(IORQ, N) { \ 181 if (N) { \ 182 (IORQ)->sectcnt -= (N); \ 183 (IORQ)->blockno += (N); \ 184 (IORQ)->dbuf += ((N)*XDFM_BPS); \ 185 } \ 186 } 187 188 /* 189 * note - addresses you can sleep on: 190 * [1] & of xd_softc's "state" (waiting for a chance to attach a drive) 191 * [2] & of xdc_softc's "nfree" (waiting for a free iorq/iopb) 192 * [3] & of xdc_softc's "ndone" (waiting for number of done iorq/iopb's 193 * to drop below XDC_SUBWAITLIM) 194 * [4] & an iorq (waiting for an XD_SUB_WAIT iorq to finish) 195 */ 196 197 198 /* 199 * function prototypes 200 * "xdc_*" functions are internal, all others are external interfaces 201 */ 202 203 /* internals */ 204 int xdc_cmd __P((struct xdc_softc *, int, int, int, int, int, char *, int)); 205 char *xdc_e2str __P((int)); 206 int xdc_error __P((struct xdc_softc *, struct xd_iorq *, 207 struct xd_iopb *, int, int)); 208 int xdc_ioctlcmd __P((struct xd_softc *, dev_t dev, struct xd_iocmd *)); 209 void xdc_perror __P((struct xd_iorq *, struct xd_iopb *, int)); 210 int xdc_piodriver __P((struct xdc_softc *, int, int)); 211 int xdc_remove_iorq __P((struct xdc_softc *)); 212 int xdc_reset __P((struct xdc_softc *, int, int, int, struct xd_softc *)); 213 inline void xdc_rqinit __P((struct xd_iorq *, struct xdc_softc *, 214 struct xd_softc *, int, u_long, int, 215 caddr_t, struct buf *)); 216 void xdc_rqtopb __P((struct xd_iorq *, struct xd_iopb *, int, int)); 217 int xdc_start __P((struct xdc_softc *, int)); 218 int xdc_startbuf __P((struct xdc_softc *, struct xd_softc *, struct buf *)); 219 int xdc_submit_iorq __P((struct xdc_softc *, int, int)); 220 void xdc_tick __P((void *)); 221 int xdc_xdreset __P((struct xdc_softc *, struct xd_softc *)); 222 223 /* machine interrupt hook */ 224 int xdcintr __P((void *)); 225 226 /* {b,c}devsw */ 227 int xdclose __P((dev_t, int, int)); 228 int xddump __P((dev_t)); 229 int xdioctl __P((dev_t, u_long, caddr_t, int, struct proc *)); 230 int xdopen __P((dev_t, int, int)); 231 int xdread __P((dev_t, struct uio *)); 232 int xdwrite __P((dev_t, struct uio *)); 233 int xdsize __P((dev_t)); 234 void xdstrategy __P((struct buf *)); 235 236 /* autoconf */ 237 int xdcmatch __P((struct device *, void *, void *)); 238 void xdcattach __P((struct device *, struct device *, void *)); 239 int xdmatch __P((struct device *, void *, void *)); 240 void xdattach __P((struct device *, struct device *, void *)); 241 242 static void xddummystrat __P((struct buf *)); 243 int xdgetdisklabel __P((struct xd_softc *, void *)); 244 245 /* 246 * cfdrivers: device driver interface to autoconfig 247 */ 248 249 struct cfattach xdc_ca = { 250 sizeof(struct xdc_softc), xdcmatch, xdcattach 251 }; 252 253 struct cfdriver xdc_cd = { 254 NULL, "xdc", DV_DULL 255 }; 256 257 struct cfattach xd_ca = { 258 sizeof(struct xd_softc), xdmatch, xdattach 259 }; 260 261 struct cfdriver xd_cd = { 262 NULL, "xd", DV_DISK 263 }; 264 
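/*
 * Illustrative sketch only (not part of the driver, and disabled with
 * #if 0): a minimal model of the circular wait queue maintained by the
 * XDC_TWAIT, XDC_HWAIT and XDC_GET_WAITER macros above.  "waithead" is
 * where requests are removed, "waitend" is where they are appended, and
 * both indexes wrap modulo XDC_MAXIOPB.  The struct and function names
 * below are invented for this sketch.
 */
#if 0
struct waitring {
	int wq[XDC_MAXIOPB];		/* queued iorq numbers */
	int head, end, count;		/* remove index, append index, depth */
};

static void
waitring_append(struct waitring *r, int rqno)	/* cf. XDC_TWAIT */
{
	r->wq[r->end] = rqno;
	r->end = (r->end + 1) % XDC_MAXIOPB;
	r->count++;
}

static void
waitring_push(struct waitring *r, int rqno)	/* cf. XDC_HWAIT */
{
	r->head = (r->head == 0) ? (XDC_MAXIOPB - 1) : (r->head - 1);
	r->wq[r->head] = rqno;
	r->count++;
}

static int
waitring_get(struct waitring *r)		/* cf. XDC_GET_WAITER */
{
	int rqno = r->wq[r->head];
	r->head = (r->head + 1) % XDC_MAXIOPB;
	r->count--;
	return (rqno);
}
#endif /* 0 */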
265 struct xdc_attach_args { /* this is the "aux" args to xdattach */ 266 int driveno; /* unit number */ 267 char *dvmabuf; /* scratch buffer for reading disk label */ 268 int fullmode; /* submit mode */ 269 int booting; /* are we booting or not? */ 270 }; 271 272 /* 273 * dkdriver 274 */ 275 276 struct dkdriver xddkdriver = {xdstrategy}; 277 278 /* 279 * start: disk label fix code (XXX) 280 */ 281 282 static void *xd_labeldata; 283 284 static void 285 xddummystrat(bp) 286 struct buf *bp; 287 { 288 if (bp->b_bcount != XDFM_BPS) 289 panic("xddummystrat"); 290 bcopy(xd_labeldata, bp->b_un.b_addr, XDFM_BPS); 291 bp->b_flags |= B_DONE; 292 bp->b_flags &= ~B_BUSY; 293 } 294 295 int 296 xdgetdisklabel(xd, b) 297 struct xd_softc *xd; 298 void *b; 299 { 300 char *err; 301 struct sun_disklabel *sdl; 302 303 /* We already have the label data in `b'; setup for dummy strategy */ 304 xd_labeldata = b; 305 306 /* Required parameter for readdisklabel() */ 307 xd->sc_dk.dk_label->d_secsize = XDFM_BPS; 308 309 err = readdisklabel(MAKEDISKDEV(0, xd->sc_dev.dv_unit, RAW_PART), 310 xddummystrat, 311 xd->sc_dk.dk_label, xd->sc_dk.dk_cpulabel); 312 if (err) { 313 printf("%s: %s\n", xd->sc_dev.dv_xname, err); 314 return(XD_ERR_FAIL); 315 } 316 317 /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */ 318 sdl = (struct sun_disklabel *)xd->sc_dk.dk_cpulabel->cd_block; 319 if (sdl->sl_magic == SUN_DKMAGIC) 320 xd->pcyl = sdl->sl_pcyl; 321 else { 322 printf("%s: WARNING: no `pcyl' in disk label.\n", 323 xd->sc_dev.dv_xname); 324 xd->pcyl = xd->sc_dk.dk_label->d_ncylinders + 325 xd->sc_dk.dk_label->d_acylinders; 326 printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n", 327 xd->sc_dev.dv_xname, xd->pcyl); 328 } 329 330 xd->ncyl = xd->sc_dk.dk_label->d_ncylinders; 331 xd->acyl = xd->sc_dk.dk_label->d_acylinders; 332 xd->nhead = xd->sc_dk.dk_label->d_ntracks; 333 xd->nsect = xd->sc_dk.dk_label->d_nsectors; 334 xd->sectpercyl = xd->nhead * xd->nsect; 335 xd->sc_dk.dk_label->d_secsize = XDFM_BPS; /* not handled by 336 * sun->bsd */ 337 return(XD_ERR_AOK); 338 } 339 340 /* 341 * end: disk label fix code (XXX) 342 */ 343 344 /* 345 * a u t o c o n f i g f u n c t i o n s 346 */ 347 348 /* 349 * xdcmatch: determine if xdc is present or not. we do a 350 * soft reset to detect the xdc. 351 */ 352 353 int xdcmatch(parent, match, aux) 354 struct device *parent; 355 void *match, *aux; 356 { 357 struct cfdata *cf = match; 358 struct confargs *ca = aux; 359 int x; 360 361 if (ca->ca_bustype != BUS_VME32) 362 return (0); 363 364 /* Default interrupt priority always splbio==2 */ 365 if (ca->ca_intpri == -1) 366 ca->ca_intpri = 2; 367 368 x = bus_peek(ca->ca_bustype, ca->ca_paddr + 11, 1); 369 if (x == -1) 370 return (0); 371 372 return (1); 373 } 374 375 /* 376 * xdcattach: attach controller 377 */ 378 void 379 xdcattach(parent, self, aux) 380 struct device *parent, *self; 381 void *aux; 382 383 { 384 struct xdc_softc *xdc = (void *) self; 385 struct confargs *ca = aux; 386 struct xdc_attach_args xa; 387 int lcv, rqno, err, pri; 388 struct xd_iopb_ctrl *ctl; 389 390 /* get addressing and intr level stuff from autoconfig and load it 391 * into our xdc_softc. 
*/ 392 393 xdc->xdc = (struct xdc *) 394 bus_mapin(ca->ca_bustype, ca->ca_paddr, sizeof(struct xdc)); 395 xdc->ipl = ca->ca_intpri; 396 xdc->vector = ca->ca_intvec; 397 398 for (lcv = 0; lcv < XDC_MAXDEV; lcv++) 399 xdc->sc_drives[lcv] = (struct xd_softc *) 0; 400 401 /* allocate and zero buffers 402 * 403 * note: we simplify the code by allocating the max number of iopbs and 404 * iorq's up front. thus, we avoid linked lists and the costs 405 * associated with them in exchange for wasting a little memory. */ 406 407 xdc->iopbase = (struct xd_iopb *) 408 dvma_malloc(XDC_MAXIOPB * sizeof(struct xd_iopb)); /* KVA */ 409 bzero(xdc->iopbase, XDC_MAXIOPB * sizeof(struct xd_iopb)); 410 xdc->dvmaiopb = (struct xd_iopb *) 411 dvma_kvtopa((long) xdc->iopbase, BUS_VME32); 412 xdc->reqs = (struct xd_iorq *) 413 malloc(XDC_MAXIOPB * sizeof(struct xd_iorq), M_DEVBUF, M_NOWAIT); 414 if (xdc->reqs == NULL) 415 panic("xdc malloc"); 416 bzero(xdc->reqs, XDC_MAXIOPB * sizeof(struct xd_iorq)); 417 418 /* init free list, iorq to iopb pointers, and non-zero fields in the 419 * iopb which never change. */ 420 421 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 422 xdc->reqs[lcv].iopb = &xdc->iopbase[lcv]; 423 xdc->freereq[lcv] = lcv; 424 xdc->iopbase[lcv].fixd = 1; /* always the same */ 425 xdc->iopbase[lcv].naddrmod = XDC_ADDRMOD; /* always the same */ 426 xdc->iopbase[lcv].intr_vec = xdc->vector; /* always the same */ 427 } 428 xdc->nfree = XDC_MAXIOPB; 429 xdc->nrun = 0; 430 xdc->waithead = xdc->waitend = xdc->nwait = 0; 431 xdc->ndone = 0; 432 433 /* init queue of waiting bufs */ 434 435 xdc->sc_wq.b_active = 0; 436 xdc->sc_wq.b_actf = 0; 437 xdc->sc_wq.b_actb = &xdc->sc_wq.b_actf; 438 439 /* 440 * section 7 of the manual tells us how to init the controller: 441 * - read controller parameters (6/0) 442 * - write controller parameters (5/0) 443 */ 444 445 /* read controller parameters and insure we have a 753/7053 */ 446 447 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL); 448 if (rqno == XD_ERR_FAIL) { 449 printf(": couldn't read controller params\n"); 450 return; /* shouldn't ever happen */ 451 } 452 ctl = (struct xd_iopb_ctrl *) & xdc->iopbase[rqno]; 453 if (ctl->ctype != XDCT_753) { 454 if (xdc->reqs[rqno].errno) 455 printf(": %s: ", xdc_e2str(xdc->reqs[rqno].errno)); 456 printf(": doesn't identify as a 753/7053\n"); 457 XDC_DONE(xdc, rqno, err); 458 return; 459 } 460 printf(": Xylogics 753/7053, PROM=%x.%02x.%02x\n", 461 ctl->eprom_partno, ctl->eprom_lvl, ctl->eprom_rev); 462 XDC_DONE(xdc, rqno, err); 463 464 /* now write controller parameters (xdc_cmd sets all params for us) */ 465 466 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL); 467 XDC_DONE(xdc, rqno, err); 468 if (err) { 469 printf("%s: controller config error: %s\n", 470 xdc->sc_dev.dv_xname, xdc_e2str(err)); 471 return; 472 } 473 474 /* link in interrupt with higher level software */ 475 isr_add_vectored(xdcintr, (void *)xdc, 476 ca->ca_intpri, ca->ca_intvec); 477 evcnt_attach(&xdc->sc_dev, "intr", &xdc->sc_intrcnt); 478 479 /* now we must look for disks using autoconfig */ 480 xa.dvmabuf = (char *) dvma_malloc(XDFM_BPS); 481 xa.fullmode = XD_SUB_POLL; 482 xa.booting = 1; 483 484 for (xa.driveno = 0; xa.driveno < XDC_MAXDEV; xa.driveno++) 485 (void) config_found(self, (void *) &xa, NULL); 486 487 dvma_free(xa.dvmabuf, XDFM_BPS); 488 489 /* start the watchdog clock */ 490 timeout(xdc_tick, xdc, XDC_TICKCNT); 491 } 492 493 /* 494 * xdmatch: probe for disk. 495 * 496 * note: we almost always say disk is present. 
this allows us to 497 * spin up and configure a disk after the system is booted (we can 498 * call xdattach!). 499 */ 500 int 501 xdmatch(parent, match, aux) 502 struct device *parent; 503 void *match, *aux; 504 505 { 506 struct xdc_softc *xdc = (void *) parent; 507 struct cfdata *cf = match; 508 struct xdc_attach_args *xa = aux; 509 510 /* looking for autoconf wildcard or exact match */ 511 512 if (cf->cf_loc[0] != -1 && cf->cf_loc[0] != xa->driveno) 513 return 0; 514 515 return 1; 516 517 } 518 519 /* 520 * xdattach: attach a disk. this can be called from autoconf and also 521 * from xdopen/xdstrategy. 522 */ 523 void 524 xdattach(parent, self, aux) 525 struct device *parent, *self; 526 void *aux; 527 528 { 529 struct xd_softc *xd = (void *) self; 530 struct xdc_softc *xdc = (void *) parent; 531 struct xdc_attach_args *xa = aux; 532 int rqno, err, spt, mb, blk, lcv, fmode, s, newstate; 533 struct xd_iopb_drive *driopb; 534 struct dkbad *dkb; 535 struct bootpath *bp; 536 537 /* 538 * Always re-initialize the disk structure. We want statistics 539 * to start with a clean slate. 540 */ 541 bzero(&xd->sc_dk, sizeof(xd->sc_dk)); 542 xd->sc_dk.dk_driver = &xddkdriver; 543 xd->sc_dk.dk_name = xd->sc_dev.dv_xname; 544 545 /* if booting, init the xd_softc */ 546 547 if (xa->booting) { 548 xd->state = XD_DRIVE_UNKNOWN; /* to start */ 549 xd->flags = 0; 550 xd->parent = xdc; 551 } 552 xd->xd_drive = xa->driveno; 553 fmode = xa->fullmode; 554 xdc->sc_drives[xa->driveno] = xd; 555 556 /* if not booting, make sure we are the only process in the attach for 557 * this drive. if locked out, sleep on it. */ 558 559 if (!xa->booting) { 560 s = splbio(); 561 while (xd->state == XD_DRIVE_ATTACHING) { 562 if (tsleep(&xd->state, PRIBIO, "xdattach", 0)) { 563 splx(s); 564 return; 565 } 566 } 567 printf("%s at %s", 568 xd->sc_dev.dv_xname, 569 xd->parent->sc_dev.dv_xname); 570 } 571 /* we now have control */ 572 573 xd->state = XD_DRIVE_ATTACHING; 574 newstate = XD_DRIVE_UNKNOWN; 575 576 /* first try and reset the drive */ 577 578 rqno = xdc_cmd(xdc, XDCMD_RST, 0, xd->xd_drive, 0, 0, 0, fmode); 579 XDC_DONE(xdc, rqno, err); 580 if (err == XD_ERR_NRDY) { 581 printf(" drive %d: off-line\n", xa->driveno); 582 goto done; 583 } 584 if (err) { 585 printf(": ERROR 0x%02x (%s)\n", err, xdc_e2str(err)); 586 goto done; 587 } 588 printf(" drive %d: ready\n", xa->driveno); 589 590 /* now set format parameters */ 591 592 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_FMT, xd->xd_drive, 0, 0, 0, fmode); 593 XDC_DONE(xdc, rqno, err); 594 if (err) { 595 printf("%s: write format parameters failed: %s\n", 596 xd->sc_dev.dv_xname, xdc_e2str(err)); 597 goto done; 598 } 599 600 /* get drive parameters */ 601 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode); 602 if (rqno != XD_ERR_FAIL) { 603 driopb = (struct xd_iopb_drive *) & xdc->iopbase[rqno]; 604 spt = driopb->sectpertrk; 605 } 606 XDC_DONE(xdc, rqno, err); 607 if (err) { 608 printf("%s: read drive parameters failed: %s\n", 609 xd->sc_dev.dv_xname, xdc_e2str(err)); 610 goto done; 611 } 612 613 /* 614 * now set drive parameters (to semi-bogus values) so we can read the 615 * disk label. 
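 * (a fake 1 cylinder/1 head/1 sector geometry is enough to address block 0, which is all the label read below needs)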
616 */ 617 xd->pcyl = xd->ncyl = 1; 618 xd->acyl = 0; 619 xd->nhead = 1; 620 xd->nsect = 1; 621 xd->sectpercyl = 1; 622 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */ 623 xd->dkb.bt_bad[lcv].bt_cyl = xd->dkb.bt_bad[lcv].bt_trksec = 0xffff; 624 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode); 625 XDC_DONE(xdc, rqno, err); 626 if (err) { 627 printf("%s: write drive parameters failed: %s\n", 628 xd->sc_dev.dv_xname, xdc_e2str(err)); 629 goto done; 630 } 631 632 /* read disk label */ 633 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, 0, 1, 634 xa->dvmabuf, fmode); 635 XDC_DONE(xdc, rqno, err); 636 if (err) { 637 printf("%s: reading disk label failed: %s\n", 638 xd->sc_dev.dv_xname, xdc_e2str(err)); 639 goto done; 640 } 641 newstate = XD_DRIVE_NOLABEL; 642 643 xd->hw_spt = spt; 644 /* Attach the disk: must be before getdisklabel to malloc label */ 645 disk_attach(&xd->sc_dk); 646 647 if (xdgetdisklabel(xd, xa->dvmabuf) != XD_ERR_AOK) 648 goto done; 649 650 /* inform the user of what is up */ 651 printf("%s: <%s>, pcyl %d, hw_spt %d\n", 652 xd->sc_dev.dv_xname, 653 xa->dvmabuf, xd->pcyl, spt); 654 mb = xd->ncyl * (xd->nhead * xd->nsect) / (1048576 / XDFM_BPS); 655 printf("%s: %dMB, %d cyl, %d head, %d sec, %d bytes/sec\n", 656 xd->sc_dev.dv_xname, mb, 657 xd->ncyl, xd->nhead, xd->nsect, XDFM_BPS); 658 659 /* now set the real drive parameters! */ 660 661 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode); 662 XDC_DONE(xdc, rqno, err); 663 if (err) { 664 printf("%s: write real drive parameters failed: %s\n", 665 xd->sc_dev.dv_xname, xdc_e2str(err)); 666 goto done; 667 } 668 newstate = XD_DRIVE_ONLINE; 669 670 /* 671 * read bad144 table. this table resides on the first sector of the 672 * last track of the disk (i.e. second cyl of "acyl" area). 673 */ 674 675 blk = (xd->ncyl + xd->acyl - 1) * (xd->nhead * xd->nsect) + /* last cyl */ 676 (xd->nhead - 1) * xd->nsect; /* last head */ 677 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, blk, 1, xa->dvmabuf, fmode); 678 XDC_DONE(xdc, rqno, err); 679 if (err) { 680 printf("%s: reading bad144 failed: %s\n", 681 xd->sc_dev.dv_xname, xdc_e2str(err)); 682 goto done; 683 } 684 685 /* check dkbad for sanity */ 686 dkb = (struct dkbad *) xa->dvmabuf; 687 for (lcv = 0; lcv < 126; lcv++) { 688 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff || 689 dkb->bt_bad[lcv].bt_cyl == 0) && 690 dkb->bt_bad[lcv].bt_trksec == 0xffff) 691 continue; /* blank */ 692 if (dkb->bt_bad[lcv].bt_cyl >= xd->ncyl) 693 break; 694 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xd->nhead) 695 break; 696 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xd->nsect) 697 break; 698 } 699 if (lcv != 126) { 700 printf("%s: warning: invalid bad144 sector!\n", 701 xd->sc_dev.dv_xname); 702 } else { 703 bcopy(xa->dvmabuf, &xd->dkb, XDFM_BPS); 704 } 705 706 /* XXX - Where is this and what does it do? 
-gwr */ 707 dk_establish(&xd->sc_dk, &xd->sc_dev); 708 709 done: 710 xd->state = newstate; 711 if (!xa->booting) { 712 wakeup(&xd->state); 713 splx(s); 714 } 715 } 716 717 /* 718 * end of autoconfig functions 719 */ 720 721 /* 722 * { b , c } d e v s w f u n c t i o n s 723 */ 724 725 /* 726 * xdclose: close device 727 */ 728 int 729 xdclose(dev, flag, fmt) 730 dev_t dev; 731 int flag, fmt; 732 733 { 734 struct xd_softc *xd = xd_cd.cd_devs[DISKUNIT(dev)]; 735 int part = DISKPART(dev); 736 737 /* clear mask bits */ 738 739 switch (fmt) { 740 case S_IFCHR: 741 xd->sc_dk.dk_copenmask &= ~(1 << part); 742 break; 743 case S_IFBLK: 744 xd->sc_dk.dk_bopenmask &= ~(1 << part); 745 break; 746 } 747 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 748 749 return 0; 750 } 751 752 /* 753 * xddump: crash dump system 754 */ 755 int 756 xddump(dev) 757 dev_t dev; 758 759 { 760 int unit, part; 761 struct xd_softc *xd; 762 763 unit = DISKUNIT(dev); 764 if (unit >= xd_cd.cd_ndevs) 765 return ENXIO; 766 part = DISKPART(dev); 767 768 xd = xd_cd.cd_devs[unit]; 769 770 printf("%s%c: crash dump not supported (yet)\n", 771 xd->sc_dev.dv_xname, 'a' + part); 772 773 return ENXIO; 774 775 /* outline: globals: "dumplo" == sector number of partition to start 776 * dump at (convert to physical sector with partition table) 777 * "dumpsize" == size of dump in clicks "physmem" == size of physical 778 * memory (clicks, ctob() to get bytes) (normal case: dumpsize == 779 * physmem) 780 * 781 * dump a copy of physical memory to the dump device starting at sector 782 * "dumplo" in the swap partition (make sure > 0). map in pages as 783 * we go. use polled I/O. 784 * 785 * XXX how to handle NON_CONTIG? */ 786 787 } 788 789 /* 790 * xdioctl: ioctls on XD drives. based on ioctl's of other netbsd disks. 
791 */ 792 int 793 xdioctl(dev, command, addr, flag, p) 794 dev_t dev; 795 u_long command; 796 caddr_t addr; 797 int flag; 798 struct proc *p; 799 800 { 801 struct xd_softc *xd; 802 struct xd_iocmd *xio; 803 int error, s, unit; 804 805 unit = DISKUNIT(dev); 806 807 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL) 808 return (ENXIO); 809 810 /* switch on ioctl type */ 811 812 switch (command) { 813 case DIOCSBAD: /* set bad144 info */ 814 if ((flag & FWRITE) == 0) 815 return EBADF; 816 s = splbio(); 817 bcopy(addr, &xd->dkb, sizeof(xd->dkb)); 818 splx(s); 819 return 0; 820 821 case DIOCGDINFO: /* get disk label */ 822 bcopy(xd->sc_dk.dk_label, addr, sizeof(struct disklabel)); 823 return 0; 824 825 case DIOCGPART: /* get partition info */ 826 ((struct partinfo *) addr)->disklab = xd->sc_dk.dk_label; 827 ((struct partinfo *) addr)->part = 828 &xd->sc_dk.dk_label->d_partitions[DISKPART(dev)]; 829 return 0; 830 831 case DIOCSDINFO: /* set disk label */ 832 if ((flag & FWRITE) == 0) 833 return EBADF; 834 error = setdisklabel(xd->sc_dk.dk_label, 835 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0, 836 xd->sc_dk.dk_cpulabel); 837 if (error == 0) { 838 if (xd->state == XD_DRIVE_NOLABEL) 839 xd->state = XD_DRIVE_ONLINE; 840 } 841 return error; 842 843 case DIOCWLABEL: /* change write status of disk label */ 844 if ((flag & FWRITE) == 0) 845 return EBADF; 846 if (*(int *) addr) 847 xd->flags |= XD_WLABEL; 848 else 849 xd->flags &= ~XD_WLABEL; 850 return 0; 851 852 case DIOCWDINFO: /* write disk label */ 853 if ((flag & FWRITE) == 0) 854 return EBADF; 855 error = setdisklabel(xd->sc_dk.dk_label, 856 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0, 857 xd->sc_dk.dk_cpulabel); 858 if (error == 0) { 859 if (xd->state == XD_DRIVE_NOLABEL) 860 xd->state = XD_DRIVE_ONLINE; 861 862 /* Simulate opening partition 0 so write succeeds. */ 863 xd->sc_dk.dk_openmask |= (1 << 0); 864 error = writedisklabel(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART), 865 xdstrategy, xd->sc_dk.dk_label, 866 xd->sc_dk.dk_cpulabel); 867 xd->sc_dk.dk_openmask = 868 xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 869 } 870 return error; 871 872 case DIOSXDCMD: 873 xio = (struct xd_iocmd *) addr; 874 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) 875 return (error); 876 return (xdc_ioctlcmd(xd, dev, xio)); 877 878 default: 879 return ENOTTY; 880 } 881 } 882 /* 883 * xdopen: open drive 884 */ 885 886 int 887 xdopen(dev, flag, fmt) 888 dev_t dev; 889 int flag, fmt; 890 891 { 892 int unit, part; 893 struct xd_softc *xd; 894 struct xdc_attach_args xa; 895 896 /* first, could it be a valid target? */ 897 898 unit = DISKUNIT(dev); 899 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL) 900 return (ENXIO); 901 part = DISKPART(dev); 902 903 /* do we need to attach the drive? 
*/ 904 905 if (xd->state == XD_DRIVE_UNKNOWN) { 906 xa.driveno = xd->xd_drive; 907 xa.dvmabuf = (char *) dvma_malloc(XDFM_BPS); 908 xa.fullmode = XD_SUB_WAIT; 909 xa.booting = 0; 910 xdattach((struct device *) xd->parent, (struct device *) xd, &xa); 911 dvma_free(xa.dvmabuf, XDFM_BPS); 912 if (xd->state == XD_DRIVE_UNKNOWN) { 913 return (EIO); 914 } 915 } 916 /* check for partition */ 917 918 if (part != RAW_PART && 919 (part >= xd->sc_dk.dk_label->d_npartitions || 920 xd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) { 921 return (ENXIO); 922 } 923 /* set open masks */ 924 925 switch (fmt) { 926 case S_IFCHR: 927 xd->sc_dk.dk_copenmask |= (1 << part); 928 break; 929 case S_IFBLK: 930 xd->sc_dk.dk_bopenmask |= (1 << part); 931 break; 932 } 933 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 934 935 return 0; 936 } 937 938 int 939 xdread(dev, uio) 940 dev_t dev; 941 struct uio *uio; 942 { 943 944 return (physio(xdstrategy, NULL, dev, B_READ, minphys, uio)); 945 } 946 947 int 948 xdwrite(dev, uio) 949 dev_t dev; 950 struct uio *uio; 951 { 952 953 return (physio(xdstrategy, NULL, dev, B_WRITE, minphys, uio)); 954 } 955 956 957 /* 958 * xdsize: return size of a partition for a dump 959 */ 960 961 int 962 xdsize(dev) 963 dev_t dev; 964 965 { 966 struct xd_softc *xdsc; 967 int unit, part, size; 968 969 /* valid unit? try an open */ 970 971 if (xdopen(dev, 0, S_IFBLK) != 0) 972 return (-1); 973 974 /* do it */ 975 976 xdsc = xd_cd.cd_devs[DISKUNIT(dev)]; 977 part = DISKPART(dev); 978 if (xdsc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP) 979 size = -1; /* only give valid size for swap partitions */ 980 else 981 size = xdsc->sc_dk.dk_label->d_partitions[part].p_size; 982 if (xdclose(dev, 0, S_IFBLK) != 0) 983 return -1; 984 return size; 985 } 986 /* 987 * xdstrategy: buffering system interface to xd. 988 */ 989 990 void 991 xdstrategy(bp) 992 struct buf *bp; 993 994 { 995 struct xd_softc *xd; 996 struct xdc_softc *parent; 997 struct buf *wq; 998 int s, unit; 999 struct xdc_attach_args xa; 1000 1001 unit = DISKUNIT(bp->b_dev); 1002 1003 /* check for live device */ 1004 1005 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == 0 || 1006 bp->b_blkno < 0 || 1007 (bp->b_bcount % xd->sc_dk.dk_label->d_secsize) != 0) { 1008 bp->b_error = EINVAL; 1009 goto bad; 1010 } 1011 /* do we need to attach the drive? */ 1012 1013 if (xd->state == XD_DRIVE_UNKNOWN) { 1014 xa.driveno = xd->xd_drive; 1015 xa.dvmabuf = (char *) dvma_malloc(XDFM_BPS); 1016 xa.fullmode = XD_SUB_WAIT; 1017 xa.booting = 0; 1018 xdattach((struct device *)xd->parent, (struct device *)xd, &xa); 1019 dvma_free(xa.dvmabuf, XDFM_BPS); 1020 if (xd->state == XD_DRIVE_UNKNOWN) { 1021 bp->b_error = EIO; 1022 goto bad; 1023 } 1024 } 1025 if (xd->state != XD_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) { 1026 /* no I/O to unlabeled disks, unless raw partition */ 1027 bp->b_error = EIO; 1028 goto bad; 1029 } 1030 /* short circuit zero length request */ 1031 1032 if (bp->b_bcount == 0) 1033 goto done; 1034 1035 /* check bounds with label (disksubr.c). Determine the size of the 1036 * transfer, and make sure it is within the boundaries of the 1037 * partition. Adjust transfer if needed, and signal errors or early 1038 * completion. */ 1039 1040 if (bounds_check_with_label(bp, xd->sc_dk.dk_label, 1041 (xd->flags & XD_WLABEL) != 0) <= 0) 1042 goto done; 1043 1044 /* 1045 * now we know we have a valid buf structure that we need to do I/O 1046 * on. 
1047 * 1048 * note that we don't disksort because the controller has a sorting 1049 * algorithm built into the hardware. 1050 */ 1051 1052 s = splbio(); /* protect the queues */ 1053 1054 /* first, give jobs in front of us a chance */ 1055 1056 parent = xd->parent; 1057 while (parent->nfree > 0 && parent->sc_wq.b_actf) 1058 if (xdc_startbuf(parent, NULL, NULL) != XD_ERR_AOK) 1059 break; 1060 1061 /* if there are no free iorq's, then we just queue and return. the 1062 * buffs will get picked up later by xdcintr(). */ 1063 1064 if (parent->nfree == 0) { 1065 wq = &xd->parent->sc_wq; 1066 bp->b_actf = 0; 1067 bp->b_actb = wq->b_actb; 1068 *wq->b_actb = bp; 1069 wq->b_actb = &bp->b_actf; 1070 splx(s); 1071 return; 1072 } 1073 /* now we have free iopb's and we are at splbio... start 'em up */ 1074 1075 if (xdc_startbuf(parent, xd, bp) != XD_ERR_AOK) { 1076 splx(s); return; 1077 } 1078 1079 /* done! */ 1080 1081 splx(s); 1082 return; 1083 1084 bad: /* tells upper layers we have an error */ 1085 bp->b_flags |= B_ERROR; 1086 done: /* tells upper layers we are done with this 1087 * buf */ 1088 bp->b_resid = bp->b_bcount; 1089 biodone(bp); 1090 } 1091 /* 1092 * end of {b,c}devsw functions 1093 */ 1094 1095 /* 1096 * i n t e r r u p t f u n c t i o n 1097 * 1098 * xdcintr: hardware interrupt. 1099 */ 1100 int 1101 xdcintr(v) 1102 void *v; 1103 1104 { 1105 struct xdc_softc *xdcsc = v; 1106 struct xd_softc *xd; 1107 struct buf *bp; 1108 1109 /* kick the event counter */ 1110 1111 xdcsc->sc_intrcnt.ev_count++; 1112 1113 /* remove as many done IOPBs as possible */ 1114 1115 xdc_remove_iorq(xdcsc); 1116 1117 /* start any iorq's already waiting */ 1118 1119 xdc_start(xdcsc, XDC_MAXIOPB); 1120 1121 /* fill up any remaining iorq's with queue'd buffers */ 1122 1123 while (xdcsc->nfree > 0 && xdcsc->sc_wq.b_actf) 1124 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK) 1125 break; 1126 1127 return (1); 1128 } 1129 /* 1130 * end of interrupt function 1131 */ 1132 1133 /* 1134 * i n t e r n a l f u n c t i o n s 1135 */ 1136 1137 /* 1138 * xdc_rqinit: fill out the fields of an I/O request 1139 */ 1140 1141 inline void 1142 xdc_rqinit(rq, xdc, xd, md, blk, cnt, db, bp) 1143 struct xd_iorq *rq; 1144 struct xdc_softc *xdc; 1145 struct xd_softc *xd; 1146 int md; 1147 u_long blk; 1148 int cnt; 1149 caddr_t db; 1150 struct buf *bp; 1151 { 1152 rq->xdc = xdc; 1153 rq->xd = xd; 1154 rq->ttl = XDC_MAXTTL + 10; 1155 rq->mode = md; 1156 rq->tries = rq->errno = rq->lasterror = 0; 1157 rq->blockno = blk; 1158 rq->sectcnt = cnt; 1159 rq->dbuf = rq->dbufbase = db; 1160 rq->buf = bp; 1161 } 1162 /* 1163 * xdc_rqtopb: load up an IOPB based on an iorq 1164 */ 1165 1166 void 1167 xdc_rqtopb(iorq, iopb, cmd, subfun) 1168 struct xd_iorq *iorq; 1169 struct xd_iopb *iopb; 1170 int cmd, subfun; 1171 1172 { 1173 u_long block, dp; 1174 1175 /* standard stuff */ 1176 1177 iopb->errs = iopb->done = 0; 1178 iopb->comm = cmd; 1179 iopb->errno = iopb->status = 0; 1180 iopb->subfun = subfun; 1181 if (iorq->xd) 1182 iopb->unit = iorq->xd->xd_drive; 1183 else 1184 iopb->unit = 0; 1185 1186 /* check for alternate IOPB format */ 1187 1188 if (cmd == XDCMD_WRP) { 1189 switch (subfun) { 1190 case XDFUN_CTL:{ 1191 struct xd_iopb_ctrl *ctrl = 1192 (struct xd_iopb_ctrl *) iopb; 1193 iopb->lll = 0; 1194 iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL) 1195 ?
0 1196 : iorq->xdc->ipl; 1197 ctrl->param_a = XDPA_TMOD | XDPA_DACF; 1198 ctrl->param_b = XDPB_ROR | XDPB_TDT_3_2USEC; 1199 ctrl->param_c = XDPC_OVS | XDPC_COP | XDPC_ASR | 1200 XDPC_RBC | XDPC_ECC2; 1201 ctrl->throttle = XDC_THROTTLE; 1202 #ifdef sparc 1203 if (cputyp == CPU_SUN4 && cpumod == SUN4_300) 1204 ctrl->delay = XDC_DELAY_4_300; 1205 else 1206 ctrl->delay = XDC_DELAY_SPARC; 1207 #endif 1208 #ifdef sun3 1209 ctrl->delay = XDC_DELAY_SUN3; 1210 #endif 1211 break; 1212 } 1213 case XDFUN_DRV:{ 1214 struct xd_iopb_drive *drv = 1215 (struct xd_iopb_drive *)iopb; 1216 /* we assume that the disk label has the right 1217 * info */ 1218 if (XD_STATE(iorq->mode) == XD_SUB_POLL) 1219 drv->dparam_ipl = (XDC_DPARAM << 3); 1220 else 1221 drv->dparam_ipl = (XDC_DPARAM << 3) | 1222 iorq->xdc->ipl; 1223 drv->maxsect = iorq->xd->nsect - 1; 1224 drv->maxsector = drv->maxsect; 1225 /* note: maxsector != maxsect only if you are 1226 * doing cyl sparing */ 1227 drv->headoff = 0; 1228 drv->maxcyl = iorq->xd->pcyl - 1; 1229 drv->maxhead = iorq->xd->nhead - 1; 1230 break; 1231 } 1232 case XDFUN_FMT:{ 1233 struct xd_iopb_format *form = 1234 (struct xd_iopb_format *) iopb; 1235 if (XD_STATE(iorq->mode) == XD_SUB_POLL) 1236 form->interleave_ipl = (XDC_INTERLEAVE << 3); 1237 else 1238 form->interleave_ipl = (XDC_INTERLEAVE << 3) | 1239 iorq->xdc->ipl; 1240 form->field1 = XDFM_FIELD1; 1241 form->field2 = XDFM_FIELD2; 1242 form->field3 = XDFM_FIELD3; 1243 form->field4 = XDFM_FIELD4; 1244 form->bytespersec = XDFM_BPS; 1245 form->field6 = XDFM_FIELD6; 1246 form->field7 = XDFM_FIELD7; 1247 break; 1248 } 1249 } 1250 } else { 1251 1252 /* normal IOPB case (harmless to RDP command) */ 1253 1254 iopb->lll = 0; 1255 iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL) 1256 ? 0 1257 : iorq->xdc->ipl; 1258 iopb->sectcnt = iorq->sectcnt; 1259 block = iorq->blockno; 1260 if (iorq->xd == NULL || block == 0) { 1261 iopb->sectno = iopb->headno = iopb->cylno = 0; 1262 } else { 1263 iopb->sectno = block % iorq->xd->nsect; 1264 block = block / iorq->xd->nsect; 1265 iopb->headno = block % iorq->xd->nhead; 1266 block = block / iorq->xd->nhead; 1267 iopb->cylno = block; 1268 } 1269 iopb->daddr = dp = (iorq->dbuf == NULL) ? 0 : 1270 dvma_kvtopa((long)iorq->dbuf, BUS_VME32); 1271 iopb->addrmod = XDC_ADDRMOD; 1272 } 1273 } 1274 1275 /* 1276 * xdc_cmd: front end for POLL'd and WAIT'd commands. Returns rqno. 1277 * If you've already got an IORQ, you can call submit directly (currently 1278 * there is no need to do this). NORM requests are handled seperately. 
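 * on failure we return XD_ERR_FAIL; otherwise the caller hands the returned rqno to XDC_DONE, which collects reqs[rqno].errno and puts the iorq back on the free list.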
1279 */ 1280 int 1281 xdc_cmd(xdcsc, cmd, subfn, unit, block, scnt, dptr, fullmode) 1282 struct xdc_softc *xdcsc; 1283 int cmd, subfn, unit, block, scnt; 1284 char *dptr; 1285 int fullmode; 1286 1287 { 1288 int rqno, submode = XD_STATE(fullmode), retry; 1289 u_long dp; 1290 struct xd_iorq *iorq; 1291 struct xd_iopb *iopb; 1292 1293 /* get iorq/iopb */ 1294 switch (submode) { 1295 case XD_SUB_POLL: 1296 while (xdcsc->nfree == 0) { 1297 if (xdc_piodriver(xdcsc, 0, 1) != XD_ERR_AOK) 1298 return (XD_ERR_FAIL); 1299 } 1300 break; 1301 case XD_SUB_WAIT: 1302 retry = 1; 1303 while (retry) { 1304 while (xdcsc->nfree == 0) { 1305 if (tsleep(&xdcsc->nfree, PRIBIO, "xdnfree", 0)) 1306 return (XD_ERR_FAIL); 1307 } 1308 while (xdcsc->ndone > XDC_SUBWAITLIM) { 1309 if (tsleep(&xdcsc->ndone, PRIBIO, "xdsubwait", 0)) 1310 return (XD_ERR_FAIL); 1311 } 1312 if (xdcsc->nfree) 1313 retry = 0; /* got it */ 1314 } 1315 break; 1316 default: 1317 return (XD_ERR_FAIL); /* illegal */ 1318 } 1319 if (xdcsc->nfree == 0) 1320 panic("xdcmd nfree"); 1321 rqno = XDC_RQALLOC(xdcsc); 1322 iorq = &xdcsc->reqs[rqno]; 1323 iopb = iorq->iopb; 1324 1325 1326 /* init iorq/iopb */ 1327 1328 xdc_rqinit(iorq, xdcsc, 1329 (unit == XDC_NOUNIT) ? NULL : xdcsc->sc_drives[unit], 1330 fullmode, block, scnt, dptr, NULL); 1331 1332 /* load IOPB from iorq */ 1333 1334 xdc_rqtopb(iorq, iopb, cmd, subfn); 1335 1336 /* submit it for processing */ 1337 1338 xdc_submit_iorq(xdcsc, rqno, fullmode); /* error code will be in iorq */ 1339 1340 return (rqno); 1341 } 1342 /* 1343 * xdc_startbuf 1344 * start a buffer running, assumes nfree > 0 1345 */ 1346 1347 int 1348 xdc_startbuf(xdcsc, xdsc, bp) 1349 struct xdc_softc *xdcsc; 1350 struct xd_softc *xdsc; 1351 struct buf *bp; 1352 1353 { 1354 int rqno, partno; 1355 struct xd_iorq *iorq; 1356 struct xd_iopb *iopb; 1357 struct buf *wq; 1358 u_long block, dp; 1359 caddr_t dbuf; 1360 1361 if (!xdcsc->nfree) 1362 panic("xdc_startbuf free"); 1363 rqno = XDC_RQALLOC(xdcsc); 1364 iorq = &xdcsc->reqs[rqno]; 1365 iopb = iorq->iopb; 1366 1367 /* get buf */ 1368 1369 if (bp == NULL) { 1370 bp = xdcsc->sc_wq.b_actf; 1371 if (!bp) 1372 panic("xdc_startbuf bp"); 1373 wq = bp->b_actf; 1374 if (wq) 1375 wq->b_actb = bp->b_actb; 1376 else 1377 xdcsc->sc_wq.b_actb = bp->b_actb; 1378 *bp->b_actb = wq; 1379 xdsc = xdcsc->sc_drives[DISKUNIT(bp->b_dev)]; 1380 } 1381 partno = DISKPART(bp->b_dev); 1382 #ifdef XDC_DEBUG 1383 printf("xdc_startbuf: %s%c: %s block %d\n", xdsc->sc_dev.dv_xname, 1384 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno); 1385 printf("xdc_startbuf: b_bcount %d, b_data 0x%x\n", 1386 bp->b_bcount, bp->b_data); 1387 #endif 1388 1389 /* 1390 * load request. we have to calculate the correct block number based 1391 * on partition info. 1392 * 1393 * also, note that there are two kinds of buf structures, those with 1394 * B_PHYS set and those without B_PHYS. if B_PHYS is set, then it is 1395 * a raw I/O (to a cdevsw) and we are doing I/O directly to the users' 1396 * buffer which has already been mapped into DVMA space. (Not on sun3) 1397 * However, if B_PHYS is not set, then the buffer is a normal system 1398 * buffer which does *not* live in DVMA space. In that case we call 1399 * dvma_mapin to map it into DVMA space so we can do the DMA to it. 1400 * 1401 * in cases where we do a dvma_mapin, note that iorq points to the buffer 1402 * as mapped into DVMA space, where as the bp->b_data points to its 1403 * non-DVMA mapping. 
1404 * 1405 * XXX - On the sun3, B_PHYS does NOT mean the buffer is mapped 1406 * into dvma space, only that it was remapped into the kernel. 1407 * We ALWAYS have to remap the kernel buf into DVMA space. 1408 * (It is done inexpensively, using whole segments!) 1409 */ 1410 1411 block = bp->b_blkno + ((partno == RAW_PART) ? 0 : 1412 xdsc->sc_dk.dk_label->d_partitions[partno].p_offset); 1413 1414 dbuf = dvma_mapin(bp->b_data, bp->b_bcount); 1415 if (dbuf == NULL) { /* out of DVMA space */ 1416 printf("%s: warning: out of DVMA space\n", xdcsc->sc_dev.dv_xname); 1417 XDC_FREE(xdcsc, rqno); 1418 wq = &xdcsc->sc_wq; /* put at end of queue */ 1419 bp->b_actf = 0; 1420 bp->b_actb = wq->b_actb; 1421 *wq->b_actb = bp; 1422 wq->b_actb = &bp->b_actf; 1423 return (XD_ERR_FAIL); /* XXX: need some sort of 1424 * call-back scheme here? */ 1425 } 1426 1427 /* init iorq and load iopb from it */ 1428 1429 xdc_rqinit(iorq, xdcsc, xdsc, XD_SUB_NORM | XD_MODE_VERBO, block, 1430 bp->b_bcount / XDFM_BPS, dbuf, bp); 1431 1432 xdc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XDCMD_RD : XDCMD_WR, 0); 1433 1434 /* Instrumentation. */ 1435 disk_busy(&xdsc->sc_dk); 1436 1437 /* now submit [note that xdc_submit_iorq can never fail on NORM reqs] */ 1438 1439 xdc_submit_iorq(xdcsc, rqno, XD_SUB_NORM); 1440 return (XD_ERR_AOK); 1441 } 1442 1443 1444 /* 1445 * xdc_submit_iorq: submit an iorq for processing. returns XD_ERR_AOK 1446 * if ok. if it fails it returns an error code. type is XD_SUB_*. 1447 * 1448 * note: caller frees iorq in all cases except NORM 1449 * 1450 * return value: 1451 * NORM: XD_AOK (req pending), XD_FAIL (couldn't submit request) 1452 * WAIT: XD_AOK (success), <error-code> (failed) 1453 * POLL: <same as WAIT> 1454 * NOQ : <same as NORM> 1455 * 1456 * there are three sources for i/o requests: 1457 * [1] xdstrategy: normal block I/O, using "struct buf" system. 1458 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts. 1459 * [3] open/ioctl: these are I/O requests done in the context of a process, 1460 * and the process should block until they are done. 1461 * 1462 * software state is stored in the iorq structure. each iorq has an 1463 * iopb structure. the hardware understands the iopb structure. 1464 * every command must go through an iopb. a 7053 can only handle 1465 * XDC_MAXIOPB (31) active iopbs at one time. iopbs are allocated in 1466 * DVMA space at boot up time. what happens if we run out of iopb's? 1467 * for i/o type [1], the buffers are queued at the "buff" layer and 1468 * picked up later by the interrupt routine. for case [2] the 1469 * programmed i/o driver is called with a special flag that says 1470 * return when one iopb is free. for case [3] the process can sleep 1471 * on the iorq free list until some iopbs are available.
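 * note: at any time nwait + nrun + nfree + ndone == XDC_MAXIOPB; the XDC_DIAG code in xdc_reset and xdc_tick checks this invariant.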
1472 */ 1473 1474 1475 int 1476 xdc_submit_iorq(xdcsc, iorqno, type) 1477 struct xdc_softc *xdcsc; 1478 int iorqno; 1479 int type; 1480 1481 { 1482 u_long iopbaddr; 1483 struct xd_iorq *iorq = &xdcsc->reqs[iorqno]; 1484 1485 #ifdef XDC_DEBUG 1486 printf("xdc_submit_iorq(%s, no=%d, type=%d)\n", xdcsc->sc_dev.dv_xname, 1487 iorqno, type); 1488 #endif 1489 1490 /* first check and see if controller is busy */ 1491 if (xdcsc->xdc->xdc_csr & XDC_ADDING) { 1492 #ifdef XDC_DEBUG 1493 printf("xdc_submit_iorq: XDC not ready (ADDING)\n"); 1494 #endif 1495 if (type == XD_SUB_NOQ) 1496 return (XD_ERR_FAIL); /* failed */ 1497 XDC_TWAIT(xdcsc, iorqno); /* put at end of waitq */ 1498 switch (type) { 1499 case XD_SUB_NORM: 1500 return XD_ERR_AOK; /* success */ 1501 case XD_SUB_WAIT: 1502 while (iorq->iopb->done == 0) { 1503 sleep(iorq, PRIBIO); 1504 } 1505 return (iorq->errno); 1506 case XD_SUB_POLL: 1507 return (xdc_piodriver(xdcsc, iorqno, 0)); 1508 default: 1509 panic("xdc_submit_iorq adding"); 1510 } 1511 } 1512 #ifdef XDC_DEBUG 1513 { 1514 u_char *rio = (u_char *) iorq->iopb; 1515 int sz = sizeof(struct xd_iopb), lcv; 1516 printf("%s: aio #%d [", 1517 xdcsc->sc_dev.dv_xname, iorq - xdcsc->reqs); 1518 for (lcv = 0; lcv < sz; lcv++) 1519 printf(" %02x", rio[lcv]); 1520 printf("]\n"); 1521 } 1522 #endif /* XDC_DEBUG */ 1523 1524 /* controller not busy, start command */ 1525 iopbaddr = dvma_kvtopa((long) iorq->iopb, BUS_VME32); 1526 XDC_GO(xdcsc->xdc, iopbaddr); /* go! */ 1527 xdcsc->nrun++; 1528 /* command now running, wrap it up */ 1529 switch (type) { 1530 case XD_SUB_NORM: 1531 case XD_SUB_NOQ: 1532 return (XD_ERR_AOK); /* success */ 1533 case XD_SUB_WAIT: 1534 while (iorq->iopb->done == 0) { 1535 sleep(iorq, PRIBIO); 1536 } 1537 return (iorq->errno); 1538 case XD_SUB_POLL: 1539 return (xdc_piodriver(xdcsc, iorqno, 0)); 1540 default: 1541 panic("xdc_submit_iorq wrap up"); 1542 } 1543 panic("xdc_submit_iorq"); 1544 return 0; /* not reached */ 1545 } 1546 1547 1548 /* 1549 * xdc_piodriver 1550 * 1551 * programmed i/o driver. this function takes over the computer 1552 * and drains off all i/o requests. it returns the status of the iorq 1553 * the caller is interested in. if freeone is true, then it returns 1554 * when there is a free iorq. 1555 */ 1556 int 1557 xdc_piodriver(xdcsc, iorqno, freeone) 1558 struct xdc_softc *xdcsc; 1559 int iorqno; 1560 int freeone; 1561 1562 { 1563 int nreset = 0; 1564 int retval = 0; 1565 u_long count; 1566 struct xdc *xdc = xdcsc->xdc; 1567 #ifdef XDC_DEBUG 1568 printf("xdc_piodriver(%s, %d, freeone=%d)\n", xdcsc->sc_dev.dv_xname, 1569 iorqno, freeone); 1570 #endif 1571 1572 while (xdcsc->nwait || xdcsc->nrun) { 1573 #ifdef XDC_DEBUG 1574 printf("xdc_piodriver: wait=%d, run=%d\n", 1575 xdcsc->nwait, xdcsc->nrun); 1576 #endif 1577 XDC_WAIT(xdc, count, XDC_MAXTIME, (XDC_REMIOPB | XDC_F_ERROR)); 1578 #ifdef XDC_DEBUG 1579 printf("xdc_piodriver: done wait with count = %d\n", count); 1580 #endif 1581 /* we expect some progress soon */ 1582 if (count == 0 && nreset >= 2) { 1583 xdc_reset(xdcsc, 0, XD_RSET_ALL, XD_ERR_FAIL, 0); 1584 #ifdef XDC_DEBUG 1585 printf("xdc_piodriver: timeout\n"); 1586 #endif 1587 return (XD_ERR_FAIL); 1588 } 1589 if (count == 0) { 1590 if (xdc_reset(xdcsc, 0, 1591 (nreset++ == 0) ?
XD_RSET_NONE : iorqno, 1592 XD_ERR_FAIL, 1593 0) == XD_ERR_FAIL) 1594 return (XD_ERR_FAIL); /* flushes all but POLL 1595 * requests, resets */ 1596 continue; 1597 } 1598 xdc_remove_iorq(xdcsc); /* could resubmit request */ 1599 if (freeone) { 1600 if (xdcsc->nrun < XDC_MAXIOPB) { 1601 #ifdef XDC_DEBUG 1602 printf("xdc_piodriver: done: one free\n"); 1603 #endif 1604 return (XD_ERR_AOK); 1605 } 1606 continue; /* don't xdc_start */ 1607 } 1608 xdc_start(xdcsc, XDC_MAXIOPB); 1609 } 1610 1611 /* get return value */ 1612 1613 retval = xdcsc->reqs[iorqno].errno; 1614 1615 #ifdef XDC_DEBUG 1616 printf("xdc_piodriver: done, retval = 0x%x (%s)\n", 1617 xdcsc->reqs[iorqno].errno, xdc_e2str(xdcsc->reqs[iorqno].errno)); 1618 #endif 1619 1620 /* now that we've drained everything, start up any bufs that have 1621 * queued */ 1622 1623 while (xdcsc->nfree > 0 && xdcsc->sc_wq.b_actf) 1624 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK) 1625 break; 1626 1627 return (retval); 1628 } 1629 1630 /* 1631 * xdc_xdreset: reset one drive. NOTE: assumes xdc was just reset. 1632 * we steal iopb[0] for this, but we put it back when we are done. 1633 */ 1634 int 1635 xdc_xdreset(xdcsc, xdsc) 1636 struct xdc_softc *xdcsc; 1637 struct xd_softc *xdsc; 1638 1639 { 1640 struct xd_iopb tmpiopb; 1641 u_long addr; 1642 int del; 1643 bcopy(xdcsc->iopbase, &tmpiopb, sizeof(tmpiopb)); 1644 bzero(xdcsc->iopbase, sizeof(tmpiopb)); 1645 xdcsc->iopbase->comm = XDCMD_RST; 1646 xdcsc->iopbase->unit = xdsc->xd_drive; 1647 addr = (u_long) xdcsc->dvmaiopb; 1648 XDC_GO(xdcsc->xdc, addr); /* go! */ 1649 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_REMIOPB); 1650 if (del <= 0 || xdcsc->iopbase->errs) { 1651 printf("%s: off-line: %s\n", xdcsc->sc_dev.dv_xname, 1652 xdc_e2str(xdcsc->iopbase->errno)); 1653 xdcsc->xdc->xdc_csr = XDC_RESET; 1654 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET); 1655 if (del <= 0) 1656 panic("xdc_xdreset"); 1657 } else { 1658 xdcsc->xdc->xdc_csr = XDC_CLRRIO; /* clear RIO */ 1659 } 1660 bcopy(&tmpiopb, xdcsc->iopbase, sizeof(tmpiopb)); 1661 } 1662 1663 1664 /* 1665 * xdc_reset: reset everything: requests are marked as errors except 1666 * a polled request (which is resubmitted) 1667 */ 1668 int 1669 xdc_reset(xdcsc, quiet, blastmode, error, xdsc) 1670 struct xdc_softc *xdcsc; 1671 int quiet, blastmode, error; 1672 struct xd_softc *xdsc; 1673 1674 { 1675 int del = 0, lcv, poll = -1, retval = XD_ERR_AOK; 1676 int oldfree = xdcsc->nfree; 1677 struct xd_iorq *iorq; 1678 1679 /* soft reset hardware */ 1680 1681 if (!quiet) 1682 printf("%s: soft reset\n", xdcsc->sc_dev.dv_xname); 1683 xdcsc->xdc->xdc_csr = XDC_RESET; 1684 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET); 1685 if (del <= 0) { 1686 blastmode = XD_RSET_ALL; /* dead, flush all requests */ 1687 retval = XD_ERR_FAIL; 1688 } 1689 if (xdsc) 1690 xdc_xdreset(xdcsc, xdsc); 1691 1692 /* fix queues based on "blast-mode" */ 1693 1694 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 1695 iorq = &xdcsc->reqs[lcv]; 1696 1697 if (XD_STATE(iorq->mode) != XD_SUB_POLL && 1698 XD_STATE(iorq->mode) != XD_SUB_WAIT && 1699 XD_STATE(iorq->mode) != XD_SUB_NORM) 1700 /* is it active?
*/ 1701 continue; 1702 1703 xdcsc->nrun--; /* it isn't running any more */ 1704 if (blastmode == XD_RSET_ALL || blastmode != lcv) { 1705 /* failed */ 1706 iorq->errno = error; 1707 xdcsc->iopbase[lcv].done = xdcsc->iopbase[lcv].errs = 1; 1708 switch (XD_STATE(iorq->mode)) { 1709 case XD_SUB_NORM: 1710 iorq->buf->b_error = EIO; 1711 iorq->buf->b_flags |= B_ERROR; 1712 iorq->buf->b_resid = 1713 iorq->sectcnt * XDFM_BPS; 1714 /* Sun3: map/unmap regardless of B_PHYS */ 1715 dvma_mapout(iorq->dbufbase, 1716 iorq->buf->b_bcount); 1717 disk_unbusy(&iorq->xd->sc_dk, 1718 (iorq->buf->b_bcount - iorq->buf->b_resid)); 1719 biodone(iorq->buf); 1720 XDC_FREE(xdcsc, lcv); /* add to free list */ 1721 break; 1722 case XD_SUB_WAIT: 1723 wakeup(iorq); 1724 case XD_SUB_POLL: 1725 xdcsc->ndone++; 1726 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1727 break; 1728 } 1729 1730 } else { 1731 1732 /* resubmit, put at front of wait queue */ 1733 XDC_HWAIT(xdcsc, lcv); 1734 } 1735 } 1736 1737 /* 1738 * now, if stuff is waiting, start it. 1739 * since we just reset it should go 1740 */ 1741 xdc_start(xdcsc, XDC_MAXIOPB); 1742 1743 /* ok, we did it */ 1744 if (oldfree == 0 && xdcsc->nfree) 1745 wakeup(&xdcsc->nfree); 1746 1747 #ifdef XDC_DIAG 1748 del = xdcsc->nwait + xdcsc->nrun + xdcsc->nfree + xdcsc->ndone; 1749 if (del != XDC_MAXIOPB) 1750 printf("%s: diag: xdc_reset miscount (%d should be %d)!\n", 1751 xdcsc->sc_dev.dv_xname, del, XDC_MAXIOPB); 1752 else 1753 if (xdcsc->ndone > XDC_MAXIOPB - XDC_SUBWAITLIM) 1754 printf("%s: diag: lots of done jobs (%d)\n", 1755 xdcsc->sc_dev.dv_xname, xdcsc->ndone); 1756 #endif 1757 printf("RESET DONE\n"); 1758 return (retval); 1759 } 1760 /* 1761 * xdc_start: start all waiting buffers 1762 */ 1763 1764 int 1765 xdc_start(xdcsc, maxio) 1766 struct xdc_softc *xdcsc; 1767 int maxio; 1768 1769 { 1770 int rqno; 1771 while (maxio && xdcsc->nwait && 1772 (xdcsc->xdc->xdc_csr & XDC_ADDING) == 0) { 1773 XDC_GET_WAITER(xdcsc, rqno); /* note: rqno is an "out" 1774 * param */ 1775 if (xdc_submit_iorq(xdcsc, rqno, XD_SUB_NOQ) != XD_ERR_AOK) 1776 panic("xdc_start"); /* should never happen */ 1777 maxio--; 1778 } 1779 } 1780 /* 1781 * xdc_remove_iorq: remove "done" IOPB's. 1782 */ 1783 1784 int 1785 xdc_remove_iorq(xdcsc) 1786 struct xdc_softc *xdcsc; 1787 1788 { 1789 int errno, rqno, comm, errs; 1790 struct xdc *xdc = xdcsc->xdc; 1791 u_long addr; 1792 struct xd_iopb *iopb; 1793 struct xd_iorq *iorq; 1794 struct buf *bp; 1795 1796 if (xdc->xdc_csr & XDC_F_ERROR) { 1797 /* 1798 * FATAL ERROR: should never happen under normal use. This 1799 * error is so bad, you can't even tell which IOPB is bad, so 1800 * we dump them all. 1801 */ 1802 errno = xdc->xdc_f_err; 1803 printf("%s: fatal error 0x%02x: %s\n", xdcsc->sc_dev.dv_xname, 1804 errno, xdc_e2str(errno)); 1805 if (xdc_reset(xdcsc, 0, XD_RSET_ALL, errno, 0) != XD_ERR_AOK) { 1806 printf("%s: soft reset failed!\n", 1807 xdcsc->sc_dev.dv_xname); 1808 panic("xdc_remove_iorq: controller DEAD"); 1809 } 1810 return (XD_ERR_AOK); 1811 } 1812 1813 /* 1814 * get iopb that is done 1815 * 1816 * hmm... I used to read the address of the done IOPB off the VME 1817 * registers and calculate the rqno directly from that. that worked 1818 * until I started putting a load on the controller. when loaded, i 1819 * would get interrupts but neither the REMIOPB or F_ERROR bits would 1820 * be set, even after DELAY'ing a while! later on the timeout 1821 * routine would detect IOPBs that were marked "running" but their 1822 * "done" bit was set. 
rather than dealing directly with this 1823 * problem, it is just easier to look at all running IOPB's for the 1824 * done bit. 1825 */ 1826 if (xdc->xdc_csr & XDC_REMIOPB) { 1827 xdc->xdc_csr = XDC_CLRRIO; 1828 } 1829 1830 for (rqno = 0; rqno < XDC_MAXIOPB; rqno++) { 1831 iorq = &xdcsc->reqs[rqno]; 1832 if (iorq->mode == 0 || XD_STATE(iorq->mode) == XD_SUB_DONE) 1833 continue; /* free, or done */ 1834 iopb = &xdcsc->iopbase[rqno]; 1835 if (iopb->done == 0) 1836 continue; /* not done yet */ 1837 1838 #ifdef XDC_DEBUG 1839 { 1840 u_char *rio = (u_char *) iopb; 1841 int sz = sizeof(struct xd_iopb), lcv; 1842 printf("%s: rio #%d [", xdcsc->sc_dev.dv_xname, rqno); 1843 for (lcv = 0; lcv < sz; lcv++) 1844 printf(" %02x", rio[lcv]); 1845 printf("]\n"); 1846 } 1847 #endif /* XDC_DEBUG */ 1848 1849 xdcsc->nrun--; 1850 1851 comm = iopb->comm; 1852 errs = iopb->errs; 1853 1854 if (errs) 1855 iorq->errno = iopb->errno; 1856 else 1857 iorq->errno = 0; 1858 1859 /* handle non-fatal errors */ 1860 1861 if (errs && 1862 xdc_error(xdcsc, iorq, iopb, rqno, comm) == XD_ERR_AOK) 1863 continue; /* AOK: we resubmitted it */ 1864 1865 1866 /* this iorq is now done (hasn't been restarted or anything) */ 1867 1868 if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror) 1869 xdc_perror(iorq, iopb, 0); 1870 1871 /* now, if read/write check to make sure we got all the data 1872 * we needed. (this may not be the case if we got an error in 1873 * the middle of a multisector request). */ 1874 1875 if ((iorq->mode & XD_MODE_B144) != 0 && errs == 0 && 1876 (comm == XDCMD_RD || comm == XDCMD_WR)) { 1877 /* we just successfully processed a bad144 sector 1878 * note: if we are in bad 144 mode, the pointers have 1879 * been advanced already (see above) and are pointing 1880 * at the bad144 sector. to exit bad144 mode, we 1881 * must advance the pointers 1 sector and issue a new 1882 * request if there are still sectors left to process 1883 * 1884 */ 1885 XDC_ADVANCE(iorq, 1); /* advance 1 sector */ 1886 1887 /* exit b144 mode */ 1888 iorq->mode = iorq->mode & (~XD_MODE_B144); 1889 1890 if (iorq->sectcnt) { /* more to go! */ 1891 iorq->lasterror = iorq->errno = iopb->errno = 0; 1892 iopb->errs = iopb->done = 0; 1893 iorq->tries = 0; 1894 iopb->sectcnt = iorq->sectcnt; 1895 iopb->cylno = iorq->blockno / 1896 iorq->xd->sectpercyl; 1897 iopb->headno = 1898 (iorq->blockno / iorq->xd->nsect) % 1899 iorq->xd->nhead; 1900 iopb->sectno = iorq->blockno % iorq->xd->nsect; 1901 iopb->daddr = 1902 dvma_kvtopa((long)iorq->dbuf, BUS_VME32); 1903 XDC_HWAIT(xdcsc, rqno); 1904 xdc_start(xdcsc, 1); /* resubmit */ 1905 continue; 1906 } 1907 } 1908 /* final cleanup, totally done with this request */ 1909 1910 switch (XD_STATE(iorq->mode)) { 1911 case XD_SUB_NORM: 1912 bp = iorq->buf; 1913 if (errs) { 1914 bp->b_error = EIO; 1915 bp->b_flags |= B_ERROR; 1916 bp->b_resid = iorq->sectcnt * XDFM_BPS; 1917 } else { 1918 bp->b_resid = 0; /* done */ 1919 } 1920 /* Sun3: map/unmap regardless of B_PHYS */ 1921 dvma_mapout(iorq->dbufbase, 1922 iorq->buf->b_bcount); 1923 disk_unbusy(&iorq->xd->sc_dk, 1924 (bp->b_bcount - bp->b_resid)); 1925 XDC_FREE(xdcsc, rqno); 1926 biodone(bp); 1927 break; 1928 case XD_SUB_WAIT: 1929 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1930 xdcsc->ndone++; 1931 wakeup(iorq); 1932 break; 1933 case XD_SUB_POLL: 1934 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1935 xdcsc->ndone++; 1936 break; 1937 } 1938 } 1939 1940 return (XD_ERR_AOK); 1941 } 1942 1943 /* 1944 * xdc_perror: print error.
1945 * - if still_trying is true: we got an error, retried and got a 1946 * different error. in that case lasterror is the old error, 1947 * and errno is the new one. 1948 * - if still_trying is not true, then if we ever had an error it 1949 * is in lasterror. also, if iorq->errno == 0, then we recovered 1950 * from that error (otherwise iorq->errno == iorq->lasterror). 1951 */ 1952 void 1953 xdc_perror(iorq, iopb, still_trying) 1954 struct xd_iorq *iorq; 1955 struct xd_iopb *iopb; 1956 int still_trying; 1957 1958 { 1959 1960 int error = iorq->lasterror; 1961 1962 printf("%s", (iorq->xd) ? 1963 iorq->xd->sc_dev.dv_xname : 1964 iorq->xdc->sc_dev.dv_xname); 1965 if (iorq->buf) 1966 printf("%c: ", 'a' + DISKPART(iorq->buf->b_dev)); 1967 if (iopb->comm == XDCMD_RD || iopb->comm == XDCMD_WR) 1968 printf("%s %d/%d/%d: ", 1969 (iopb->comm == XDCMD_RD) ? "read" : "write", 1970 iopb->cylno, iopb->headno, iopb->sectno); 1971 printf("%s", xdc_e2str(error)); 1972 1973 if (still_trying) 1974 printf(" [still trying, new error=%s]", xdc_e2str(iorq->errno)); 1975 else 1976 if (iorq->errno == 0) 1977 printf(" [recovered in %d tries]", iorq->tries); 1978 1979 printf("\n"); 1980 } 1981 1982 /* 1983 * xdc_error: non-fatal error encountered... recover. 1984 * return AOK if resubmitted, return FAIL if this iopb is done 1985 */ 1986 int 1987 xdc_error(xdcsc, iorq, iopb, rqno, comm) 1988 struct xdc_softc *xdcsc; 1989 struct xd_iorq *iorq; 1990 struct xd_iopb *iopb; 1991 int rqno, comm; 1992 1993 { 1994 int errno = iorq->errno; 1995 int erract = errno & XD_ERA_MASK; 1996 int oldmode, advance, i; 1997 1998 if (erract == XD_ERA_RSET) { /* some errors require a reset */ 1999 oldmode = iorq->mode; 2000 iorq->mode = XD_SUB_DONE | (~XD_SUB_MASK & oldmode); 2001 xdcsc->ndone++; 2002 /* make xdc_start ignore us */ 2003 xdc_reset(xdcsc, 1, XD_RSET_NONE, errno, iorq->xd); 2004 iorq->mode = oldmode; 2005 xdcsc->ndone--; 2006 } 2007 /* check for read/write to a sector in bad144 table if bad: redirect 2008 * request to bad144 area */ 2009 2010 if ((comm == XDCMD_RD || comm == XDCMD_WR) && 2011 (iorq->mode & XD_MODE_B144) == 0) { 2012 advance = iorq->sectcnt - iopb->sectcnt; 2013 XDC_ADVANCE(iorq, advance); 2014 if ((i = isbad(&iorq->xd->dkb, iorq->blockno / iorq->xd->sectpercyl, 2015 (iorq->blockno / iorq->xd->nsect) % iorq->xd->nhead, 2016 iorq->blockno % iorq->xd->nsect)) != -1) { 2017 iorq->mode |= XD_MODE_B144; /* enter bad144 mode & 2018 * redirect */ 2019 iopb->errno = iopb->done = iopb->errs = 0; 2020 iopb->sectcnt = 1; 2021 iopb->cylno = (iorq->xd->ncyl + iorq->xd->acyl) - 2; 2022 /* second to last acyl */ 2023 i = iorq->xd->sectpercyl - 1 - i; /* follow bad144 2024 * standard */ 2025 iopb->headno = i / iorq->xd->nhead; 2026 iopb->sectno = i % iorq->xd->nhead; 2027 XDC_HWAIT(xdcsc, rqno); 2028 xdc_start(xdcsc, 1); /* resubmit */ 2029 return (XD_ERR_AOK); /* recovered! */ 2030 } 2031 } 2032 2033 /* 2034 * it isn't a bad144 sector, must be real error! see if we can retry 2035 * it? 2036 */ 2037 if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror) 2038 xdc_perror(iorq, iopb, 1); /* inform of error state 2039 * change */ 2040 iorq->lasterror = errno; 2041 2042 if ((erract == XD_ERA_RSET || erract == XD_ERA_HARD) 2043 && iorq->tries < XDC_MAXTRIES) { /* retry? */ 2044 iorq->tries++; 2045 iorq->errno = iopb->errno = iopb->done = iopb->errs = 0; 2046 XDC_HWAIT(xdcsc, rqno); 2047 xdc_start(xdcsc, 1); /* restart */ 2048 return (XD_ERR_AOK); /* recovered! 
/*
 * xdc_tick: make sure xd is still alive and ticking (err, kicking).
 */
void
xdc_tick(arg)
	void *arg;

{
	struct xdc_softc *xdcsc = arg;
	int lcv, s, reset = 0;
#ifdef XDC_DIAG
	int wait, run, free, done, whd;
	u_char fqc[XDC_MAXIOPB], wqc[XDC_MAXIOPB], mark[XDC_MAXIOPB];

	s = splbio();
	wait = xdcsc->nwait;
	run = xdcsc->nrun;
	free = xdcsc->nfree;
	done = xdcsc->ndone;
	whd = xdcsc->waithead;
	bcopy(xdcsc->waitq, wqc, sizeof(wqc));
	bcopy(xdcsc->freereq, fqc, sizeof(fqc));
	splx(s);
	if (wait + run + free + done != XDC_MAXIOPB) {
		printf("%s: diag: IOPB miscount (got w/f/r/d %d/%d/%d/%d, wanted %d)\n",
		    xdcsc->sc_dev.dv_xname, wait, free, run, done,
		    XDC_MAXIOPB);
		bzero(mark, sizeof(mark));
		printf("FREE: ");
		for (lcv = free; lcv > 0; lcv--) {
			printf("%d ", fqc[lcv - 1]);
			mark[fqc[lcv - 1]] = 1;
		}
		printf("\nWAIT: ");
		lcv = wait;
		while (lcv > 0) {
			printf("%d ", wqc[whd]);
			mark[wqc[whd]] = 1;
			whd = (whd + 1) % XDC_MAXIOPB;
			lcv--;
		}
		printf("\n");
		for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
			if (mark[lcv] == 0)
				printf("MARK: running %d: mode %d done %d errs %d errno 0x%x ttl %d buf %x\n",
				    lcv, xdcsc->reqs[lcv].mode,
				    xdcsc->iopbase[lcv].done,
				    xdcsc->iopbase[lcv].errs,
				    xdcsc->iopbase[lcv].errno,
				    xdcsc->reqs[lcv].ttl, xdcsc->reqs[lcv].buf);
		}
	} else if (done > XDC_MAXIOPB - XDC_SUBWAITLIM)
		printf("%s: diag: lots of done jobs (%d)\n",
		    xdcsc->sc_dev.dv_xname, done);

#endif
#ifdef XDC_DEBUG
	printf("%s: tick: csr 0x%x, w/f/r/d %d/%d/%d/%d\n",
	    xdcsc->sc_dev.dv_xname,
	    xdcsc->xdc->xdc_csr, xdcsc->nwait, xdcsc->nfree, xdcsc->nrun,
	    xdcsc->ndone);
	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
		if (xdcsc->reqs[lcv].mode)
			printf("running %d: mode %d done %d errs %d errno 0x%x\n",
			    lcv,
			    xdcsc->reqs[lcv].mode, xdcsc->iopbase[lcv].done,
			    xdcsc->iopbase[lcv].errs, xdcsc->iopbase[lcv].errno);
	}
#endif

	/* reduce ttl for each live request; if one hits zero, reset the xdc */
	s = splbio();
	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
		if (xdcsc->reqs[lcv].mode == 0 ||
		    XD_STATE(xdcsc->reqs[lcv].mode) == XD_SUB_DONE)
			continue;
		xdcsc->reqs[lcv].ttl--;
		if (xdcsc->reqs[lcv].ttl == 0)
			reset = 1;
	}
	if (reset) {
		printf("%s: watchdog timeout\n", xdcsc->sc_dev.dv_xname);
		xdc_reset(xdcsc, 0, XD_RSET_NONE, XD_ERR_FAIL, NULL);
	}
	splx(s);

	/* until next time */

	timeout(xdc_tick, xdcsc, XDC_TICKCNT);
}

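/*
 * Small worked example of the accounting check in xdc_tick above
 * (XDC_MAXIOPB is taken as 16 here purely for illustration; the real
 * value lives in the driver headers).  Every IOPB is always in exactly
 * one of the four states, so a healthy snapshot such as
 * wait/free/run/done = 3/10/2/1 sums to 16 and passes.  A snapshot of
 * 3/10/2/2 sums to 17, so the diag code dumps the free and wait
 * queues, marks the IOPBs it finds there, and prints the unmarked
 * ones as "running" to show where the count went wrong.
 */
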
2149 */ 2150 int 2151 xdc_ioctlcmd(xd, dev, xio) 2152 struct xd_softc *xd; 2153 dev_t dev; 2154 struct xd_iocmd *xio; 2155 2156 { 2157 int s, err, rqno, dummy; 2158 caddr_t dvmabuf = NULL; 2159 struct xdc_softc *xdcsc; 2160 2161 /* check sanity of requested command */ 2162 2163 switch (xio->cmd) { 2164 2165 case XDCMD_NOP: /* no op: everything should be zero */ 2166 if (xio->subfn || xio->dptr || xio->dlen || 2167 xio->block || xio->sectcnt) 2168 return (EINVAL); 2169 break; 2170 2171 case XDCMD_RD: /* read / write sectors (up to XD_IOCMD_MAXS) */ 2172 case XDCMD_WR: 2173 if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS || 2174 xio->sectcnt * XDFM_BPS != xio->dlen || xio->dptr == NULL) 2175 return (EINVAL); 2176 break; 2177 2178 case XDCMD_SK: /* seek: doesn't seem useful to export this */ 2179 return (EINVAL); 2180 2181 case XDCMD_WRP: /* write parameters */ 2182 return (EINVAL);/* not useful, except maybe drive 2183 * parameters... but drive parameters should 2184 * go via disklabel changes */ 2185 2186 case XDCMD_RDP: /* read parameters */ 2187 if (xio->subfn != XDFUN_DRV || 2188 xio->dlen || xio->block || xio->dptr) 2189 return (EINVAL); /* allow read drive params to 2190 * get hw_spt */ 2191 xio->sectcnt = xd->hw_spt; /* we already know the answer */ 2192 return (0); 2193 break; 2194 2195 case XDCMD_XRD: /* extended read/write */ 2196 case XDCMD_XWR: 2197 2198 switch (xio->subfn) { 2199 2200 case XDFUN_THD:/* track headers */ 2201 if (xio->sectcnt != xd->hw_spt || 2202 (xio->block % xd->nsect) != 0 || 2203 xio->dlen != XD_IOCMD_HSZ * xd->hw_spt || 2204 xio->dptr == NULL) 2205 return (EINVAL); 2206 xio->sectcnt = 0; 2207 break; 2208 2209 case XDFUN_FMT:/* NOTE: also XDFUN_VFY */ 2210 if (xio->cmd == XDCMD_XRD) 2211 return (EINVAL); /* no XDFUN_VFY */ 2212 if (xio->sectcnt || xio->dlen || 2213 (xio->block % xd->nsect) != 0 || xio->dptr) 2214 return (EINVAL); 2215 break; 2216 2217 case XDFUN_HDR:/* header, header verify, data, data ECC */ 2218 return (EINVAL); /* not yet */ 2219 2220 case XDFUN_DM: /* defect map */ 2221 case XDFUN_DMX:/* defect map (alternate location) */ 2222 if (xio->sectcnt || xio->dlen != XD_IOCMD_DMSZ || 2223 (xio->block % xd->nsect) != 0 || xio->dptr == NULL) 2224 return (EINVAL); 2225 break; 2226 2227 default: 2228 return (EINVAL); 2229 } 2230 break; 2231 2232 case XDCMD_TST: /* diagnostics */ 2233 return (EINVAL); 2234 2235 default: 2236 return (EINVAL);/* ??? */ 2237 } 2238 2239 /* create DVMA buffer for request if needed */ 2240 2241 if (xio->dlen) { 2242 dvmabuf = dvma_malloc(xio->dlen); 2243 if (xio->cmd == XDCMD_WR || xio->cmd == XDCMD_XWR) { 2244 if (err = copyin(xio->dptr, dvmabuf, xio->dlen)) { 2245 dvma_free(dvmabuf, xio->dlen); 2246 return (err); 2247 } 2248 } 2249 } 2250 /* do it! 
/*
 * xdc_e2str: convert error code number into an error string
 */
char *
xdc_e2str(no)
	int no;
{
	switch (no) {
	case XD_ERR_FAIL:
		return ("Software fatal error");
	case XD_ERR_AOK:
		return ("Successful completion");
	case XD_ERR_ICYL:
		return ("Illegal cylinder address");
	case XD_ERR_IHD:
		return ("Illegal head address");
	case XD_ERR_ISEC:
		return ("Illegal sector address");
	case XD_ERR_CZER:
		return ("Count zero");
	case XD_ERR_UIMP:
		return ("Unimplemented command");
	case XD_ERR_IF1:
		return ("Illegal field length 1");
	case XD_ERR_IF2:
		return ("Illegal field length 2");
	case XD_ERR_IF3:
		return ("Illegal field length 3");
	case XD_ERR_IF4:
		return ("Illegal field length 4");
	case XD_ERR_IF5:
		return ("Illegal field length 5");
	case XD_ERR_IF6:
		return ("Illegal field length 6");
	case XD_ERR_IF7:
		return ("Illegal field length 7");
	case XD_ERR_ISG:
		return ("Illegal scatter/gather length");
	case XD_ERR_ISPT:
		return ("Not enough sectors per track");
	case XD_ERR_ALGN:
		return ("Next IOPB address alignment error");
	case XD_ERR_SGAL:
		return ("Scatter/gather address alignment error");
	case XD_ERR_SGEC:
		return ("Scatter/gather with auto-ECC");
	case XD_ERR_SECC:
		return ("Soft ECC corrected");
	case XD_ERR_SIGN:
		return ("ECC ignored");
	case XD_ERR_ASEK:
		return ("Auto-seek retry recovered");
	case XD_ERR_RTRY:
		return ("Soft retry recovered");
	case XD_ERR_HECC:
		return ("Hard data ECC");
	case XD_ERR_NHDR:
		return ("Header not found");
	case XD_ERR_NRDY:
		return ("Drive not ready");
	case XD_ERR_TOUT:
		return ("Operation timeout");
	case XD_ERR_VTIM:
		return ("VMEDMA timeout");
	case XD_ERR_DSEQ:
		return ("Disk sequencer error");
	case XD_ERR_HDEC:
		return ("Header ECC error");
	case XD_ERR_RVFY:
		return ("Read verify");
	case XD_ERR_VFER:
		return ("Fatal VMEDMA error");
	case XD_ERR_VBUS:
		return ("VMEbus error");
	case XD_ERR_DFLT:
		return ("Drive faulted");
	case XD_ERR_HECY:
		return ("Header error/cylinder");
	case XD_ERR_HEHD:
		return ("Header error/head");
	case XD_ERR_NOCY:
		return ("Drive not on-cylinder");
	case XD_ERR_SEEK:
		return ("Seek error");
	case XD_ERR_ILSS:
		return ("Illegal sector size");
	case XD_ERR_SEC:
		return ("Soft ECC");
	case XD_ERR_WPER:
		return ("Write-protect error");
	case XD_ERR_IRAM:
		return ("IRAM self test failure");
	case XD_ERR_MT3:
		return ("Maintenance test 3 failure (DSKCEL RAM)");
	case XD_ERR_MT4:
		return ("Maintenance test 4 failure (header shift reg)");
	case XD_ERR_MT5:
		return ("Maintenance test 5 failure (VMEDMA regs)");
	case XD_ERR_MT6:
		return ("Maintenance test 6 failure (REGCEL chip)");
	case XD_ERR_MT7:
		return ("Maintenance test 7 failure (buffer parity)");
	case XD_ERR_MT8:
		return ("Maintenance test 8 failure (disk FIFO)");
	case XD_ERR_IOCK:
		return ("IOPB checksum miscompare");
	case XD_ERR_IODM:
		return ("IOPB DMA fatal");
	case XD_ERR_IOAL:
		return ("IOPB address alignment error");
	case XD_ERR_FIRM:
		return ("Firmware error");
	case XD_ERR_MMOD:
		return ("Illegal maintenance mode test number");
	case XD_ERR_ACFL:
		return ("ACFAIL asserted");
	default:
		return ("Unknown error");
	}
}