1 /* $NetBSD: xd.c,v 1.12 1997/02/28 21:23:06 gwr Exp $ */ 2 3 /* 4 * 5 * Copyright (c) 1995 Charles D. Cranor 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Charles D. Cranor. 19 * 4. The name of the author may not be used to endorse or promote products 20 * derived from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * 36 * x d . c x y l o g i c s 7 5 3 / 7 0 5 3 v m e / s m d d r i v e r 37 * 38 * author: Chuck Cranor <chuck@ccrc.wustl.edu> 39 * id: $NetBSD: xd.c,v 1.12 1997/02/28 21:23:06 gwr Exp $ 40 * started: 27-Feb-95 41 * references: [1] Xylogics Model 753 User's Manual 42 * part number: 166-753-001, Revision B, May 21, 1988. 43 * "Your Partner For Performance" 44 * [2] other NetBSD disk device drivers 45 * 46 * Special thanks go to Scott E. Campbell of Xylogics, Inc. for taking 47 * the time to answer some of my questions about the 753/7053. 48 * 49 * note: the 753 and the 7053 are programmed the same way, but are 50 * different sizes. the 753 is a 6U VME card, while the 7053 is a 9U 51 * VME card (found in many VME based suns). 
52 */ 53 54 #undef XDC_DEBUG /* full debug */ 55 #define XDC_DIAG /* extra sanity checks */ 56 #if defined(DIAGNOSTIC) && !defined(XDC_DIAG) 57 #define XDC_DIAG /* link in with master DIAG option */ 58 #endif 59 60 #include <sys/param.h> 61 #include <sys/proc.h> 62 #include <sys/systm.h> 63 #include <sys/kernel.h> 64 #include <sys/file.h> 65 #include <sys/stat.h> 66 #include <sys/ioctl.h> 67 #include <sys/buf.h> 68 #include <sys/uio.h> 69 #include <sys/malloc.h> 70 #include <sys/device.h> 71 #include <sys/disklabel.h> 72 #include <sys/disk.h> 73 #include <sys/syslog.h> 74 #include <sys/dkbad.h> 75 #include <sys/conf.h> 76 77 #include <vm/vm.h> 78 #include <vm/vm_kern.h> 79 80 #include <machine/autoconf.h> 81 #include <machine/sun_disklabel.h> 82 #include <machine/dvma.h> 83 84 #include <sun3/dev/xdreg.h> 85 #include <sun3/dev/xdvar.h> 86 #include <sun3/dev/xio.h> 87 88 /* 89 * macros 90 */ 91 92 /* 93 * XDC_TWAIT: add iorq "N" to tail of SC's wait queue 94 */ 95 #define XDC_TWAIT(SC, N) { \ 96 (SC)->waitq[(SC)->waitend] = (N); \ 97 (SC)->waitend = ((SC)->waitend + 1) % XDC_MAXIOPB; \ 98 (SC)->nwait++; \ 99 } 100 101 /* 102 * XDC_HWAIT: add iorq "N" to head of SC's wait queue 103 */ 104 #define XDC_HWAIT(SC, N) { \ 105 (SC)->waithead = ((SC)->waithead == 0) ? \ 106 (XDC_MAXIOPB - 1) : ((SC)->waithead - 1); \ 107 (SC)->waitq[(SC)->waithead] = (N); \ 108 (SC)->nwait++; \ 109 } 110 111 /* 112 * XDC_GET_WAITER: gets the first request waiting on the waitq 113 * and removes it (so it can be submitted) 114 */ 115 #define XDC_GET_WAITER(XDCSC, RQ) { \ 116 (RQ) = (XDCSC)->waitq[(XDCSC)->waithead]; \ 117 (XDCSC)->waithead = ((XDCSC)->waithead + 1) % XDC_MAXIOPB; \ 118 xdcsc->nwait--; \ 119 } 120 121 /* 122 * XDC_FREE: add iorq "N" to SC's free list 123 */ 124 #define XDC_FREE(SC, N) { \ 125 (SC)->freereq[(SC)->nfree++] = (N); \ 126 (SC)->reqs[N].mode = 0; \ 127 if ((SC)->nfree == 1) wakeup(&(SC)->nfree); \ 128 } 129 130 131 /* 132 * XDC_RQALLOC: allocate an iorq off the free list (assume nfree > 0). 133 */ 134 #define XDC_RQALLOC(XDCSC) (XDCSC)->freereq[--((XDCSC)->nfree)] 135 136 /* 137 * XDC_GO: start iopb ADDR (DVMA addr in a u_long) on XDC 138 */ 139 #define XDC_GO(XDC, ADDR) { \ 140 (XDC)->xdc_iopbaddr0 = ((ADDR) & 0xff); \ 141 (ADDR) = ((ADDR) >> 8); \ 142 (XDC)->xdc_iopbaddr1 = ((ADDR) & 0xff); \ 143 (ADDR) = ((ADDR) >> 8); \ 144 (XDC)->xdc_iopbaddr2 = ((ADDR) & 0xff); \ 145 (ADDR) = ((ADDR) >> 8); \ 146 (XDC)->xdc_iopbaddr3 = (ADDR); \ 147 (XDC)->xdc_iopbamod = XDC_ADDRMOD; \ 148 (XDC)->xdc_csr = XDC_ADDIOPB; /* go! */ \ 149 } 150 151 /* 152 * XDC_WAIT: wait for XDC's csr "BITS" to come on in "TIME". 153 * LCV is a counter. If it goes to zero then we timed out. 
154 */ 155 #define XDC_WAIT(XDC, LCV, TIME, BITS) { \ 156 (LCV) = (TIME); \ 157 while ((LCV) > 0) { \ 158 if ((XDC)->xdc_csr & (BITS)) break; \ 159 (LCV) = (LCV) - 1; \ 160 DELAY(1); \ 161 } \ 162 } 163 164 /* 165 * XDC_DONE: don't need IORQ, get error code and free (done after xdc_cmd) 166 */ 167 #define XDC_DONE(SC,RQ,ER) { \ 168 if ((RQ) == XD_ERR_FAIL) { \ 169 (ER) = (RQ); \ 170 } else { \ 171 if ((SC)->ndone-- == XDC_SUBWAITLIM) \ 172 wakeup(&(SC)->ndone); \ 173 (ER) = (SC)->reqs[RQ].errno; \ 174 XDC_FREE((SC), (RQ)); \ 175 } \ 176 } 177 178 /* 179 * XDC_ADVANCE: advance iorq's pointers by a number of sectors 180 */ 181 #define XDC_ADVANCE(IORQ, N) { \ 182 if (N) { \ 183 (IORQ)->sectcnt -= (N); \ 184 (IORQ)->blockno += (N); \ 185 (IORQ)->dbuf += ((N)*XDFM_BPS); \ 186 } \ 187 } 188 189 /* 190 * note - addresses you can sleep on: 191 * [1] & of xd_softc's "state" (waiting for a chance to attach a drive) 192 * [2] & of xdc_softc's "nfree" (waiting for a free iorq/iopb) 193 * [3] & of xdc_softc's "ndone" (waiting for number of done iorq/iopb's 194 * to drop below XDC_SUBWAITLIM) 195 * [4] & an iorq (waiting for an XD_SUB_WAIT iorq to finish) 196 */ 197 198 199 /* 200 * function prototypes 201 * "xdc_*" functions are internal, all others are external interfaces 202 */ 203 204 /* internals */ 205 int xdc_cmd __P((struct xdc_softc *, int, int, int, int, int, char *, int)); 206 char *xdc_e2str __P((int)); 207 int xdc_error __P((struct xdc_softc *, struct xd_iorq *, 208 struct xd_iopb *, int, int)); 209 int xdc_ioctlcmd __P((struct xd_softc *, dev_t dev, struct xd_iocmd *)); 210 void xdc_perror __P((struct xd_iorq *, struct xd_iopb *, int)); 211 int xdc_piodriver __P((struct xdc_softc *, int, int)); 212 int xdc_remove_iorq __P((struct xdc_softc *)); 213 int xdc_reset __P((struct xdc_softc *, int, int, int, struct xd_softc *)); 214 inline void xdc_rqinit __P((struct xd_iorq *, struct xdc_softc *, 215 struct xd_softc *, int, u_long, int, 216 caddr_t, struct buf *)); 217 void xdc_rqtopb __P((struct xd_iorq *, struct xd_iopb *, int, int)); 218 int xdc_start __P((struct xdc_softc *, int)); 219 int xdc_startbuf __P((struct xdc_softc *, struct xd_softc *, struct buf *)); 220 int xdc_submit_iorq __P((struct xdc_softc *, int, int)); 221 void xdc_tick __P((void *)); 222 int xdc_xdreset __P((struct xdc_softc *, struct xd_softc *)); 223 224 /* machine interrupt hook */ 225 int xdcintr __P((void *)); 226 227 /* bdevsw, cdevsw */ 228 bdev_decl(xd); 229 cdev_decl(xd); 230 231 /* autoconf */ 232 int xdcmatch __P((struct device *, struct cfdata *, void *)); 233 void xdcattach __P((struct device *, struct device *, void *)); 234 int xdmatch __P((struct device *, struct cfdata *, void *)); 235 void xdattach __P((struct device *, struct device *, void *)); 236 int xdc_print __P((void *, char *name)); 237 238 static void xddummystrat __P((struct buf *)); 239 int xdgetdisklabel __P((struct xd_softc *, void *)); 240 241 /* 242 * cfdrivers: device driver interface to autoconfig 243 */ 244 245 struct cfattach xdc_ca = { 246 sizeof(struct xdc_softc), xdcmatch, xdcattach 247 }; 248 249 struct cfdriver xdc_cd = { 250 NULL, "xdc", DV_DULL 251 }; 252 253 struct cfattach xd_ca = { 254 sizeof(struct xd_softc), xdmatch, xdattach 255 }; 256 257 struct cfdriver xd_cd = { 258 NULL, "xd", DV_DISK 259 }; 260 261 struct xdc_attach_args { /* this is the "aux" args to xdattach */ 262 int driveno; /* unit number */ 263 char *dvmabuf; /* scratch buffer for reading disk label */ 264 int fullmode; /* submit mode */ 265 int booting; /* 
are we booting or not? */ 266 }; 267 268 /* 269 * dkdriver 270 */ 271 272 struct dkdriver xddkdriver = {xdstrategy}; 273 274 /* 275 * start: disk label fix code (XXX) 276 */ 277 278 static void *xd_labeldata; 279 280 static void 281 xddummystrat(bp) 282 struct buf *bp; 283 { 284 if (bp->b_bcount != XDFM_BPS) 285 panic("xddummystrat"); 286 bcopy(xd_labeldata, bp->b_un.b_addr, XDFM_BPS); 287 bp->b_flags |= B_DONE; 288 bp->b_flags &= ~B_BUSY; 289 } 290 291 int 292 xdgetdisklabel(xd, b) 293 struct xd_softc *xd; 294 void *b; 295 { 296 char *err; 297 struct sun_disklabel *sdl; 298 299 /* We already have the label data in `b'; setup for dummy strategy */ 300 xd_labeldata = b; 301 302 /* Required parameter for readdisklabel() */ 303 xd->sc_dk.dk_label->d_secsize = XDFM_BPS; 304 305 err = readdisklabel(MAKEDISKDEV(0, xd->sc_dev.dv_unit, RAW_PART), 306 xddummystrat, 307 xd->sc_dk.dk_label, xd->sc_dk.dk_cpulabel); 308 if (err) { 309 printf("%s: %s\n", xd->sc_dev.dv_xname, err); 310 return(XD_ERR_FAIL); 311 } 312 313 /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */ 314 sdl = (struct sun_disklabel *)xd->sc_dk.dk_cpulabel->cd_block; 315 if (sdl->sl_magic == SUN_DKMAGIC) 316 xd->pcyl = sdl->sl_pcyl; 317 else { 318 printf("%s: WARNING: no `pcyl' in disk label.\n", 319 xd->sc_dev.dv_xname); 320 xd->pcyl = xd->sc_dk.dk_label->d_ncylinders + 321 xd->sc_dk.dk_label->d_acylinders; 322 printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n", 323 xd->sc_dev.dv_xname, xd->pcyl); 324 } 325 326 xd->ncyl = xd->sc_dk.dk_label->d_ncylinders; 327 xd->acyl = xd->sc_dk.dk_label->d_acylinders; 328 xd->nhead = xd->sc_dk.dk_label->d_ntracks; 329 xd->nsect = xd->sc_dk.dk_label->d_nsectors; 330 xd->sectpercyl = xd->nhead * xd->nsect; 331 xd->sc_dk.dk_label->d_secsize = XDFM_BPS; /* not handled by 332 * sun->bsd */ 333 return(XD_ERR_AOK); 334 } 335 336 /* 337 * end: disk label fix code (XXX) 338 */ 339 340 /* 341 * a u t o c o n f i g f u n c t i o n s 342 */ 343 344 /* 345 * xdcmatch: determine if xdc is present or not. we do a 346 * soft reset to detect the xdc. 347 */ 348 349 int xdcmatch(parent, cf, aux) 350 struct device *parent; 351 struct cfdata *cf; 352 void *aux; 353 { 354 struct confargs *ca = aux; 355 int x; 356 357 if (ca->ca_bustype != BUS_VME32) 358 return (0); 359 360 /* Default interrupt priority always splbio==2 */ 361 if (ca->ca_intpri == -1) 362 ca->ca_intpri = 2; 363 364 x = bus_peek(ca->ca_bustype, ca->ca_paddr + 11, 1); 365 if (x == -1) 366 return (0); 367 368 return (1); 369 } 370 371 /* 372 * xdcattach: attach controller 373 */ 374 void 375 xdcattach(parent, self, aux) 376 struct device *parent, *self; 377 void *aux; 378 379 { 380 struct xdc_softc *xdc = (void *) self; 381 struct confargs *ca = aux; 382 struct xdc_attach_args xa; 383 int lcv, rqno, err, pri; 384 struct xd_iopb_ctrl *ctl; 385 386 /* get addressing and intr level stuff from autoconfig and load it 387 * into our xdc_softc. */ 388 389 xdc->xdc = (struct xdc *) 390 bus_mapin(ca->ca_bustype, ca->ca_paddr, sizeof(struct xdc)); 391 xdc->ipl = ca->ca_intpri; 392 xdc->vector = ca->ca_intvec; 393 394 for (lcv = 0; lcv < XDC_MAXDEV; lcv++) 395 xdc->sc_drives[lcv] = (struct xd_softc *) 0; 396 397 /* allocate and zero buffers 398 * 399 * note: we simplify the code by allocating the max number of iopbs and 400 * iorq's up front. thus, we avoid linked lists and the costs 401 * associated with them in exchange for wasting a little memory. 
*/ 402 403 xdc->iopbase = (struct xd_iopb *) 404 dvma_malloc(XDC_MAXIOPB * sizeof(struct xd_iopb)); /* KVA */ 405 bzero(xdc->iopbase, XDC_MAXIOPB * sizeof(struct xd_iopb)); 406 xdc->dvmaiopb = (struct xd_iopb *) 407 dvma_kvtopa((long) xdc->iopbase, BUS_VME32); 408 xdc->reqs = (struct xd_iorq *) 409 malloc(XDC_MAXIOPB * sizeof(struct xd_iorq), M_DEVBUF, M_NOWAIT); 410 if (xdc->reqs == NULL) 411 panic("xdc malloc"); 412 bzero(xdc->reqs, XDC_MAXIOPB * sizeof(struct xd_iorq)); 413 414 /* init free list, iorq to iopb pointers, and non-zero fields in the 415 * iopb which never change. */ 416 417 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 418 xdc->reqs[lcv].iopb = &xdc->iopbase[lcv]; 419 xdc->freereq[lcv] = lcv; 420 xdc->iopbase[lcv].fixd = 1; /* always the same */ 421 xdc->iopbase[lcv].naddrmod = XDC_ADDRMOD; /* always the same */ 422 xdc->iopbase[lcv].intr_vec = xdc->vector; /* always the same */ 423 } 424 xdc->nfree = XDC_MAXIOPB; 425 xdc->nrun = 0; 426 xdc->waithead = xdc->waitend = xdc->nwait = 0; 427 xdc->ndone = 0; 428 429 /* init queue of waiting bufs */ 430 431 xdc->sc_wq.b_active = 0; 432 xdc->sc_wq.b_actf = 0; 433 xdc->sc_wq.b_actb = &xdc->sc_wq.b_actf; 434 435 /* 436 * section 7 of the manual tells us how to init the controller: 437 * - read controller parameters (6/0) 438 * - write controller parameters (5/0) 439 */ 440 441 /* read controller parameters and insure we have a 753/7053 */ 442 443 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL); 444 if (rqno == XD_ERR_FAIL) { 445 printf(": couldn't read controller params\n"); 446 return; /* shouldn't ever happen */ 447 } 448 ctl = (struct xd_iopb_ctrl *) & xdc->iopbase[rqno]; 449 if (ctl->ctype != XDCT_753) { 450 if (xdc->reqs[rqno].errno) 451 printf(": %s: ", xdc_e2str(xdc->reqs[rqno].errno)); 452 printf(": doesn't identify as a 753/7053\n"); 453 XDC_DONE(xdc, rqno, err); 454 return; 455 } 456 printf(": Xylogics 753/7053, PROM=%x.%02x.%02x\n", 457 ctl->eprom_partno, ctl->eprom_lvl, ctl->eprom_rev); 458 XDC_DONE(xdc, rqno, err); 459 460 /* now write controller parameters (xdc_cmd sets all params for us) */ 461 462 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL); 463 XDC_DONE(xdc, rqno, err); 464 if (err) { 465 printf("%s: controller config error: %s\n", 466 xdc->sc_dev.dv_xname, xdc_e2str(err)); 467 return; 468 } 469 470 /* link in interrupt with higher level software */ 471 isr_add_vectored(xdcintr, (void *)xdc, 472 ca->ca_intpri, ca->ca_intvec); 473 evcnt_attach(&xdc->sc_dev, "intr", &xdc->sc_intrcnt); 474 475 /* now we must look for disks using autoconfig */ 476 xa.dvmabuf = (char *) dvma_malloc(XDFM_BPS); 477 xa.fullmode = XD_SUB_POLL; 478 xa.booting = 1; 479 480 for (xa.driveno = 0; xa.driveno < XDC_MAXDEV; xa.driveno++) 481 (void) config_found(self, (void *) &xa, xdc_print); 482 483 dvma_free(xa.dvmabuf, XDFM_BPS); 484 485 /* start the watchdog clock */ 486 timeout(xdc_tick, xdc, XDC_TICKCNT); 487 } 488 489 int 490 xdc_print(aux, name) 491 void *aux; 492 char *name; 493 { 494 struct xdc_attach_args *xa = aux; 495 496 if (name != NULL) 497 printf("%s: ", name); 498 499 if (xa->driveno != -1) 500 printf(" drive %d", xa->driveno); 501 502 return UNCONF; 503 } 504 505 /* 506 * xdmatch: probe for disk. 507 * 508 * note: we almost always say disk is present. this allows us to 509 * spin up and configure a disk after the system is booted (we can 510 * call xdattach!). 
511 */ 512 int 513 xdmatch(parent, cf, aux) 514 struct device *parent; 515 struct cfdata *cf; 516 void *aux; 517 518 { 519 struct xdc_softc *xdc = (void *) parent; 520 struct xdc_attach_args *xa = aux; 521 522 /* looking for autoconf wildcard or exact match */ 523 524 if (cf->cf_loc[0] != -1 && cf->cf_loc[0] != xa->driveno) 525 return 0; 526 527 return 1; 528 529 } 530 531 /* 532 * xdattach: attach a disk. this can be called from autoconf and also 533 * from xdopen/xdstrategy. 534 */ 535 void 536 xdattach(parent, self, aux) 537 struct device *parent, *self; 538 void *aux; 539 540 { 541 struct xd_softc *xd = (void *) self; 542 struct xdc_softc *xdc = (void *) parent; 543 struct xdc_attach_args *xa = aux; 544 int rqno, err, spt, mb, blk, lcv, fmode, s, newstate; 545 struct xd_iopb_drive *driopb; 546 struct dkbad *dkb; 547 struct bootpath *bp; 548 549 /* 550 * Always re-initialize the disk structure. We want statistics 551 * to start with a clean slate. 552 */ 553 bzero(&xd->sc_dk, sizeof(xd->sc_dk)); 554 xd->sc_dk.dk_driver = &xddkdriver; 555 xd->sc_dk.dk_name = xd->sc_dev.dv_xname; 556 557 /* if booting, init the xd_softc */ 558 559 if (xa->booting) { 560 xd->state = XD_DRIVE_UNKNOWN; /* to start */ 561 xd->flags = 0; 562 xd->parent = xdc; 563 } 564 xd->xd_drive = xa->driveno; 565 fmode = xa->fullmode; 566 xdc->sc_drives[xa->driveno] = xd; 567 568 /* if not booting, make sure we are the only process in the attach for 569 * this drive. if locked out, sleep on it. */ 570 571 if (!xa->booting) { 572 s = splbio(); 573 while (xd->state == XD_DRIVE_ATTACHING) { 574 if (tsleep(&xd->state, PRIBIO, "xdattach", 0)) { 575 splx(s); 576 return; 577 } 578 } 579 printf("%s at %s", 580 xd->sc_dev.dv_xname, 581 xd->parent->sc_dev.dv_xname); 582 } 583 /* we now have control */ 584 585 xd->state = XD_DRIVE_ATTACHING; 586 newstate = XD_DRIVE_UNKNOWN; 587 588 /* first try and reset the drive */ 589 590 rqno = xdc_cmd(xdc, XDCMD_RST, 0, xd->xd_drive, 0, 0, 0, fmode); 591 XDC_DONE(xdc, rqno, err); 592 if (err == XD_ERR_NRDY) { 593 printf(" drive %d: off-line\n", xa->driveno); 594 goto done; 595 } 596 if (err) { 597 printf(": ERROR 0x%02x (%s)\n", err, xdc_e2str(err)); 598 goto done; 599 } 600 printf(" drive %d: ready\n", xa->driveno); 601 602 /* now set format parameters */ 603 604 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_FMT, xd->xd_drive, 0, 0, 0, fmode); 605 XDC_DONE(xdc, rqno, err); 606 if (err) { 607 printf("%s: write format parameters failed: %s\n", 608 xd->sc_dev.dv_xname, xdc_e2str(err)); 609 goto done; 610 } 611 612 /* get drive parameters */ 613 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode); 614 if (rqno != XD_ERR_FAIL) { 615 driopb = (struct xd_iopb_drive *) & xdc->iopbase[rqno]; 616 spt = driopb->sectpertrk; 617 } 618 XDC_DONE(xdc, rqno, err); 619 if (err) { 620 printf("%s: read drive parameters failed: %s\n", 621 xd->sc_dev.dv_xname, xdc_e2str(err)); 622 goto done; 623 } 624 625 /* 626 * now set drive parameters (to semi-bogus values) so we can read the 627 * disk label. 
628 */ 629 xd->pcyl = xd->ncyl = 1; 630 xd->acyl = 0; 631 xd->nhead = 1; 632 xd->nsect = 1; 633 xd->sectpercyl = 1; 634 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */ 635 xd->dkb.bt_bad[lcv].bt_cyl = xd->dkb.bt_bad[lcv].bt_trksec = 0xffff; 636 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode); 637 XDC_DONE(xdc, rqno, err); 638 if (err) { 639 printf("%s: write drive parameters failed: %s\n", 640 xd->sc_dev.dv_xname, xdc_e2str(err)); 641 goto done; 642 } 643 644 /* read disk label */ 645 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, 0, 1, 646 xa->dvmabuf, fmode); 647 XDC_DONE(xdc, rqno, err); 648 if (err) { 649 printf("%s: reading disk label failed: %s\n", 650 xd->sc_dev.dv_xname, xdc_e2str(err)); 651 goto done; 652 } 653 newstate = XD_DRIVE_NOLABEL; 654 655 xd->hw_spt = spt; 656 /* Attach the disk: must be before getdisklabel to malloc label */ 657 disk_attach(&xd->sc_dk); 658 659 if (xdgetdisklabel(xd, xa->dvmabuf) != XD_ERR_AOK) 660 goto done; 661 662 /* inform the user of what is up */ 663 printf("%s: <%s>, pcyl %d, hw_spt %d\n", 664 xd->sc_dev.dv_xname, 665 xa->dvmabuf, xd->pcyl, spt); 666 mb = xd->ncyl * (xd->nhead * xd->nsect) / (1048576 / XDFM_BPS); 667 printf("%s: %dMB, %d cyl, %d head, %d sec, %d bytes/sec\n", 668 xd->sc_dev.dv_xname, mb, 669 xd->ncyl, xd->nhead, xd->nsect, XDFM_BPS); 670 671 /* now set the real drive parameters! */ 672 673 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode); 674 XDC_DONE(xdc, rqno, err); 675 if (err) { 676 printf("%s: write real drive parameters failed: %s\n", 677 xd->sc_dev.dv_xname, xdc_e2str(err)); 678 goto done; 679 } 680 newstate = XD_DRIVE_ONLINE; 681 682 /* 683 * read bad144 table. this table resides on the first sector of the 684 * last track of the disk (i.e. second cyl of "acyl" area). 685 */ 686 687 blk = (xd->ncyl + xd->acyl - 1) * (xd->nhead * xd->nsect) + /* last cyl */ 688 (xd->nhead - 1) * xd->nsect; /* last head */ 689 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, blk, 1, xa->dvmabuf, fmode); 690 XDC_DONE(xdc, rqno, err); 691 if (err) { 692 printf("%s: reading bad144 failed: %s\n", 693 xd->sc_dev.dv_xname, xdc_e2str(err)); 694 goto done; 695 } 696 697 /* check dkbad for sanity */ 698 dkb = (struct dkbad *) xa->dvmabuf; 699 for (lcv = 0; lcv < 126; lcv++) { 700 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff || 701 dkb->bt_bad[lcv].bt_cyl == 0) && 702 dkb->bt_bad[lcv].bt_trksec == 0xffff) 703 continue; /* blank */ 704 if (dkb->bt_bad[lcv].bt_cyl >= xd->ncyl) 705 break; 706 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xd->nhead) 707 break; 708 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xd->nsect) 709 break; 710 } 711 if (lcv != 126) { 712 printf("%s: warning: invalid bad144 sector!\n", 713 xd->sc_dev.dv_xname); 714 } else { 715 bcopy(xa->dvmabuf, &xd->dkb, XDFM_BPS); 716 } 717 718 /* XXX - Where is this and what does it do? 
-gwr */ 719 dk_establish(&xd->sc_dk, &xd->sc_dev); 720 721 done: 722 xd->state = newstate; 723 if (!xa->booting) { 724 wakeup(&xd->state); 725 splx(s); 726 } 727 } 728 729 /* 730 * end of autoconfig functions 731 */ 732 733 /* 734 * { b , c } d e v s w f u n c t i o n s 735 */ 736 737 /* 738 * xdclose: close device 739 */ 740 int 741 xdclose(dev, flag, fmt) 742 dev_t dev; 743 int flag, fmt; 744 745 { 746 struct xd_softc *xd = xd_cd.cd_devs[DISKUNIT(dev)]; 747 int part = DISKPART(dev); 748 749 /* clear mask bits */ 750 751 switch (fmt) { 752 case S_IFCHR: 753 xd->sc_dk.dk_copenmask &= ~(1 << part); 754 break; 755 case S_IFBLK: 756 xd->sc_dk.dk_bopenmask &= ~(1 << part); 757 break; 758 } 759 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 760 761 return 0; 762 } 763 764 /* 765 * xddump: crash dump system 766 */ 767 int 768 xddump(dev) 769 dev_t dev; 770 771 { 772 int unit, part; 773 struct xd_softc *xd; 774 775 unit = DISKUNIT(dev); 776 if (unit >= xd_cd.cd_ndevs) 777 return ENXIO; 778 part = DISKPART(dev); 779 780 xd = xd_cd.cd_devs[unit]; 781 782 printf("%s%c: crash dump not supported (yet)\n", 783 xd->sc_dev.dv_xname, 'a' + part); 784 785 return ENXIO; 786 787 /* outline: globals: "dumplo" == sector number of partition to start 788 * dump at (convert to physical sector with partition table) 789 * "dumpsize" == size of dump in clicks "physmem" == size of physical 790 * memory (clicks, ctob() to get bytes) (normal case: dumpsize == 791 * physmem) 792 * 793 * dump a copy of physical memory to the dump device starting at sector 794 * "dumplo" in the swap partition (make sure > 0). map in pages as 795 * we go. use polled I/O. 796 * 797 * XXX how to handle NON_CONTIG? */ 798 799 } 800 801 /* 802 * xdioctl: ioctls on XD drives. based on ioctl's of other netbsd disks. 
803 */ 804 int 805 xdioctl(dev, command, addr, flag, p) 806 dev_t dev; 807 u_long command; 808 caddr_t addr; 809 int flag; 810 struct proc *p; 811 812 { 813 struct xd_softc *xd; 814 struct xd_iocmd *xio; 815 int error, s, unit; 816 817 unit = DISKUNIT(dev); 818 819 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL) 820 return (ENXIO); 821 822 /* switch on ioctl type */ 823 824 switch (command) { 825 case DIOCSBAD: /* set bad144 info */ 826 if ((flag & FWRITE) == 0) 827 return EBADF; 828 s = splbio(); 829 bcopy(addr, &xd->dkb, sizeof(xd->dkb)); 830 splx(s); 831 return 0; 832 833 case DIOCGDINFO: /* get disk label */ 834 bcopy(xd->sc_dk.dk_label, addr, sizeof(struct disklabel)); 835 return 0; 836 837 case DIOCGPART: /* get partition info */ 838 ((struct partinfo *) addr)->disklab = xd->sc_dk.dk_label; 839 ((struct partinfo *) addr)->part = 840 &xd->sc_dk.dk_label->d_partitions[DISKPART(dev)]; 841 return 0; 842 843 case DIOCSDINFO: /* set disk label */ 844 if ((flag & FWRITE) == 0) 845 return EBADF; 846 error = setdisklabel(xd->sc_dk.dk_label, 847 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0, 848 xd->sc_dk.dk_cpulabel); 849 if (error == 0) { 850 if (xd->state == XD_DRIVE_NOLABEL) 851 xd->state = XD_DRIVE_ONLINE; 852 } 853 return error; 854 855 case DIOCWLABEL: /* change write status of disk label */ 856 if ((flag & FWRITE) == 0) 857 return EBADF; 858 if (*(int *) addr) 859 xd->flags |= XD_WLABEL; 860 else 861 xd->flags &= ~XD_WLABEL; 862 return 0; 863 864 case DIOCWDINFO: /* write disk label */ 865 if ((flag & FWRITE) == 0) 866 return EBADF; 867 error = setdisklabel(xd->sc_dk.dk_label, 868 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0, 869 xd->sc_dk.dk_cpulabel); 870 if (error == 0) { 871 if (xd->state == XD_DRIVE_NOLABEL) 872 xd->state = XD_DRIVE_ONLINE; 873 874 /* Simulate opening partition 0 so write succeeds. */ 875 xd->sc_dk.dk_openmask |= (1 << 0); 876 error = writedisklabel(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART), 877 xdstrategy, xd->sc_dk.dk_label, 878 xd->sc_dk.dk_cpulabel); 879 xd->sc_dk.dk_openmask = 880 xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 881 } 882 return error; 883 884 case DIOSXDCMD: 885 xio = (struct xd_iocmd *) addr; 886 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) 887 return (error); 888 return (xdc_ioctlcmd(xd, dev, xio)); 889 890 default: 891 return ENOTTY; 892 } 893 } 894 /* 895 * xdopen: open drive 896 */ 897 898 int 899 xdopen(dev, flag, fmt) 900 dev_t dev; 901 int flag, fmt; 902 903 { 904 int unit, part; 905 struct xd_softc *xd; 906 struct xdc_attach_args xa; 907 908 /* first, could it be a valid target? */ 909 910 unit = DISKUNIT(dev); 911 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL) 912 return (ENXIO); 913 part = DISKPART(dev); 914 915 /* do we need to attach the drive? 
*/ 916 917 if (xd->state == XD_DRIVE_UNKNOWN) { 918 xa.driveno = xd->xd_drive; 919 xa.dvmabuf = (char *) dvma_malloc(XDFM_BPS); 920 xa.fullmode = XD_SUB_WAIT; 921 xa.booting = 0; 922 xdattach((struct device *) xd->parent, (struct device *) xd, &xa); 923 dvma_free(xa.dvmabuf, XDFM_BPS); 924 if (xd->state == XD_DRIVE_UNKNOWN) { 925 return (EIO); 926 } 927 } 928 /* check for partition */ 929 930 if (part != RAW_PART && 931 (part >= xd->sc_dk.dk_label->d_npartitions || 932 xd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) { 933 return (ENXIO); 934 } 935 /* set open masks */ 936 937 switch (fmt) { 938 case S_IFCHR: 939 xd->sc_dk.dk_copenmask |= (1 << part); 940 break; 941 case S_IFBLK: 942 xd->sc_dk.dk_bopenmask |= (1 << part); 943 break; 944 } 945 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 946 947 return 0; 948 } 949 950 int 951 xdread(dev, uio) 952 dev_t dev; 953 struct uio *uio; 954 { 955 956 return (physio(xdstrategy, NULL, dev, B_READ, minphys, uio)); 957 } 958 959 int 960 xdwrite(dev, uio) 961 dev_t dev; 962 struct uio *uio; 963 { 964 965 return (physio(xdstrategy, NULL, dev, B_WRITE, minphys, uio)); 966 } 967 968 969 /* 970 * xdsize: return size of a partition for a dump 971 */ 972 973 int 974 xdsize(dev) 975 dev_t dev; 976 977 { 978 struct xd_softc *xdsc; 979 int unit, part, size; 980 981 /* valid unit? try an open */ 982 983 if (xdopen(dev, 0, S_IFBLK) != 0) 984 return (-1); 985 986 /* do it */ 987 988 xdsc = xd_cd.cd_devs[DISKUNIT(dev)]; 989 part = DISKPART(dev); 990 if (xdsc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP) 991 size = -1; /* only give valid size for swap partitions */ 992 else 993 size = xdsc->sc_dk.dk_label->d_partitions[part].p_size; 994 if (xdclose(dev, 0, S_IFBLK) != 0) 995 return -1; 996 return size; 997 } 998 /* 999 * xdstrategy: buffering system interface to xd. 1000 */ 1001 1002 void 1003 xdstrategy(bp) 1004 struct buf *bp; 1005 1006 { 1007 struct xd_softc *xd; 1008 struct xdc_softc *parent; 1009 struct buf *wq; 1010 int s, unit; 1011 struct xdc_attach_args xa; 1012 1013 unit = DISKUNIT(bp->b_dev); 1014 1015 /* check for live device */ 1016 1017 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == 0 || 1018 bp->b_blkno < 0 || 1019 (bp->b_bcount % xd->sc_dk.dk_label->d_secsize) != 0) { 1020 bp->b_error = EINVAL; 1021 goto bad; 1022 } 1023 /* do we need to attach the drive? */ 1024 1025 if (xd->state == XD_DRIVE_UNKNOWN) { 1026 xa.driveno = xd->xd_drive; 1027 xa.dvmabuf = (char *) dvma_malloc(XDFM_BPS); 1028 xa.fullmode = XD_SUB_WAIT; 1029 xa.booting = 0; 1030 xdattach((struct device *)xd->parent, (struct device *)xd, &xa); 1031 dvma_free(xa.dvmabuf, XDFM_BPS); 1032 if (xd->state == XD_DRIVE_UNKNOWN) { 1033 bp->b_error = EIO; 1034 goto bad; 1035 } 1036 } 1037 if (xd->state != XD_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) { 1038 /* no I/O to unlabeled disks, unless raw partition */ 1039 bp->b_error = EIO; 1040 goto bad; 1041 } 1042 /* short circuit zero length request */ 1043 1044 if (bp->b_bcount == 0) 1045 goto done; 1046 1047 /* check bounds with label (disksubr.c). Determine the size of the 1048 * transfer, and make sure it is within the boundaries of the 1049 * partition. Adjust transfer if needed, and signal errors or early 1050 * completion. */ 1051 1052 if (bounds_check_with_label(bp, xd->sc_dk.dk_label, 1053 (xd->flags & XD_WLABEL) != 0) <= 0) 1054 goto done; 1055 1056 /* 1057 * now we know we have a valid buf structure that we need to do I/O 1058 * on. 
1059 * 1060 * note that we don't disksort because the controller has a sorting 1061 * algorithm built into the hardware. 1062 */ 1063 1064 s = splbio(); /* protect the queues */ 1065 1066 /* first, give jobs in front of us a chance */ 1067 1068 parent = xd->parent; 1069 while (parent->nfree > 0 && parent->sc_wq.b_actf) 1070 if (xdc_startbuf(parent, NULL, NULL) != XD_ERR_AOK) 1071 break; 1072 1073 /* if there are no free iorq's, then we just queue and return. the 1074 * buffs will get picked up later by xdcintr(). */ 1075 1076 if (parent->nfree == 0) { 1077 wq = &xd->parent->sc_wq; 1078 bp->b_actf = 0; 1079 bp->b_actb = wq->b_actb; 1080 *wq->b_actb = bp; 1081 wq->b_actb = &bp->b_actf; 1082 splx(s); 1083 return; 1084 } 1085 /* now we have free iopb's and we are at splbio... start 'em up */ 1086 1087 if (xdc_startbuf(parent, xd, bp) != XD_ERR_AOK) { 1088 return; 1089 } 1090 1091 /* done! */ 1092 1093 splx(s); 1094 return; 1095 1096 bad: /* tells upper layers we have an error */ 1097 bp->b_flags |= B_ERROR; 1098 done: /* tells upper layers we are done with this 1099 * buf */ 1100 bp->b_resid = bp->b_bcount; 1101 biodone(bp); 1102 } 1103 /* 1104 * end of {b,c}devsw functions 1105 */ 1106 1107 /* 1108 * i n t e r r u p t f u n c t i o n 1109 * 1110 * xdcintr: hardware interrupt. 1111 */ 1112 int 1113 xdcintr(v) 1114 void *v; 1115 1116 { 1117 struct xdc_softc *xdcsc = v; 1118 struct xd_softc *xd; 1119 struct buf *bp; 1120 1121 /* kick the event counter */ 1122 1123 xdcsc->sc_intrcnt.ev_count++; 1124 1125 /* remove as many done IOPBs as possible */ 1126 1127 xdc_remove_iorq(xdcsc); 1128 1129 /* start any iorq's already waiting */ 1130 1131 xdc_start(xdcsc, XDC_MAXIOPB); 1132 1133 /* fill up any remaining iorq's with queue'd buffers */ 1134 1135 while (xdcsc->nfree > 0 && xdcsc->sc_wq.b_actf) 1136 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK) 1137 break; 1138 1139 return (1); 1140 } 1141 /* 1142 * end of interrupt function 1143 */ 1144 1145 /* 1146 * i n t e r n a l f u n c t i o n s 1147 */ 1148 1149 /* 1150 * xdc_rqinit: fill out the fields of an I/O request 1151 */ 1152 1153 inline void 1154 xdc_rqinit(rq, xdc, xd, md, blk, cnt, db, bp) 1155 struct xd_iorq *rq; 1156 struct xdc_softc *xdc; 1157 struct xd_softc *xd; 1158 int md; 1159 u_long blk; 1160 int cnt; 1161 caddr_t db; 1162 struct buf *bp; 1163 { 1164 rq->xdc = xdc; 1165 rq->xd = xd; 1166 rq->ttl = XDC_MAXTTL + 10; 1167 rq->mode = md; 1168 rq->tries = rq->errno = rq->lasterror = 0; 1169 rq->blockno = blk; 1170 rq->sectcnt = cnt; 1171 rq->dbuf = rq->dbufbase = db; 1172 rq->buf = bp; 1173 } 1174 /* 1175 * xdc_rqtopb: load up an IOPB based on an iorq 1176 */ 1177 1178 void 1179 xdc_rqtopb(iorq, iopb, cmd, subfun) 1180 struct xd_iorq *iorq; 1181 struct xd_iopb *iopb; 1182 int cmd, subfun; 1183 1184 { 1185 u_long block, dp; 1186 1187 /* standard stuff */ 1188 1189 iopb->errs = iopb->done = 0; 1190 iopb->comm = cmd; 1191 iopb->errno = iopb->status = 0; 1192 iopb->subfun = subfun; 1193 if (iorq->xd) 1194 iopb->unit = iorq->xd->xd_drive; 1195 else 1196 iopb->unit = 0; 1197 1198 /* check for alternate IOPB format */ 1199 1200 if (cmd == XDCMD_WRP) { 1201 switch (subfun) { 1202 case XDFUN_CTL:{ 1203 struct xd_iopb_ctrl *ctrl = 1204 (struct xd_iopb_ctrl *) iopb; 1205 iopb->lll = 0; 1206 iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL) 1207 ? 
0 1208 : iorq->xdc->ipl; 1209 ctrl->param_a = XDPA_TMOD | XDPA_DACF; 1210 ctrl->param_b = XDPB_ROR | XDPB_TDT_3_2USEC; 1211 ctrl->param_c = XDPC_OVS | XDPC_COP | XDPC_ASR | 1212 XDPC_RBC | XDPC_ECC2; 1213 ctrl->throttle = XDC_THROTTLE; 1214 #ifdef sparc 1215 if (cputyp == CPU_SUN4 && cpumod == SUN4_300) 1216 ctrl->delay = XDC_DELAY_4_300; 1217 else 1218 ctrl->delay = XDC_DELAY_SPARC; 1219 #endif 1220 #ifdef sun3 1221 ctrl->delay = XDC_DELAY_SUN3; 1222 #endif 1223 break; 1224 } 1225 case XDFUN_DRV:{ 1226 struct xd_iopb_drive *drv = 1227 (struct xd_iopb_drive *)iopb; 1228 /* we assume that the disk label has the right 1229 * info */ 1230 if (XD_STATE(iorq->mode) == XD_SUB_POLL) 1231 drv->dparam_ipl = (XDC_DPARAM << 3); 1232 else 1233 drv->dparam_ipl = (XDC_DPARAM << 3) | 1234 iorq->xdc->ipl; 1235 drv->maxsect = iorq->xd->nsect - 1; 1236 drv->maxsector = drv->maxsect; 1237 /* note: maxsector != maxsect only if you are 1238 * doing cyl sparing */ 1239 drv->headoff = 0; 1240 drv->maxcyl = iorq->xd->pcyl - 1; 1241 drv->maxhead = iorq->xd->nhead - 1; 1242 break; 1243 } 1244 case XDFUN_FMT:{ 1245 struct xd_iopb_format *form = 1246 (struct xd_iopb_format *) iopb; 1247 if (XD_STATE(iorq->mode) == XD_SUB_POLL) 1248 form->interleave_ipl = (XDC_INTERLEAVE << 3); 1249 else 1250 form->interleave_ipl = (XDC_INTERLEAVE << 3) | 1251 iorq->xdc->ipl; 1252 form->field1 = XDFM_FIELD1; 1253 form->field2 = XDFM_FIELD2; 1254 form->field3 = XDFM_FIELD3; 1255 form->field4 = XDFM_FIELD4; 1256 form->bytespersec = XDFM_BPS; 1257 form->field6 = XDFM_FIELD6; 1258 form->field7 = XDFM_FIELD7; 1259 break; 1260 } 1261 } 1262 } else { 1263 1264 /* normal IOPB case (harmless to RDP command) */ 1265 1266 iopb->lll = 0; 1267 iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL) 1268 ? 0 1269 : iorq->xdc->ipl; 1270 iopb->sectcnt = iorq->sectcnt; 1271 block = iorq->blockno; 1272 if (iorq->xd == NULL || block == 0) { 1273 iopb->sectno = iopb->headno = iopb->cylno = 0; 1274 } else { 1275 iopb->sectno = block % iorq->xd->nsect; 1276 block = block / iorq->xd->nsect; 1277 iopb->headno = block % iorq->xd->nhead; 1278 block = block / iorq->xd->nhead; 1279 iopb->cylno = block; 1280 } 1281 iopb->daddr = dp = (iorq->dbuf == NULL) ? 0 : 1282 dvma_kvtopa((long)iorq->dbuf, BUS_VME32); 1283 iopb->addrmod = XDC_ADDRMOD; 1284 } 1285 } 1286 1287 /* 1288 * xdc_cmd: front end for POLL'd and WAIT'd commands. Returns rqno. 1289 * If you've already got an IORQ, you can call submit directly (currently 1290 * there is no need to do this). NORM requests are handled seperately. 
1291 */ 1292 int 1293 xdc_cmd(xdcsc, cmd, subfn, unit, block, scnt, dptr, fullmode) 1294 struct xdc_softc *xdcsc; 1295 int cmd, subfn, unit, block, scnt; 1296 char *dptr; 1297 int fullmode; 1298 1299 { 1300 int rqno, submode = XD_STATE(fullmode), retry; 1301 u_long dp; 1302 struct xd_iorq *iorq; 1303 struct xd_iopb *iopb; 1304 1305 /* get iorq/iopb */ 1306 switch (submode) { 1307 case XD_SUB_POLL: 1308 while (xdcsc->nfree == 0) { 1309 if (xdc_piodriver(xdcsc, 0, 1) != XD_ERR_AOK) 1310 return (XD_ERR_FAIL); 1311 } 1312 break; 1313 case XD_SUB_WAIT: 1314 retry = 1; 1315 while (retry) { 1316 while (xdcsc->nfree == 0) { 1317 if (tsleep(&xdcsc->nfree, PRIBIO, "xdnfree", 0)) 1318 return (XD_ERR_FAIL); 1319 } 1320 while (xdcsc->ndone > XDC_SUBWAITLIM) { 1321 if (tsleep(&xdcsc->ndone, PRIBIO, "xdsubwait", 0)) 1322 return (XD_ERR_FAIL); 1323 } 1324 if (xdcsc->nfree) 1325 retry = 0; /* got it */ 1326 } 1327 break; 1328 default: 1329 return (XD_ERR_FAIL); /* illegal */ 1330 } 1331 if (xdcsc->nfree == 0) 1332 panic("xdcmd nfree"); 1333 rqno = XDC_RQALLOC(xdcsc); 1334 iorq = &xdcsc->reqs[rqno]; 1335 iopb = iorq->iopb; 1336 1337 1338 /* init iorq/iopb */ 1339 1340 xdc_rqinit(iorq, xdcsc, 1341 (unit == XDC_NOUNIT) ? NULL : xdcsc->sc_drives[unit], 1342 fullmode, block, scnt, dptr, NULL); 1343 1344 /* load IOPB from iorq */ 1345 1346 xdc_rqtopb(iorq, iopb, cmd, subfn); 1347 1348 /* submit it for processing */ 1349 1350 xdc_submit_iorq(xdcsc, rqno, fullmode); /* error code will be in iorq */ 1351 1352 return (rqno); 1353 } 1354 /* 1355 * xdc_startbuf 1356 * start a buffer running, assumes nfree > 0 1357 */ 1358 1359 int 1360 xdc_startbuf(xdcsc, xdsc, bp) 1361 struct xdc_softc *xdcsc; 1362 struct xd_softc *xdsc; 1363 struct buf *bp; 1364 1365 { 1366 int rqno, partno; 1367 struct xd_iorq *iorq; 1368 struct xd_iopb *iopb; 1369 struct buf *wq; 1370 u_long block, dp; 1371 caddr_t dbuf; 1372 1373 if (!xdcsc->nfree) 1374 panic("xdc_startbuf free"); 1375 rqno = XDC_RQALLOC(xdcsc); 1376 iorq = &xdcsc->reqs[rqno]; 1377 iopb = iorq->iopb; 1378 1379 /* get buf */ 1380 1381 if (bp == NULL) { 1382 bp = xdcsc->sc_wq.b_actf; 1383 if (!bp) 1384 panic("xdc_startbuf bp"); 1385 wq = bp->b_actf; 1386 if (wq) 1387 wq->b_actb = bp->b_actb; 1388 else 1389 xdcsc->sc_wq.b_actb = bp->b_actb; 1390 *bp->b_actb = wq; 1391 xdsc = xdcsc->sc_drives[DISKUNIT(bp->b_dev)]; 1392 } 1393 partno = DISKPART(bp->b_dev); 1394 #ifdef XDC_DEBUG 1395 printf("xdc_startbuf: %s%c: %s block %d\n", xdsc->sc_dev.dv_xname, 1396 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno); 1397 printf("xdc_startbuf: b_bcount %d, b_data 0x%x\n", 1398 bp->b_bcount, bp->b_data); 1399 #endif 1400 1401 /* 1402 * load request. we have to calculate the correct block number based 1403 * on partition info. 1404 * 1405 * also, note that there are two kinds of buf structures, those with 1406 * B_PHYS set and those without B_PHYS. if B_PHYS is set, then it is 1407 * a raw I/O (to a cdevsw) and we are doing I/O directly to the users' 1408 * buffer which has already been mapped into DVMA space. (Not on sun3) 1409 * However, if B_PHYS is not set, then the buffer is a normal system 1410 * buffer which does *not* live in DVMA space. In that case we call 1411 * dvma_mapin to map it into DVMA space so we can do the DMA to it. 1412 * 1413 * in cases where we do a dvma_mapin, note that iorq points to the buffer 1414 * as mapped into DVMA space, where as the bp->b_data points to its 1415 * non-DVMA mapping. 
1416 * 1417 * XXX - On the sun3, B_PHYS does NOT mean the buffer is mapped 1418 * into dvma space, only that it was remapped into the kernel. 1419 * We ALWAYS have to remap the kernel buf into DVMA space. 1420 * (It is done inexpensively, using whole segments!) 1421 */ 1422 1423 block = bp->b_blkno + ((partno == RAW_PART) ? 0 : 1424 xdsc->sc_dk.dk_label->d_partitions[partno].p_offset); 1425 1426 dbuf = dvma_mapin(bp->b_data, bp->b_bcount); 1427 if (dbuf == NULL) { /* out of DVMA space */ 1428 printf("%s: warning: out of DVMA space\n", xdcsc->sc_dev.dv_xname); 1429 XDC_FREE(xdcsc, rqno); 1430 wq = &xdcsc->sc_wq; /* put at end of queue */ 1431 bp->b_actf = 0; 1432 bp->b_actb = wq->b_actb; 1433 *wq->b_actb = bp; 1434 wq->b_actb = &bp->b_actf; 1435 return (XD_ERR_FAIL); /* XXX: need some sort of 1436 * call-back scheme here? */ 1437 } 1438 1439 /* init iorq and load iopb from it */ 1440 1441 xdc_rqinit(iorq, xdcsc, xdsc, XD_SUB_NORM | XD_MODE_VERBO, block, 1442 bp->b_bcount / XDFM_BPS, dbuf, bp); 1443 1444 xdc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XDCMD_RD : XDCMD_WR, 0); 1445 1446 /* Instrumentation. */ 1447 disk_busy(&xdsc->sc_dk); 1448 1449 /* now submit [note that xdc_submit_iorq can never fail on NORM reqs] */ 1450 1451 xdc_submit_iorq(xdcsc, rqno, XD_SUB_NORM); 1452 return (XD_ERR_AOK); 1453 } 1454 1455 1456 /* 1457 * xdc_submit_iorq: submit an iorq for processing. returns XD_ERR_AOK 1458 * if ok. if it fail returns an error code. type is XD_SUB_*. 1459 * 1460 * note: caller frees iorq in all cases except NORM 1461 * 1462 * return value: 1463 * NORM: XD_AOK (req pending), XD_FAIL (couldn't submit request) 1464 * WAIT: XD_AOK (success), <error-code> (failed) 1465 * POLL: <same as WAIT> 1466 * NOQ : <same as NORM> 1467 * 1468 * there are three sources for i/o requests: 1469 * [1] xdstrategy: normal block I/O, using "struct buf" system. 1470 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts. 1471 * [3] open/ioctl: these are I/O requests done in the context of a process, 1472 * and the process should block until they are done. 1473 * 1474 * software state is stored in the iorq structure. each iorq has an 1475 * iopb structure. the hardware understands the iopb structure. 1476 * every command must go through an iopb. a 7053 can only handle 1477 * XDC_MAXIOPB (31) active iopbs at one time. iopbs are allocated in 1478 * DVMA space at boot up time. what happens if we run out of iopb's? 1479 * for i/o type [1], the buffers are queued at the "buff" layer and 1480 * picked up later by the interrupt routine. for case [2] the 1481 * programmed i/o driver is called with a special flag that says 1482 * return when one iopb is free. for case [3] the process can sleep 1483 * on the iorq free list until some iopbs are avaliable. 
1484 */ 1485 1486 1487 int 1488 xdc_submit_iorq(xdcsc, iorqno, type) 1489 struct xdc_softc *xdcsc; 1490 int iorqno; 1491 int type; 1492 1493 { 1494 u_long iopbaddr; 1495 struct xd_iorq *iorq = &xdcsc->reqs[iorqno]; 1496 1497 #ifdef XDC_DEBUG 1498 printf("xdc_submit_iorq(%s, no=%d, type=%d)\n", xdcsc->sc_dev.dv_xname, 1499 iorqno, type); 1500 #endif 1501 1502 /* first check and see if controller is busy */ 1503 if (xdcsc->xdc->xdc_csr & XDC_ADDING) { 1504 #ifdef XDC_DEBUG 1505 printf("xdc_submit_iorq: XDC not ready (ADDING)\n"); 1506 #endif 1507 if (type == XD_SUB_NOQ) 1508 return (XD_ERR_FAIL); /* failed */ 1509 XDC_TWAIT(xdcsc, iorqno); /* put at end of waitq */ 1510 switch (type) { 1511 case XD_SUB_NORM: 1512 return XD_ERR_AOK; /* success */ 1513 case XD_SUB_WAIT: 1514 while (iorq->iopb->done == 0) { 1515 sleep(iorq, PRIBIO); 1516 } 1517 return (iorq->errno); 1518 case XD_SUB_POLL: 1519 return (xdc_piodriver(xdcsc, iorqno, 0)); 1520 default: 1521 panic("xdc_submit_iorq adding"); 1522 } 1523 } 1524 #ifdef XDC_DEBUG 1525 { 1526 u_char *rio = (u_char *) iorq->iopb; 1527 int sz = sizeof(struct xd_iopb), lcv; 1528 printf("%s: aio #%d [", 1529 xdcsc->sc_dev.dv_xname, iorq - xdcsc->reqs); 1530 for (lcv = 0; lcv < sz; lcv++) 1531 printf(" %02x", rio[lcv]); 1532 printf("]\n"); 1533 } 1534 #endif /* XDC_DEBUG */ 1535 1536 /* controller not busy, start command */ 1537 iopbaddr = dvma_kvtopa((long) iorq->iopb, BUS_VME32); 1538 XDC_GO(xdcsc->xdc, iopbaddr); /* go! */ 1539 xdcsc->nrun++; 1540 /* command now running, wrap it up */ 1541 switch (type) { 1542 case XD_SUB_NORM: 1543 case XD_SUB_NOQ: 1544 return (XD_ERR_AOK); /* success */ 1545 case XD_SUB_WAIT: 1546 while (iorq->iopb->done == 0) { 1547 sleep(iorq, PRIBIO); 1548 } 1549 return (iorq->errno); 1550 case XD_SUB_POLL: 1551 return (xdc_piodriver(xdcsc, iorqno, 0)); 1552 default: 1553 panic("xdc_submit_iorq wrap up"); 1554 } 1555 panic("xdc_submit_iorq"); 1556 return 0; /* not reached */ 1557 } 1558 1559 1560 /* 1561 * xdc_piodriver 1562 * 1563 * programmed i/o driver. this function takes over the computer 1564 * and drains off all i/o requests. it returns the status of the iorq 1565 * the caller is interesting in. if freeone is true, then it returns 1566 * when there is a free iorq. 1567 */ 1568 int 1569 xdc_piodriver(xdcsc, iorqno, freeone) 1570 struct xdc_softc *xdcsc; 1571 char iorqno; 1572 int freeone; 1573 1574 { 1575 int nreset = 0; 1576 int retval = 0; 1577 u_long count; 1578 struct xdc *xdc = xdcsc->xdc; 1579 #ifdef XDC_DEBUG 1580 printf("xdc_piodriver(%s, %d, freeone=%d)\n", xdcsc->sc_dev.dv_xname, 1581 iorqno, freeone); 1582 #endif 1583 1584 while (xdcsc->nwait || xdcsc->nrun) { 1585 #ifdef XDC_DEBUG 1586 printf("xdc_piodriver: wait=%d, run=%d\n", 1587 xdcsc->nwait, xdcsc->nrun); 1588 #endif 1589 XDC_WAIT(xdc, count, XDC_MAXTIME, (XDC_REMIOPB | XDC_F_ERROR)); 1590 #ifdef XDC_DEBUG 1591 printf("xdc_piodriver: done wait with count = %d\n", count); 1592 #endif 1593 /* we expect some progress soon */ 1594 if (count == 0 && nreset >= 2) { 1595 xdc_reset(xdcsc, 0, XD_RSET_ALL, XD_ERR_FAIL, 0); 1596 #ifdef XDC_DEBUG 1597 printf("xdc_piodriver: timeout\n"); 1598 #endif 1599 return (XD_ERR_FAIL); 1600 } 1601 if (count == 0) { 1602 if (xdc_reset(xdcsc, 0, 1603 (nreset++ == 0) ? 
XD_RSET_NONE : iorqno, 1604 XD_ERR_FAIL, 1605 0) == XD_ERR_FAIL) 1606 return (XD_ERR_FAIL); /* flushes all but POLL 1607 * requests, resets */ 1608 continue; 1609 } 1610 xdc_remove_iorq(xdcsc); /* could resubmit request */ 1611 if (freeone) { 1612 if (xdcsc->nrun < XDC_MAXIOPB) { 1613 #ifdef XDC_DEBUG 1614 printf("xdc_piodriver: done: one free\n"); 1615 #endif 1616 return (XD_ERR_AOK); 1617 } 1618 continue; /* don't xdc_start */ 1619 } 1620 xdc_start(xdcsc, XDC_MAXIOPB); 1621 } 1622 1623 /* get return value */ 1624 1625 retval = xdcsc->reqs[iorqno].errno; 1626 1627 #ifdef XDC_DEBUG 1628 printf("xdc_piodriver: done, retval = 0x%x (%s)\n", 1629 xdcsc->reqs[iorqno].errno, xdc_e2str(xdcsc->reqs[iorqno].errno)); 1630 #endif 1631 1632 /* now that we've drained everything, start up any bufs that have 1633 * queued */ 1634 1635 while (xdcsc->nfree > 0 && xdcsc->sc_wq.b_actf) 1636 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK) 1637 break; 1638 1639 return (retval); 1640 } 1641 1642 /* 1643 * xdc_reset: reset one drive. NOTE: assumes xdc was just reset. 1644 * we steal iopb[0] for this, but we put it back when we are done. 1645 */ 1646 int 1647 xdc_xdreset(xdcsc, xdsc) 1648 struct xdc_softc *xdcsc; 1649 struct xd_softc *xdsc; 1650 1651 { 1652 struct xd_iopb tmpiopb; 1653 u_long addr; 1654 int del; 1655 bcopy(xdcsc->iopbase, &tmpiopb, sizeof(tmpiopb)); 1656 bzero(xdcsc->iopbase, sizeof(tmpiopb)); 1657 xdcsc->iopbase->comm = XDCMD_RST; 1658 xdcsc->iopbase->unit = xdsc->xd_drive; 1659 addr = (u_long) xdcsc->dvmaiopb; 1660 XDC_GO(xdcsc->xdc, addr); /* go! */ 1661 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_REMIOPB); 1662 if (del <= 0 || xdcsc->iopbase->errs) { 1663 printf("%s: off-line: %s\n", xdcsc->sc_dev.dv_xname, 1664 xdc_e2str(xdcsc->iopbase->errno)); 1665 xdcsc->xdc->xdc_csr = XDC_RESET; 1666 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET); 1667 if (del <= 0) 1668 panic("xdc_reset"); 1669 } else { 1670 xdcsc->xdc->xdc_csr = XDC_CLRRIO; /* clear RIO */ 1671 } 1672 bcopy(&tmpiopb, xdcsc->iopbase, sizeof(tmpiopb)); 1673 } 1674 1675 1676 /* 1677 * xdc_reset: reset everything: requests are marked as errors except 1678 * a polled request (which is resubmitted) 1679 */ 1680 int 1681 xdc_reset(xdcsc, quiet, blastmode, error, xdsc) 1682 struct xdc_softc *xdcsc; 1683 int quiet, blastmode, error; 1684 struct xd_softc *xdsc; 1685 1686 { 1687 int del = 0, lcv, poll = -1, retval = XD_ERR_AOK; 1688 int oldfree = xdcsc->nfree; 1689 struct xd_iorq *iorq; 1690 1691 /* soft reset hardware */ 1692 1693 if (!quiet) 1694 printf("%s: soft reset\n", xdcsc->sc_dev.dv_xname); 1695 xdcsc->xdc->xdc_csr = XDC_RESET; 1696 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET); 1697 if (del <= 0) { 1698 blastmode = XD_RSET_ALL; /* dead, flush all requests */ 1699 retval = XD_ERR_FAIL; 1700 } 1701 if (xdsc) 1702 xdc_xdreset(xdcsc, xdsc); 1703 1704 /* fix queues based on "blast-mode" */ 1705 1706 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 1707 iorq = &xdcsc->reqs[lcv]; 1708 1709 if (XD_STATE(iorq->mode) != XD_SUB_POLL && 1710 XD_STATE(iorq->mode) != XD_SUB_WAIT && 1711 XD_STATE(iorq->mode) != XD_SUB_NORM) 1712 /* is it active? 
*/ 1713 continue; 1714 1715 xdcsc->nrun--; /* it isn't running any more */ 1716 if (blastmode == XD_RSET_ALL || blastmode != lcv) { 1717 /* failed */ 1718 iorq->errno = error; 1719 xdcsc->iopbase[lcv].done = xdcsc->iopbase[lcv].errs = 1; 1720 switch (XD_STATE(iorq->mode)) { 1721 case XD_SUB_NORM: 1722 iorq->buf->b_error = EIO; 1723 iorq->buf->b_flags |= B_ERROR; 1724 iorq->buf->b_resid = 1725 iorq->sectcnt * XDFM_BPS; 1726 /* Sun3: map/unmap regardless of B_PHYS */ 1727 dvma_mapout(iorq->dbufbase, 1728 iorq->buf->b_bcount); 1729 disk_unbusy(&iorq->xd->sc_dk, 1730 (iorq->buf->b_bcount - iorq->buf->b_resid)); 1731 biodone(iorq->buf); 1732 XDC_FREE(xdcsc, lcv); /* add to free list */ 1733 break; 1734 case XD_SUB_WAIT: 1735 wakeup(iorq); 1736 case XD_SUB_POLL: 1737 xdcsc->ndone++; 1738 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1739 break; 1740 } 1741 1742 } else { 1743 1744 /* resubmit, put at front of wait queue */ 1745 XDC_HWAIT(xdcsc, lcv); 1746 } 1747 } 1748 1749 /* 1750 * now, if stuff is waiting, start it. 1751 * since we just reset it should go 1752 */ 1753 xdc_start(xdcsc, XDC_MAXIOPB); 1754 1755 /* ok, we did it */ 1756 if (oldfree == 0 && xdcsc->nfree) 1757 wakeup(&xdcsc->nfree); 1758 1759 #ifdef XDC_DIAG 1760 del = xdcsc->nwait + xdcsc->nrun + xdcsc->nfree + xdcsc->ndone; 1761 if (del != XDC_MAXIOPB) 1762 printf("%s: diag: xdc_reset miscount (%d should be %d)!\n", 1763 xdcsc->sc_dev.dv_xname, del, XDC_MAXIOPB); 1764 else 1765 if (xdcsc->ndone > XDC_MAXIOPB - XDC_SUBWAITLIM) 1766 printf("%s: diag: lots of done jobs (%d)\n", 1767 xdcsc->sc_dev.dv_xname, xdcsc->ndone); 1768 #endif 1769 printf("RESET DONE\n"); 1770 return (retval); 1771 } 1772 /* 1773 * xdc_start: start all waiting buffers 1774 */ 1775 1776 int 1777 xdc_start(xdcsc, maxio) 1778 struct xdc_softc *xdcsc; 1779 int maxio; 1780 1781 { 1782 int rqno; 1783 while (maxio && xdcsc->nwait && 1784 (xdcsc->xdc->xdc_csr & XDC_ADDING) == 0) { 1785 XDC_GET_WAITER(xdcsc, rqno); /* note: rqno is an "out" 1786 * param */ 1787 if (xdc_submit_iorq(xdcsc, rqno, XD_SUB_NOQ) != XD_ERR_AOK) 1788 panic("xdc_start"); /* should never happen */ 1789 maxio--; 1790 } 1791 } 1792 /* 1793 * xdc_remove_iorq: remove "done" IOPB's. 1794 */ 1795 1796 int 1797 xdc_remove_iorq(xdcsc) 1798 struct xdc_softc *xdcsc; 1799 1800 { 1801 int errno, rqno, comm, errs; 1802 struct xdc *xdc = xdcsc->xdc; 1803 u_long addr; 1804 struct xd_iopb *iopb; 1805 struct xd_iorq *iorq; 1806 struct buf *bp; 1807 1808 if (xdc->xdc_csr & XDC_F_ERROR) { 1809 /* 1810 * FATAL ERROR: should never happen under normal use. This 1811 * error is so bad, you can't even tell which IOPB is bad, so 1812 * we dump them all. 1813 */ 1814 errno = xdc->xdc_f_err; 1815 printf("%s: fatal error 0x%02x: %s\n", xdcsc->sc_dev.dv_xname, 1816 errno, xdc_e2str(errno)); 1817 if (xdc_reset(xdcsc, 0, XD_RSET_ALL, errno, 0) != XD_ERR_AOK) { 1818 printf("%s: soft reset failed!\n", 1819 xdcsc->sc_dev.dv_xname); 1820 panic("xdc_remove_iorq: controller DEAD"); 1821 } 1822 return (XD_ERR_AOK); 1823 } 1824 1825 /* 1826 * get iopb that is done 1827 * 1828 * hmm... I used to read the address of the done IOPB off the VME 1829 * registers and calculate the rqno directly from that. that worked 1830 * until I started putting a load on the controller. when loaded, i 1831 * would get interrupts but neither the REMIOPB or F_ERROR bits would 1832 * be set, even after DELAY'ing a while! later on the timeout 1833 * routine would detect IOPBs that were marked "running" but their 1834 * "done" bit was set. 
rather than dealing directly with this 1835 * problem, it is just easier to look at all running IOPB's for the 1836 * done bit. 1837 */ 1838 if (xdc->xdc_csr & XDC_REMIOPB) { 1839 xdc->xdc_csr = XDC_CLRRIO; 1840 } 1841 1842 for (rqno = 0; rqno < XDC_MAXIOPB; rqno++) { 1843 iorq = &xdcsc->reqs[rqno]; 1844 if (iorq->mode == 0 || XD_STATE(iorq->mode) == XD_SUB_DONE) 1845 continue; /* free, or done */ 1846 iopb = &xdcsc->iopbase[rqno]; 1847 if (iopb->done == 0) 1848 continue; /* not done yet */ 1849 1850 #ifdef XDC_DEBUG 1851 { 1852 u_char *rio = (u_char *) iopb; 1853 int sz = sizeof(struct xd_iopb), lcv; 1854 printf("%s: rio #%d [", xdcsc->sc_dev.dv_xname, rqno); 1855 for (lcv = 0; lcv < sz; lcv++) 1856 printf(" %02x", rio[lcv]); 1857 printf("]\n"); 1858 } 1859 #endif /* XDC_DEBUG */ 1860 1861 xdcsc->nrun--; 1862 1863 comm = iopb->comm; 1864 errs = iopb->errs; 1865 1866 if (errs) 1867 iorq->errno = iopb->errno; 1868 else 1869 iorq->errno = 0; 1870 1871 /* handle non-fatal errors */ 1872 1873 if (errs && 1874 xdc_error(xdcsc, iorq, iopb, rqno, comm) == XD_ERR_AOK) 1875 continue; /* AOK: we resubmitted it */ 1876 1877 1878 /* this iorq is now done (hasn't been restarted or anything) */ 1879 1880 if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror) 1881 xdc_perror(iorq, iopb, 0); 1882 1883 /* now, if read/write check to make sure we got all the data 1884 * we needed. (this may not be the case if we got an error in 1885 * the middle of a multisector request). */ 1886 1887 if ((iorq->mode & XD_MODE_B144) != 0 && errs == 0 && 1888 (comm == XDCMD_RD || comm == XDCMD_WR)) { 1889 /* we just successfully processed a bad144 sector 1890 * note: if we are in bad 144 mode, the pointers have 1891 * been advanced already (see above) and are pointing 1892 * at the bad144 sector. to exit bad144 mode, we 1893 * must advance the pointers 1 sector and issue a new 1894 * request if there are still sectors left to process 1895 * 1896 */ 1897 XDC_ADVANCE(iorq, 1); /* advance 1 sector */ 1898 1899 /* exit b144 mode */ 1900 iorq->mode = iorq->mode & (~XD_MODE_B144); 1901 1902 if (iorq->sectcnt) { /* more to go! */ 1903 iorq->lasterror = iorq->errno = iopb->errno = 0; 1904 iopb->errs = iopb->done = 0; 1905 iorq->tries = 0; 1906 iopb->sectcnt = iorq->sectcnt; 1907 iopb->cylno = iorq->blockno / 1908 iorq->xd->sectpercyl; 1909 iopb->headno = 1910 (iorq->blockno / iorq->xd->nhead) % 1911 iorq->xd->nhead; 1912 iopb->sectno = iorq->blockno % XDFM_BPS; 1913 iopb->daddr = 1914 dvma_kvtopa((long)iorq->dbuf, BUS_VME32); 1915 XDC_HWAIT(xdcsc, rqno); 1916 xdc_start(xdcsc, 1); /* resubmit */ 1917 continue; 1918 } 1919 } 1920 /* final cleanup, totally done with this request */ 1921 1922 switch (XD_STATE(iorq->mode)) { 1923 case XD_SUB_NORM: 1924 bp = iorq->buf; 1925 if (errs) { 1926 bp->b_error = EIO; 1927 bp->b_flags |= B_ERROR; 1928 bp->b_resid = iorq->sectcnt * XDFM_BPS; 1929 } else { 1930 bp->b_resid = 0; /* done */ 1931 } 1932 /* Sun3: map/unmap regardless of B_PHYS */ 1933 dvma_mapout(iorq->dbufbase, 1934 iorq->buf->b_bcount); 1935 disk_unbusy(&iorq->xd->sc_dk, 1936 (bp->b_bcount - bp->b_resid)); 1937 XDC_FREE(xdcsc, rqno); 1938 biodone(bp); 1939 break; 1940 case XD_SUB_WAIT: 1941 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1942 xdcsc->ndone++; 1943 wakeup(iorq); 1944 break; 1945 case XD_SUB_POLL: 1946 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1947 xdcsc->ndone++; 1948 break; 1949 } 1950 } 1951 1952 return (XD_ERR_AOK); 1953 } 1954 1955 /* 1956 * xdc_perror: print error. 
1957 * - if still_trying is true: we got an error, retried and got a 1958 * different error. in that case lasterror is the old error, 1959 * and errno is the new one. 1960 * - if still_trying is not true, then if we ever had an error it 1961 * is in lasterror. also, if iorq->errno == 0, then we recovered 1962 * from that error (otherwise iorq->errno == iorq->lasterror). 1963 */ 1964 void 1965 xdc_perror(iorq, iopb, still_trying) 1966 struct xd_iorq *iorq; 1967 struct xd_iopb *iopb; 1968 int still_trying; 1969 1970 { 1971 1972 int error = iorq->lasterror; 1973 1974 printf("%s", (iorq->xd) ? 1975 iorq->xd->sc_dev.dv_xname : 1976 iorq->xdc->sc_dev.dv_xname); 1977 if (iorq->buf) 1978 printf("%c: ", 'a' + DISKPART(iorq->buf->b_dev)); 1979 if (iopb->comm == XDCMD_RD || iopb->comm == XDCMD_WR) 1980 printf("%s %d/%d/%d: ", 1981 (iopb->comm == XDCMD_RD) ? "read" : "write", 1982 iopb->cylno, iopb->headno, iopb->sectno); 1983 printf("%s", xdc_e2str(error)); 1984 1985 if (still_trying) 1986 printf(" [still trying, new error=%s]", xdc_e2str(iorq->errno)); 1987 else 1988 if (iorq->errno == 0) 1989 printf(" [recovered in %d tries]", iorq->tries); 1990 1991 printf("\n"); 1992 } 1993 1994 /* 1995 * xdc_error: non-fatal error encountered... recover. 1996 * return AOK if resubmitted, return FAIL if this iopb is done 1997 */ 1998 int 1999 xdc_error(xdcsc, iorq, iopb, rqno, comm) 2000 struct xdc_softc *xdcsc; 2001 struct xd_iorq *iorq; 2002 struct xd_iopb *iopb; 2003 int rqno, comm; 2004 2005 { 2006 int errno = iorq->errno; 2007 int erract = errno & XD_ERA_MASK; 2008 int oldmode, advance, i; 2009 2010 if (erract == XD_ERA_RSET) { /* some errors require a reset */ 2011 oldmode = iorq->mode; 2012 iorq->mode = XD_SUB_DONE | (~XD_SUB_MASK & oldmode); 2013 xdcsc->ndone++; 2014 /* make xdc_start ignore us */ 2015 xdc_reset(xdcsc, 1, XD_RSET_NONE, errno, iorq->xd); 2016 iorq->mode = oldmode; 2017 xdcsc->ndone--; 2018 } 2019 /* check for read/write to a sector in bad144 table if bad: redirect 2020 * request to bad144 area */ 2021 2022 if ((comm == XDCMD_RD || comm == XDCMD_WR) && 2023 (iorq->mode & XD_MODE_B144) == 0) { 2024 advance = iorq->sectcnt - iopb->sectcnt; 2025 XDC_ADVANCE(iorq, advance); 2026 if ((i = isbad(&iorq->xd->dkb, iorq->blockno / iorq->xd->sectpercyl, 2027 (iorq->blockno / iorq->xd->nsect) % iorq->xd->nhead, 2028 iorq->blockno % iorq->xd->nsect)) != -1) { 2029 iorq->mode |= XD_MODE_B144; /* enter bad144 mode & 2030 * redirect */ 2031 iopb->errno = iopb->done = iopb->errs = 0; 2032 iopb->sectcnt = 1; 2033 iopb->cylno = (iorq->xd->ncyl + iorq->xd->acyl) - 2; 2034 /* second to last acyl */ 2035 i = iorq->xd->sectpercyl - 1 - i; /* follow bad144 2036 * standard */ 2037 iopb->headno = i / iorq->xd->nhead; 2038 iopb->sectno = i % iorq->xd->nhead; 2039 XDC_HWAIT(xdcsc, rqno); 2040 xdc_start(xdcsc, 1); /* resubmit */ 2041 return (XD_ERR_AOK); /* recovered! */ 2042 } 2043 } 2044 2045 /* 2046 * it isn't a bad144 sector, must be real error! see if we can retry 2047 * it? 2048 */ 2049 if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror) 2050 xdc_perror(iorq, iopb, 1); /* inform of error state 2051 * change */ 2052 iorq->lasterror = errno; 2053 2054 if ((erract == XD_ERA_RSET || erract == XD_ERA_HARD) 2055 && iorq->tries < XDC_MAXTRIES) { /* retry? */ 2056 iorq->tries++; 2057 iorq->errno = iopb->errno = iopb->done = iopb->errs = 0; 2058 XDC_HWAIT(xdcsc, rqno); 2059 xdc_start(xdcsc, 1); /* restart */ 2060 return (XD_ERR_AOK); /* recovered! 
/*
 * xdc_tick: make sure xd is still alive and ticking (err, kicking).
 */
void
xdc_tick(arg)
	void *arg;

{
	struct xdc_softc *xdcsc = arg;
	int lcv, s, reset = 0;
#ifdef XDC_DIAG
	int wait, run, free, done, whd;
	u_char fqc[XDC_MAXIOPB], wqc[XDC_MAXIOPB], mark[XDC_MAXIOPB];
	s = splbio();
	wait = xdcsc->nwait;
	run = xdcsc->nrun;
	free = xdcsc->nfree;
	done = xdcsc->ndone;
	whd = xdcsc->waithead;	/* snapshot the head to walk the waitq copy */
	bcopy(xdcsc->waitq, wqc, sizeof(wqc));
	bcopy(xdcsc->freereq, fqc, sizeof(fqc));
	splx(s);
	if (wait + run + free + done != XDC_MAXIOPB) {
		printf("%s: diag: IOPB miscount (got w/f/r/d %d/%d/%d/%d, wanted %d)\n",
		    xdcsc->sc_dev.dv_xname, wait, free, run, done,
		    XDC_MAXIOPB);
		bzero(mark, sizeof(mark));
		printf("FREE: ");
		for (lcv = free; lcv > 0; lcv--) {
			printf("%d ", fqc[lcv - 1]);
			mark[fqc[lcv - 1]] = 1;
		}
		printf("\nWAIT: ");
		lcv = wait;
		while (lcv > 0) {
			printf("%d ", wqc[whd]);
			mark[wqc[whd]] = 1;
			whd = (whd + 1) % XDC_MAXIOPB;
			lcv--;
		}
		printf("\n");
		for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
			if (mark[lcv] == 0)
				printf("MARK: running %d: mode %d done %d errs %d errno 0x%x ttl %d buf %x\n",
				    lcv, xdcsc->reqs[lcv].mode,
				    xdcsc->iopbase[lcv].done,
				    xdcsc->iopbase[lcv].errs,
				    xdcsc->iopbase[lcv].errno,
				    xdcsc->reqs[lcv].ttl, xdcsc->reqs[lcv].buf);
		}
	} else
		if (done > XDC_MAXIOPB - XDC_SUBWAITLIM)
			printf("%s: diag: lots of done jobs (%d)\n",
			    xdcsc->sc_dev.dv_xname, done);

#endif
#ifdef XDC_DEBUG
	printf("%s: tick: csr 0x%x, w/f/r/d %d/%d/%d/%d\n",
	    xdcsc->sc_dev.dv_xname,
	    xdcsc->xdc->xdc_csr, xdcsc->nwait, xdcsc->nfree, xdcsc->nrun,
	    xdcsc->ndone);
	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
		if (xdcsc->reqs[lcv].mode)
			printf("running %d: mode %d done %d errs %d errno 0x%x\n",
			    lcv,
			    xdcsc->reqs[lcv].mode, xdcsc->iopbase[lcv].done,
			    xdcsc->iopbase[lcv].errs, xdcsc->iopbase[lcv].errno);
	}
#endif

	/* reduce ttl for each request; if one hits zero, reset the xdc */
	s = splbio();
	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
		if (xdcsc->reqs[lcv].mode == 0 ||
		    XD_STATE(xdcsc->reqs[lcv].mode) == XD_SUB_DONE)
			continue;
		xdcsc->reqs[lcv].ttl--;
		if (xdcsc->reqs[lcv].ttl == 0)
			reset = 1;
	}
	if (reset) {
		printf("%s: watchdog timeout\n", xdcsc->sc_dev.dv_xname);
		xdc_reset(xdcsc, 0, XD_RSET_NONE, XD_ERR_FAIL, NULL);
	}
	splx(s);

	/* until next time */

	timeout(xdc_tick, xdcsc, XDC_TICKCNT);
}
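
/*
 * Editor's sketch (not part of the original driver): xdc_tick() above is
 * the usual self-rearming watchdog built on the kernel timeout() interface:
 * age outstanding work, act if something has been stuck too long, and
 * reschedule yourself.  The "mydev" names below are hypothetical.
 */
#if 0	/* illustrative only; never compiled */
static void
mydev_tick(arg)
	void *arg;
{
	struct mydev_softc *sc = arg;	/* hypothetical softc */

	/* age outstanding requests; reset the hardware if one expired */

	timeout(mydev_tick, sc, hz);	/* re-arm: run again in ~1 second */
}

/*
 * primed once, e.g. from attach:
 *	timeout(mydev_tick, sc, hz);
 */
#endif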
/*
 * xdc_ioctlcmd: this function provides a user level interface to the
 * controller via ioctl. this allows "format" programs to be written
 * in user code, and is also useful for some debugging. we return
 * an error code. called at user priority.
 */
int
xdc_ioctlcmd(xd, dev, xio)
	struct xd_softc *xd;
	dev_t dev;
	struct xd_iocmd *xio;

{
	int s, err, rqno, dummy;
	caddr_t dvmabuf = NULL;
	struct xdc_softc *xdcsc;

	/* check sanity of requested command */

	switch (xio->cmd) {

	case XDCMD_NOP:	/* no op: everything should be zero */
		if (xio->subfn || xio->dptr || xio->dlen ||
		    xio->block || xio->sectcnt)
			return (EINVAL);
		break;

	case XDCMD_RD:	/* read / write sectors (up to XD_IOCMD_MAXS) */
	case XDCMD_WR:
		if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS ||
		    xio->sectcnt * XDFM_BPS != xio->dlen || xio->dptr == NULL)
			return (EINVAL);
		break;

	case XDCMD_SK:	/* seek: doesn't seem useful to export this */
		return (EINVAL);

	case XDCMD_WRP:	/* write parameters */
		return (EINVAL);/* not useful, except maybe drive
				 * parameters... but drive parameters should
				 * go via disklabel changes */

	case XDCMD_RDP:	/* read parameters */
		if (xio->subfn != XDFUN_DRV ||
		    xio->dlen || xio->block || xio->dptr)
			return (EINVAL);	/* allow read drive params to
						 * get hw_spt */
		xio->sectcnt = xd->hw_spt;	/* we already know the answer */
		return (0);
		break;

	case XDCMD_XRD:	/* extended read/write */
	case XDCMD_XWR:

		switch (xio->subfn) {

		case XDFUN_THD:/* track headers */
			if (xio->sectcnt != xd->hw_spt ||
			    (xio->block % xd->nsect) != 0 ||
			    xio->dlen != XD_IOCMD_HSZ * xd->hw_spt ||
			    xio->dptr == NULL)
				return (EINVAL);
			xio->sectcnt = 0;
			break;

		case XDFUN_FMT:/* NOTE: also XDFUN_VFY */
			if (xio->cmd == XDCMD_XRD)
				return (EINVAL);	/* no XDFUN_VFY */
			if (xio->sectcnt || xio->dlen ||
			    (xio->block % xd->nsect) != 0 || xio->dptr)
				return (EINVAL);
			break;

		case XDFUN_HDR:/* header, header verify, data, data ECC */
			return (EINVAL);	/* not yet */

		case XDFUN_DM:	/* defect map */
		case XDFUN_DMX:/* defect map (alternate location) */
			if (xio->sectcnt || xio->dlen != XD_IOCMD_DMSZ ||
			    (xio->block % xd->nsect) != 0 || xio->dptr == NULL)
				return (EINVAL);
			break;

		default:
			return (EINVAL);
		}
		break;

	case XDCMD_TST:	/* diagnostics */
		return (EINVAL);

	default:
		return (EINVAL);/* ??? */
	}

	/* create DVMA buffer for request if needed */

	if (xio->dlen) {
		dvmabuf = dvma_malloc(xio->dlen);
		if (xio->cmd == XDCMD_WR || xio->cmd == XDCMD_XWR) {
			if ((err = copyin(xio->dptr, dvmabuf, xio->dlen)) != 0) {
				dvma_free(dvmabuf, xio->dlen);
				return (err);
			}
		}
	}
	/* do it! */

	err = 0;
	xdcsc = xd->parent;
	s = splbio();
	rqno = xdc_cmd(xdcsc, xio->cmd, xio->subfn, xd->xd_drive, xio->block,
	    xio->sectcnt, dvmabuf, XD_SUB_WAIT);
	if (rqno == XD_ERR_FAIL) {
		err = EIO;
		goto done;
	}
	xio->errno = xdcsc->reqs[rqno].errno;
	xio->tries = xdcsc->reqs[rqno].tries;
	XDC_DONE(xdcsc, rqno, dummy);

	if (xio->cmd == XDCMD_RD || xio->cmd == XDCMD_XRD)
		err = copyout(dvmabuf, xio->dptr, xio->dlen);

done:
	splx(s);
	if (dvmabuf)
		dvma_free(dvmabuf, xio->dlen);
	return (err);
}
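
/*
 * Editor's sketch (not part of the original driver): how a user-level
 * "format" or diagnostic program might drive xdc_ioctlcmd() above for a
 * single-sector read.  The ioctl name DIOSXDCMD, the include paths and the
 * device path are assumptions here; check xio.h (and xdreg.h for XDFM_BPS)
 * for the definitions actually installed on your system.
 */
#if 0	/* illustrative user-level code; never compiled into the kernel */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "xio.h"	/* struct xd_iocmd, XDCMD_*, DIOSXDCMD (assumed) */
#include "xdreg.h"	/* XDFM_BPS */

int
main(void)
{
	struct xd_iocmd xio;
	char *buf;
	int fd;

	if ((fd = open("/dev/rxd0c", O_RDWR)) == -1) {
		perror("/dev/rxd0c");
		exit(1);
	}

	memset(&xio, 0, sizeof(xio));
	xio.cmd = XDCMD_RD;			/* plain sector read */
	xio.block = 0;				/* absolute block on the drive */
	xio.sectcnt = 1;			/* must not exceed XD_IOCMD_MAXS */
	xio.dlen = xio.sectcnt * XDFM_BPS;	/* the driver insists on this */
	if ((buf = malloc(xio.dlen)) == NULL) {
		perror("malloc");
		exit(1);
	}
	xio.dptr = buf;

	if (ioctl(fd, DIOSXDCMD, &xio) == -1) {
		perror("DIOSXDCMD");
		exit(1);
	}
	printf("done: controller error %d after %d tries\n",
	    xio.errno, xio.tries);
	return (0);
}
#endif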
/*
 * xdc_e2str: convert error code number into an error string
 */
char *
xdc_e2str(no)
	int no;
{
	switch (no) {
	case XD_ERR_FAIL:
		return ("Software fatal error");
	case XD_ERR_AOK:
		return ("Successful completion");
	case XD_ERR_ICYL:
		return ("Illegal cylinder address");
	case XD_ERR_IHD:
		return ("Illegal head address");
	case XD_ERR_ISEC:
		return ("Illegal sector address");
	case XD_ERR_CZER:
		return ("Count zero");
	case XD_ERR_UIMP:
		return ("Unimplemented command");
	case XD_ERR_IF1:
		return ("Illegal field length 1");
	case XD_ERR_IF2:
		return ("Illegal field length 2");
	case XD_ERR_IF3:
		return ("Illegal field length 3");
	case XD_ERR_IF4:
		return ("Illegal field length 4");
	case XD_ERR_IF5:
		return ("Illegal field length 5");
	case XD_ERR_IF6:
		return ("Illegal field length 6");
	case XD_ERR_IF7:
		return ("Illegal field length 7");
	case XD_ERR_ISG:
		return ("Illegal scatter/gather length");
	case XD_ERR_ISPT:
		return ("Not enough sectors per track");
	case XD_ERR_ALGN:
		return ("Next IOPB address alignment error");
	case XD_ERR_SGAL:
		return ("Scatter/gather address alignment error");
	case XD_ERR_SGEC:
		return ("Scatter/gather with auto-ECC");
	case XD_ERR_SECC:
		return ("Soft ECC corrected");
	case XD_ERR_SIGN:
		return ("ECC ignored");
	case XD_ERR_ASEK:
		return ("Auto-seek retry recovered");
	case XD_ERR_RTRY:
		return ("Soft retry recovered");
	case XD_ERR_HECC:
		return ("Hard data ECC");
	case XD_ERR_NHDR:
		return ("Header not found");
	case XD_ERR_NRDY:
		return ("Drive not ready");
	case XD_ERR_TOUT:
		return ("Operation timeout");
	case XD_ERR_VTIM:
		return ("VMEDMA timeout");
	case XD_ERR_DSEQ:
		return ("Disk sequencer error");
	case XD_ERR_HDEC:
		return ("Header ECC error");
	case XD_ERR_RVFY:
		return ("Read verify");
	case XD_ERR_VFER:
		return ("Fatal VMEDMA error");
	case XD_ERR_VBUS:
		return ("VMEbus error");
	case XD_ERR_DFLT:
		return ("Drive faulted");
	case XD_ERR_HECY:
		return ("Header error/cylinder");
	case XD_ERR_HEHD:
		return ("Header error/head");
	case XD_ERR_NOCY:
		return ("Drive not on-cylinder");
	case XD_ERR_SEEK:
		return ("Seek error");
	case XD_ERR_ILSS:
		return ("Illegal sector size");
	case XD_ERR_SEC:
		return ("Soft ECC");
	case XD_ERR_WPER:
		return ("Write-protect error");
	case XD_ERR_IRAM:
		return ("IRAM self test failure");
	case XD_ERR_MT3:
		return ("Maintenance test 3 failure (DSKCEL RAM)");
	case XD_ERR_MT4:
		return ("Maintenance test 4 failure (header shift reg)");
	case XD_ERR_MT5:
		return ("Maintenance test 5 failure (VMEDMA regs)");
	case XD_ERR_MT6:
		return ("Maintenance test 6 failure (REGCEL chip)");
	case XD_ERR_MT7:
		return ("Maintenance test 7 failure (buffer parity)");
	case XD_ERR_MT8:
		return ("Maintenance test 8 failure (disk FIFO)");
	case XD_ERR_IOCK:
		return ("IOPB checksum miscompare");
	case XD_ERR_IODM:
		return ("IOPB DMA fatal");
	case XD_ERR_IOAL:
		return ("IOPB address alignment error");
	case XD_ERR_FIRM:
		return ("Firmware error");
	case XD_ERR_MMOD:
		return ("Illegal maintenance mode test number");
	case XD_ERR_ACFL:
		return ("ACFAIL asserted");
	default:
		return ("Unknown error");
	}
}